/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <runtime_svc.h>
#include "psci_private.h"

/*******************************************************************************
 * PSCI frontend API for servicing SMCs. Described in the PSCI spec.
 ******************************************************************************/
int psci_cpu_on(unsigned long target_cpu,
		unsigned long entrypoint,
		unsigned long context_id)
{
	int rc;
	unsigned int start_afflvl, end_afflvl;
	entry_point_info_t ep;

	/* Determine if the cpu exists or not */
	rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
	if (rc != PSCI_E_SUCCESS) {
		return PSCI_E_INVALID_PARAMS;
	}

	/* Validate the entrypoint using platform pm_ops */
	if (psci_plat_pm_ops->validate_ns_entrypoint) {
		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
		if (rc != PSCI_E_SUCCESS) {
			assert(rc == PSCI_E_INVALID_PARAMS);
			return PSCI_E_INVALID_PARAMS;
		}
	}

	/*
	 * Verify and derive the re-entry information for the non-secure
	 * world from the non-secure state where this call originated.
	 */
	rc = psci_get_ns_ep_info(&ep, entrypoint, context_id);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/*
	 * To turn this cpu on, specify which affinity
	 * levels need to be turned on
	 */
	start_afflvl = MPIDR_AFFLVL0;
	end_afflvl = get_max_afflvl();
	rc = psci_afflvl_on(target_cpu,
			    &ep,
			    start_afflvl,
			    end_afflvl);
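	/*
	 * rc is expected to be PSCI_E_SUCCESS once the affinity level
	 * handlers have set up the target cpu for power on; any other
	 * value is an error code that is handed back to the caller as-is.
	 */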

	return rc;
}

unsigned int psci_version(void)
{
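	/*
	 * PSCI_MAJOR_VER is assumed to be pre-shifted into the upper
	 * half-word, so OR-ing it with PSCI_MINOR_VER yields the version
	 * format defined by the PSCI specification.
	 */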
	return PSCI_MAJOR_VER | PSCI_MINOR_VER;
}

int psci_cpu_suspend(unsigned int power_state,
		     unsigned long entrypoint,
		     unsigned long context_id)
{
	int rc;
	unsigned int target_afflvl, pstate_type;
	entry_point_info_t ep;

	/* Check SBZ bits in power state are zero */
	if (psci_validate_power_state(power_state))
		return PSCI_E_INVALID_PARAMS;

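	/*
	 * The target affinity level and the state type are assumed to be
	 * encoded in 'power_state' as laid out in the PSCI specification;
	 * psci_get_pstate_afflvl() and psci_get_pstate_type() extract the
	 * respective fields below.
	 */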
	/* Sanity check the requested state */
	target_afflvl = psci_get_pstate_afflvl(power_state);
	if (target_afflvl > get_max_afflvl())
		return PSCI_E_INVALID_PARAMS;

	/* Validate the power_state using platform pm_ops */
	if (psci_plat_pm_ops->validate_power_state) {
		rc = psci_plat_pm_ops->validate_power_state(power_state);
		if (rc != PSCI_E_SUCCESS) {
			assert(rc == PSCI_E_INVALID_PARAMS);
			return PSCI_E_INVALID_PARAMS;
		}
	}

	/* Validate the entrypoint using platform pm_ops */
	if (psci_plat_pm_ops->validate_ns_entrypoint) {
		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
		if (rc != PSCI_E_SUCCESS) {
			assert(rc == PSCI_E_INVALID_PARAMS);
			return PSCI_E_INVALID_PARAMS;
		}
	}

	/* Determine the 'state type' in the 'power_state' parameter */
	pstate_type = psci_get_pstate_type(power_state);

	/*
	 * Ensure that we have a platform specific handler for entering
	 * a standby state.
	 */
	if (pstate_type == PSTATE_TYPE_STANDBY) {
		if (!psci_plat_pm_ops->affinst_standby)
			return PSCI_E_INVALID_PARAMS;

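		/*
		 * Standby states are handled entirely by the platform hook:
		 * execution is expected to resume here once the core wakes
		 * up, so no re-entry information needs to be saved.
		 */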
		psci_plat_pm_ops->affinst_standby(power_state);
		return PSCI_E_SUCCESS;
	}

	/*
	 * Verify and derive the re-entry information for the non-secure
	 * world from the non-secure state where this call originated.
	 */
	rc = psci_get_ns_ep_info(&ep, entrypoint, context_id);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/* Save PSCI power state parameter for the core in suspend context */
	psci_set_suspend_power_state(power_state);
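	/*
	 * The saved value is assumed to remain available to the platform
	 * and SPD power management hooks for the duration of the suspend
	 * sequence; it is cleared again below once the sequence completes.
	 */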

	/*
	 * Do what is needed to enter the power down state. Upon success,
	 * enter the final wfi which will power down this CPU.
	 */
	psci_afflvl_suspend(&ep,
			    MPIDR_AFFLVL0,
			    target_afflvl);

	/* Reset PSCI power state parameter for the core. */
	psci_set_suspend_power_state(PSCI_INVALID_DATA);
	return PSCI_E_SUCCESS;
}

int psci_cpu_off(void)
{
	int rc;
	int target_afflvl = get_max_afflvl();

	/*
	 * Traverse from the highest to the lowest affinity level. When the
	 * lowest affinity level is hit, all the locks are acquired. State
	 * management is done immediately, followed by cpu, cluster, ...
	 * target_afflvl specific actions as this function unwinds back.
	 */
	rc = psci_afflvl_off(MPIDR_AFFLVL0, target_afflvl);

	/*
	 * The only error cpu_off can return is E_DENIED. So check if that's
	 * indeed the case.
	 */
	assert(rc == PSCI_E_DENIED);

	return rc;
}

int psci_affinity_info(unsigned long target_affinity,
		       unsigned int lowest_affinity_level)
{
	int rc = PSCI_E_INVALID_PARAMS;
	unsigned int aff_state;
	aff_map_node_t *node;

	if (lowest_affinity_level > get_max_afflvl())
		return rc;

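	/*
	 * Look up the topology node for the requested affinity instance.
	 * A missing or absent node means the arguments do not correspond
	 * to a cpu or cluster present on this platform, so the default
	 * PSCI_E_INVALID_PARAMS is returned.
	 */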
	node = psci_get_aff_map_node(target_affinity, lowest_affinity_level);
	if (node && (node->state & PSCI_AFF_PRESENT)) {

		/*
		 * TODO: For affinity levels higher than 0 (the cpu level),
		 * the state will always be either ON or OFF. Need to
		 * investigate how critical it is to support ON_PENDING here.
		 */
		aff_state = psci_get_state(node);

		/* A suspended cpu is available & on for the OS */
		if (aff_state == PSCI_STATE_SUSPEND) {
			aff_state = PSCI_STATE_ON;
		}

		rc = aff_state;
	}

	return rc;
}

int psci_migrate(unsigned long target_cpu)
{
	int rc;
	unsigned long resident_cpu_mpidr;

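	/*
	 * MIGRATE is only meaningful when a uniprocessor, migrate-capable
	 * Trusted OS is resident. A uniprocessor Trusted OS that cannot be
	 * migrated results in DENIED; any other configuration is reported
	 * as NOT_SUPPORTED.
	 */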
	rc = psci_spd_migrate_info(&resident_cpu_mpidr);
	if (rc != PSCI_TOS_UP_MIG_CAP)
		return (rc == PSCI_TOS_NOT_UP_MIG_CAP) ?
			 PSCI_E_DENIED : PSCI_E_NOT_SUPPORTED;

	/*
	 * Migrate should only be invoked on the CPU where
	 * the Secure OS is resident.
	 */
	if (resident_cpu_mpidr != read_mpidr_el1())
		return PSCI_E_NOT_PRESENT;

	/* Check the validity of the specified target cpu */
	rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
	if (rc != PSCI_E_SUCCESS)
		return PSCI_E_INVALID_PARAMS;

	assert(psci_spd_pm && psci_spd_pm->svc_migrate);

	rc = psci_spd_pm->svc_migrate(read_mpidr_el1(), target_cpu);
	assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);

	return rc;
}

int psci_migrate_info_type(void)
{
	unsigned long resident_cpu_mpidr;

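	/*
	 * The SPD hook is assumed to return one of the MIGRATE_INFO_TYPE
	 * values defined by the PSCI specification, so it is passed back
	 * to the caller unchanged; the resident cpu mpidr is not needed
	 * here.
	 */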
	return psci_spd_migrate_info(&resident_cpu_mpidr);
}

long psci_migrate_info_up_cpu(void)
{
	unsigned long resident_cpu_mpidr;
	int rc;

	/*
	 * The resident cpu mpidr reported by psci_spd_migrate_info() is
	 * only meaningful if the Trusted OS is a uniprocessor one; for any
	 * other migrate info type the request is treated as invalid.
	 */
	rc = psci_spd_migrate_info(&resident_cpu_mpidr);
	if (rc != PSCI_TOS_NOT_UP_MIG_CAP && rc != PSCI_TOS_UP_MIG_CAP)
		return PSCI_E_INVALID_PARAMS;

	return resident_cpu_mpidr;
}

/*******************************************************************************
 * PSCI top level handler for servicing SMCs.
 ******************************************************************************/
uint64_t psci_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	if (is_caller_secure(flags))
		SMC_RET1(handle, SMC_UNK);

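	/*
	 * The calling convention field of the function id indicates whether
	 * the caller used the SMC32 or SMC64 convention; for SMC32 calls
	 * only the lower 32 bits of each parameter register are meaningful.
	 */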
	if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
		/* 32-bit PSCI function, clear top parameter bits */
		x1 = (uint32_t)x1;
		x2 = (uint32_t)x2;
		x3 = (uint32_t)x3;

		switch (smc_fid) {
		case PSCI_VERSION:
			SMC_RET1(handle, psci_version());

		case PSCI_CPU_OFF:
			SMC_RET1(handle, psci_cpu_off());

		case PSCI_CPU_SUSPEND_AARCH32:
			SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));

		case PSCI_CPU_ON_AARCH32:
			SMC_RET1(handle, psci_cpu_on(x1, x2, x3));

		case PSCI_AFFINITY_INFO_AARCH32:
			SMC_RET1(handle, psci_affinity_info(x1, x2));

		case PSCI_MIG_AARCH32:
			SMC_RET1(handle, psci_migrate(x1));

		case PSCI_MIG_INFO_TYPE:
			SMC_RET1(handle, psci_migrate_info_type());

		case PSCI_MIG_INFO_UP_CPU_AARCH32:
			SMC_RET1(handle, psci_migrate_info_up_cpu());

		case PSCI_SYSTEM_OFF:
			psci_system_off();
			/* We should never return from psci_system_off() */

		case PSCI_SYSTEM_RESET:
			psci_system_reset();
			/* We should never return from psci_system_reset() */

		default:
			break;
		}
	} else {
		/* 64-bit PSCI function */

		switch (smc_fid) {
		case PSCI_CPU_SUSPEND_AARCH64:
			SMC_RET1(handle, psci_cpu_suspend(x1, x2, x3));

		case PSCI_CPU_ON_AARCH64:
			SMC_RET1(handle, psci_cpu_on(x1, x2, x3));

		case PSCI_AFFINITY_INFO_AARCH64:
			SMC_RET1(handle, psci_affinity_info(x1, x2));

		case PSCI_MIG_AARCH64:
			SMC_RET1(handle, psci_migrate(x1));

		case PSCI_MIG_INFO_UP_CPU_AARCH64:
			SMC_RET1(handle, psci_migrate_info_up_cpu());

		default:
			break;
		}
	}

	WARN("Unimplemented PSCI Call: 0x%x\n", smc_fid);
	SMC_RET1(handle, SMC_UNK);
}
Jeenu Viswambharancaa84932014-02-06 10:36:15 +0000359}