/*
 * Copyright (C) 2012 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <linux/wait.h>

#include <asm/cputype.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_psci.h>

/*
 * This is an implementation of the Power State Coordination Interface
 * as described in ARM document number ARM DEN 0022A.
 */

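/*
 * CPU_OFF: mark the calling VCPU as paused, which is how a powered-off
 * CPU is modelled here (see the pause check in kvm_psci_vcpu_on()).
 */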
static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
        vcpu->arch.pause = true;
}

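/*
 * CPU_ON: r1/x1 holds the MPIDR of the CPU to power up and r2/x2 the
 * entry point. Find the VCPU whose MPIDR matches, make sure it is
 * currently off (paused), reset it, set up its entry state and wake it.
 */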
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
        struct kvm *kvm = source_vcpu->kvm;
        struct kvm_vcpu *vcpu = NULL, *tmp;
        wait_queue_head_t *wq;
        unsigned long cpu_id;
        unsigned long mpidr;
        phys_addr_t target_pc;
        int i;

        cpu_id = *vcpu_reg(source_vcpu, 1);
        if (vcpu_mode_is_32bit(source_vcpu))
                cpu_id &= ~((u32) 0);

        kvm_for_each_vcpu(i, tmp, kvm) {
                mpidr = kvm_vcpu_get_mpidr(tmp);
                if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) {
                        vcpu = tmp;
                        break;
                }
        }

        /*
         * Make sure the caller requested a valid CPU and that the CPU is
         * turned off.
         */
        if (!vcpu || !vcpu->arch.pause)
                return PSCI_RET_INVALID_PARAMS;

        target_pc = *vcpu_reg(source_vcpu, 2);

        kvm_reset_vcpu(vcpu);

        /* Gracefully handle Thumb2 entry point */
        if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
                target_pc &= ~((phys_addr_t) 1);
                vcpu_set_thumb(vcpu);
        }

        /* Propagate caller endianness */
        if (kvm_vcpu_is_be(source_vcpu))
                kvm_vcpu_set_be(vcpu);

        *vcpu_pc(vcpu) = target_pc;
        vcpu->arch.pause = false;
        smp_mb();               /* Make sure the above is visible */

        wq = kvm_arch_vcpu_wq(vcpu);
        wake_up_interruptible(wq);

        return PSCI_RET_SUCCESS;
}

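/*
 * Fill in the run structure so that the next return to user space
 * reports a KVM_EXIT_SYSTEM_EVENT of the given type.
 */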
static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
        memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
        vcpu->run->system_event.type = type;
        vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
        kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
}

static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
        kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
}

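/*
 * The PSCI version exposed to the guest depends on whether user space
 * requested the KVM_ARM_VCPU_PSCI_0_2 feature for this VCPU.
 */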
int kvm_psci_version(struct kvm_vcpu *vcpu)
{
        if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
                return KVM_ARM_PSCI_0_2;

        return KVM_ARM_PSCI_0_1;
}

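/*
 * Handle a PSCI v0.2 call. Returns 1 when the guest can be resumed,
 * 0 when the call requires an exit to user space (SYSTEM_OFF and
 * SYSTEM_RESET), and -EINVAL for unknown function IDs.
 */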
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
        int ret = 1;
        unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
        unsigned long val;

        switch (psci_fn) {
        case PSCI_0_2_FN_PSCI_VERSION:
                /*
                 * Bits[31:16] = Major Version = 0
                 * Bits[15:0] = Minor Version = 2
                 */
                val = 2;
                break;
        case PSCI_0_2_FN_CPU_OFF:
                kvm_psci_vcpu_off(vcpu);
                val = PSCI_RET_SUCCESS;
                break;
        case PSCI_0_2_FN_CPU_ON:
        case PSCI_0_2_FN64_CPU_ON:
                val = kvm_psci_vcpu_on(vcpu);
                break;
        case PSCI_0_2_FN_SYSTEM_OFF:
                kvm_psci_system_off(vcpu);
                /*
                 * We shouldn't be going back to the guest VCPU after
                 * receiving a SYSTEM_OFF request.
                 *
                 * If user space accidentally or deliberately resumes the
                 * guest VCPU after a SYSTEM_OFF request, the guest VCPU
                 * should see an internal failure from the PSCI return
                 * value. To achieve this, we preload r0 (or x0) with
                 * the PSCI return value INTERNAL_FAILURE.
                 */
                val = PSCI_RET_INTERNAL_FAILURE;
                ret = 0;
                break;
        case PSCI_0_2_FN_SYSTEM_RESET:
                kvm_psci_system_reset(vcpu);
                /*
                 * Same reason as SYSTEM_OFF for preloading r0 (or x0)
                 * with PSCI return value INTERNAL_FAILURE.
                 */
                val = PSCI_RET_INTERNAL_FAILURE;
                ret = 0;
                break;
        case PSCI_0_2_FN_CPU_SUSPEND:
        case PSCI_0_2_FN_AFFINITY_INFO:
        case PSCI_0_2_FN_MIGRATE:
        case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        case PSCI_0_2_FN_MIGRATE_INFO_UP_CPU:
        case PSCI_0_2_FN64_CPU_SUSPEND:
        case PSCI_0_2_FN64_AFFINITY_INFO:
        case PSCI_0_2_FN64_MIGRATE:
        case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
                val = PSCI_RET_NOT_SUPPORTED;
                break;
        default:
                return -EINVAL;
        }

        *vcpu_reg(vcpu, 0) = val;
        return ret;
}

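/*
 * Handle a PSCI v0.1 call using the KVM-specific function IDs. Only
 * CPU_OFF and CPU_ON are implemented; CPU_SUSPEND and MIGRATE report
 * PSCI_RET_NOT_SUPPORTED.
 */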
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
        unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
        unsigned long val;

        switch (psci_fn) {
        case KVM_PSCI_FN_CPU_OFF:
                kvm_psci_vcpu_off(vcpu);
                val = PSCI_RET_SUCCESS;
                break;
        case KVM_PSCI_FN_CPU_ON:
                val = kvm_psci_vcpu_on(vcpu);
                break;
        case KVM_PSCI_FN_CPU_SUSPEND:
        case KVM_PSCI_FN_MIGRATE:
                val = PSCI_RET_NOT_SUPPORTED;
                break;
        default:
                return -EINVAL;
        }

        *vcpu_reg(vcpu, 0) = val;
        return 1;
}

/**
 * kvm_psci_call - handle PSCI call if r0 value is in range
 * @vcpu: Pointer to the VCPU struct
 *
 * Handle PSCI calls from guests through traps from HVC instructions.
 * The calling convention is similar to SMC calls to the secure world
 * where the function number is placed in r0.
 *
 * This function returns: > 0 (success), 0 (success but exit to user
 * space), and < 0 (errors)
 *
 * Errors:
 * -EINVAL: Unrecognized PSCI function
 */
int kvm_psci_call(struct kvm_vcpu *vcpu)
{
        switch (kvm_psci_version(vcpu)) {
        case KVM_ARM_PSCI_0_2:
                return kvm_psci_0_2_call(vcpu);
        case KVM_ARM_PSCI_0_1:
                return kvm_psci_0_1_call(vcpu);
        default:
                return -EINVAL;
        }
}