/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>

#include "trace.h"
#define VCPU_NR_MODES		6
#define VCPU_REG_OFFSET_USR	0
#define VCPU_REG_OFFSET_FIQ	1
#define VCPU_REG_OFFSET_IRQ	2
#define VCPU_REG_OFFSET_SVC	3
#define VCPU_REG_OFFSET_ABT	4
#define VCPU_REG_OFFSET_UND	5
#define REG_OFFSET(_reg) \
	(offsetof(struct kvm_regs, _reg) / sizeof(u32))

#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num])
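
/*
 * Editorial note, not in the original: assuming the struct kvm_regs layout
 * from asm/kvm.h (usr_regs first, followed by the banked svc/abt/und/irq/fiq
 * arrays), USR_REG_OFFSET(n) simply evaluates to n.  For example:
 *
 *   USR_REG_OFFSET(2) == offsetof(struct kvm_regs, usr_regs.uregs[2]) / 4
 *                     == 8 / 4 == 2
 *
 * Each row of vcpu_reg_offsets[] below is therefore a table of word indices
 * into struct kvm_regs, viewed as a flat u32 array.
 */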
static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
	/* USR/SYS Registers */
	[VCPU_REG_OFFSET_USR] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
	},
	/* FIQ Registers */
	[VCPU_REG_OFFSET_FIQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7),
		REG_OFFSET(fiq_regs[0]), /* r8 */
		REG_OFFSET(fiq_regs[1]), /* r9 */
		REG_OFFSET(fiq_regs[2]), /* r10 */
		REG_OFFSET(fiq_regs[3]), /* r11 */
		REG_OFFSET(fiq_regs[4]), /* r12 */
		REG_OFFSET(fiq_regs[5]), /* r13 */
		REG_OFFSET(fiq_regs[6]), /* r14 */
	},
	/* IRQ Registers */
	[VCPU_REG_OFFSET_IRQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(irq_regs[0]), /* r13 */
		REG_OFFSET(irq_regs[1]), /* r14 */
	},
	/* SVC Registers */
	[VCPU_REG_OFFSET_SVC] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(svc_regs[0]), /* r13 */
		REG_OFFSET(svc_regs[1]), /* r14 */
	},
	/* ABT Registers */
	[VCPU_REG_OFFSET_ABT] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(abt_regs[0]), /* r13 */
		REG_OFFSET(abt_regs[1]), /* r14 */
	},
	/* UND Registers */
	[VCPU_REG_OFFSET_UND] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(und_regs[0]), /* r13 */
		REG_OFFSET(und_regs[1]), /* r14 */
	},
};
/*
 * Return a pointer to the register number valid in the current mode of
 * the virtual CPU.
 */
u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
{
	u32 *reg_array = (u32 *)&vcpu->arch.regs;
	u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
	switch (mode) {
	case USR_MODE...SVC_MODE:
		mode &= ~MODE32_BIT; /* 0 ... 3 */
		break;

	case ABT_MODE:
		mode = VCPU_REG_OFFSET_ABT;
		break;

	case UND_MODE:
		mode = VCPU_REG_OFFSET_UND;
		break;

	case SYSTEM_MODE:
		mode = VCPU_REG_OFFSET_USR;
		break;

	default:
		BUG();
	}

	return reg_array + vcpu_reg_offsets[mode][reg_num];
}
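
/*
 * Illustrative sketch (editorial, not part of the original file): an
 * emulation path that has decoded a register number from a trapped
 * instruction can access the guest register through vcpu_reg() without
 * caring which mode the guest was in; the table above transparently
 * redirects r8-r14 to the banked copies where needed.
 */
static void __maybe_unused example_write_guest_reg(struct kvm_vcpu *vcpu,
						   u8 reg_num, u32 val)
{
	/* The write lands in the banked copy for the guest's current mode. */
	*vcpu_reg(vcpu, reg_num) = val;
}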
/*
 * Return the SPSR for the current mode of the virtual CPU.
 */
u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
{
	u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;

	switch (mode) {
	case SVC_MODE:
		return &vcpu->arch.regs.KVM_ARM_SVC_spsr;
	case ABT_MODE:
		return &vcpu->arch.regs.KVM_ARM_ABT_spsr;
	case UND_MODE:
		return &vcpu->arch.regs.KVM_ARM_UND_spsr;
	case IRQ_MODE:
		return &vcpu->arch.regs.KVM_ARM_IRQ_spsr;
	case FIQ_MODE:
		return &vcpu->arch.regs.KVM_ARM_FIQ_spsr;
	default:
		BUG();
	}
}
/**
 * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
 * @vcpu:	the vcpu pointer
 * @run:	the kvm_run structure pointer
 *
 * Simply blocks the VCPU, which halts execution of world-switches and
 * schedules other host processes until there is an incoming IRQ or FIQ to
 * the VM.
 */
int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	trace_kvm_wfi(*vcpu_pc(vcpu));
	kvm_vcpu_block(vcpu);
	return 1;	/* handled; resume the guest */
}
/**
 * kvm_adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
 * @vcpu:	The VCPU pointer
 *
 * When exceptions occur while instructions are executed in Thumb IF-THEN
 * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
 * to do this little bit of work manually. The fields map like this:
 *
 * IT[7:0] -> CPSR[26:25],CPSR[15:10]
 */
static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
{
	unsigned long itbits, cond;
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_arm = !(cpsr & PSR_T_BIT);

	/* ARM-state code can never be inside an IT block. */
	BUG_ON(is_arm && (cpsr & PSR_IT_MASK));

	if (!(cpsr & PSR_IT_MASK))
		return;

	cond = (cpsr & 0xe000) >> 13;
	itbits = (cpsr & 0x1c00) >> (10 - 2);
	itbits |= (cpsr & (0x3 << 25)) >> 25;
	/* Perform ITAdvance (see page A-52 in ARM DDI 0406C) */
	if ((itbits & 0x7) == 0)
		itbits = cond = 0;
	else
		itbits = (itbits << 1) & 0x1f;

	cpsr &= ~PSR_IT_MASK;
	cpsr |= cond << 13;
	cpsr |= (itbits & 0x1c) << (10 - 2);
	cpsr |= (itbits & 0x3) << 25;
	*vcpu_cpsr(vcpu) = cpsr;
}
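
/*
 * Editorial sketch of the same ITAdvance step on a raw 8-bit ITSTATE value
 * (base condition in bits [7:5], mask in bits [4:0]), purely to illustrate
 * the logic above; it is not used by this file:
 */
static u8 __maybe_unused example_it_advance(u8 itstate)
{
	if ((itstate & 0x7) == 0)
		return 0;	/* last instruction: leave the IT block */

	/* Otherwise shift the mask left, keeping the base condition. */
	return (itstate & 0xe0) | ((itstate << 1) & 0x1f);
}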
/**
 * kvm_skip_instr - skip a trapped instruction and proceed to the next
 * @vcpu: The vcpu pointer
 */
void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	bool is_thumb;

	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
	if (is_thumb && !is_wide_instr)
		*vcpu_pc(vcpu) += 2;
	else
		*vcpu_pc(vcpu) += 4;
	kvm_adjust_itstate(vcpu);
}
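
/*
 * Usage sketch (editorial): a typical caller emulates the trapped
 * instruction and then advances the guest past it, e.g.:
 *
 *	handle_trapped_access(vcpu);		// hypothetical handler
 *	kvm_skip_instr(vcpu, was_32bit_instr);	// 2 vs 4 bytes, IT fixup
 *
 * where was_32bit_instr would come from the instruction-length bit in the
 * fault syndrome reported by the hardware.
 */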
/******************************************************************************
 * Inject exceptions into the guest
 */

static u32 exc_vector_base(struct kvm_vcpu *vcpu)
{
	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
	u32 vbar = vcpu->arch.cp15[c12_VBAR];

	if (sctlr & SCTLR_V)
		return 0xffff0000;
	else /* always have security exceptions */
		return vbar;
}
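
/*
 * Editorial example: with SCTLR.V set ("high vectors"), an undefined
 * instruction exception injected below vectors to 0xffff0000 + 4; with
 * SCTLR.V clear it vectors to VBAR + 4.  Prefetch and data aborts use the
 * architectural vector offsets 12 and 16 respectively.
 */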
/**
 * kvm_inject_undefined - inject an undefined exception into the guest
 * @vcpu: The VCPU to receive the undefined exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 *
 * Modelled after TakeUndefInstrException() pseudocode.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	u32 new_lr_value;
	u32 new_spsr_value;
	u32 cpsr = *vcpu_cpsr(vcpu);
	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
	bool is_thumb = (cpsr & PSR_T_BIT);
	u32 vect_offset = 4;
	u32 return_offset = (is_thumb) ? 2 : 4;
	new_spsr_value = cpsr;
	new_lr_value = *vcpu_pc(vcpu) - return_offset;

	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE;
	*vcpu_cpsr(vcpu) |= PSR_I_BIT;
	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);

	if (sctlr & SCTLR_TE)
		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
	if (sctlr & SCTLR_EE)
		*vcpu_cpsr(vcpu) |= PSR_E_BIT;

	/* Note: These now point to UND banked copies */
	*vcpu_spsr(vcpu) = new_spsr_value;
	*vcpu_reg(vcpu, 14) = new_lr_value;

	/* Branch to exception vector */
	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
}
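
/*
 * Usage sketch (editorial): a coprocessor emulation path that decodes a
 * guest access to a register we do not implement can report it exactly the
 * way real hardware would, e.g.:
 *
 *	if (!emulate_cp15_access(vcpu))		// hypothetical decode helper
 *		kvm_inject_undefined(vcpu);
 */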
/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException
 * pseudocode.
 */
static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
{
	u32 new_lr_value;
	u32 new_spsr_value;
	u32 cpsr = *vcpu_cpsr(vcpu);
	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
	bool is_thumb = (cpsr & PSR_T_BIT);
	u32 vect_offset;
	u32 return_offset = (is_thumb) ? 4 : 0;
	bool is_lpae;

	new_spsr_value = cpsr;
	new_lr_value = *vcpu_pc(vcpu) + return_offset;
	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE;
	*vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT;
	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);

	if (sctlr & SCTLR_TE)
		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
	if (sctlr & SCTLR_EE)
		*vcpu_cpsr(vcpu) |= PSR_E_BIT;

	/* Note: These now point to ABT banked copies */
	*vcpu_spsr(vcpu) = new_spsr_value;
	*vcpu_reg(vcpu, 14) = new_lr_value;

	/* Vector offsets: 12 = prefetch abort, 16 = data abort */
	if (is_pabt)
		vect_offset = 12;
	else
		vect_offset = 16;

	/* Branch to exception vector */
	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
	if (is_pabt) {
		/* Set IFAR and IFSR */
		vcpu->arch.cp15[c6_IFAR] = addr;
		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
		/* Always give debug fault for now - should give guest a clue */
		if (is_lpae)
			vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22;
		else
			vcpu->arch.cp15[c5_IFSR] = 2;
	} else { /* !is_pabt */
		/* Set DFAR and DFSR */
		vcpu->arch.cp15[c6_DFAR] = addr;
		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
		/* Always give debug fault for now - should give guest a clue */
		if (is_lpae)
			vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22;
		else
			vcpu->arch.cp15[c5_DFSR] = 2;
	}
}
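
/*
 * Editorial note on the FSR encodings above: 1 << 9 sets the LPAE (long
 * descriptor) format bit in the IFSR/DFSR, and 0x22 is the long-descriptor
 * "debug event" status code; plain 2 is the short-descriptor equivalent.
 * As the in-line comments say, reporting a debug event is a placeholder
 * meant to give the guest a visible clue rather than a faithful fault
 * status.
 */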
/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt(vcpu, false, addr);
}
/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt(vcpu, true, addr);
}
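
/*
 * Usage sketch (editorial): an MMIO emulation path that cannot complete a
 * guest access might hand the fault back to the guest as a data abort on
 * the faulting address, mirroring what the hardware would have done:
 *
 *	if (!handle_guest_mmio(vcpu, run, fault_ipa))	// hypothetical handler
 *		kvm_inject_dabt(vcpu, fault_va);	// fault_va: guest VA
 */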