/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;
}

static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hcr_el2;
}

static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
{
	vcpu->arch.hcr_el2 = hcr;
}

static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu))
		kvm_skip_instr32(vcpu, is_wide_instr);
	else
		*vcpu_pc(vcpu) += 4;
}
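
/*
 * Illustrative note (not part of the original header): exit handlers that
 * emulate a trapped instruction typically advance the guest PC afterwards,
 * using the ESR_EL2.IL bit to pick the instruction width, roughly:
 *
 *	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 */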

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}
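
/*
 * Illustrative sketch (not part of the original header): MMIO emulation is
 * one caller that respects the rule above, since the transfer register
 * number it passes comes straight from the ESR_EL2 ISS field, e.g.:
 *
 *	int rd = kvm_vcpu_dabt_get_rd(vcpu);	// decoded from ESR_EL2
 *
 *	if (kvm_vcpu_dabt_iswrite(vcpu))
 *		data = vcpu_get_reg(vcpu, rd);	// guest store: read Rt
 *	else
 *		vcpu_set_reg(vcpu, rd, data);	// guest load: write Rt
 */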

/* Get vcpu SPSR for current mode */
static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_spsr32(vcpu);

	return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
		return mode > COMPAT_PSR_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

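/*
 * Added note: HPFAR_EL2 reports the faulting IPA shifted right by 12 bits,
 * in a field starting at bit 4 of the register. Masking with HPFAR_MASK and
 * shifting left by 8 therefore yields the page-aligned IPA; bits [11:0] of
 * the faulting address are not reported here.
 */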
static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_EA);
}

static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) >> ESR_ELx_EC_SHIFT;
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		*vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
	else
		vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25);
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);

	return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;	/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;	/* Leave LE untouched */
}
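
/*
 * Illustrative sketch (not part of the original header): when an MMIO load
 * completes, the value read on the host side is converted to the guest's
 * current data endianness before being written back to the transfer
 * register, roughly:
 *
 *	data = vcpu_data_host_to_guest(vcpu, data, len);
 *	vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
 */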
305
Marc Zyngier83a49792012-12-10 13:27:52 +0000306#endif /* __ARM64_KVM_EMULATE_H__ */