// SPDX-License-Identifier: GPL-2.0
/*
 * Hyp portion of the (not much of an) Emulation layer for 32bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */
|
|
|
|
|
|
|
|
#include <linux/kvm_host.h>
|
|
|
|
#include <asm/kvm_emulate.h>
|
|
|
|
#include <asm/kvm_hyp.h>
|
|
|
|
|
|
|
|
/*
|
|
|
|
* stolen from arch/arm/kernel/opcodes.c
|
|
|
|
*
|
|
|
|
* condition code lookup table
|
|
|
|
* index into the table is test code: EQ, NE, ... LT, GT, AL, NV
|
|
|
|
*
|
|
|
|
* bit position in short is condition code: NZCV
|
|
|
|
*/
|
|
|
|
static const unsigned short cc_map[16] = {
|
|
|
|
0xF0F0, /* EQ == Z set */
|
|
|
|
0x0F0F, /* NE */
|
|
|
|
0xCCCC, /* CS == C set */
|
|
|
|
0x3333, /* CC */
|
|
|
|
0xFF00, /* MI == N set */
|
|
|
|
0x00FF, /* PL */
|
|
|
|
0xAAAA, /* VS == V set */
|
|
|
|
0x5555, /* VC */
|
|
|
|
0x0C0C, /* HI == C set && Z clear */
|
|
|
|
0xF3F3, /* LS == C clear || Z set */
|
|
|
|
0xAA55, /* GE == (N==V) */
|
|
|
|
0x55AA, /* LT == (N!=V) */
|
|
|
|
0x0A05, /* GT == (!Z && (N==V)) */
|
|
|
|
0xF5FA, /* LE == (Z || (N!=V)) */
|
|
|
|
0xFFFF, /* AL always */
|
|
|
|
0 /* NV */
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check if a trapped instruction should have been executed or not.
|
|
|
|
*/
|
2020-06-25 14:14:19 +01:00
|
|
|
bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
|
2019-05-22 18:47:05 +01:00
|
|
|
{
|
|
|
|
unsigned long cpsr;
|
|
|
|
u32 cpsr_cond;
|
|
|
|
int cond;
|
|
|
|
|
KVM: arm64: AArch32: Fix spurious trapping of conditional instructions
We recently upgraded the view of ESR_EL2 to 64bit, in keeping with
the requirements of the architecture.
However, the AArch32 emulation code was left unaudited, and the
(already dodgy) code that triages whether a trap is spurious or not
(because the condition code failed) broke in a subtle way:
If ESR_EL2.ISS2 is ever non-zero (unlikely, but hey, this is the ARM
architecture we're talking about), the hack that tests the top bits
of ESR_EL2.EC will break in an interesting way.
Instead, use kvm_vcpu_trap_get_class() to obtain the EC, and list
all the possible ECs that can fail a condition code check.
While we're at it, add SMC32 to the list, as it is explicitly listed
as being allowed to trap despite failing a condition code check (as
described in the HCR_EL2.TSC documentation).
Fixes: 0b12620fddb8 ("KVM: arm64: Treat ESR_EL2 as a 64-bit register")
Cc: stable@vger.kernel.org
Acked-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240524141956.1450304-4-maz@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
2024-05-24 15:19:56 +01:00
|
|
|
/*
|
|
|
|
* These are the exception classes that could fire with a
|
|
|
|
* conditional instruction.
|
|
|
|
*/
|
|
|
|
switch (kvm_vcpu_trap_get_class(vcpu)) {
|
|
|
|
case ESR_ELx_EC_CP15_32:
|
|
|
|
case ESR_ELx_EC_CP15_64:
|
|
|
|
case ESR_ELx_EC_CP14_MR:
|
|
|
|
case ESR_ELx_EC_CP14_LS:
|
|
|
|
case ESR_ELx_EC_FP_ASIMD:
|
|
|
|
case ESR_ELx_EC_CP10_ID:
|
|
|
|
case ESR_ELx_EC_CP14_64:
|
|
|
|
case ESR_ELx_EC_SVC32:
|
|
|
|
break;
|
|
|
|
default:
|
2019-05-22 18:47:05 +01:00
|
|
|
return true;
|
KVM: arm64: AArch32: Fix spurious trapping of conditional instructions
We recently upgraded the view of ESR_EL2 to 64bit, in keeping with
the requirements of the architecture.
However, the AArch32 emulation code was left unaudited, and the
(already dodgy) code that triages whether a trap is spurious or not
(because the condition code failed) broke in a subtle way:
If ESR_EL2.ISS2 is ever non-zero (unlikely, but hey, this is the ARM
architecture we're talking about), the hack that tests the top bits
of ESR_EL2.EC will break in an interesting way.
Instead, use kvm_vcpu_trap_get_class() to obtain the EC, and list
all the possible ECs that can fail a condition code check.
While we're at it, add SMC32 to the list, as it is explicitly listed
as being allowed to trap despite failing a condition code check (as
described in the HCR_EL2.TSC documentation).
Fixes: 0b12620fddb8 ("KVM: arm64: Treat ESR_EL2 as a 64-bit register")
Cc: stable@vger.kernel.org
Acked-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20240524141956.1450304-4-maz@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
2024-05-24 15:19:56 +01:00
|
|
|
}
|
2019-05-22 18:47:05 +01:00
|
|
|
|
|
|
|
/* Is condition field valid? */
|
|
|
|
cond = kvm_vcpu_get_condition(vcpu);
|
|
|
|
if (cond == 0xE)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
cpsr = *vcpu_cpsr(vcpu);
|
|
|
|
|
|
|
|
if (cond < 0) {
|
|
|
|
/* This can happen in Thumb mode: examine IT state. */
|
|
|
|
unsigned long it;
|
|
|
|
|
|
|
|
it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
|
|
|
|
|
|
|
|
/* it == 0 => unconditional. */
|
|
|
|
if (it == 0)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
/* The cond for this insn works out as the top 4 bits. */
|
|
|
|
cond = (it >> 4);
|
|
|
|
}
|
|
|
|
|
|
|
|
cpsr_cond = cpsr >> 28;
|
|
|
|
|
|
|
|
if (!((cc_map[cond] >> cpsr_cond) & 1))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2024-01-17 15:07:07 -08:00
|
|
|
* kvm_adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
|
2019-05-22 18:47:05 +01:00
|
|
|
* @vcpu: The VCPU pointer
|
|
|
|
*
|
|
|
|
* When exceptions occur while instructions are executed in Thumb IF-THEN
|
|
|
|
* blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
|
|
|
|
* to do this little bit of work manually. The fields map like this:
|
|
|
|
*
|
|
|
|
* IT[7:0] -> CPSR[26:25],CPSR[15:10]
|
|
|
|
*/
|
2020-06-25 14:14:19 +01:00
|
|
|
static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
|
2019-05-22 18:47:05 +01:00
|
|
|
{
|
|
|
|
unsigned long itbits, cond;
|
|
|
|
unsigned long cpsr = *vcpu_cpsr(vcpu);
|
|
|
|
bool is_arm = !(cpsr & PSR_AA32_T_BIT);
|
|
|
|
|
|
|
|
if (is_arm || !(cpsr & PSR_AA32_IT_MASK))
|
|
|
|
return;
|
|
|
|
|
|
|
|
cond = (cpsr & 0xe000) >> 13;
|
|
|
|
itbits = (cpsr & 0x1c00) >> (10 - 2);
|
|
|
|
itbits |= (cpsr & (0x3 << 25)) >> 25;
|
|
|
|
|
|
|
|
/* Perform ITAdvance (see page A2-52 in ARM DDI 0406C) */
|
|
|
|
if ((itbits & 0x7) == 0)
|
|
|
|
itbits = cond = 0;
|
|
|
|
else
|
|
|
|
itbits = (itbits << 1) & 0x1f;
|
|
|
|
|
|
|
|
cpsr &= ~PSR_AA32_IT_MASK;
|
|
|
|
cpsr |= cond << 13;
|
|
|
|
cpsr |= (itbits & 0x1c) << (10 - 2);
|
|
|
|
cpsr |= (itbits & 0x3) << 25;
|
|
|
|
*vcpu_cpsr(vcpu) = cpsr;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2024-01-17 15:07:07 -08:00
|
|
|
* kvm_skip_instr32 - skip a trapped instruction and proceed to the next
|
2019-05-22 18:47:05 +01:00
|
|
|
* @vcpu: The vcpu pointer
|
|
|
|
*/
|
2020-10-13 11:14:38 +01:00
|
|
|
void kvm_skip_instr32(struct kvm_vcpu *vcpu)
|
2019-05-22 18:47:05 +01:00
|
|
|
{
|
2020-04-29 11:21:55 +01:00
|
|
|
u32 pc = *vcpu_pc(vcpu);
|
2019-05-22 18:47:05 +01:00
|
|
|
bool is_thumb;
|
|
|
|
|
|
|
|
is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
|
2020-10-13 11:14:38 +01:00
|
|
|
if (is_thumb && !kvm_vcpu_trap_il_is32bit(vcpu))
|
2020-04-29 11:21:55 +01:00
|
|
|
pc += 2;
|
2019-05-22 18:47:05 +01:00
|
|
|
else
|
2020-04-29 11:21:55 +01:00
|
|
|
pc += 4;
|
|
|
|
|
|
|
|
*vcpu_pc(vcpu) = pc;
|
|
|
|
|
2019-05-22 18:47:05 +01:00
|
|
|
kvm_adjust_itstate(vcpu);
|
|
|
|
}
|