[PATCH 4.14 031/173] KVM: arm64: Only sign-extend MMIO up to register width

From: Greg Kroah-Hartman
Date: Thu Feb 13 2020 - 10:59:13 EST


From: Christoffer Dall <christoffer.dall@xxxxxxx>

commit b6ae256afd32f96bec0117175b329d0dd617655e upstream.

On AArch64 you can do a sign-extended load to either a 32-bit or a 64-bit
register, and we should only sign-extend the value up to the width of the
destination register specified in the operation (via the 32-bit Wn or
64-bit Xn register specifier).

As it turns out, the architecture provides this decoding information in
the SF ("Sixty-Four" -- how cute...) bit.

Let's take advantage of this with the usual 32-bit/64-bit header file
dance and do the right thing on AArch64 hosts.
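
To make the intended result concrete, here is a minimal user-space sketch
(not the kernel code itself; the helper name mmio_read_to_reg and its
parameters are made up for illustration), assuming sign_extend reflects
ESR_ELx.SSE and sixty_four reflects the SF bit:

#include <stdint.h>
#include <stdio.h>

static uint64_t mmio_read_to_reg(uint64_t data, unsigned int len,
				 int sign_extend, int sixty_four)
{
	if (sign_extend && len < 8) {
		/* sign bit of the access size, e.g. bit 7 for a byte load */
		uint64_t mask = 1ULL << (len * 8 - 1);

		/* sign-extend to the full 64 bits */
		data = (data ^ mask) - mask;
	}

	/* SF clear: destination is Wn, so the upper 32 bits must be zero */
	if (!sixty_four)
		data &= 0xffffffff;

	return data;
}

int main(void)
{
	/* ldrsb w0, [x1] reading 0x80 (SSE=1, SF=0) */
	printf("0x%llx\n", (unsigned long long)mmio_read_to_reg(0x80, 1, 1, 0));
	/* prints 0xffffff80, not 0xffffffffffffff80 */

	/* ldrsb x0, [x1] reading 0x80 (SSE=1, SF=1) */
	printf("0x%llx\n", (unsigned long long)mmio_read_to_reg(0x80, 1, 1, 1));
	/* prints 0xffffffffffffff80 */
	return 0;
}

The masking after sign extension mirrors the order used in
kvm_handle_mmio_return() below: extend from the access size first, then
clamp to 32 bits when the destination is a Wn register.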

Signed-off-by: Christoffer Dall <christoffer.dall@xxxxxxx>
Signed-off-by: Marc Zyngier <maz@xxxxxxxxxx>
Cc: stable@xxxxxxxxxxxxxxx
Link: https://lore.kernel.org/r/20191212195055.5541-1-christoffer.dall@xxxxxxx
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
 arch/arm/include/asm/kvm_emulate.h   | 5 +++++
 arch/arm/include/asm/kvm_mmio.h      | 2 ++
 arch/arm64/include/asm/kvm_emulate.h | 5 +++++
 arch/arm64/include/asm/kvm_mmio.h    | 6 ++----
 virt/kvm/arm/mmio.c                  | 6 ++++++
 5 files changed, 20 insertions(+), 4 deletions(-)

--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -144,6 +144,11 @@ static inline bool kvm_vcpu_dabt_issext(
return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
}

+static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
{
return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
--- a/arch/arm/include/asm/kvm_mmio.h
+++ b/arch/arm/include/asm/kvm_mmio.h
@@ -26,6 +26,8 @@
struct kvm_decode {
unsigned long rt;
bool sign_extend;
+ /* Not used on 32-bit arm */
+ bool sixty_four;
};

void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -188,6 +188,11 @@ static inline bool kvm_vcpu_dabt_issext(
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

+static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
+{
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
+}
+
static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
--- a/arch/arm64/include/asm/kvm_mmio.h
+++ b/arch/arm64/include/asm/kvm_mmio.h
@@ -21,13 +21,11 @@
#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>

-/*
- * This is annoying. The mmio code requires this, even if we don't
- * need any decoding. To be fixed.
- */
struct kvm_decode {
unsigned long rt;
bool sign_extend;
+ /* Width of the register accessed by the faulting instruction is 64 bits */
+ bool sixty_four;
};

void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
--- a/virt/kvm/arm/mmio.c
+++ b/virt/kvm/arm/mmio.c
@@ -117,6 +117,9 @@ int kvm_handle_mmio_return(struct kvm_vc
data = (data ^ mask) - mask;
}

+ if (!vcpu->arch.mmio_decode.sixty_four)
+ data = data & 0xffffffff;
+
trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
&data);
data = vcpu_data_host_to_guest(vcpu, data, len);
@@ -137,6 +140,7 @@ static int decode_hsr(struct kvm_vcpu *v
unsigned long rt;
int access_size;
bool sign_extend;
+ bool sixty_four;

if (kvm_vcpu_dabt_iss1tw(vcpu)) {
/* page table accesses IO mem: tell guest to fix its TTBR */
@@ -150,11 +154,13 @@ static int decode_hsr(struct kvm_vcpu *v

*is_write = kvm_vcpu_dabt_iswrite(vcpu);
sign_extend = kvm_vcpu_dabt_issext(vcpu);
+ sixty_four = kvm_vcpu_dabt_issf(vcpu);
rt = kvm_vcpu_dabt_get_rd(vcpu);

*len = access_size;
vcpu->arch.mmio_decode.sign_extend = sign_extend;
vcpu->arch.mmio_decode.rt = rt;
+ vcpu->arch.mmio_decode.sixty_four = sixty_four;

return 0;
}