Re: [PATCH v2 1/4] KVM: PPC: Allow nested guest creation when L0 hv_guest_state > L1

From: Fabiano Rosas
Date: Wed Dec 09 2020 - 09:26:51 EST


Ravi Bangoria <ravi.bangoria@xxxxxxxxxxxxx> writes:

> On powerpc, the L1 hypervisor relies on L0 to load L2 guest state
> into the CPU via the H_ENTER_NESTED hcall. L1 prepares the L2 state
> in struct hv_guest_state and passes a pointer to it through the
> hcall. Using that pointer, L0 reads/writes that state directly
> from/to L1 memory, so L0 must know the hv_guest_state layout used
> by L1. Currently this is enforced via the version field: if L0's
> hv_guest_state.version != L1's hv_guest_state.version, L0 refuses
> to run the nested kvm guest.
>
> This restriction can be loosened a bit. L0 can be taught to
> understand older hv_guest_state layouts, provided new members are
> only ever added at the end of the structure. That is, a nested
> guest can be allowed even when L0's hv_guest_state.version > L1's
> hv_guest_state.version, though the other way around is still not
> possible.
>
> Signed-off-by: Ravi Bangoria <ravi.bangoria@xxxxxxxxxxxxx>

Reviewed-by: Fabiano Rosas <farosas@xxxxxxxxxxxxx>
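
Just to restate the compatibility rule for anyone skimming: the version
field stays first, new fields are only appended, and L0 maps each
version it knows about to the number of bytes it may copy, rejecting
only layouts newer than its own. Below is a minimal standalone
userspace sketch of that idea (hypothetical demo_* names, not the
kernel structures; offsetof()+sizeof() stands in for the kernel's
offsetofend()):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_guest_state {
        uint64_t version;       /* must stay first so it can be read before sizing */
        uint64_t lpid;
        uint64_t ppr;           /* last member of the version-1 layout */
        /* a hypothetical version-2 member would be appended here */
};

#define DEMO_STATE_VERSION 1    /* latest layout this "L0" understands */

/* Version -> size of that layout; the sketch's analogue of hv_guest_state_size(). */
static int demo_state_size(uint64_t version)
{
        switch (version) {
        case 1:
                return offsetof(struct demo_guest_state, ppr) +
                       sizeof(uint64_t);        /* offsetofend() in the kernel */
        default:
                return -1;                      /* layout newer than we know */
        }
}

int main(void)
{
        uint64_t l1_version = 1;        /* version advertised by the "L1" side */
        int size = demo_state_size(l1_version);

        /* Mirrors the relaxed check: reject only if L1 is newer than this L0. */
        if (l1_version > DEMO_STATE_VERSION || size < 0) {
                puts("reject: L1 layout is newer than this L0 understands");
                return 1;
        }
        printf("accept: copy only %d bytes of L1's guest state\n", size);
        return 0;
}

The two pieces that matter are the '>' comparison (instead of '!=') and
the version-derived size, which together let a newer L0 copy only the
prefix that an older L1 actually provides.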

> ---
> arch/powerpc/include/asm/hvcall.h | 17 +++++++--
> arch/powerpc/kvm/book3s_hv_nested.c | 53 ++++++++++++++++++++++++-----
> 2 files changed, 59 insertions(+), 11 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
> index fbb377055471..a7073fddb657 100644
> --- a/arch/powerpc/include/asm/hvcall.h
> +++ b/arch/powerpc/include/asm/hvcall.h
> @@ -524,9 +524,12 @@ struct h_cpu_char_result {
>         u64 behaviour;
> };
>
> -/* Register state for entering a nested guest with H_ENTER_NESTED */
> +/*
> + * Register state for entering a nested guest with H_ENTER_NESTED.
> + * New members must be added at the end.
> + */
> struct hv_guest_state {
> -        u64 version;            /* version of this structure layout */
> +        u64 version;            /* version of this structure layout, must be first */
>          u32 lpid;
>          u32 vcpu_token;
>          /* These registers are hypervisor privileged (at least for writing) */
> @@ -560,6 +563,16 @@ struct hv_guest_state {
> /* Latest version of hv_guest_state structure */
> #define HV_GUEST_STATE_VERSION 1
>
> +static inline int hv_guest_state_size(unsigned int version)
> +{
> +        switch (version) {
> +        case 1:
> +                return offsetofend(struct hv_guest_state, ppr);
> +        default:
> +                return -1;
> +        }
> +}
> +
> #endif /* __ASSEMBLY__ */
> #endif /* __KERNEL__ */
> #endif /* _ASM_POWERPC_HVCALL_H */
> diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
> index 33b58549a9aa..2b433c3bacea 100644
> --- a/arch/powerpc/kvm/book3s_hv_nested.c
> +++ b/arch/powerpc/kvm/book3s_hv_nested.c
> @@ -215,6 +215,45 @@ static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr)
>         }
> }
>
> +static int kvmhv_read_guest_state_and_regs(struct kvm_vcpu *vcpu,
> +                                           struct hv_guest_state *l2_hv,
> +                                           struct pt_regs *l2_regs,
> +                                           u64 hv_ptr, u64 regs_ptr)
> +{
> +        int size;
> +
> +        if (kvm_vcpu_read_guest(vcpu, hv_ptr, &(l2_hv->version),
> +                                sizeof(l2_hv->version)))
> +                return -1;
> +
> +        if (kvmppc_need_byteswap(vcpu))
> +                l2_hv->version = swab64(l2_hv->version);
> +
> +        size = hv_guest_state_size(l2_hv->version);
> +        if (size < 0)
> +                return -1;
> +
> +        return kvm_vcpu_read_guest(vcpu, hv_ptr, l2_hv, size) ||
> +                kvm_vcpu_read_guest(vcpu, regs_ptr, l2_regs,
> +                                    sizeof(struct pt_regs));
> +}
> +
> +static int kvmhv_write_guest_state_and_regs(struct kvm_vcpu *vcpu,
> +                                            struct hv_guest_state *l2_hv,
> +                                            struct pt_regs *l2_regs,
> +                                            u64 hv_ptr, u64 regs_ptr)
> +{
> +        int size;
> +
> +        size = hv_guest_state_size(l2_hv->version);
> +        if (size < 0)
> +                return -1;
> +
> +        return kvm_vcpu_write_guest(vcpu, hv_ptr, l2_hv, size) ||
> +                kvm_vcpu_write_guest(vcpu, regs_ptr, l2_regs,
> +                                     sizeof(struct pt_regs));
> +}
> +
> long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
> {
>         long int err, r;
> @@ -235,17 +274,15 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
>         hv_ptr = kvmppc_get_gpr(vcpu, 4);
>         regs_ptr = kvmppc_get_gpr(vcpu, 5);
>         vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
> -        err = kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv,
> -                                  sizeof(struct hv_guest_state)) ||
> -                kvm_vcpu_read_guest(vcpu, regs_ptr, &l2_regs,
> -                                    sizeof(struct pt_regs));
> +        err = kvmhv_read_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
> +                                              hv_ptr, regs_ptr);
>         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
>         if (err)
>                 return H_PARAMETER;
>
>         if (kvmppc_need_byteswap(vcpu))
>                 byteswap_hv_regs(&l2_hv);
> -        if (l2_hv.version != HV_GUEST_STATE_VERSION)
> +        if (l2_hv.version > HV_GUEST_STATE_VERSION)
>                 return H_P2;
>
>         if (kvmppc_need_byteswap(vcpu))
> @@ -325,10 +362,8 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
>                 byteswap_pt_regs(&l2_regs);
>         }
>         vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
> -        err = kvm_vcpu_write_guest(vcpu, hv_ptr, &l2_hv,
> -                                   sizeof(struct hv_guest_state)) ||
> -                kvm_vcpu_write_guest(vcpu, regs_ptr, &l2_regs,
> -                                     sizeof(struct pt_regs));
> +        err = kvmhv_write_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
> +                                               hv_ptr, regs_ptr);
>         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
>         if (err)
>                 return H_AUTHORITY;