Re: [PATCH 07/13] KVM: x86: API changes for SMM support

From: Radim Krčmář
Date: Mon May 04 2015 - 11:38:30 EST


2015-04-30 13:36+0200, Paolo Bonzini:
> This patch includes changes to the external API for SMM support.
> All the changes are predicated by the availability of a new
> capability, KVM_CAP_X86_SMM, which is added at the end of the
> patch series.
>
> Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
> ---
> diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
> @@ -820,11 +820,19 @@ struct kvm_vcpu_events {
>         } nmi;
>         __u32 sipi_vector;
>         __u32 flags;
> +      struct {
> +              __u8 smm;

Intel SDM 34.3.1, "Entering SMM":
Subsequent SMI requests are not acknowledged while the processor is in
SMM. The first SMI interrupt request that occurs while the processor
is in SMM (that is, after SMM has been acknowledged to external
hardware) is latched and serviced when the processor exits SMM with
the RSM instruction. The processor will latch only one SMI while in
SMM.

The final code doesn't handle pending SMIs at all, so we'll need to
store the latched SMI somewhere and expose it to userspace here.
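
Roughly like this, I imagine -- only a sketch, and the names smi.pending
and vcpu->arch.smi_pending are made up here, not part of this patch:

struct kvm_vcpu_events {
        /* ... */
        struct {
                __u8 smm;       /* in SMM right now */
                __u8 pending;   /* one SMI latched while in SMM */
                __u8 pad[2];
        } smi;
        __u32 reserved[9];
};

static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
                                               struct kvm_vcpu_events *events)
{
        events->smi.smm = is_smm(vcpu);
        events->smi.pending = vcpu->arch.smi_pending;
        /* ... */
}

(with KVM_SET_VCPU_EVENTS restoring both under KVM_VCPUEVENT_VALID_SMM.)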

> +              __u8 pad[3];
> +      } smi;
> };
>
> -KVM_VCPUEVENT_VALID_SHADOW may be set in the flags field to signal that
> -interrupt.shadow contains a valid state. Otherwise, this field is undefined.
> +Only two fields are defined in the flags field:

(Aren't KVM_VCPUEVENT_VALID_NMI_PENDING/KVM_VCPUEVENT_VALID_SIPI_VECTOR
defined and always 1/0?)

> diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
> @@ -281,6 +283,7 @@ struct kvm_reinject_control {
> #define KVM_VCPUEVENT_VALID_NMI_PENDING 0x00000001
> #define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002
> #define KVM_VCPUEVENT_VALID_SHADOW 0x00000004
> +#define KVM_VCPUEVENT_VALID_SMM 0x00000008
>
> /* Interrupt shadow states */
> #define KVM_X86_SHADOW_INT_MOV_SS 0x01
> @@ -309,6 +312,10 @@ struct kvm_vcpu_events {
>         } nmi;
>         __u32 sipi_vector;
>         __u32 flags;
> +      struct {
> +              __u8 smm;
> +              __u8 pad[3];
> +      } smi;
>         __u32 reserved[10];

Nicely padded to use reserved[9] here :)
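
The smi struct is padded to 4 bytes, so it can eat exactly one reserved
__u32; with reserved[9], sizeof(struct kvm_vcpu_events) stays at 64 by my
count.  A build-time guard could pin that down -- a sketch, using C11
_Static_assert (kernel code would use BUILD_BUG_ON()):

/* ABI guard: smi (4 bytes) replaces one reserved __u32, size unchanged */
_Static_assert(sizeof(struct kvm_vcpu_events) == 64,
               "kvm_vcpu_events grew");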

> };
>
> diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
> +static inline bool is_smm(struct kvm_vcpu *vcpu)
> +{
> +      return vcpu->arch.hflags & HF_SMM_MASK;
> +}
> @@ -3116,29 +3121,31 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
>                                                struct kvm_vcpu_events *events)
> {
> +      events->smi.smm = is_smm(vcpu);
>
> @@ -3172,6 +3180,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
>             kvm_vcpu_has_lapic(vcpu))
>                 vcpu->arch.apic->sipi_vector = events->sipi_vector;
>
> +      if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
> +              if (events->smi.smm)
> +                      vcpu->arch.hflags |= HF_SMM_MASK;
> +              else
> +                      vcpu->arch.hflags &= ~HF_SMM_MASK;
> +      }

SMM is not really an event ... is this interface intended to be used for
emulating RSM in userspace (and after resume)?
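
If so, userspace emulating RSM would presumably finish with something like
this sketch (error handling omitted):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* After emulating RSM in userspace: drop the vcpu's SMM state in KVM,
 * i.e. clear HF_SMM_MASK through KVM_SET_VCPU_EVENTS. */
static int leave_smm(int vcpu_fd)
{
        struct kvm_vcpu_events events;

        if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
                return -1;

        events.flags = KVM_VCPUEVENT_VALID_SMM; /* only update SMM state */
        events.smi.smm = 0;

        return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}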

>         kvm_make_request(KVM_REQ_EVENT, vcpu);
>
>         return 0;
> @@ -3431,6 +3446,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
>                 r = kvm_vcpu_ioctl_nmi(vcpu);
>                 break;
>         }
> +      case KVM_SMI: {
> +              r = kvm_vcpu_ioctl_smi(vcpu);
> +              break;
> +      }
>         case KVM_SET_CPUID: {
>                 struct kvm_cpuid __user *cpuid_arg = argp;
>                 struct kvm_cpuid cpuid;
> @@ -6067,6 +6086,7 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
>         struct kvm_run *kvm_run = vcpu->run;
>
>         kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
> +      kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;

This mirrors smi.smm from the vcpu events -- was it added as an
optimization?
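
If it is meant to spare userspace a KVM_GET_VCPU_EVENTS round trip after
every exit, the usage would be roughly this sketch ('run' being the vCPU's
mmap'ed struct kvm_run):

ioctl(vcpu_fd, KVM_RUN, 0);
if (run->flags & KVM_RUN_X86_SMM) {
        /* the vCPU exited while in System Management Mode */
}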

>         kvm_run->cr8 = kvm_get_cr8(vcpu);
>         kvm_run->apic_base = kvm_get_apic_base(vcpu);
>         if (irqchip_in_kernel(vcpu->kvm))
> diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
> index 4b60056776d1..cd51f0289e9f 100644
> --- a/include/uapi/linux/kvm.h
> +++ b/include/uapi/linux/kvm.h
> @@ -202,7 +202,7 @@ struct kvm_run {
>         __u32 exit_reason;
>         __u8 ready_for_interrupt_injection;
>         __u8 if_flag;
> -      __u8 padding2[2];
> +      __u16 flags;

(I think u8 would be better; it's not yet clear that we are going to
have that many flags.  The little-endianness of x86 makes a later
extension to u16 easy, so u8 would just keep more options open.)
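
Concretely: the two spare bytes sit at an even offset, so the __u8
variant could be widened in place later, and on little-endian x86 the low
byte of the new __u16 aliases the old __u8.  A sketch, with made-up struct
names and offsets counted within these fragments:

struct run_now {                                /* __u8 today ... */
        __u32 exit_reason;                      /* offset 0 */
        __u8  ready_for_interrupt_injection;    /* offset 4 */
        __u8  if_flag;                          /* offset 5 */
        __u8  flags;                            /* offset 6 */
        __u8  padding2[1];                      /* offset 7, still spare */
};

struct run_later {                              /* ... __u16 tomorrow */
        __u32 exit_reason;
        __u8  ready_for_interrupt_injection;
        __u8  if_flag;
        __u16 flags;            /* offset 6: low byte == run_now.flags */
};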

>
>         /* in (pre_kvm_run), out (post_kvm_run) */
>         __u64 cr8;