[PATCH v2 06/15] kvm: x86: Add support for activating/deactivating APICv at runtime

From: Suthikulpanit, Suravee
Date: Thu Aug 15 2019 - 12:25:20 EST


Certain runtime conditions can require APICv to be temporarily deactivated.
However, the current implementation only supports permanently deactivating
APICv at runtime (mainly used when running a Hyper-V guest).

In addition, for AMD, when activating or deactivating APICv at runtime,
all vcpus in the VM have to operate in the same APICv mode, which requires
the requesting (main) vcpu to notify the others.

So, introduce interfaces to request that all vcpus activate or deactivate
APICv.
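
For illustration only (not part of this patch), a minimal sketch of how a
vendor-side caller could use the new interfaces; the function name and the
"incompatible" trigger condition below are hypothetical:

    /* Hypothetical caller, assuming kvm_host.h context. */
    static void example_set_apicv(struct kvm_vcpu *vcpu, bool incompatible)
    {
            if (incompatible)
                    /* Request all vcpus temporarily deactivate APICv. */
                    kvm_make_apicv_deactivate_request(vcpu, false);
            else
                    /* Request all vcpus re-activate APICv. */
                    kvm_make_apicv_activate_request(vcpu);
    }

Each vcpu then handles KVM_REQ_APICV_ACTIVATE / KVM_REQ_APICV_DEACTIVATE in
vcpu_enter_guest() before re-entering the guest.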

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
---
arch/x86/include/asm/kvm_host.h | 9 +++++
arch/x86/kvm/x86.c | 76 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 85 insertions(+)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 04d7066..dfb7c3d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -76,6 +76,10 @@
#define KVM_REQ_HV_STIMER KVM_ARCH_REQ(22)
#define KVM_REQ_LOAD_EOI_EXITMAP KVM_ARCH_REQ(23)
#define KVM_REQ_GET_VMCS12_PAGES KVM_ARCH_REQ(24)
+#define KVM_REQ_APICV_ACTIVATE \
+ KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_APICV_DEACTIVATE \
+ KVM_ARCH_REQ_FLAGS(26, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)

#define CR0_RESERVED_BITS \
(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
@@ -1089,6 +1093,7 @@ struct kvm_x86_ops {
void (*enable_irq_window)(struct kvm_vcpu *vcpu);
void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
bool (*get_enable_apicv)(struct kvm *kvm);
+ void (*pre_update_apicv_exec_ctrl)(struct kvm_vcpu *vcpu, bool activate);
void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
@@ -1552,6 +1557,10 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,

void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);
+void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);
+void kvm_vcpu_activate_apicv(struct kvm_vcpu *vcpu);
+void kvm_make_apicv_activate_request(struct kvm_vcpu *vcpu);
+void kvm_make_apicv_deactivate_request(struct kvm_vcpu *vcpu, bool disable);

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f9c3f63..40a20bf 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -26,6 +26,7 @@
#include "cpuid.h"
#include "pmu.h"
#include "hyperv.h"
+#include "lapic.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
@@ -7163,6 +7164,22 @@ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
}

+void kvm_vcpu_activate_apicv(struct kvm_vcpu *vcpu)
+{
+ if (!lapic_in_kernel(vcpu)) {
+ WARN_ON_ONCE(!vcpu->arch.apicv_active);
+ return;
+ }
+ if (vcpu->arch.apicv_active)
+ return;
+
+ vcpu->arch.apicv_active = true;
+ kvm_apic_update_apicv(vcpu);
+
+ kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_activate_apicv);
+
void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
{
if (!lapic_in_kernel(vcpu)) {
@@ -7173,8 +7190,11 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
return;

vcpu->arch.apicv_active = false;
+ kvm_apic_update_apicv(vcpu);
+
kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
}
+EXPORT_SYMBOL_GPL(kvm_vcpu_deactivate_apicv);

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
@@ -7668,6 +7688,58 @@ void kvm_make_scan_ioapic_request(struct kvm *kvm)
kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
}

+void kvm_make_apicv_activate_request(struct kvm_vcpu *vcpu)
+{
+ int i;
+ struct kvm_vcpu *v;
+ struct kvm *kvm = vcpu->kvm;
+
+ mutex_lock(&kvm->arch.apicv_lock);
+ if (kvm->arch.apicv_state != APICV_DEACTIVATED) {
+ mutex_unlock(&kvm->arch.apicv_lock);
+ return;
+ }
+
+ kvm_for_each_vcpu(i, v, kvm)
+ kvm_clear_request(KVM_REQ_APICV_DEACTIVATE, v);
+
+ if (kvm_x86_ops->pre_update_apicv_exec_ctrl)
+ kvm_x86_ops->pre_update_apicv_exec_ctrl(vcpu, true);
+
+ kvm->arch.apicv_state = APICV_ACTIVATED;
+
+ kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_ACTIVATE);
+
+ mutex_unlock(&kvm->arch.apicv_lock);
+}
+EXPORT_SYMBOL_GPL(kvm_make_apicv_activate_request);
+
+void kvm_make_apicv_deactivate_request(struct kvm_vcpu *vcpu, bool disable)
+{
+ int i;
+ struct kvm_vcpu *v;
+ struct kvm *kvm = vcpu->kvm;
+
+ mutex_lock(&kvm->arch.apicv_lock);
+ if (kvm->arch.apicv_state == APICV_DEACTIVATED) {
+ mutex_unlock(&kvm->arch.apicv_lock);
+ return;
+ }
+
+ kvm_for_each_vcpu(i, v, kvm)
+ kvm_clear_request(KVM_REQ_APICV_ACTIVATE, v);
+
+ if (kvm_x86_ops->pre_update_apicv_exec_ctrl)
+ kvm_x86_ops->pre_update_apicv_exec_ctrl(vcpu, false);
+
+ kvm->arch.apicv_state = disable ? APICV_DISABLED : APICV_DEACTIVATED;
+
+ kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_DEACTIVATE);
+
+ mutex_unlock(&kvm->arch.apicv_lock);
+}
+EXPORT_SYMBOL_GPL(kvm_make_apicv_deactivate_request);
+
static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
{
if (!kvm_apic_present(vcpu))
@@ -7854,6 +7926,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
*/
if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
kvm_hv_process_stimers(vcpu);
+ if (kvm_check_request(KVM_REQ_APICV_ACTIVATE, vcpu))
+ kvm_vcpu_activate_apicv(vcpu);
+ if (kvm_check_request(KVM_REQ_APICV_DEACTIVATE, vcpu))
+ kvm_vcpu_deactivate_apicv(vcpu);
}

if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
--
1.8.3.1