[PART1 RFC v2 02/10] KVM: x86: Introducing kvm_x86_ops VCPU blocking/unblocking

From: Suravee Suthikulpanit
Date: Fri Mar 04 2016 - 15:48:49 EST


This patch adds two new function hooks, vcpu_blocking and vcpu_unblocking,
to struct kvm_x86_ops and calls them from kvm_arch_vcpu_blocking() and
kvm_arch_vcpu_unblocking().

These hooks will be used later by the SVM AVIC code.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
---
 arch/x86/include/asm/kvm_host.h | 17 +++++++++++++++--
 arch/x86/kvm/svm.c              | 12 ++++++++++++
 2 files changed, 27 insertions(+), 2 deletions(-)

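For context (this note and the sketch below are not part of the diff): the new
hooks are driven by the generic halt path. kvm_vcpu_block() in
virt/kvm/kvm_main.c calls kvm_arch_vcpu_blocking() before it puts the halted
vCPU thread to sleep and kvm_arch_vcpu_unblocking() once the vCPU is woken up.
Roughly (halt polling and the exact wait primitives omitted; this is a
simplified illustration, not the verbatim kernel code):

    void kvm_vcpu_block(struct kvm_vcpu *vcpu)
    {
        kvm_arch_vcpu_blocking(vcpu);       /* -> svm_vcpu_blocking() once SVM sets the hook */

        for (;;) {
            if (kvm_vcpu_check_block(vcpu) < 0) /* wakeup condition pending */
                break;
            schedule();                     /* vCPU thread sleeps while halted */
        }

        kvm_arch_vcpu_unblocking(vcpu);     /* -> svm_vcpu_unblocking() on wakeup */
    }

Since the inline wrappers below NULL-check the hooks, vendor modules that leave
them unset are unaffected.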
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 44adbb8..9a61669 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -969,6 +969,10 @@ struct kvm_x86_ops {
 	 */
 	int (*pre_block)(struct kvm_vcpu *vcpu);
 	void (*post_block)(struct kvm_vcpu *vcpu);
+
+	void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
+	void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);
+
 	int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
 			      uint32_t guest_irq, bool set);
 };
@@ -1320,7 +1324,16 @@ bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
 void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
 		     struct kvm_lapic_irq *irq);
 
-static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
+{
+	if (kvm_x86_ops->vcpu_blocking)
+		kvm_x86_ops->vcpu_blocking(vcpu);
+}
+
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
+{
+	if (kvm_x86_ops->vcpu_unblocking)
+		kvm_x86_ops->vcpu_unblocking(vcpu);
+}
 
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c13a64b..28f8618 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1265,6 +1265,16 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 }
 
+static void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
+{
+	avic_set_running(vcpu, false);
+}
+
+static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
+{
+	avic_set_running(vcpu, true);
+}
+
 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
 {
 	return to_svm(vcpu)->vmcb->save.rflags;
@@ -4321,6 +4331,8 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.prepare_guest_switch = svm_prepare_guest_switch,
 	.vcpu_load = svm_vcpu_load,
 	.vcpu_put = svm_vcpu_put,
+	.vcpu_blocking = svm_vcpu_blocking,
+	.vcpu_unblocking = svm_vcpu_unblocking,
 
 	.update_bp_intercept = update_bp_intercept,
 	.get_msr = svm_get_msr,
--
1.9.1
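
Note: avic_set_running(), used by the new svm_vcpu_blocking()/svm_vcpu_unblocking()
callbacks above, is not defined in this patch; it belongs to the AVIC patches
elsewhere in the series. Purely as an illustration of the intent (the helper
name, accessor and bit position below are hypothetical, not the code from the
series): the callbacks are expected to toggle the vCPU's IsRunning state in the
AVIC physical APIC ID table, so AVIC doorbells are only sent to a vCPU that is
actually running, while IPIs to a blocked vCPU instead cause a VMEXIT that lets
KVM wake it up.

    /* Hypothetical sketch only -- not the helper added elsewhere in this series. */
    #define AVIC_ENTRY_IS_RUNNING_BIT   (1ULL << 62)    /* assumed IsRunning bit position */

    static void avic_set_running_sketch(struct kvm_vcpu *vcpu, bool is_run)
    {
        /*
         * avic_get_physical_id_entry() is a hypothetical accessor for the
         * vCPU's entry in the AVIC physical APIC ID table.
         */
        u64 *entry = avic_get_physical_id_entry(vcpu);

        if (is_run)
            *entry |= AVIC_ENTRY_IS_RUNNING_BIT;    /* doorbells delivered directly */
        else
            *entry &= ~AVIC_ENTRY_IS_RUNNING_BIT;   /* VMEXIT instead, so KVM can wake the vCPU */
    }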