Re: [PATCH v5 6/6] KVM: nVMX: Enable nested posted interrupt processing

From: Paolo Bonzini
Date: Tue Feb 03 2015 - 09:02:50 EST




On 03/02/2015 10:17, Wincy Van wrote:
> +static int vmx_accomp_nested_posted_interrupt(struct kvm_vcpu *vcpu)

Replace accomp with complete, i.e. vmx_complete_nested_posted_interrupt().

> +{
> +	struct vcpu_vmx *vmx = to_vmx(vcpu);
> +	int max_irr;
> +	void *vapic_page;
> +	u16 status;
> +
> +	if (vmx->nested.posted_intr_nv != -1 &&

Testing posted_intr_nv is not necessary; vmx->nested.pi_desc is only set up
when vmcs12 enables posted interrupts.

> +	    vmx->nested.pi_desc &&
> +	    vmx->nested.accomp_pir) {

Replace accomp_pir with pi_pending.
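
With those two changes the check at the top becomes simply (untested):

	if (vmx->nested.pi_desc && vmx->nested.pi_pending) {
		vmx->nested.pi_pending = false;
		if (!pi_test_and_clear_on(vmx->nested.pi_desc))
			return 0;
		...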

Like the other patches in the series, this one has corrupted spaces and tabs.

Paolo

> +		vmx->nested.accomp_pir = false;
> +		if (!pi_test_and_clear_on(vmx->nested.pi_desc))
> +			return 0;
> +
> +		max_irr = find_last_bit(
> +			(unsigned long *)vmx->nested.pi_desc->pir, 256);
> +
> +		if (max_irr == 256)
> +			return 0;
> +
> +		vapic_page = kmap(vmx->nested.virtual_apic_page);
> +		if (!vapic_page) {
> +			WARN_ON(1);
> +			return -ENOMEM;
> +		}
> +		__kvm_apic_update_irr(vmx->nested.pi_desc->pir, vapic_page);
> +		kunmap(vmx->nested.virtual_apic_page);
> +
> +		status = vmcs_read16(GUEST_INTR_STATUS);
> +		if ((u8)max_irr > ((u8)status & 0xff)) {
> +			status &= ~0xff;
> +			status |= (u8)max_irr;
> +			vmcs_write16(GUEST_INTR_STATUS, status);
> +		}
> +	}
> +	return 0;
> +}
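
For reference, the max_irr/RVI handling above scans the 256-bit PIR for the
highest pending vector and, if it is larger than the current RVI (the low
byte of GUEST_INTR_STATUS), writes it back.  A stand-alone user-space sketch
of that arithmetic, with find_last_bit() open-coded via a GCC builtin
(illustration only, nothing below is kernel API):

#include <stdint.h>
#include <stdio.h>

/* Equivalent of find_last_bit() on a 256-bit map: highest set bit, or 256. */
static int pir_find_last_bit(const uint64_t pir[4])
{
	for (int word = 3; word >= 0; word--)
		if (pir[word])
			return word * 64 + 63 - __builtin_clzll(pir[word]);
	return 256;
}

int main(void)
{
	uint64_t pir[4] = { 0 };
	uint16_t status = 0x00b0;	/* SVI in bits 15:8, RVI in bits 7:0 */
	int max_irr;

	pir[0xd1 / 64] |= 1ULL << (0xd1 % 64);	/* vector 0xd1 pending */

	max_irr = pir_find_last_bit(pir);
	if (max_irr != 256 && (uint8_t)max_irr > (uint8_t)(status & 0xff)) {
		status &= ~0xff;		/* replace RVI... */
		status |= (uint8_t)max_irr;	/* ...with the new maximum */
	}
	printf("max_irr = 0x%x, GUEST_INTR_STATUS = 0x%04x\n", max_irr, status);
	return 0;
}
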
> +
> +static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
> +						int vector)
> +{
> +	struct vcpu_vmx *vmx = to_vmx(vcpu);
> +
> +	if (is_guest_mode(vcpu) &&
> +	    vector == vmx->nested.posted_intr_nv) {
> +		/* the PIR and ON have been set by L1. */
> +		if (vcpu->mode == IN_GUEST_MODE)
> +			apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
> +				POSTED_INTR_VECTOR);
> +		/*
> +		 * If a posted intr is not recognized by hardware,
> +		 * we will accomplish it in the next vmentry.
> +		 */
> +		vmx->nested.accomp_pir = true;
> +		kvm_make_request(KVM_REQ_EVENT, vcpu);
> +		return 0;
> +	}
> +	return -1;
> +}
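
On the delivery side: when the vCPU is in guest mode and the vector matches
L1's notification vector, the physical POSTED_INTR_VECTOR IPI is sent if the
vCPU is currently running, letting the CPU sync L1's PIR in hardware; in any
case the interrupt is also flagged (and KVM_REQ_EVENT raised) so software
completes it on the next vmentry.  A stand-alone sketch of that decision,
with made-up types standing in for the KVM ones (illustration only):

#include <stdbool.h>
#include <stdio.h>

enum vcpu_mode { OUTSIDE_GUEST_MODE, IN_GUEST_MODE };

struct vcpu_state {
	enum vcpu_mode mode;	/* is the vCPU in VMX non-root mode right now? */
	int nested_nv;		/* L1's chosen notification vector */
	bool pi_pending;	/* complete via software on the next vmentry */
};

/* Returns true if the vector was handled as a nested posted interrupt. */
static bool deliver_nested_posted_interrupt(struct vcpu_state *v, int vector)
{
	if (vector != v->nested_nv)
		return false;		/* take the ordinary delivery path */

	if (v->mode == IN_GUEST_MODE)
		printf("send POSTED_INTR_VECTOR IPI to the target pCPU\n");

	/* Even if the IPI was sent, hardware may not have noticed it in
	 * time, so always flag the interrupt for the next vmentry. */
	v->pi_pending = true;
	return true;
}

int main(void)
{
	struct vcpu_state v = { OUTSIDE_GUEST_MODE, 0xf2, false };

	if (!deliver_nested_posted_interrupt(&v, 0xf2))
		printf("not a nested posted interrupt\n");
	printf("pi_pending = %d\n", v.pi_pending);
	return 0;
}
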
>  /*
>   * Send interrupt to vcpu via posted interrupt way.
>   * 1. If target vcpu is running(non-root mode), send posted interrupt
> @@ -4329,6 +4405,10 @@ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
>  	int r;
>
> +	r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
> +	if (!r)
> +		return;
> +
>  	if (pi_test_and_set_pir(vector, &vmx->pi_desc))
>  		return;
>
> @@ -6591,6 +6671,7 @@ static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
>  		vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
>  		vmcs_write64(VMCS_LINK_POINTER, -1ull);
>  	}
> +	vmx->nested.posted_intr_nv = -1;
>  	kunmap(vmx->nested.current_vmcs12_page);
>  	nested_release_page(vmx->nested.current_vmcs12_page);
>  	vmx->nested.current_vmptr = -1ull;
> @@ -6619,6 +6700,12 @@ static void free_nested(struct vcpu_vmx *vmx)
>  		nested_release_page(vmx->nested.virtual_apic_page);
>  		vmx->nested.virtual_apic_page = NULL;
>  	}
> +	if (vmx->nested.pi_desc_page) {
> +		kunmap(vmx->nested.pi_desc_page);
> +		nested_release_page(vmx->nested.pi_desc_page);
> +		vmx->nested.pi_desc_page = NULL;
> +		vmx->nested.pi_desc = NULL;
> +	}
>
>  	nested_free_all_saved_vmcss(vmx);
>  }
> @@ -8333,6 +8420,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
>  	if (nested)
>  		nested_vmx_setup_ctls_msrs(vmx);
>
> +	vmx->nested.posted_intr_nv = -1;
>  	vmx->nested.current_vmptr = -1ull;
>  	vmx->nested.current_vmcs12 = NULL;
>
> @@ -8578,6 +8666,31 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
>  			return false;
>  	}
>
> +	if (nested_cpu_has_posted_intr(vmcs12)) {
> +		if (!IS_ALIGNED(vmcs12->posted_intr_desc_addr, 64))
> +			return false;
> +
> +		if (vmx->nested.pi_desc_page) { /* shouldn't happen */
> +			kunmap(vmx->nested.pi_desc_page);
> +			nested_release_page(vmx->nested.pi_desc_page);
> +		}
> +		vmx->nested.pi_desc_page =
> +			nested_get_page(vcpu, vmcs12->posted_intr_desc_addr);
> +		if (!vmx->nested.pi_desc_page)
> +			return false;
> +
> +		vmx->nested.pi_desc =
> +			(struct pi_desc *)kmap(vmx->nested.pi_desc_page);
> +		if (!vmx->nested.pi_desc) {
> +			nested_release_page_clean(vmx->nested.pi_desc_page);
> +			return false;
> +		}
> +		vmx->nested.pi_desc =
> +			(struct pi_desc *)((void *)vmx->nested.pi_desc +
> +			(unsigned long)(vmcs12->posted_intr_desc_addr &
> +			(PAGE_SIZE - 1)));
> +	}
> +
>  	return true;
>  }
>
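
For reference, the descriptor handling above is a 64-byte alignment check on
the guest-physical address plus the address' offset within its page (the same
offset is later added to page_to_phys() for POSTED_INTR_DESC_ADDR in
prepare_vmcs02).  A stand-alone sketch of that arithmetic, assuming 4 KiB
pages (illustration only, the address is made up):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

int main(void)
{
	/* Stand-in for vmcs12->posted_intr_desc_addr. */
	uint64_t desc_addr = 0x12345040ULL;

	if (desc_addr & 0x3f) {		/* i.e. !IS_ALIGNED(desc_addr, 64) */
		fprintf(stderr, "descriptor not 64-byte aligned\n");
		return 1;
	}

	/* kmap() returns the start of the mapped page; the descriptor lives
	 * at the address' offset inside that page. */
	printf("offset within page = 0x%llx\n",
	       (unsigned long long)(desc_addr & (PAGE_SIZE - 1)));
	return 0;
}
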
> @@ -8713,7 +8826,8 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
>  {
>  	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
>  	    !nested_cpu_has_apic_reg_virt(vmcs12) &&
> -	    !nested_cpu_has_vid(vmcs12))
> +	    !nested_cpu_has_vid(vmcs12) &&
> +	    !nested_cpu_has_posted_intr(vmcs12))
>  		return 0;
>
>  	/*
> @@ -8732,6 +8846,17 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
>  	   !nested_exit_on_intr(vcpu))
>  		return -EINVAL;
>
> +	/*
> +	 * bits 15:8 should be zero in posted_intr_nv,
> +	 * the descriptor address has been already checked
> +	 * in nested_get_vmcs12_pages.
> +	 */
> +	if (nested_cpu_has_posted_intr(vmcs12) &&
> +	   (!nested_cpu_has_vid(vmcs12) ||
> +	    !nested_exit_intr_ack_set(vcpu) ||
> +	    vmcs12->posted_intr_nv & 0xff00))
> +		return -EINVAL;
> +
>  	/* tpr shadow is needed by all apicv features. */
>  	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
>  		return -EINVAL;
> @@ -8974,8 +9099,20 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>
>  	exec_control = vmcs12->pin_based_vm_exec_control;
>  	exec_control |= vmcs_config.pin_based_exec_ctrl;
> -	exec_control &= ~(PIN_BASED_VMX_PREEMPTION_TIMER |
> -			PIN_BASED_POSTED_INTR);
> +	exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
> +
> +	if (nested_cpu_has_posted_intr(vmcs12)) {
> +		/* Note that we use L0's vector to avoid unexpected intr. */
> +		vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
> +		vmx->nested.accomp_pir = false;
> +		vmcs_write64(POSTED_INTR_NV, POSTED_INTR_VECTOR);
> +		vmcs_write64(POSTED_INTR_DESC_ADDR,
> +			page_to_phys(vmx->nested.pi_desc_page) +
> +			(unsigned long)(vmcs12->posted_intr_desc_addr &
> +			(PAGE_SIZE - 1)));
> +	} else
> +		exec_control &= ~PIN_BASED_POSTED_INTR;
> +
>  	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
>
>  	vmx->nested.preemption_timer_expired = false;
> @@ -9511,9 +9648,10 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
>  		if (vmx->nested.nested_run_pending)
>  			return -EBUSY;
>  		nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
> +		return 0;
>  	}
>
> -	return 0;
> +	return vmx_accomp_nested_posted_interrupt(vcpu);
>  }
>
>  static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
> @@ -9891,6 +10029,12 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
>  		nested_release_page(vmx->nested.virtual_apic_page);
>  		vmx->nested.virtual_apic_page = NULL;
>  	}
> +	if (vmx->nested.pi_desc_page) {
> +		kunmap(vmx->nested.pi_desc_page);
> +		nested_release_page(vmx->nested.pi_desc_page);
> +		vmx->nested.pi_desc_page = NULL;
> +		vmx->nested.pi_desc = NULL;
> +	}
>
>  	/*
>  	 * We are now running in L2, mmu_notifier will force to reload the
> --
> 1.7.1
>