Re: [PATCH 11/14] KVM: arm64: Make block->table PTE changes parallel-aware

From: Ricardo Koller
Date: Tue Sep 13 2022 - 20:52:05 EST


On Tue, Aug 30, 2022 at 07:51:01PM +0000, Oliver Upton wrote:
> In order to service stage-2 faults in parallel, stage-2 table walkers
> must take exclusive ownership of the PTE being worked on. An additional
> requirement of the architecture is that software must perform a
> 'break-before-make' operation when changing the block size used for
> mapping memory.
>
> Roll these two concepts together into helpers for performing a
> 'break-before-make' sequence. Use a special PTE value to indicate a PTE
> has been locked by a software walker. Additionally, use an atomic
> compare-exchange to 'break' the PTE when the stage-2 page tables are
> possibly shared with another software walker. Elide the DSB + TLBI if
> the evicted PTE was invalid (and thus not subject to break-before-make).
>
> All of the atomics do nothing for now, as the stage-2 walker isn't fully
> ready to perform parallel walks.
>
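Just to spell out the sequence these helpers implement, as I understand it
(a sketch modeled on the stage2_map_walk_leaf() hunk below, not literal
code from the patch):

        kvm_pte_t old = *ptep, new;

        /*
         * 'break': take exclusive ownership of the pte; the DSB + TLBI
         * is elided when the evicted pte was invalid.
         */
        if (!stage2_try_break_pte(ptep, old, addr, level, data))
                return -EAGAIN;         /* lost the race; caller retries */

        /* ...build the replacement while the pte reads as locked... */
        new = kvm_init_table_pte(childp, mm_ops);

        /* 'make': publish the new pte with release semantics. */
        stage2_make_pte(ptep, old, new, data);
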
> Signed-off-by: Oliver Upton <oliver.upton@xxxxxxxxx>
> ---
> arch/arm64/kvm/hyp/pgtable.c | 87 +++++++++++++++++++++++++++++++++---
> 1 file changed, 82 insertions(+), 5 deletions(-)
>
> diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
> index 61a4437c8c16..71ae96608752 100644
> --- a/arch/arm64/kvm/hyp/pgtable.c
> +++ b/arch/arm64/kvm/hyp/pgtable.c
> @@ -49,6 +49,12 @@
> #define KVM_INVALID_PTE_OWNER_MASK GENMASK(9, 2)
> #define KVM_MAX_OWNER_ID 1
>
> +/*
> + * Used to indicate a pte for which a 'break-before-make' sequence is in
> + * progress.
> + */
> +#define KVM_INVALID_PTE_LOCKED BIT(10)
> +
> struct kvm_pgtable_walk_data {
>         struct kvm_pgtable *pgt;
>         struct kvm_pgtable_walker *walker;
> @@ -586,6 +592,8 @@ struct stage2_map_data {
>
>         /* Force mappings to page granularity */
>         bool force_pte;
> +
> +        bool shared;
> };
>
> u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
> @@ -691,6 +699,11 @@ static bool stage2_pte_is_counted(kvm_pte_t pte)
>         return kvm_pte_valid(pte) || kvm_invalid_pte_owner(pte);
> }
>
> +static bool stage2_pte_is_locked(kvm_pte_t pte)
> +{
> +        return !kvm_pte_valid(pte) && (pte & KVM_INVALID_PTE_LOCKED);
> +}
> +
> static bool stage2_try_set_pte(kvm_pte_t *ptep, kvm_pte_t old, kvm_pte_t new, bool shared)
> {
>         if (!shared) {
> @@ -701,6 +714,69 @@ static bool stage2_try_set_pte(kvm_pte_t *ptep, kvm_pte_t old, kvm_pte_t new, bo
>         return cmpxchg(ptep, old, new) == old;
> }
>
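(For reference, if I'm remembering the earlier patch in this series
correctly, the context elided between the two hunks above is just the
unshared fast path:

        if (!shared) {
                WRITE_ONCE(*ptep, new);         /* exclusive walker: plain store */
                return true;
        }

        return cmpxchg(ptep, old, new) == old;  /* shared: must win the race */

i.e. 'shared' selects between a plain write and an atomic
compare-exchange.)
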
> +/**
> + * stage2_try_break_pte() - Invalidates a pte according to the
> + *                          'break-before-make' requirements of the
> + *                          architecture.
> + *
> + * @ptep: Pointer to the pte to break
> + * @old: The previously observed value of the pte
> + * @addr: IPA corresponding to the pte
> + * @level: Table level of the pte
> + * @data: Map data for the walk; data->shared is true if the stage-2 page
> + *        tables could be shared by multiple software walkers
> + *
> + * Returns: true if the pte was successfully broken.
> + *
> + * If the removed pte was valid, performs the necessary serialization and TLB
> + * invalidation for the old value. For counted ptes, drops the reference count
> + * on the containing table page.
> + */
> +static bool stage2_try_break_pte(kvm_pte_t *ptep, kvm_pte_t old, u64 addr, u32 level,
> +                                 struct stage2_map_data *data)
> +{
> +        struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
> +
> +        if (stage2_pte_is_locked(old)) {
> +                /*
> +                 * Should never occur if this walker has exclusive access to the
> +                 * page tables.
> +                 */
> +                WARN_ON(!data->shared);
> +                return false;
> +        }

The above check is not needed: if the pte is already locked, the cmpxchg()
will read back KVM_INVALID_PTE_LOCKED rather than 'old', so
stage2_try_set_pte() fails anyway.
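That is, for the racy interleaving this check would otherwise catch
(walkers A and B contending on the same pte):

        /* A snapshots a valid pte... */
        old = READ_ONCE(*ptep);

        /* ...B locks it: *ptep is now KVM_INVALID_PTE_LOCKED... */

        /* ...so A's cmpxchg() reads back LOCKED != old and fails: */
        cmpxchg(ptep, old, KVM_INVALID_PTE_LOCKED);

(Sketch only; in the patch the snapshot is taken by the walker before the
visitor runs.)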

> +
> +        if (!stage2_try_set_pte(ptep, old, KVM_INVALID_PTE_LOCKED, data->shared))
> +                return false;
> +
> +        /*
> +         * Perform the appropriate TLB invalidation based on the evicted pte
> +         * value (if any).
> +         */
> +        if (kvm_pte_table(old, level))
> +                kvm_call_hyp(__kvm_tlb_flush_vmid, data->mmu);
> +        else if (kvm_pte_valid(old))
> +                kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level);
> +
> +        if (stage2_pte_is_counted(old))
> +                mm_ops->put_page(ptep);
> +
> +        return true;
> +}
> +
> +static void stage2_make_pte(kvm_pte_t *ptep, kvm_pte_t old, kvm_pte_t new,
> +                            struct stage2_map_data *data)
> +{
> +        struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
> +
> +        WARN_ON(!stage2_pte_is_locked(*ptep));
> +
> +        if (stage2_pte_is_counted(new))
> +                mm_ops->get_page(ptep);
> +
> +        smp_store_release(ptep, new);
> +}
> +
> static void stage2_put_pte(kvm_pte_t *ptep, struct kvm_s2_mmu *mmu, u64 addr,
>                            u32 level, struct kvm_pgtable_mm_ops *mm_ops)
> {
> @@ -836,17 +912,18 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
>         if (!childp)
>                 return -ENOMEM;
>
> +        if (!stage2_try_break_pte(ptep, *old, addr, level, data)) {
> +                mm_ops->put_page(childp);
> +                return -EAGAIN;
> +        }
> +
>         /*
>          * If we've run into an existing block mapping then replace it with
>          * a table. Accesses beyond 'end' that fall within the new table
>          * will be mapped lazily.
>          */
> -        if (stage2_pte_is_counted(pte))
> -                stage2_put_pte(ptep, data->mmu, addr, level, mm_ops);
> -
>         new = kvm_init_table_pte(childp, mm_ops);
> -        mm_ops->get_page(ptep);
> -        smp_store_release(ptep, new);
> +        stage2_make_pte(ptep, *old, new, data);
>         *old = new;
>
>         return 0;
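>
One more note on the -EAGAIN: as far as I can tell it just propagates up
to the fault path, where user_mem_abort() already turns it into "go back
to the guest and re-fault" (quoting arch/arm64/kvm/mmu.c from memory, so
double-check):

        /* tail of user_mem_abort() */
        return ret != -EAGAIN ? ret : 0;

so losing the race here is cheap.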
> --
> 2.37.2.672.g94769d06f0-goog
>