Re: [PATCH v10 7/9] KVM: Update lpage info when private/shared memory are mixed

From: Chao Peng
Date: Sat Jan 28 2023 - 09:03:05 EST


On Fri, Jan 13, 2023 at 11:16:27PM +0000, Sean Christopherson wrote:
> On Fri, Dec 02, 2022, Chao Peng wrote:
> > diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> > index 9a07380f8d3c..5aefcff614d2 100644
> > --- a/arch/x86/kvm/x86.c
> > +++ b/arch/x86/kvm/x86.c
> > @@ -12362,6 +12362,8 @@ static int kvm_alloc_memslot_metadata(struct kvm *kvm,
> >  		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
> >  			linfo[lpages - 1].disallow_lpage = 1;
> >  		ugfn = slot->userspace_addr >> PAGE_SHIFT;
> > +		if (kvm_slot_can_be_private(slot))
> > +			ugfn |= slot->restricted_offset >> PAGE_SHIFT;
> >  		/*
> >  		 * If the gfn and userspace address are not aligned wrt each
> >  		 * other, disable large page support for this slot.
>
> Forgot to talk about the bug. This code needs to handle the scenario where a
> memslot is created with existing, non-uniform attributes. It might be a bit ugly
> (I didn't even try to write the code), but it's definitely possible, and since
> memslot updates are already slow I think it's best to handle things here.
>
> In the meantime, I added this so we don't forget to fix it before merging.
>
> #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
> 	pr_crit_once("FIXME: Walk the memory attributes of the slot and set the mixed status appropriately");
> #endif
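
To make the scenario concrete: userspace can set attributes on a range
while no memslot covers it, then create (or delete and re-create) a slot
over that range afterwards. Roughly like below; a hypothetical repro
sketch against this series' UAPI (KVM_SET_MEMORY_ATTRIBUTES and the
_ext region struct as in v10; names may differ in your latest tree):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: vm_fd is a VM fd, 'region' is filled in elsewhere. */
static void repro(int vm_fd, struct kvm_userspace_memory_region_ext *region)
{
	struct kvm_memory_attributes attrs = {
		.address = 0x100000,	/* covers only part of a 2M page */
		.size = 0x1000,
		.attributes = KVM_MEMORY_ATTRIBUTE_PRIVATE,
	};

	/* 1) Mark a sub-hugepage range private; no memslot exists yet. */
	ioctl(vm_fd, KVM_SET_MEMORY_ATTRIBUTES, &attrs);

	/*
	 * 2) Create a memslot covering that range. Metadata allocation
	 *    must scan the pre-existing attributes and mark the affected
	 *    lpage_info entries mixed, or a hugepage stays wrongly allowed.
	 */
	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, region);
}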

Here is the code to fix it (based on your latest github repo).

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e552374f2357..609ff1cba9c5 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -2195,4 +2195,9 @@ int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);
 				 KVM_X86_QUIRK_FIX_HYPERCALL_INSN | \
 				 KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS)
 
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+void kvm_memory_attributes_create_memslot(struct kvm *kvm,
+					  struct kvm_memory_slot *slot);
+#endif
+
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index eda615f3951c..8833d7201e41 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -7201,10 +7201,11 @@ static bool has_mixed_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
 	return false;
 }
 
-void kvm_arch_set_memory_attributes(struct kvm *kvm,
-				    struct kvm_memory_slot *slot,
-				    unsigned long attrs,
-				    gfn_t start, gfn_t end)
+static void kvm_update_lpage_mixed_flag(struct kvm *kvm,
+					struct kvm_memory_slot *slot,
+					bool set_attrs,
+					unsigned long attrs,
+					gfn_t start, gfn_t end)
 {
 	unsigned long pages, mask;
 	gfn_t gfn, gfn_end, first, last;
@@ -7231,25 +7232,52 @@ void kvm_arch_set_memory_attributes(struct kvm *kvm,
 		first = start & mask;
 		last = (end - 1) & mask;
 
-		/*
-		 * We only need to scan the head and tail page, for middle pages
-		 * we know they will not be mixed.
-		 */
+		/* head page */
 		gfn = max(first, slot->base_gfn);
 		gfn_end = min(first + pages, slot->base_gfn + slot->npages);
+		if (!set_attrs)
+			attrs = kvm_get_memory_attributes(kvm, gfn);
 		mixed = has_mixed_attrs(kvm, slot, level, attrs, gfn, gfn_end);
 		linfo_update_mixed(gfn, slot, level, mixed);
 
 		if (first == last)
 			return;
 
-		for (gfn = first + pages; gfn < last; gfn += pages)
-			linfo_update_mixed(gfn, slot, level, false);
+		/* middle pages */
+		for (gfn = first + pages; gfn < last; gfn += pages) {
+			if (set_attrs) {
+				mixed = false;
+			} else {
+				gfn_end = gfn + pages;
+				attrs = kvm_get_memory_attributes(kvm, gfn);
+				mixed = has_mixed_attrs(kvm, slot, level, attrs,
+							gfn, gfn_end);
+			}
+			linfo_update_mixed(gfn, slot, level, mixed);
+		}
 
+		/* tail page */
 		gfn = last;
 		gfn_end = min(last + pages, slot->base_gfn + slot->npages);
+		if (!set_attrs)
+			attrs = kvm_get_memory_attributes(kvm, gfn);
 		mixed = has_mixed_attrs(kvm, slot, level, attrs, gfn, gfn_end);
 		linfo_update_mixed(gfn, slot, level, mixed);
 	}
 }
+
+void kvm_arch_set_memory_attributes(struct kvm *kvm,
+				    struct kvm_memory_slot *slot,
+				    unsigned long attrs,
+				    gfn_t start, gfn_t end)
+{
+	kvm_update_lpage_mixed_flag(kvm, slot, true, attrs, start, end);
+}
+
+void kvm_memory_attributes_create_memslot(struct kvm *kvm,
+					  struct kvm_memory_slot *slot)
+{
+	kvm_update_lpage_mixed_flag(kvm, slot, false, 0, slot->base_gfn,
+				    slot->base_gfn + slot->npages);
+}
 #endif
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 268c3d16894d..c1074aecf2d0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -12443,7 +12443,7 @@ static int kvm_alloc_memslot_metadata(struct kvm *kvm,
 	}
 
 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
-	pr_crit_once("FIXME: Walk the memory attributes of the slot and set the mixed status appropriately");
+	kvm_memory_attributes_create_memslot(kvm, slot);
 #endif
 
 	if (kvm_page_track_create_memslot(kvm, slot, npages))
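
For reference, the head/middle/tail split above boils down to the
following arithmetic (a standalone userspace sketch, not kernel code,
with KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) == 512 hard-coded):

#include <stdio.h>

int main(void)
{
	unsigned long pages = 512;	/* GFNs per 2M hugepage */
	unsigned long mask = ~(pages - 1);
	unsigned long start = 0x12345, end = 0x12a00;	/* attr range [start, end) */
	unsigned long first = start & mask;	/* hugepage containing 'start' */
	unsigned long last = (end - 1) & mask;	/* hugepage containing 'end - 1' */
	unsigned long gfn;

	printf("head  : %#lx..%#lx (partially covered, rescan)\n",
	       first, first + pages);
	for (gfn = first + pages; gfn < last; gfn += pages)
		printf("middle: %#lx..%#lx (fully covered)\n", gfn, gfn + pages);
	printf("tail  : %#lx..%#lx (partially covered, rescan)\n",
	       last, last + pages);
	return 0;
}

When set_attrs is true, the middle pages are fully covered by the new
attributes and thus cannot be mixed; at memslot creation there is no
single 'attrs' for the whole range, hence the per-page
kvm_get_memory_attributes() lookups in the loop.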