[PATCH v2 30/32] KVM: SVM: Add a helper to allocate and initialize permissions bitmaps

From: Sean Christopherson
Date: Tue Jun 10 2025 - 19:06:32 EST


Add a helper to allocate and initialize an MSR or I/O permissions map;
the logic is identical between the two map types, the only difference
being the size of the bitmap. Opportunistically add a comment to explain
why the bitmaps are initialized with 0xff rather than the more common
zero-initialized behavior; having a single home for that explanation is
the main motivation for deduplicating the code.

No functional change intended.
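
For illustration only (the diff below is authoritative), the resulting
allocation pattern looks roughly like this, with all names taken from
the patch; the all-ones initialization means every MSR and I/O access
is intercepted until a bit is explicitly cleared:

  /* Shared helper: allocates pages and memsets them to 0xff. */
  void *svm_alloc_permissions_map(unsigned long size, gfp_t gfp_mask);

  /* Per-vCPU MSR permissions map, memcg-accounted allocation. */
  static inline void *svm_vcpu_alloc_msrpm(void)
  {
          return svm_alloc_permissions_map(MSRPM_SIZE, GFP_KERNEL_ACCOUNT);
  }

  /* Single global I/O permissions map, allocated at hardware setup. */
  iopm_va = svm_alloc_permissions_map(IOPM_SIZE, GFP_KERNEL);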

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
arch/x86/kvm/svm/svm.c | 31 +++++++++++++++----------------
arch/x86/kvm/svm/svm.h | 8 +++++++-
2 files changed, 22 insertions(+), 17 deletions(-)

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index fc41ec70b6de..e3c49c763225 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -728,19 +728,23 @@ void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
svm->nested.force_msr_bitmap_recalc = true;
}

-void *svm_vcpu_alloc_msrpm(void)
+void *svm_alloc_permissions_map(unsigned long size, gfp_t gfp_mask)
{
- unsigned int order = get_order(MSRPM_SIZE);
- struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, order);
- void *msrpm;
+ unsigned int order = get_order(size);
+ struct page *pages = alloc_pages(gfp_mask, order);
+ void *pm;

if (!pages)
return NULL;

- msrpm = page_address(pages);
- memset(msrpm, 0xff, PAGE_SIZE * (1 << order));
+ /*
+ * Set all bits in the permissions map so that all MSR and I/O accesses
+ * are intercepted by default.
+ */
+ pm = page_address(pages);
+ memset(pm, 0xff, PAGE_SIZE * (1 << order));

- return msrpm;
+ return pm;
}

static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu)
@@ -5325,11 +5329,8 @@ static __init void svm_set_cpu_caps(void)

static __init int svm_hardware_setup(void)
{
- int cpu;
- struct page *iopm_pages;
void *iopm_va;
- int r;
- unsigned int order = get_order(IOPM_SIZE);
+ int cpu, r;

/*
* NX is required for shadow paging and for NPT if the NX huge pages
@@ -5410,13 +5411,11 @@ static __init int svm_hardware_setup(void)
pr_info("LBR virtualization supported\n");
}

- iopm_pages = alloc_pages(GFP_KERNEL, order);
- if (!iopm_pages)
+ iopm_va = svm_alloc_permissions_map(IOPM_SIZE, GFP_KERNEL);
+ if (!iopm_va)
return -ENOMEM;

- iopm_va = page_address(iopm_pages);
- memset(iopm_va, 0xff, PAGE_SIZE * (1 << order));
- iopm_base = __sme_page_pa(iopm_pages);
+ iopm_base = __sme_set(__pa(iopm_va));

/*
* Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 489adc2ca3f5..8d3279563261 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -667,7 +667,13 @@ BUILD_SVM_MSR_BITMAP_HELPERS(void, set, __set)
/* svm.c */
extern bool dump_invalid_vmcb;

-void *svm_vcpu_alloc_msrpm(void);
+void *svm_alloc_permissions_map(unsigned long size, gfp_t gfp_mask);
+
+static inline void *svm_vcpu_alloc_msrpm(void)
+{
+ return svm_alloc_permissions_map(MSRPM_SIZE, GFP_KERNEL_ACCOUNT);
+}
+
void svm_vcpu_free_msrpm(void *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
void svm_enable_lbrv(struct kvm_vcpu *vcpu);
--
2.50.0.rc0.642.g800a2b2222-goog