Re: [PATCH 0/4] mm/userfaultfd: modulize memory types

From: Nikita Kalyazin
Date: Thu Jun 26 2025 - 12:10:02 EST

On 25/06/2025 21:17, Peter Xu wrote:
> On Wed, Jun 25, 2025 at 05:56:23PM +0100, Nikita Kalyazin wrote:
>> On 20/06/2025 20:03, Peter Xu wrote:
>>> [based on akpm/mm-new]
>>>
>>> This series is an alternative proposal of what Nikita proposed here on the
>>> initial three patches:
>>>
>>> https://lore.kernel.org/r/20250404154352.23078-1-kalyazin@xxxxxxxxxx
>>>
>>> This is not yet relevant to any guest-memfd support, but paves the way for it.

>> Hi Peter,

> Hi, Nikita,


>> Thanks for posting this. I confirmed that minor fault handling was working
>> for guest_memfd based on this series, and it looked simple (a draft based on
>> mmap support in guest_memfd v7 [1]):

> Thanks for the quick spin, glad to know it works. Some trivial things to
> mention below..

Following up, I drafted UFFDIO_COPY support for guest_memfd to confirm it works as well:

diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 8c44e4b9f5f8..b5458a22fff4 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -349,12 +349,24 @@ static bool kvm_gmem_offset_is_shared(struct file *file, pgoff_t index)
 
 static vm_fault_t kvm_gmem_fault(struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
 	struct inode *inode = file_inode(vmf->vma->vm_file);
 	struct folio *folio;
 	vm_fault_t ret = VM_FAULT_LOCKED;
 
 	filemap_invalidate_lock_shared(inode->i_mapping);
 
+	folio = filemap_get_entry(inode->i_mapping, vmf->pgoff);
+	if (!folio && vma && userfaultfd_missing(vma)) {
+		filemap_invalidate_unlock_shared(inode->i_mapping);
+		return handle_userfault(vmf, VM_UFFD_MISSING);
+	}
+
+	/* Drop the reference filemap_get_entry() took; the folio, if any,
+	 * is looked up again via kvm_gmem_get_folio() below. */
+	if (folio && !xa_is_value(folio))
+		folio_put(folio);
+
 	folio = kvm_gmem_get_folio(inode, vmf->pgoff);
 	if (IS_ERR(folio)) {
 		int err = PTR_ERR(folio);
@@ -438,10 +450,57 @@ static int kvm_gmem_uffd_get_folio(struct inode *inode, pgoff_t pgoff,
 	return 0;
 }
 
+static int kvm_gmem_mfill_atomic_pte(pmd_t *dst_pmd,
+				     struct vm_area_struct *dst_vma,
+				     unsigned long dst_addr,
+				     unsigned long src_addr,
+				     uffd_flags_t flags,
+				     struct folio **foliop)
+{
+	struct inode *inode = file_inode(dst_vma->vm_file);
+	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
+	struct folio *folio;
+	int ret;
+
+	folio = kvm_gmem_get_folio(inode, pgoff);
+	if (IS_ERR(folio)) {
+		ret = PTR_ERR(folio);
+		goto out;
+	}
+
+	folio_unlock(folio);
+
+	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) {
+		void *vaddr = kmap_local_folio(folio, 0);
+		ret = copy_from_user(vaddr, (const void __user *)src_addr, PAGE_SIZE);
+		kunmap_local(vaddr);
+		if (unlikely(ret)) {
+			*foliop = folio;
+			ret = -ENOENT;
+			goto out;
+		}
+	} else { /* ZEROPAGE */
+		clear_user_highpage(&folio->page, dst_addr);
+	}
+
+	kvm_gmem_mark_prepared(folio);
+
+	ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
+				       &folio->page, true, flags);
+
+	if (ret)
+		folio_put(folio);
+out:
+	return ret;
+}
+
 static const vm_uffd_ops kvm_gmem_uffd_ops = {
-	.uffd_features = VM_UFFD_MINOR,
-	.uffd_ioctls = BIT(_UFFDIO_CONTINUE),
+	.uffd_features = VM_UFFD_MISSING | VM_UFFD_MINOR,
+	.uffd_ioctls = BIT(_UFFDIO_COPY) |
+		       BIT(_UFFDIO_ZEROPAGE) |
+		       BIT(_UFFDIO_CONTINUE),
 	.uffd_get_folio = kvm_gmem_uffd_get_folio,
+	.uffd_copy = kvm_gmem_mfill_atomic_pte,
 };
 #endif
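
The userspace side is the usual userfaultfd flow. For completeness, here is a
minimal, untested sketch (not part of the patch; the helper names and the
gmem_map/staging pointers are made up) that registers an mmap()ed guest_memfd
range for MISSING faults and resolves one fault with UFFDIO_COPY:

#include <fcntl.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

/* Register an mmap()ed guest_memfd range for MISSING faults; returns the uffd. */
int register_gmem_missing(void *gmem_map, size_t len)
{
	struct uffdio_api api = { .api = UFFD_API };
	struct uffdio_register reg = {
		.range = { .start = (unsigned long)gmem_map, .len = len },
		.mode = UFFDIO_REGISTER_MODE_MISSING,
	};
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	if (uffd < 0)
		return -1;
	if (ioctl(uffd, UFFDIO_API, &api) || ioctl(uffd, UFFDIO_REGISTER, &reg))
		return -1;
	return uffd;
}

/* Wait for one MISSING fault and fill the page from a prepared staging buffer. */
int resolve_one_missing_fault(int uffd, void *staging, size_t page_size)
{
	struct pollfd pfd = { .fd = uffd, .events = POLLIN };
	struct uffd_msg msg;
	struct uffdio_copy copy;

	if (poll(&pfd, 1, -1) <= 0)
		return -1;
	if (read(uffd, &msg, sizeof(msg)) != (ssize_t)sizeof(msg) ||
	    msg.event != UFFD_EVENT_PAGEFAULT)
		return -1;

	copy.dst = msg.arg.pagefault.address & ~((__u64)page_size - 1);
	copy.src = (unsigned long)staging;
	copy.len = page_size;
	copy.mode = 0;
	return ioctl(uffd, UFFDIO_COPY, &copy);
}

The UFFDIO_ZEROPAGE path is the same flow with struct uffdio_zeropage instead
of struct uffdio_copy.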



>> diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
>> index 5abb6d52a375..6ddc73419724 100644
>> --- a/virt/kvm/guest_memfd.c
>> +++ b/virt/kvm/guest_memfd.c
>> @@ -5,6 +5,9 @@
>>  #include <linux/pagemap.h>
>>  #include <linux/anon_inodes.h>
>>  #include <linux/set_memory.h>
>> +#ifdef CONFIG_USERFAULTFD

> This ifdef not needed, userfaultfd_k.h has taken care of all cases.

Good to know, thanks.

>> +#include <linux/userfaultfd_k.h>
>> +#endif
>>
>>  #include "kvm_mm.h"
>>
>> @@ -396,6 +399,14 @@ static vm_fault_t kvm_gmem_fault(struct vm_fault *vmf)
>>  		kvm_gmem_mark_prepared(folio);
>>  	}
>>
>> +#ifdef CONFIG_USERFAULTFD

> Same here. userfaultfd_minor() is always defined.

Thank you.
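
For reference, the !CONFIG_USERFAULTFD branch of include/linux/userfaultfd_k.h
already provides inline stubs roughly like the following (abridged), so callers
don't need their own ifdefs:

static inline vm_fault_t handle_userfault(struct vm_fault *vmf,
					  unsigned long reason)
{
	return VM_FAULT_SIGBUS;
}

static inline bool userfaultfd_missing(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_minor(struct vm_area_struct *vma)
{
	return false;
}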

> I'll wait for a few more days for reviewers, and likely send v2 before next
> week.
>
> Thanks,
>
> --
> Peter Xu