[RFC PATCH v3 13/13] mm/damon/vaddr: Apply filters in migrate_{hot/cold}
From: Bijan Tabatabai
Date: Wed Jul 02 2025 - 16:17:28 EST
From: Bijan Tabatabai <bijantabatab@xxxxxxxxxx>
The paddr versions of migrate_{hot/cold} filter out folios from
migration based on the scheme's filters. This patch does the same for
the vaddr versions of those schemes.
The filtering code is mostly the same for the paddr and vaddr versions.
The exception is the young filter. The paddr version determines whether a
folio is young by performing an rmap walk on the folio to find the page
table entries that map it. However, the vaddr schemes already have direct
access to the page tables, so we add some logic to avoid that extra work.
Co-developed-by: Ravi Shankar Jonnalagadda <ravis.opensrc@xxxxxxxxxx>
Signed-off-by: Ravi Shankar Jonnalagadda <ravis.opensrc@xxxxxxxxxx>
Signed-off-by: Bijan Tabatabai <bijantabatab@xxxxxxxxxx>
---
mm/damon/vaddr.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 64 insertions(+)
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 5f230a427fdc..2a485bf19101 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -611,6 +611,62 @@ static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
return max_nr_accesses;
}
+/*
+ * damos_va_filter_young() - Evaluate a DAMOS_FILTER_TYPE_YOUNG filter for a
+ * folio mapped in a virtual address space.
+ * @filter:	The young-type filter being evaluated.
+ * @folio:	Folio under consideration.
+ * @vma:	VMA mapping @folio at @addr.
+ * @addr:	Virtual address where @folio is mapped.
+ * @ptep:	PTE mapping the folio, or NULL if mapped at PMD level.
+ * @pmdp:	PMD mapping the folio, or NULL if mapped at PTE level.
+ *
+ * Exactly one of @ptep and @pmdp must be non-NULL.  Unlike the paddr path,
+ * which needs an rmap walk to reach the page table, the caller here already
+ * holds the relevant page table entry, so we read the young state directly.
+ *
+ * Return: true if the folio's observed young state matches @filter->matching.
+ */
+static bool damos_va_filter_young(struct damos_filter *filter,
+ struct folio *folio, struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep, pmd_t *pmdp)
+{
+ bool young;
+
+ if (ptep) {
+ young = pte_young(*ptep);
+ } else if (pmdp) {
+ young = pmd_young(*pmdp);
+ } else {
+ WARN_ONCE(1, "Neither ptep nor pmdp provided");
+ return false;
+ }
+
+ /*
+ * Beyond the access bit, also consider page idle tracking and
+ * secondary MMUs (mmu notifier), as the paddr young check does.
+ */
+ young = young || !folio_test_idle(folio) ||
+ mmu_notifier_test_young(vma->vm_mm, addr);
+
+ /*
+ * Clear the young state after consuming it, mirroring the paddr
+ * filter's behavior, so the next check observes fresh accesses only.
+ */
+ if (young && ptep)
+ damon_ptep_mkold(ptep, vma, addr);
+ else if (young && pmdp)
+ damon_pmdp_mkold(pmdp, vma, addr);
+
+ return young == filter->matching;
+}
+
+/*
+ * damos_va_filter_out() - Apply a scheme's ops-layer filters to a folio.
+ * @scheme:	The scheme whose filters should be applied.
+ * @folio:	Folio under consideration.
+ * @vma:	VMA mapping @folio at @addr.
+ * @addr:	Virtual address where @folio is mapped.
+ * @ptep:	PTE mapping the folio, or NULL if mapped at PMD level.
+ * @pmdp:	PMD mapping the folio, or NULL if mapped at PTE level.
+ *
+ * Return: true if @folio should be excluded (filtered out) from the
+ * scheme's action, false if it should be acted on.
+ */
+static bool damos_va_filter_out(struct damos *scheme, struct folio *folio,
+ struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep, pmd_t *pmdp)
+{
+ struct damos_filter *filter;
+ bool matched;
+
+ /* Core-layer filters already allowed this region; nothing to do. */
+ if (scheme->core_filters_allowed)
+ return false;
+
+ damos_for_each_ops_filter(filter, scheme) {
+ /*
+ * damos_folio_filter_match checks the young filter by doing an
+ * rmap on the folio to find its page table. However, being the
+ * vaddr scheme, we have direct access to the page tables, so
+ * use that instead.
+ */
+ if (filter->type == DAMOS_FILTER_TYPE_YOUNG) {
+ matched = damos_va_filter_young(filter, folio, vma,
+ addr, ptep, pmdp);
+ } else {
+ matched = damos_folio_filter_match(filter, folio);
+ }
+
+ /* First matching filter decides: allow-filters pass, others reject. */
+ if (matched)
+ return !filter->allow;
+ }
+ /* No filter matched; fall back to the scheme's default disposition. */
+ return scheme->ops_filters_default_reject;
+}
+
struct damos_va_migrate_private {
struct list_head *migration_lists;
struct damos *scheme;
@@ -695,8 +751,12 @@ static int damos_va_migrate_pmd_entry(pmd_t *pmd, unsigned long addr,
if (!folio)
goto unlock;
+ if (damos_va_filter_out(s, folio, walk->vma, addr, NULL, pmd))
+ goto put_folio;
+
damos_va_migrate_folio(folio, walk->vma, addr, dests, migration_lists);
+put_folio:
folio_put(folio);
unlock:
spin_unlock(ptl);
@@ -724,8 +784,12 @@ static int damos_va_migrate_pte_entry(pte_t *pte, unsigned long addr,
if (!folio)
return 0;
+ if (damos_va_filter_out(s, folio, walk->vma, addr, pte, NULL))
+ goto put_folio;
+
damos_va_migrate_folio(folio, walk->vma, addr, dests, migration_lists);
+put_folio:
folio_put(folio);
return 0;
}
--
2.43.5