[PATCH] hugetlb: make follow_huge_pgd support FOLL_GET

From: Mike Kravetz
Date: Wed Aug 17 2022 - 17:32:10 EST


The existing version of follow_huge_pgd() is very primitive compared to
the other follow_huge_p*d() routines: it does not support FOLL_GET, and
it does not handle non-present (migration or hwpoisoned) pgd entries.
Update follow_huge_pgd() with modifications similar to those made for
follow_huge_pud() in commit 3a194f3f8ad0 ("mm/hugetlb: make pud_huge()
and follow_huge_pud() aware of non-present pud entry"): take the page
table lock, take a page reference via try_grab_page() when FOLL_GET is
requested, and wait and retry on migration entries.

Note: common code should be factored out of the follow_huge_p*d()
routines; this will be done in a future patch.
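
As a rough illustration of what that factoring might look like (a
hypothetical sketch only, not part of this patch; the helper name
follow_huge_nonpresent() is made up), the non-present handling that is
duplicated between follow_huge_pud() and follow_huge_pgd() could be
pulled into something like:

	/*
	 * Hypothetical helper, for illustration only.  Handle a
	 * non-present hugetlb entry found under the page table lock.
	 * Returns true if the lock was dropped to wait on a migration
	 * entry and the caller should retry; returns false (lock still
	 * held) if the caller should give up, e.g. for a hwpoisoned
	 * entry which follow_page_mask() treats as no_page_table().
	 */
	static bool follow_huge_nonpresent(struct mm_struct *mm, pte_t *ptep,
					   pte_t pte, spinlock_t *ptl)
	{
		if (is_hugetlb_entry_migration(pte)) {
			spin_unlock(ptl);
			__migration_entry_wait(mm, ptep, ptl);
			return true;
		}
		return false;
	}

The else branches in follow_huge_pud() and follow_huge_pgd() would then
reduce to something like
"if (follow_huge_nonpresent(mm, (pte_t *)pgd, pte, ptl)) goto retry;".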

Signed-off-by: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
---
mm/hugetlb.c | 32 ++++++++++++++++++++++++++++++--
1 file changed, 30 insertions(+), 2 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ea1c7bfa1cc3..6f32d2bd1ca9 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7055,10 +7055,38 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
 struct page * __weak
 follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
 {
-	if (flags & (FOLL_GET | FOLL_PIN))
+	struct page *page = NULL;
+	spinlock_t *ptl;
+	pte_t pte;
+
+	if (WARN_ON_ONCE(flags & FOLL_PIN))
 		return NULL;
 
-	return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
+retry:
+	ptl = huge_pte_lock(hstate_sizelog(PGDIR_SHIFT), mm, (pte_t *)pgd);
+	if (!pgd_huge(*pgd))
+		goto out;
+	pte = huge_ptep_get((pte_t *)pgd);
+	if (pte_present(pte)) {
+		page = pgd_page(*pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
+		if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
+			page = NULL;
+			goto out;
+		}
+	} else {
+		if (is_hugetlb_entry_migration(pte)) {
+			spin_unlock(ptl);
+			__migration_entry_wait(mm, (pte_t *)pgd, ptl);
+			goto retry;
+		}
+		/*
+		 * hwpoisoned entry is treated as no_page_table in
+		 * follow_page_mask().
+		 */
+	}
+out:
+	spin_unlock(ptl);
+	return page;
 }
 
 int isolate_hugetlb(struct page *page, struct list_head *list)
--
2.37.1