[PATCH 5/6] mm: honor PF_MEMALLOC_NOMOVABLE for all allocations

From: Pavel Tatashin
Date: Wed Dec 02 2020 - 00:24:40 EST


PF_MEMALLOC_NOMOVABLE is currently honored only for CMA allocations.
Extend it to cover all allocations by removing __GFP_MOVABLE from
gfp_mask whenever the flag is set in the current context, thus
prohibiting allocations from ZONE_MOVABLE.
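
For illustration, a minimal caller-side sketch of the intended effect,
assuming the memalloc_nomovable_save()/restore() scope helpers from
earlier in this series (helper names assumed, not part of this patch):

	unsigned int flags;
	struct page *page;

	/*
	 * While PF_MEMALLOC_NOMOVABLE is set, __GFP_MOVABLE is stripped
	 * from gfp_mask, so even a GFP_HIGHUSER_MOVABLE request cannot
	 * be satisfied from ZONE_MOVABLE (or from CMA pageblocks).
	 */
	flags = memalloc_nomovable_save();
	page = alloc_pages(GFP_HIGHUSER_MOVABLE, 0);
	memalloc_nomovable_restore(flags);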

Signed-off-by: Pavel Tatashin <pasha.tatashin@xxxxxxxxxx>
---
 mm/hugetlb.c    |  2 +-
 mm/page_alloc.c | 26 ++++++++++++++++----------
 2 files changed, 17 insertions(+), 11 deletions(-)
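
A note on the hugetlb hunk below: is_migrate_movable() covers both CMA
and plain MOVABLE pageblocks, so the dequeue path now skips any huge
page that could sit in ZONE_MOVABLE, not just CMA pages. Roughly (a
paraphrase of the helper in include/linux/mmzone.h; exact form may
differ by tree):

	static inline bool is_migrate_movable(int mt)
	{
		return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
	}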

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 02213c74ed6b..00e786201d8b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1036,7 +1036,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
 	bool nomovable = !!(current->flags & PF_MEMALLOC_NOMOVABLE);
 
 	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
-		if (nomovable && is_migrate_cma_page(page))
+		if (nomovable && is_migrate_movable(get_pageblock_migratetype(page)))
 			continue;
 
 		if (PageHWPoison(page))
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 611799c72da5..7a6d86d0bc5f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3766,20 +3766,25 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
 	return alloc_flags;
 }
 
-static inline unsigned int current_alloc_flags(gfp_t gfp_mask,
-					unsigned int alloc_flags)
+static inline unsigned int cma_alloc_flags(gfp_t gfp_mask,
+					unsigned int alloc_flags)
 {
 #ifdef CONFIG_CMA
-	unsigned int pflags = current->flags;
-
-	if (!(pflags & PF_MEMALLOC_NOMOVABLE) &&
-			gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+	if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
 		alloc_flags |= ALLOC_CMA;
-
 #endif
 	return alloc_flags;
 }
 
+static inline gfp_t current_gfp_checkmovable(gfp_t gfp_mask)
+{
+	unsigned int pflags = current->flags;
+
+	if ((pflags & PF_MEMALLOC_NOMOVABLE))
+		return gfp_mask & ~__GFP_MOVABLE;
+	return gfp_mask;
+}
+
 /*
  * get_page_from_freelist goes through the zonelist trying to allocate
  * a page.
@@ -4423,7 +4428,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
-	alloc_flags = current_alloc_flags(gfp_mask, alloc_flags);
+	alloc_flags = cma_alloc_flags(gfp_mask, alloc_flags);
 
 	return alloc_flags;
 }
@@ -4725,7 +4730,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
 	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
 	if (reserve_flags)
-		alloc_flags = current_alloc_flags(gfp_mask, reserve_flags);
+		alloc_flags = cma_alloc_flags(gfp_mask, reserve_flags);
 
 	/*
 	 * Reset the nodemask and zonelist iterators if memory policies can be
@@ -4894,7 +4899,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
 	if (should_fail_alloc_page(gfp_mask, order))
 		return false;
 
-	*alloc_flags = current_alloc_flags(gfp_mask, *alloc_flags);
+	*alloc_flags = cma_alloc_flags(gfp_mask, *alloc_flags);
 
 	/* Dirty zone balancing only done in the fast path */
 	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
@@ -4932,6 +4937,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	}
 
 	gfp_mask &= gfp_allowed_mask;
+	gfp_mask = current_gfp_checkmovable(gfp_mask);
 	alloc_mask = gfp_mask;
 	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
 		return NULL;
--
2.25.1