Re: [PATCH 5/8] mm/kasan, mm/vmalloc: Respect GFP flags in kasan_populate_vmalloc()
From: Uladzislau Rezki
Date: Fri Aug 08 2025 - 06:18:44 EST
On Thu, Aug 07, 2025 at 06:05:21PM +0200, Andrey Ryabinin wrote:
>
> On 8/7/25 9:58 AM, Uladzislau Rezki (Sony) wrote:
>
> > diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
> > index d2c70cd2afb1..5edfc1f6b53e 100644
> > --- a/mm/kasan/shadow.c
> > +++ b/mm/kasan/shadow.c
> > @@ -335,13 +335,13 @@ static void ___free_pages_bulk(struct page **pages, int nr_pages)
> > }
> > }
> >
> > -static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
> > +static int ___alloc_pages_bulk(struct page **pages, int nr_pages, gfp_t gfp_mask)
> > {
> > unsigned long nr_populated, nr_total = nr_pages;
> > struct page **page_array = pages;
> >
> > while (nr_pages) {
> > - nr_populated = alloc_pages_bulk(GFP_KERNEL, nr_pages, pages);
> > + nr_populated = alloc_pages_bulk(gfp_mask, nr_pages, pages);
> > if (!nr_populated) {
> > ___free_pages_bulk(page_array, nr_total - nr_pages);
> > return -ENOMEM;
> > @@ -353,25 +353,33 @@ static int ___alloc_pages_bulk(struct page **pages, int nr_pages)
> > return 0;
> > }
> >
> > -static int __kasan_populate_vmalloc(unsigned long start, unsigned long end)
> > +static int __kasan_populate_vmalloc(unsigned long start, unsigned long end, gfp_t gfp_mask)
> > {
> > unsigned long nr_pages, nr_total = PFN_UP(end - start);
> > + bool noblock = !gfpflags_allow_blocking(gfp_mask);
> > struct vmalloc_populate_data data;
> > + unsigned int flags;
> > int ret = 0;
>
> gfp_mask = (gfp_mask & GFP_RECLAIM_MASK);
>
>
> But it might be better to do this in alloc_vmap_area().
> In alloc_vmap_area() we have this:
>
> retry:
> if (IS_ERR_VALUE(addr)) {
> preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
>
> which probably needs GFP_RECLAIM_MASK too.
>
Thank you for pointing this out. I will check it!
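As a first pass, I think it is enough to mask the flags right at the entry,
something like below (untested sketch; doing the same in alloc_vmap_area()
before the preload_this_cpu_lock() call would be analogous):

	/*
	 * Keep only the reclaim-related modifiers from the caller's mask
	 * for the internal allocations. GFP_RECLAIM_MASK is defined in
	 * mm/internal.h.
	 */
	gfp_mask &= GFP_RECLAIM_MASK;
	noblock = !gfpflags_allow_blocking(gfp_mask);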
> >
> > - data.pages = (struct page **)__get_free_page(GFP_KERNEL | __GFP_ZERO);
> > + data.pages = (struct page **)__get_free_page(gfp_mask | __GFP_ZERO);
> > if (!data.pages)
> > return -ENOMEM;
> >
> > while (nr_total) {
> > nr_pages = min(nr_total, PAGE_SIZE / sizeof(data.pages[0]));
> > - ret = ___alloc_pages_bulk(data.pages, nr_pages);
> > + ret = ___alloc_pages_bulk(data.pages, nr_pages, gfp_mask);
> > if (ret)
> > break;
> >
> > data.start = start;
> > + if (noblock)
> > + flags = memalloc_noreclaim_save();
> > +
>
>
> This should be the same as in __vmalloc_area_node():
>
> if (noblock)
> flags = memalloc_noreclaim_save();
> else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
> flags = memalloc_nofs_save();
> else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
> flags = memalloc_noio_save();
>
>
> It would be better to fix the noio/nofs stuff first in a separate patch, as it's
> a bug and needs a cc to stable, and add support for noblock in a follow-up.
>
Right. The KASAN path was not fixed together with vmalloc. I will look into it.
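For the loop around apply_to_page_range() that would then become something
like below, mirroring __vmalloc_area_node() (untested sketch, with the
noio/nofs part going into the separate stable fix you mention):

	/* Enter the memalloc scope implied by gfp_mask around the PTE walk. */
	if (noblock)
		flags = memalloc_noreclaim_save();
	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
		flags = memalloc_nofs_save();
	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
		flags = memalloc_noio_save();

	ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
				  kasan_populate_vmalloc_pte, &data);

	if (noblock)
		memalloc_noreclaim_restore(flags);
	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
		memalloc_nofs_restore(flags);
	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
		memalloc_noio_restore(flags);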
> It might be a good idea to consolidate such logic into separate helpers, e.g.
> memalloc_save(gfp_mask)/memalloc_restore(gfp_mask, flags)?
>
> > ret = apply_to_page_range(&init_mm, start, nr_pages * PAGE_SIZE,
> > kasan_populate_vmalloc_pte, &data);
> > + if (noblock)
> > + memalloc_noreclaim_restore(flags);
> > +
> > ___free_pages_bulk(data.pages, nr_pages);
> > if (ret)
>
Sounds good.
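Something like below, perhaps? Just a rough sketch, the helper names are your
suggestion and such an API does not exist in the tree yet:

/*
 * Hypothetical helpers: enter/leave the memalloc scope implied by gfp_mask,
 * consolidating the logic currently open-coded in __vmalloc_area_node().
 */
static inline unsigned int memalloc_save(gfp_t gfp_mask)
{
	if (!gfpflags_allow_blocking(gfp_mask))
		return memalloc_noreclaim_save();
	if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
		return memalloc_nofs_save();
	if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
		return memalloc_noio_save();
	return 0;
}

static inline void memalloc_restore(gfp_t gfp_mask, unsigned int flags)
{
	if (!gfpflags_allow_blocking(gfp_mask))
		memalloc_noreclaim_restore(flags);
	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
		memalloc_nofs_restore(flags);
	else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
		memalloc_noio_restore(flags);
}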
Thank you.
--
Uladzislau Rezki