[...]
Manfred, could you please have a look and, if you have time, test it?
I mean, check whether it solves your issue. You can take over this patch
and resend it; otherwise I can send it myself later if we all agree on it.
<snip>I do not know the mm code well enough to understand the side effects of the change. And I doubt that it is suitable for stable, i.e. we need the simple patch first.
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d2a00ad4e1dd..b82db44fea60 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1717,17 +1717,10 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
return true;
}
-/*
- * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
- * is already purging.
- */
-static void try_purge_vmap_area_lazy(void)
-{
- if (mutex_trylock(&vmap_purge_lock)) {
- __purge_vmap_area_lazy(ULONG_MAX, 0);
- mutex_unlock(&vmap_purge_lock);
- }
-}
+static void purge_vmap_area_lazy(void);
+static void drain_vmap_area(struct work_struct *work);
+static DECLARE_WORK(drain_vmap_area_work, drain_vmap_area);
+static atomic_t drain_vmap_area_work_in_progress;
/*
* Kick off a purge of the outstanding lazy areas.
@@ -1740,6 +1733,22 @@ static void purge_vmap_area_lazy(void)
mutex_unlock(&vmap_purge_lock);
}
+static void drain_vmap_area(struct work_struct *work)
+{
+ mutex_lock(&vmap_purge_lock);
+ __purge_vmap_area_lazy(ULONG_MAX, 0);
+ mutex_unlock(&vmap_purge_lock);
+
+ /*
+ * Check if rearming is still required. If not, we are
+ * done and can let the next caller initiate a new drain.
+ */
+ if (atomic_long_read(&vmap_lazy_nr) > lazy_max_pages())
+ schedule_work(&drain_vmap_area_work);
+ else
+ atomic_set(&drain_vmap_area_work_in_progress, 0);
+}
+
/*
* Free a vmap area, caller ensuring that the area has been unmapped
* and flush_cache_vunmap had been called for the correct range
@@ -1766,7 +1775,8 @@ static void free_vmap_area_noflush(struct vmap_area *va)
/* After this point, we may free va at any time */
if (unlikely(nr_lazy > lazy_max_pages()))
- try_purge_vmap_area_lazy();
+ if (!atomic_xchg(&drain_vmap_area_work_in_progress, 1))
+ schedule_work(&drain_vmap_area_work);
}
/*
<snip>
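For reference, here is a minimal userspace sketch of the same single-flight
pattern the patch relies on, using plain C11 atomics plus pthreads instead of
the workqueue API. This is not kernel code, and all names (lazy_nr, lazy_max,
drain_in_progress, maybe_schedule_drain, drain_worker, purge_all) are made up
for illustration. The idea is the same: only the caller that flips the flag
from 0 to 1 schedules the deferred drain, and the worker either re-arms itself
while the backlog is still above the threshold or clears the flag so a later
caller can start a new drain.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical stand-ins for vmap_lazy_nr and lazy_max_pages(). */
static atomic_long lazy_nr;
static long lazy_max = 64;

/* Hypothetical stand-in for drain_vmap_area_work_in_progress. */
static atomic_int drain_in_progress;

/* Stand-in for __purge_vmap_area_lazy(): drop the whole backlog. */
static void purge_all(void)
{
	atomic_store(&lazy_nr, 0);
}

/* Deferred worker, analogous to drain_vmap_area() in the patch. */
static void *drain_worker(void *arg)
{
	pthread_t t;

	(void)arg;
	purge_all();

	/*
	 * Re-check after purging: if new lazy pages accumulated above
	 * the threshold, keep the worker armed; otherwise clear the
	 * flag so a later caller can start a new drain.
	 */
	if (atomic_load(&lazy_nr) > lazy_max) {
		pthread_create(&t, NULL, drain_worker, NULL);
		pthread_detach(t);
	} else {
		atomic_store(&drain_in_progress, 0);
	}
	return NULL;
}

/*
 * Analogous to the free_vmap_area_noflush() hunk: only the caller
 * that flips the flag 0 -> 1 actually schedules the worker.
 */
static void maybe_schedule_drain(void)
{
	pthread_t t;

	if (atomic_load(&lazy_nr) > lazy_max &&
	    !atomic_exchange(&drain_in_progress, 1)) {
		pthread_create(&t, NULL, drain_worker, NULL);
		pthread_detach(t);
	}
}

int main(void)
{
	for (int i = 0; i < 1000; i++) {
		atomic_fetch_add(&lazy_nr, 1);
		maybe_schedule_drain();
	}
	sleep(1); /* let any outstanding worker finish */
	printf("remaining lazy pages: %ld\n", atomic_load(&lazy_nr));
	return 0;
}

Build with "cc -pthread" to try it. The only behavioural difference from the
patch is that a real workqueue never runs two instances of the same work item
concurrently, which is why the flag in the kernel version only has to stop
redundant schedule_work() calls, not guard the purge itself (that is what
vmap_purge_lock is for).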