Re: [PATCH v2 2/7] mm: vmscan: make global slab shrink lockless

From: Qi Zheng
Date: Sun Feb 26 2023 - 08:56:49 EST

On 2023/2/26 05:28, Kirill Tkhai wrote:
On 25.02.2023 19:37, Qi Zheng wrote:


On 2023/2/26 00:17, Kirill Tkhai wrote:
On 25.02.2023 18:57, Qi Zheng wrote:

<...>
How about this?

diff --git a/mm/vmscan.c b/mm/vmscan.c
index ffddbd204259..9d8c53075298 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1012,7 +1012,7 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 				 int priority)
 {
 	unsigned long ret, freed = 0;
-	struct shrinker *shrinker;
+	struct shrinker *shrinker = NULL;
 	int srcu_idx, generation;
 
 	/*
@@ -1025,11 +1025,15 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 	if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
 		return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
 
+again:
 	srcu_idx = srcu_read_lock(&shrinker_srcu);
 
 	generation = atomic_read(&shrinker_srcu_generation);
-	list_for_each_entry_srcu(shrinker, &shrinker_list, list,
-				 srcu_read_lock_held(&shrinker_srcu)) {
+	if (!shrinker)
+		shrinker = list_entry_rcu(shrinker_list.next, struct shrinker, list);
+	else
+		shrinker = list_entry_rcu(shrinker->list.next, struct shrinker, list);
+	list_for_each_entry_from_rcu(shrinker, &shrinker_list, list) {
 		struct shrink_control sc = {
 			.gfp_mask = gfp_mask,
 			.nid = nid,
@@ -1042,8 +1046,9 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 		freed += ret;
 
 		if (atomic_read(&shrinker_srcu_generation) != generation) {
-			freed = freed ? : 1;
-			break;
+			srcu_read_unlock(&shrinker_srcu, srcu_idx);

After SRCU is unlocked we can't trust @shrinker anymore. So, the
list_entry_rcu(shrinker->list.next) above dereferences some random memory.

Indeed.


+			cond_resched();
+			goto again;
 		}
 	}

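For clarity, a minimal sketch of the lifetime rule at play (illustrative only,
not part of either patch; it just reuses the identifiers from the diffs above):

	srcu_idx = srcu_read_lock(&shrinker_srcu);
	shrinker = list_entry_rcu(shrinker_list.next, struct shrinker, list);
	/* Safe: a concurrent unregister_shrinker() is still blocked in synchronize_srcu(). */
	...
	srcu_read_unlock(&shrinker_srcu, srcu_idx);
	/*
	 * Unsafe: unregister_shrinker() may now return and free the shrinker,
	 * so shrinker->list.next is a potential use-after-free.  A restart has
	 * to begin from shrinker_list again (or, in the memcg case, from a
	 * saved bitmap index), never from the stale pointer.
	 */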

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 27ef9946ae8a..0b197bba1257 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -204,6 +204,7 @@ static void set_task_reclaim_state(struct task_struct *task,
 LIST_HEAD(shrinker_list);
 DEFINE_MUTEX(shrinker_mutex);
 DEFINE_SRCU(shrinker_srcu);
+static atomic_t shrinker_srcu_generation = ATOMIC_INIT(0);
 
 #ifdef CONFIG_MEMCG
 static int shrinker_nr_max;
@@ -782,6 +783,7 @@ void unregister_shrinker(struct shrinker *shrinker)
 	debugfs_entry = shrinker_debugfs_remove(shrinker);
 	mutex_unlock(&shrinker_mutex);
 
+	atomic_inc(&shrinker_srcu_generation);
 	synchronize_srcu(&shrinker_srcu);
 
 	debugfs_remove_recursive(debugfs_entry);
@@ -799,6 +801,7 @@ EXPORT_SYMBOL(unregister_shrinker);
  */
 void synchronize_shrinkers(void)
 {
+	atomic_inc(&shrinker_srcu_generation);
 	synchronize_srcu(&shrinker_srcu);
 }
 EXPORT_SYMBOL(synchronize_shrinkers);
@@ -908,18 +911,19 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
 {
 	struct shrinker_info *info;
 	unsigned long ret, freed = 0;
-	int srcu_idx;
-	int i;
+	int srcu_idx, generation;
+	int i = 0;
 
 	if (!mem_cgroup_online(memcg))
 		return 0;
-
+again:
 	srcu_idx = srcu_read_lock(&shrinker_srcu);
 	info = shrinker_info_srcu(memcg, nid);
 	if (unlikely(!info))
 		goto unlock;
 
-	for_each_set_bit(i, info->map, info->map_nr_max) {
+	generation = atomic_read(&shrinker_srcu_generation);
+	for_each_set_bit_from(i, info->map, info->map_nr_max) {
 		struct shrink_control sc = {
 			.gfp_mask = gfp_mask,
 			.nid = nid,
@@ -965,6 +969,11 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
 				set_shrinker_bit(memcg, nid, i);
 		}
 		freed += ret;
+
+		if (atomic_read(&shrinker_srcu_generation) != generation) {
+			srcu_read_unlock(&shrinker_srcu, srcu_idx);

Maybe we can add the following code here, so that we don't repeat the
current id and don't risk triggering a softlockup:

              i++;

This is OK.

              cond_resched();

Possibly; the existing cond_resched() in do_shrink_slab() is enough.

Yeah.

I will add this patch in the next version. May I mark you as the author
of this patch?

I think, yes

Thanks. :)

Qi



And this. :)


+			goto again;
+		}
 	}
 unlock:
 	srcu_read_unlock(&shrinker_srcu, srcu_idx);
@@ -1004,7 +1013,7 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 {
 	unsigned long ret, freed = 0;
 	struct shrinker *shrinker;
-	int srcu_idx;
+	int srcu_idx, generation;
 
 	/*
 	 * The root memcg might be allocated even though memcg is disabled
@@ -1017,6 +1026,7 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 		return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
 
 	srcu_idx = srcu_read_lock(&shrinker_srcu);
+	generation = atomic_read(&shrinker_srcu_generation);
 
 	list_for_each_entry_srcu(shrinker, &shrinker_list, list,
 				 srcu_read_lock_held(&shrinker_srcu)) {
@@ -1030,6 +1040,11 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 		if (ret == SHRINK_EMPTY)
 			ret = 0;
 		freed += ret;
+
+		if (atomic_read(&shrinker_srcu_generation) != generation) {
+			freed = freed ? : 1;
+			break;
+		}
 	}
 
 	srcu_read_unlock(&shrinker_srcu, srcu_idx);

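Putting the discussion together, a rough sketch of how the restart path in
shrink_slab_memcg() might look with the suggested i++ folded into the patch
above (illustrative only; the exact form is up to the next version of the
series):

		if (atomic_read(&shrinker_srcu_generation) != generation) {
			srcu_read_unlock(&shrinker_srcu, srcu_idx);
			/*
			 * Move past the bit we just processed so the restarted
			 * for_each_set_bit_from() walk makes forward progress
			 * instead of retrying the same id (and possibly
			 * triggering a softlockup).
			 */
			i++;
			goto again;
		}

The extra cond_resched() is omitted here since, as noted above, the existing
one in do_shrink_slab() already covers that path.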

--
Thanks,
Qi