[PATCH 22/34] mm: page-replace-shrink-new.patch

From: Peter Zijlstra
Date: Wed Mar 22 2006 - 17:33:21 EST



From: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>

Add a general shrinker that policies can make use of.
A policy that implements its own shrinker defines MM_POLICY_HAS_SHRINKER
to opt out of this generic framework; policies that leave it undefined
get the shrinker provided here.

API:
unsigned long __page_replace_nr_scan(struct zone *);

returns the number of pages on the scan list for @zone (note: the
declaration is currently commented out in mm_page_replace.h).

void page_replace_candidates(struct zone *, int, struct list_head *);

fill the @list with at most @nr pages from @zone.

void page_replace_reinsert_zone(struct zone *, struct list_head *, int);

reinserts the pages remaining on @list (those that could not be freed)
back into @zone; @nr is the number of pages that were successfully freed.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
Signed-off-by: Marcelo Tosatti <marcelo.tosatti@xxxxxxxxxxxx>

---

include/linux/mm_page_replace.h | 6 +++++
include/linux/mm_use_once_policy.h | 2 +
mm/vmscan.c | 43 +++++++++++++++++++++++++++++++++++++
3 files changed, 51 insertions(+)

Index: linux-2.6-git/include/linux/mm_page_replace.h
===================================================================
--- linux-2.6-git.orig/include/linux/mm_page_replace.h
+++ linux-2.6-git/include/linux/mm_page_replace.h
@@ -128,5 +128,11 @@ static inline void page_replace_add_drai
put_cpu();
}

+#if ! defined MM_POLICY_HAS_SHRINKER
+/* unsigned long __page_replace_nr_scan(struct zone *); */
+void page_replace_candidates(struct zone *, int, struct list_head *);
+void page_replace_reinsert_zone(struct zone *, struct list_head *, int);
+#endif
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_PAGE_REPLACE_H */
Index: linux-2.6-git/mm/vmscan.c
===================================================================
--- linux-2.6-git.orig/mm/vmscan.c
+++ linux-2.6-git/mm/vmscan.c
@@ -958,6 +958,49 @@ int should_reclaim_mapped(struct zone *z
return 0;
}

+#if ! defined MM_POLICY_HAS_SHRINKER
+void page_replace_shrink(struct zone *zone, struct scan_control *sc)
+{
+ unsigned long nr_scan = 0;
+
+ atomic_inc(&zone->reclaim_in_progress);
+
+ if (unlikely(sc->swap_cluster_max > SWAP_CLUSTER_MAX)) {
+ nr_scan = zone->policy.nr_scan;
+ zone->policy.nr_scan =
+ sc->swap_cluster_max + SWAP_CLUSTER_MAX - 1;
+ } else
+ zone->policy.nr_scan +=
+ (__page_replace_nr_scan(zone) >> sc->priority) + 1;
+
+ while (zone->policy.nr_scan >= SWAP_CLUSTER_MAX) {
+ LIST_HEAD(page_list);
+ int nr_freed;
+
+ zone->policy.nr_scan -= SWAP_CLUSTER_MAX;
+ page_replace_candidates(zone, SWAP_CLUSTER_MAX, &page_list);
+ if (list_empty(&page_list))
+ continue;
+
+ nr_freed = shrink_list(&page_list, sc);
+
+ local_irq_disable();
+ if (current_is_kswapd())
+ __mod_page_state(kswapd_steal, nr_freed);
+ __mod_page_state_zone(zone, pgsteal, nr_freed);
+ local_irq_enable();
+
+ page_replace_reinsert_zone(zone, &page_list, nr_freed);
+ }
+ if (nr_scan)
+ zone->policy.nr_scan = nr_scan;
+
+ atomic_dec(&zone->reclaim_in_progress);
+
+ throttle_vm_writeout();
+}
+#endif
+
/*
* This is the direct reclaim path, for page-allocating processes. We only
* try to reclaim pages from zones which will satisfy the caller's allocation
Index: linux-2.6-git/include/linux/mm_use_once_policy.h
===================================================================
--- linux-2.6-git.orig/include/linux/mm_use_once_policy.h
+++ linux-2.6-git/include/linux/mm_use_once_policy.h
@@ -169,5 +169,7 @@ static inline unsigned long __page_repla
return zone->policy.nr_active + zone->policy.nr_inactive;
}

+#define MM_POLICY_HAS_SHRINKER
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_USEONCE_POLICY_H */
-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/