[PATCH] factor out LRU isolation accounting
From: Minchan Kim
Date: Tue Jan 03 2017 - 20:04:36 EST
Not-yet-signed-off-by: Minchan Kim <minchan@xxxxxxxxxx>
---
include/trace/events/vmscan.h | 14 +++++----
mm/vmscan.c | 68 ++++++++++++++++++-------------------------
2 files changed, 37 insertions(+), 45 deletions(-)
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index 79b3cd9c7048..5fc3a94a14cd 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -364,14 +364,15 @@ TRACE_EVENT(mm_vmscan_writepage,
TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
TP_PROTO(int nid,
- unsigned long nr_scanned, unsigned long nr_reclaimed,
+ unsigned long nr_taken,
+ unsigned long nr_reclaimed,
int priority, int file),
- TP_ARGS(nid, nr_scanned, nr_reclaimed, priority, file),
+ TP_ARGS(nid, nr_taken, nr_reclaimed, priority, file),
TP_STRUCT__entry(
__field(int, nid)
- __field(unsigned long, nr_scanned)
+ __field(unsigned long, nr_taken)
__field(unsigned long, nr_reclaimed)
__field(int, priority)
__field(int, reclaim_flags)
@@ -379,15 +380,16 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
TP_fast_assign(
__entry->nid = nid;
- __entry->nr_scanned = nr_scanned;
+ __entry->nr_taken = nr_taken;
__entry->nr_reclaimed = nr_reclaimed;
__entry->priority = priority;
__entry->reclaim_flags = trace_shrink_flags(file);
),
- TP_printk("nid=%d nr_scanned=%ld nr_reclaimed=%ld priority=%d flags=%s",
+ TP_printk("nid=%d nr_taken=%ld nr_reclaimed=%ld priority=%d flags=%s",
__entry->nid,
- __entry->nr_scanned, __entry->nr_reclaimed,
+ __entry->nr_taken,
+ __entry->nr_reclaimed,
__entry->priority,
show_reclaim_flags(__entry->reclaim_flags))
);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 37ccd4e0b349..74f55f39f963 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1454,16 +1454,16 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
* @nr_to_scan: The number of pages to look through on the list.
* @lruvec: The LRU vector to pull pages from.
* @dst: The temp list to put pages on to.
- * @nr_scanned: The number of pages that were scanned.
* @sc: The scan_control struct for this reclaim session
* @mode: One of the LRU isolation modes
* @lru: LRU list id for isolating
*
* returns how many pages were moved onto *@dst.
*/
-static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
+static unsigned long isolate_lru_pages(struct pglist_data *pgdat,
+ unsigned long nr_to_scan,
struct lruvec *lruvec, struct list_head *dst,
- unsigned long *nr_scanned, struct scan_control *sc,
+ struct scan_control *sc,
isolate_mode_t mode, enum lru_list lru)
{
struct list_head *src = &lruvec->lists[lru];
@@ -1471,8 +1471,11 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
unsigned long scan, nr_pages;
+ struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
LIST_HEAD(pages_skipped);
+ int file = is_file_lru(lru);
+ spin_lock_irq(&pgdat->lru_lock);
for (scan = 0; scan < nr_to_scan && nr_taken < nr_to_scan &&
!list_empty(src);) {
struct page *page;
@@ -1540,10 +1543,25 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
list_splice(&pages_skipped, src);
}
- *nr_scanned = scan;
trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, scan,
nr_taken, mode, is_file_lru(lru));
update_lru_sizes(lruvec, lru, nr_zone_taken, nr_taken);
+
+ __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
+ reclaim_stat->recent_scanned[file] += nr_taken;
+
+ if (global_reclaim(sc))
+ __mod_node_page_state(pgdat, NR_PAGES_SCANNED, scan);
+ if (is_active_lru(lru)) {
+ __count_vm_events(PGREFILL, scan);
+ } else {
+ if (current_is_kswapd())
+ __count_vm_events(PGSCAN_KSWAPD, scan);
+ else
+ __count_vm_events(PGSCAN_DIRECT, scan);
+ }
+ spin_unlock_irq(&pgdat->lru_lock);
+
return nr_taken;
}
@@ -1735,7 +1753,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
struct scan_control *sc, enum lru_list lru)
{
LIST_HEAD(page_list);
- unsigned long nr_scanned;
unsigned long nr_reclaimed = 0;
unsigned long nr_taken;
unsigned long nr_dirty = 0;
@@ -1746,7 +1763,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
isolate_mode_t isolate_mode = 0;
int file = is_file_lru(lru);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
- struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
if (!inactive_reclaimable_pages(lruvec, sc, lru))
return 0;
@@ -1766,23 +1782,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
if (!sc->may_writepage)
isolate_mode |= ISOLATE_CLEAN;
- spin_lock_irq(&pgdat->lru_lock);
-
- nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
- &nr_scanned, sc, isolate_mode, lru);
-
- __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
- reclaim_stat->recent_scanned[file] += nr_taken;
-
- if (global_reclaim(sc)) {
- __mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
- if (current_is_kswapd())
- __count_vm_events(PGSCAN_KSWAPD, nr_scanned);
- else
- __count_vm_events(PGSCAN_DIRECT, nr_scanned);
- }
- spin_unlock_irq(&pgdat->lru_lock);
-
+ nr_taken = isolate_lru_pages(pgdat, nr_to_scan, lruvec, &page_list,
+ sc, isolate_mode, lru);
if (nr_taken == 0)
return 0;
@@ -1866,7 +1867,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
wait_iff_congested(pgdat, BLK_RW_ASYNC, HZ/10);
trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
- nr_scanned, nr_reclaimed,
+ nr_taken,
+ nr_reclaimed,
sc->priority, file);
return nr_reclaimed;
}
@@ -1943,18 +1945,17 @@ static void shrink_active_list(unsigned long nr_to_scan,
enum lru_list lru)
{
unsigned long nr_taken;
- unsigned long nr_scanned;
unsigned long vm_flags;
LIST_HEAD(l_hold); /* The pages which were snipped off */
LIST_HEAD(l_active);
LIST_HEAD(l_inactive);
struct page *page;
- struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
unsigned nr_deactivate, nr_activate;
unsigned nr_rotated = 0;
isolate_mode_t isolate_mode = 0;
int file = is_file_lru(lru);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+ struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
lru_add_drain();
@@ -1963,19 +1964,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
if (!sc->may_writepage)
isolate_mode |= ISOLATE_CLEAN;
- spin_lock_irq(&pgdat->lru_lock);
-
- nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
- &nr_scanned, sc, isolate_mode, lru);
-
- __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
- reclaim_stat->recent_scanned[file] += nr_taken;
-
- if (global_reclaim(sc))
- __mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
- __count_vm_events(PGREFILL, nr_scanned);
-
- spin_unlock_irq(&pgdat->lru_lock);
+ nr_taken = isolate_lru_pages(pgdat, nr_to_scan, lruvec, &l_hold,
+ sc, isolate_mode, lru);
while (!list_empty(&l_hold)) {
cond_resched();
--
2.7.4
With this,
./scripts/bloat-o-meter vmlinux.old vmlinux.new.new
add/remove: 1/1 grow/shrink: 0/9 up/down: 1394/-1636 (-242)
function old new delta
isolate_lru_pages - 1394 +1394
print_fmt_mm_vmscan_lru_shrink_inactive 359 355 -4
vermagic 64 58 -6
perf_trace_mm_vmscan_lru_shrink_active 264 256 -8
trace_raw_output_mm_vmscan_lru_shrink_active 203 193 -10
trace_event_raw_event_mm_vmscan_lru_shrink_active 241 225 -16
print_fmt_mm_vmscan_lru_shrink_active 458 426 -32
trace_event_define_fields_mm_vmscan_lru_shrink_active 384 336 -48
shrink_inactive_list 1430 1271 -159
shrink_active_list 1265 1082 -183
isolate_lru_pages.isra 1170 - -1170
Total: Before=26268743, After=26268501, chg -0.00%
We can save 242 bytes of text. In terms of the on-disk binary size, it saves 424 bytes:
#> ls -l vmlinux.old vmlinux.new.new
194092840 vmlinux.old
194092416 vmlinux.new.new
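For illustration, with this change a mm_vmscan_lru_shrink_inactive event in the trace output looks like the line below; the field values are made up, but the layout follows the TP_printk format in the hunk above:

  kswapd0-52    [001] ....   123.456789: mm_vmscan_lru_shrink_inactive: nid=0 nr_taken=32 nr_reclaimed=27 priority=12 flags=RECLAIM_WB_FILE|RECLAIM_WB_ASYNC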
> with other tracepoints but that can be helpful because you do not have
> all the tracepoints enabled all the time. So unless you see this
> particular thing as a roadblock I would rather keep it.
I didn't expect this thread to become so lengthy; to me, it wasn't worth this
much discussion. I have done my best to explain my position with what I
believe are valid points, and I don't want to go around in an infinite loop.
If you still disagree, please split the patch: one patch with only the
necessary changes, removing nr_scanned, which I am happy to ack. On top of
it, add another patch that re-adds nr_scanned along with your rationale. I
will reply to that thread with mine, and let's see whether the maintainer
takes it or not. If the maintainer takes it, that is a good indication that
we can easily add extra tracepoints on the grounds of "it might be helpful
to someone even though it's redundant", and we should not stand in the way
of others who want to do the same in the future.
>
> > > The inactive counterpart has done that for quite some time already. So why
> >
> > That by itself is not a reason. If the work was already duplicated there,
> > it would be better to fix that rather than add more duplicated work just
> > to make both sides match.
>
> I really do not see this as a bad thing.
>
> > > exactly does that matter? Don't get me wrong, but isn't this more on the
> > > nitpicking side than necessary? Or do I just misunderstand your
> > > concerns? It is not like we are providing a stable user API as the
> >
> > My concern is that I don't see what benefit we get from that duplicated
> > work. If it doesn't give us any benefit, I don't want to add it. I hope
> > you can come up with other, more convincing reasons.
> >
> > > tracepoint is clearly implementation specific and not something to be
> > > used for anything other than debugging.
> >
> > My point is that we already have something for "LRU isolation
> > effectiveness": namely, isolate_lru_pages.
> >
> > >
> > > > > - nr_rotated pages, which tell us that we are hitting referenced
> > > > > pages that are being deactivated. If these are a large part of the
> > > > > reported nr_deactivated pages then the active list is too small
> > > >
> > > > It might be, but not exactly. If your goal is to know the LRU size, it
> > > > can be done in get_scan_count. I tend to agree that the LRU size is
> > > > helpful for performance analysis, because a shrinking LRU signals a
> > > > memory shortage and, in turn, a performance drop.
> > >
> > > No, I am not really interested in the exact size but rather in being
> > > able to tell whether we are aging the active list too early...
> >
> > Could you elaborate on how nr_rotated tells us that the active list is
> > being aged too early?
>
> If you see too many referenced pages on the active list then they have
> been used since they were promoted, and that is an indication that they
> might be reclaimed too early. If you are debugging a performance issue
> and see this happening then it might be a good place to look.
This is better than "the active list is too small". I hope you update the
description with this wording; the loop in question is sketched below for
reference.
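For readers following along, the nr_rotated accounting we are discussing
lives in shrink_active_list(). This is a simplified sketch of the current
loop (the real code also skips unevictable pages and calls cond_resched()):

	/*
	 * Pages found referenced while sitting on the active list bump
	 * nr_rotated; referenced executable file pages are rotated back
	 * to the active list, everything else is deactivated. A high
	 * nr_rotated relative to nr_deactivate means we are aging the
	 * active list too aggressively.
	 */
	while (!list_empty(&l_hold)) {
		page = lru_to_page(&l_hold);
		list_del(&page->lru);

		if (page_referenced(page, 0, sc->target_mem_cgroup,
				    &vm_flags)) {
			nr_rotated += hpage_nr_pages(page);
			if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
				list_add(&page->lru, &l_active);
				continue;
			}
		}

		ClearPageActive(page);	/* we are de-activating */
		list_add(&page->lru, &l_inactive);
	}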