[PATCH] mm: remove lock_page_memcg() from rmap

From: Johannes Weiner
Date: Mon Nov 07 2022 - 12:05:09 EST


rmap changes (mapping and unmapping) of a page currently take
lock_page_memcg() to serialize 1) the update of the mapcount and the
cgroup mapped counter with 2) the cgroup-move path, which moves the
page and updates the old and new cgroups' counters based on
page_mapped().
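
For reference, here is a heavily simplified sketch (not the actual
code, trimmed down to the file-mapped counter) of the two sides that
the lock currently serializes:

	/* rmap side, e.g. page_add_file_rmap(), simplified */
	lock_page_memcg(page);
	if (atomic_inc_and_test(&page->_mapcount))
		__mod_lruvec_page_state(page, NR_FILE_MAPPED, 1);
	unlock_page_memcg(page);

	/* move side, mem_cgroup_move_account(), simplified */
	folio_memcg_lock(folio);
	if (folio_mapped(folio)) {
		__mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
		__mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
	}
	folio->memcg_data = (unsigned long)to;	/* switch cgroups */
	/* the real code then drops the memcg lock of "from" */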

Before b2052564e66d ("mm: memcontrol: continue cache reclaim from
offlined groups"), we used to reassign all pages that could be found
on a cgroup's LRU list on deletion - something that rmap didn't
naturally serialize against. Since that commit, however, the only
pages that get moved are those mapped into page tables of a task
that's being migrated. In that case, the pte lock is always held (and
we know the page is mapped), which keeps rmap changes at bay already.
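
For illustration, a heavily simplified sketch of that move path (not
the actual code; the get_mctgt_type() classification and all error
handling are omitted):

	/* mem_cgroup_move_charge_pte_range(), roughly */
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; addr += PAGE_SIZE, pte++) {
		struct page *page = vm_normal_page(vma, addr, *pte);

		/* the real code also checks page_memcg(page) == mc.from */
		if (page)
			mem_cgroup_move_account(page, false, mc.from, mc.to);
	}
	pte_unmap_unlock(pte - 1, ptl);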

The additional lock_page_memcg() taken by rmap is therefore
redundant. Remove it.

NOT-Signed-off-by: Johannes Weiner <hannes@xxxxxxxxxxx>
---
mm/memcontrol.c | 27 ++++++++++++---------------
mm/rmap.c | 30 ++++++++++++------------------
2 files changed, 24 insertions(+), 33 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2d8549ae1b30..f7716e9038e9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5666,7 +5666,10 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
* @from: mem_cgroup which the page is moved from.
* @to: mem_cgroup which the page is moved to. @from != @to.
*
- * The caller must make sure the page is not on LRU (isolate_page() is useful.)
+ * This function acquires folio_lock() and folio_memcg_lock(). The
+ * caller must exclude all other possible ways of accessing
+ * page->memcg, such as LRU isolation (to lock out isolation) and
+ * having the page mapped and pte-locked (to lock out rmap).
*
* This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
* from old cgroup.
@@ -5685,6 +5688,7 @@ static int mem_cgroup_move_account(struct page *page,
VM_BUG_ON(from == to);
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
VM_BUG_ON(compound && !folio_test_large(folio));
+ VM_WARN_ON_ONCE(!folio_mapped(folio));

/*
* Prevent mem_cgroup_migrate() from looking at
@@ -5705,30 +5709,23 @@ static int mem_cgroup_move_account(struct page *page,
folio_memcg_lock(folio);

if (folio_test_anon(folio)) {
- if (folio_mapped(folio)) {
- __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
- __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
- if (folio_test_transhuge(folio)) {
- __mod_lruvec_state(from_vec, NR_ANON_THPS,
- -nr_pages);
- __mod_lruvec_state(to_vec, NR_ANON_THPS,
- nr_pages);
- }
+ __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
+ __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
+ if (folio_test_transhuge(folio)) {
+ __mod_lruvec_state(from_vec, NR_ANON_THPS, -nr_pages);
+ __mod_lruvec_state(to_vec, NR_ANON_THPS, nr_pages);
}
} else {
__mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
__mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
+ __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
+ __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);

if (folio_test_swapbacked(folio)) {
__mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
__mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
}

- if (folio_mapped(folio)) {
- __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
- __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
- }
-
if (folio_test_dirty(folio)) {
struct address_space *mapping = folio_mapping(folio);

diff --git a/mm/rmap.c b/mm/rmap.c
index 2ec925e5fa6a..60c31375f274 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1197,11 +1197,6 @@ void page_add_anon_rmap(struct page *page,
bool compound = flags & RMAP_COMPOUND;
bool first;

- if (unlikely(PageKsm(page)))
- lock_page_memcg(page);
- else
- VM_BUG_ON_PAGE(!PageLocked(page), page);
-
if (compound) {
atomic_t *mapcount;
VM_BUG_ON_PAGE(!PageLocked(page), page);
@@ -1217,19 +1212,19 @@ void page_add_anon_rmap(struct page *page,
if (first) {
int nr = compound ? thp_nr_pages(page) : 1;
/*
- * We use the irq-unsafe __{inc|mod}_zone_page_stat because
- * these counters are not modified in interrupt context, and
- * pte lock(a spinlock) is held, which implies preemption
- * disabled.
+ * We use the irq-unsafe __{inc|mod}_zone_page_stat
+ * because these counters are not modified in
+ * interrupt context, and pte lock(a spinlock) is
+ * held, which implies preemption disabled.
+ *
+ * The pte lock also stabilizes page->memcg wrt
+ * mem_cgroup_move_account().
*/
if (compound)
__mod_lruvec_page_state(page, NR_ANON_THPS, nr);
__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
}

- if (unlikely(PageKsm(page)))
- unlock_page_memcg(page);
-
/* address might be in next vma when migration races vma_adjust */
- else if (first)
+ if (first)
__page_set_anon_rmap(page, vma, address,
@@ -1290,7 +1285,6 @@ void page_add_file_rmap(struct page *page,
int i, nr = 0;

VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
- lock_page_memcg(page);
if (compound && PageTransHuge(page)) {
int nr_pages = thp_nr_pages(page);

@@ -1311,6 +1305,7 @@ void page_add_file_rmap(struct page *page,
if (nr == nr_pages && PageDoubleMap(page))
ClearPageDoubleMap(page);

+ /* The pte lock stabilizes page->memcg */
if (PageSwapBacked(page))
__mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED,
nr_pages);
@@ -1328,7 +1323,6 @@ void page_add_file_rmap(struct page *page,
out:
if (nr)
__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
- unlock_page_memcg(page);

mlock_vma_page(page, vma, compound);
}
@@ -1356,6 +1350,7 @@ static void page_remove_file_rmap(struct page *page, bool compound)
}
if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
goto out;
+ /* The pte lock stabilizes page->memcg */
if (PageSwapBacked(page))
__mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED,
-nr_pages);
@@ -1423,8 +1418,6 @@ static void page_remove_anon_compound_rmap(struct page *page)
void page_remove_rmap(struct page *page,
struct vm_area_struct *vma, bool compound)
{
- lock_page_memcg(page);
-
if (!PageAnon(page)) {
page_remove_file_rmap(page, compound);
goto out;
@@ -1443,6 +1436,9 @@ void page_remove_rmap(struct page *page,
* We use the irq-unsafe __{inc|mod}_zone_page_stat because
* these counters are not modified in interrupt context, and
* pte lock(a spinlock) is held, which implies preemption disabled.
+ *
+ * The pte lock also stabilizes page->memcg wrt
+ * mem_cgroup_move_account().
*/
__dec_lruvec_page_state(page, NR_ANON_MAPPED);

@@ -1459,8 +1455,6 @@ void page_remove_rmap(struct page *page,
* faster for those pages still in swapcache.
*/
out:
- unlock_page_memcg(page);
-
munlock_vma_page(page, vma, compound);
}

--
2.38.1