[PATCH 3/4] Factor zap_pte() out of zap_pte_range()

From: Matthew Wilcox
Date: Fri Jul 25 2014 - 09:46:01 EST


zap_pte() can be called while the PTE lock is held, which a follow-on
patch relies on. This patch *only* moves code into a separate
function; the other changes needed to make zap_pte() usable on its
own come in subsequent patches.
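
For illustration only (not part of this patch), here is a rough sketch
of the kind of caller the follow-on patches have in mind: zapping a
single PTE with the PTE lock already held. The caller name is made up,
and it assumes the code lives in mm/memory.c so that the static helpers
init_rss_vec()/add_mm_rss_vec() and the new zap_pte() are visible; the
real follow-on patch may differ.

/*
 * Hypothetical caller -- illustrative only, not part of this patch.
 * Zaps a single PTE while holding the PTE lock, then repeats the rss
 * accounting that zap_pte_range() does for a whole range.  A "true"
 * return from zap_pte() (mmu_gather batch full) also sets force_flush,
 * so checking force_flush afterwards covers that case as well.
 */
static void zap_one_pte_sketch(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr)
{
	int rss[NR_MM_COUNTERS];
	int force_flush = 0;
	spinlock_t *ptl;
	pte_t *pte;

	init_rss_vec(rss);
	pte = pte_offset_map_lock(tlb->mm, pmd, addr, &ptl);
	/* details == NULL: unmap unconditionally, as for munmap */
	zap_pte(tlb, vma, pte, addr, NULL, rss, &force_flush);
	pte_unmap_unlock(pte, ptl);

	add_mm_rss_vec(tlb->mm, rss);
	/*
	 * Note: zap_pte_range() does a TLB-only flush before dropping
	 * the PTE lock when force_flush is set; a real caller would
	 * need the same care for dirty shared pages.
	 */
	if (force_flush)
		tlb_flush_mmu(tlb);
}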
---
mm/memory.c | 190 ++++++++++++++++++++++++++++++++----------------------------
1 file changed, 101 insertions(+), 89 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index cf06c97..6a35f98 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1071,6 +1071,105 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	return ret;
 }
 
+/* Returns true to break out of the loop */
+static bool zap_pte(struct mmu_gather *tlb, struct vm_area_struct *vma,
+			pte_t *pte, unsigned long addr,
+			struct zap_details *details, int *rss,
+			int *force_flush)
+{
+	struct mm_struct *mm = tlb->mm;
+	pte_t ptent = *pte;
+
+	if (pte_none(ptent))
+		return false;
+
+	if (pte_present(ptent)) {
+		struct page *page;
+
+		page = vm_normal_page(vma, addr, ptent);
+		if (unlikely(details) && page) {
+			/*
+			 * unmap_shared_mapping_pages() wants to
+			 * invalidate cache without truncating:
+			 * unmap shared but keep private pages.
+			 */
+			if (details->check_mapping &&
+			    details->check_mapping != page->mapping)
+				return false;
+			/*
+			 * Each page->index must be checked when
+			 * invalidating or truncating nonlinear.
+			 */
+			if (details->nonlinear_vma &&
+			    (page->index < details->first_index ||
+			     page->index > details->last_index))
+				return false;
+		}
+		ptent = ptep_get_and_clear_full(mm, addr, pte,
+						tlb->fullmm);
+		tlb_remove_tlb_entry(tlb, pte, addr);
+		if (unlikely(!page))
+			return false;
+		if (unlikely(details) && details->nonlinear_vma
+		    && linear_page_index(details->nonlinear_vma,
+					addr) != page->index) {
+			pte_t ptfile = pgoff_to_pte(page->index);
+			if (pte_soft_dirty(ptent))
+				pte_file_mksoft_dirty(ptfile);
+			set_pte_at(mm, addr, pte, ptfile);
+		}
+		if (PageAnon(page))
+			rss[MM_ANONPAGES]--;
+		else {
+			if (pte_dirty(ptent)) {
+				*force_flush = 1;
+				set_page_dirty(page);
+			}
+			if (pte_young(ptent) &&
+			    likely(!(vma->vm_flags & VM_SEQ_READ)))
+				mark_page_accessed(page);
+			rss[MM_FILEPAGES]--;
+		}
+		page_remove_rmap(page);
+		if (unlikely(page_mapcount(page) < 0))
+			print_bad_pte(vma, addr, ptent, page);
+		if (unlikely(!__tlb_remove_page(tlb, page))) {
+			*force_flush = 1;
+			return true;
+		}
+		return false;
+	}
+	/*
+	 * If details->check_mapping, we leave swap entries;
+	 * if details->nonlinear_vma, we leave file entries.
+	 */
+	if (unlikely(details))
+		return false;
+	if (pte_file(ptent)) {
+		if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
+			print_bad_pte(vma, addr, ptent, NULL);
+	} else {
+		swp_entry_t entry = pte_to_swp_entry(ptent);
+
+		if (!non_swap_entry(entry))
+			rss[MM_SWAPENTS]--;
+		else if (is_migration_entry(entry)) {
+			struct page *page;
+
+			page = migration_entry_to_page(entry);
+
+			if (PageAnon(page))
+				rss[MM_ANONPAGES]--;
+			else
+				rss[MM_FILEPAGES]--;
+		}
+		if (unlikely(!free_swap_and_cache(entry)))
+			print_bad_pte(vma, addr, ptent, NULL);
+	}
+	pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
+	return false;
+}
+
 static unsigned long zap_pte_range(struct mmu_gather *tlb,
 				struct vm_area_struct *vma, pmd_t *pmd,
 				unsigned long addr, unsigned long end,
@@ -1089,95 +1188,8 @@ again:
 	pte = start_pte;
 	arch_enter_lazy_mmu_mode();
 	do {
-		pte_t ptent = *pte;
-		if (pte_none(ptent)) {
-			continue;
-		}
-
-		if (pte_present(ptent)) {
-			struct page *page;
-
-			page = vm_normal_page(vma, addr, ptent);
-			if (unlikely(details) && page) {
-				/*
-				 * unmap_shared_mapping_pages() wants to
-				 * invalidate cache without truncating:
-				 * unmap shared but keep private pages.
-				 */
-				if (details->check_mapping &&
-				    details->check_mapping != page->mapping)
-					continue;
-				/*
-				 * Each page->index must be checked when
-				 * invalidating or truncating nonlinear.
-				 */
-				if (details->nonlinear_vma &&
-				    (page->index < details->first_index ||
-				     page->index > details->last_index))
-					continue;
-			}
-			ptent = ptep_get_and_clear_full(mm, addr, pte,
-							tlb->fullmm);
-			tlb_remove_tlb_entry(tlb, pte, addr);
-			if (unlikely(!page))
-				continue;
-			if (unlikely(details) && details->nonlinear_vma
-			    && linear_page_index(details->nonlinear_vma,
-						addr) != page->index) {
-				pte_t ptfile = pgoff_to_pte(page->index);
-				if (pte_soft_dirty(ptent))
-					pte_file_mksoft_dirty(ptfile);
-				set_pte_at(mm, addr, pte, ptfile);
-			}
-			if (PageAnon(page))
-				rss[MM_ANONPAGES]--;
-			else {
-				if (pte_dirty(ptent)) {
-					force_flush = 1;
-					set_page_dirty(page);
-				}
-				if (pte_young(ptent) &&
-				    likely(!(vma->vm_flags & VM_SEQ_READ)))
-					mark_page_accessed(page);
-				rss[MM_FILEPAGES]--;
-			}
-			page_remove_rmap(page);
-			if (unlikely(page_mapcount(page) < 0))
-				print_bad_pte(vma, addr, ptent, page);
-			if (unlikely(!__tlb_remove_page(tlb, page))) {
-				force_flush = 1;
-				break;
-			}
-			continue;
-		}
-		/*
-		 * If details->check_mapping, we leave swap entries;
-		 * if details->nonlinear_vma, we leave file entries.
-		 */
-		if (unlikely(details))
-			continue;
-		if (pte_file(ptent)) {
-			if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
-				print_bad_pte(vma, addr, ptent, NULL);
-		} else {
-			swp_entry_t entry = pte_to_swp_entry(ptent);
-
-			if (!non_swap_entry(entry))
-				rss[MM_SWAPENTS]--;
-			else if (is_migration_entry(entry)) {
-				struct page *page;
-
-				page = migration_entry_to_page(entry);
-
-				if (PageAnon(page))
-					rss[MM_ANONPAGES]--;
-				else
-					rss[MM_FILEPAGES]--;
-			}
-			if (unlikely(!free_swap_and_cache(entry)))
-				print_bad_pte(vma, addr, ptent, NULL);
-		}
-		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
+		if (zap_pte(tlb, vma, pte, addr, details, rss, &force_flush))
+			break;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 
 	add_mm_rss_vec(mm, rss);
--
2.0.1

