[PATCH V3] arm64/mm: Intercept pfn changes in set_pte_at()

From: Anshuman Khandual
Date: Mon Jan 30 2023 - 07:15:23 EST


Changing the pfn of a mapped user page table entry without first going
through the break-before-make (BBM) procedure is unsafe. This updates
set_pte_at() to intercept such changes via an updated
pgattr_change_is_safe(). The new check happens in
__check_racy_pte_update(), which has now been renamed
__check_safe_pte_update().
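
For context, a minimal sketch (illustrative only, not part of this patch;
the helper name is hypothetical) of the break-before-make sequence a
caller is expected to follow when the pfn of a live user mapping must
change. Skipping BBM and calling set_pte_at() directly with a different
pfn on a valid entry now trips the VM_WARN_ONCE() in
__check_safe_pte_update():

	/* Hypothetical helper, for illustration only */
	static void remap_live_pte(struct vm_area_struct *vma,
				   unsigned long addr, pte_t *ptep,
				   pte_t new_pte)
	{
		/* Break: clear the valid entry and flush stale TLB entries */
		ptep_clear_flush(vma, addr, ptep);

		/* Make: install the entry carrying the new pfn */
		set_pte_at(vma->vm_mm, addr, ptep, new_pte);
	}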

Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: linux-arm-kernel@xxxxxxxxxxxxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
Acked-by: Mark Rutland <mark.rutland@xxxxxxx>
Signed-off-by: Anshuman Khandual <anshuman.khandual@xxxxxxx>
---
This applies on v6.2-rc6

Changes in V3:

- Changed pgattr_change_is_safe() as suggested by Mark

Changes in V2:

https://lore.kernel.org/all/20230109052816.405335-1-anshuman.khandual@xxxxxxx/

Changes in V1:

https://lore.kernel.org/all/20221116031001.292236-1-anshuman.khandual@xxxxxxx/

 arch/arm64/include/asm/pgtable.h | 8 ++++++--
 arch/arm64/mm/mmu.c              | 8 ++++++--
 2 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 65e78999c75d..27455bfd64bc 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -275,6 +275,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 }
 
 extern void __sync_icache_dcache(pte_t pteval);
+bool pgattr_change_is_safe(u64 old, u64 new);
 
 /*
  * PTE bits configuration in the presence of hardware Dirty Bit Management
@@ -292,7 +293,7 @@ extern void __sync_icache_dcache(pte_t pteval);
  *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
  */
 
-static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
+static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep,
 					   pte_t pte)
 {
 	pte_t old_pte;
@@ -318,6 +319,9 @@ static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
 	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
 		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
 		     __func__, pte_val(old_pte), pte_val(pte));
+	VM_WARN_ONCE(!pgattr_change_is_safe(pte_val(old_pte), pte_val(pte)),
+		     "%s: unsafe attribute change: 0x%016llx -> 0x%016llx",
+		     __func__, pte_val(old_pte), pte_val(pte));
 }
 
 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
@@ -346,7 +350,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 		mte_sync_tags(old_pte, pte);
 	}
 
-	__check_racy_pte_update(mm, ptep, pte);
+	__check_safe_pte_update(mm, ptep, pte);
 
 	set_pte(ptep, pte);
 }
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index d77c9f56b7b4..6f9d8898a025 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -133,7 +133,7 @@ static phys_addr_t __init early_pgtable_alloc(int shift)
 	return phys;
 }
 
-static bool pgattr_change_is_safe(u64 old, u64 new)
+bool pgattr_change_is_safe(u64 old, u64 new)
 {
 	/*
 	 * The following mapping attributes may be updated in live
@@ -142,9 +142,13 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
 	pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
 
 	/* creating or taking down mappings is always safe */
-	if (old == 0 || new == 0)
+	if (!pte_valid(__pte(old)) || !pte_valid(__pte(new)))
 		return true;
 
+	/* A live entry's pfn should not change */
+	if (pte_pfn(__pte(old)) != pte_pfn(__pte(new)))
+		return false;
+
 	/* live contiguous mappings may not be manipulated at all */
 	if ((old | new) & PTE_CONT)
 		return false;
--
2.30.2