[PATCH 5/6] powerpc/64s: Do not disable preemption in lazy MMU mode
From: Alexander Gordeev
Date: Thu Jun 12 2025 - 13:37:08 EST
The preempt_disable()/preempt_enable() pair added by commit
b9ef323ea168 ("powerpc/64s: Disable preemption in hash lazy mmu
mode") is no longer necessary: lazy MMU mode is now entered with a
spinlock held, and since powerpc does not support Real-Time
(PREEMPT_RT), holding a spinlock already implies that preemption is
disabled. Assert this with VM_WARN_ON_ONCE(preemptible()) instead.
Signed-off-by: Alexander Gordeev <agordeev@xxxxxxxxxxxxx>
---
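For reviewers, here is a minimal sketch of the generic-mm caller
pattern the commit message relies on, loosely modeled on
apply_to_pte_range(); example_pte_walk() and its body are
illustrative only and not part of this patch:

/* Illustrative sketch, not part of this patch. */
static int example_pte_walk(struct mm_struct *mm, pmd_t *pmd,
			    unsigned long addr, unsigned long end)
{
	spinlock_t *ptl;
	/* pte_offset_map_lock() takes the PTE spinlock ... */
	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);

	if (!pte)
		return -EAGAIN;

	/*
	 * ... so on a !PREEMPT_RT kernel preemption is already off
	 * here, which is what VM_WARN_ON_ONCE(preemptible()) checks.
	 */
	arch_enter_lazy_mmu_mode();
	do {
		/* queue PTE updates into the per-CPU batch */
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();

	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}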
arch/powerpc/include/asm/book3s/64/tlbflush-hash.h | 13 ++++++-------
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
index 146287d9580f..aeac22b576c8 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -30,13 +30,9 @@ static inline void arch_enter_lazy_mmu_mode(void)
 {
 	struct ppc64_tlb_batch *batch;
 
+	VM_WARN_ON_ONCE(preemptible());
 	if (radix_enabled())
 		return;
-	/*
-	 * apply_to_page_range can call us this preempt enabled when
-	 * operating on kernel page tables.
-	 */
-	preempt_disable();
 	batch = this_cpu_ptr(&ppc64_tlb_batch);
 	batch->active = 1;
 }
@@ -45,6 +41,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
 {
 	struct ppc64_tlb_batch *batch;
 
+	VM_WARN_ON_ONCE(preemptible());
 	if (radix_enabled())
 		return;
 	batch = this_cpu_ptr(&ppc64_tlb_batch);
@@ -52,10 +49,12 @@ static inline void arch_leave_lazy_mmu_mode(void)
 	if (batch->index)
 		__flush_tlb_pending(batch);
 	batch->active = 0;
-	preempt_enable();
 }
 
-#define arch_flush_lazy_mmu_mode()	do {} while (0)
+static inline void arch_flush_lazy_mmu_mode(void)
+{
+	VM_WARN_ON_ONCE(preemptible());
+}
 
 extern void hash__tlbiel_all(unsigned int action);
 
--
2.48.1