gstage_set_pte() and gstage_op_pte() should flush the TLB only when a
leaf PTE actually changes, so that unnecessary TLB flushes can be
avoided. If the old and new PTE values are identical, the cached
translation is still valid and the remote TLB flush is redundant.

Signed-off-by: Anup Patel <apatel@xxxxxxxxxxxxxxxx>
---
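As a quick illustration of the compare-before-flush idea, here is a
minimal userspace sketch (not kernel code: pte_t is reduced to a u64,
and remote_tlb_flush()/set_and_flush() are hypothetical stand-ins for
the set_pte() plus gstage_remote_tlb_flush() sequence in mmu.c):

  #include <stdio.h>
  #include <stdint.h>

  typedef uint64_t pte_t;

  static unsigned long flushes;

  /* Hypothetical stand-in for gstage_remote_tlb_flush(). */
  static void remote_tlb_flush(void)
  {
      flushes++;
  }

  /* Write the PTE and flush only when the value actually changes. */
  static void set_and_flush(pte_t *ptep, pte_t new_val)
  {
      if (*ptep != new_val) {
          *ptep = new_val;
          remote_tlb_flush();
      }
  }

  int main(void)
  {
      pte_t pte = 0x1003;                 /* PPN plus permission bits */

      set_and_flush(&pte, 0x1003);        /* unchanged: no flush */
      set_and_flush(&pte, 0x1001);        /* write bit cleared: one flush */
      printf("flushes: %lu\n", flushes);  /* prints 1 */
      return 0;
  }

With the hunks below, an operation such as write-protecting a range
whose leaf PTEs are already read-only no longer issues a remote TLB
flush for every PTE it visits.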
arch/riscv/kvm/mmu.c | 14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 1087ea74567b..29f1bd853a66 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -167,9 +167,11 @@ static int gstage_set_pte(struct kvm *kvm, u32 level,
ptep = &next_ptep[gstage_pte_index(addr, current_level)];
}

- set_pte(ptep, *new_pte);
- if (gstage_pte_leaf(ptep))
- gstage_remote_tlb_flush(kvm, current_level, addr);
+ if (pte_val(*ptep) != pte_val(*new_pte)) {
+ set_pte(ptep, *new_pte);
+ if (gstage_pte_leaf(ptep))
+ gstage_remote_tlb_flush(kvm, current_level, addr);
+ }

return 0;
}
@@ -229,7 +231,7 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
pte_t *ptep, u32 ptep_level, enum gstage_op op)
{
int i, ret;
- pte_t *next_ptep;
+ pte_t old_pte, *next_ptep;
u32 next_ptep_level;
unsigned long next_page_size, page_size;

@@ -258,11 +260,13 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
if (op == GSTAGE_OP_CLEAR)
put_page(virt_to_page(next_ptep));
} else {
+ old_pte = *ptep;
if (op == GSTAGE_OP_CLEAR)
set_pte(ptep, __pte(0));
else if (op == GSTAGE_OP_WP)
set_pte(ptep, __pte(pte_val(ptep_get(ptep)) & ~_PAGE_WRITE));
- gstage_remote_tlb_flush(kvm, ptep_level, addr);
+ if (pte_val(*ptep) != pte_val(old_pte))
+ gstage_remote_tlb_flush(kvm, ptep_level, addr);
}
}