[PATCH v2] x86/mm/tlb: Skip tracing when flush is not done

From: Nadav Amit
Date: Mon Jul 11 2022 - 20:36:16 EST


From: Nadav Amit <namit@xxxxxxxxxx>

Currently, when flush_tlb_func() ends up not flushing, whether the event
is still traced depends on why the flush was skipped: some early-return
paths skip the tracepoint entirely, while the "already up to date" path
jumps to the tracing code and reports a flush of zero entries. Be
consistent and do not trace at all whenever the flush was not actually
done.
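
For illustration, with this patch applied the relevant control flow in
flush_tlb_func() looks roughly like the sketch below (simplified;
variable setup, the lazy-TLB early returns and the actual flushing
logic are elided):

	static void flush_tlb_func(void *info)
	{
		/* ... compute loaded_mm_asid, local_tlb_gen, mm_tlb_gen ... */

		if (unlikely(local_tlb_gen == mm_tlb_gen)) {
			/*
			 * Already caught up: nothing is flushed, so the
			 * tracepoint is not emitted either.
			 */
			return;
		}

		/* ... do the full or partial flush, set nr_invalidate ... */

		this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);

		/* Reached only when a flush was actually performed. */
		trace_tlb_flush(!local ? TLB_REMOTE_SHOOTDOWN :
				(f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN :
						  TLB_LOCAL_MM_SHOOTDOWN,
				nr_invalidate);
	}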

Suggested-by: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Signed-off-by: Nadav Amit <namit@xxxxxxxxxx>

---

v1->v2:
* Remove comment [Andy]
---
arch/x86/mm/tlb.c | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 0f346c51dd99..f012445f6d94 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -736,7 +736,7 @@ static void flush_tlb_func(void *info)
u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
bool local = smp_processor_id() == f->initiating_cpu;
- unsigned long nr_invalidate = 0;
+ unsigned long nr_invalidate;
u64 mm_tlb_gen;

/* This code cannot presently handle being reentered. */
@@ -795,7 +795,7 @@ static void flush_tlb_func(void *info)
* be handled can catch us all the way up, leaving no work for
* the second flush.
*/
- goto done;
+ return;
}

WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
@@ -870,8 +870,6 @@ static void flush_tlb_func(void *info)
/* Both paths above update our state to mm_tlb_gen. */
this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);

- /* Tracing is done in a unified manner to reduce the code size */
-done:
trace_tlb_flush(!local ? TLB_REMOTE_SHOOTDOWN :
(f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN :
TLB_LOCAL_MM_SHOOTDOWN,
--
2.25.1