[PATCH v3 27/39] x86/ibt,ftrace: Annotate ftrace code patching

From: Peter Zijlstra
Date: Thu Mar 03 2022 - 06:33:51 EST


These inner labels are code patching sites, not indirect branch targets; annotate them NOENDBR so objtool's IBT validation does not expect an ENDBR instruction at these addresses.
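With CONFIG_X86_KERNEL_IBT, objtool checks that every address reachable through an indirect branch begins with ENDBR. The labels touched below (ftrace_caller_op_ptr, ftrace_call, ftrace_regs_call, ...) are only exported so the ftrace patching machinery can locate and rewrite the instructions behind them; control flow never reaches them via an indirect jump or call. ANNOTATE_NOENDBR records each such address in a discard section that objtool consumes, so no ENDBR is required there. A minimal sketch of the asm side of the annotation, assuming the .discard.noendbr section name used elsewhere in this series (the exact macro definition may differ):

	/*
	 * Emit the current address into .discard.noendbr; objtool reads
	 * this section, exempts the recorded addresses from the
	 * "missing ENDBR" check, and the section is discarded at link time.
	 */
	.macro ANNOTATE_NOENDBR
	.Lhere_\@:
		.pushsection .discard.noendbr
		.quad	.Lhere_\@
		.popsection
	.endm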

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
arch/x86/kernel/ftrace_64.S | 7 +++++++
1 file changed, 7 insertions(+)

--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -145,6 +145,7 @@ SYM_FUNC_START(ftrace_caller)
movq %rcx, RSP(%rsp)

SYM_INNER_LABEL(ftrace_caller_op_ptr, SYM_L_GLOBAL)
+ ANNOTATE_NOENDBR
/* Load the ftrace_ops into the 3rd parameter */
movq function_trace_op(%rip), %rdx

@@ -155,6 +156,7 @@ SYM_INNER_LABEL(ftrace_caller_op_ptr, SY
movq $0, CS(%rsp)

SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
+ ANNOTATE_NOENDBR
call ftrace_stub

/* Handlers can change the RIP */
@@ -169,6 +171,7 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBA
* layout here.
*/
SYM_INNER_LABEL(ftrace_caller_end, SYM_L_GLOBAL)
+ ANNOTATE_NOENDBR

jmp ftrace_epilogue
SYM_FUNC_END(ftrace_caller);
@@ -192,6 +195,7 @@ SYM_FUNC_START(ftrace_regs_caller)
/* save_mcount_regs fills in first two parameters */

SYM_INNER_LABEL(ftrace_regs_caller_op_ptr, SYM_L_GLOBAL)
+ ANNOTATE_NOENDBR
/* Load the ftrace_ops into the 3rd parameter */
movq function_trace_op(%rip), %rdx

@@ -221,6 +225,7 @@ SYM_INNER_LABEL(ftrace_regs_caller_op_pt
leaq (%rsp), %rcx

SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
+ ANNOTATE_NOENDBR
call ftrace_stub

/* Copy flags back to SS, to restore them */
@@ -248,6 +253,7 @@ SYM_INNER_LABEL(ftrace_regs_call, SYM_L_
*/
testq %rax, %rax
SYM_INNER_LABEL(ftrace_regs_caller_jmp, SYM_L_GLOBAL)
+ ANNOTATE_NOENDBR
jnz 1f

restore_mcount_regs
@@ -261,6 +267,7 @@ SYM_INNER_LABEL(ftrace_regs_caller_jmp,
* to the return.
*/
SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
+ ANNOTATE_NOENDBR
jmp ftrace_epilogue

/* Swap the flags with orig_rax */