[for-next][PATCH 06/30] function_graph: Remove the use of FTRACE_NOTRACE_DEPTH

From: Steven Rostedt
Date: Wed Dec 05 2018 - 18:50:46 EST


From: "Steven Rostedt (VMware)" <rostedt@xxxxxxxxxxx>

The curr_ret_stack is no longer set to a negative value when a function is
not to be traced by the function graph tracer. Remove the usage of
FTRACE_NOTRACE_DEPTH, as it is no longer needed.
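For readers unfamiliar with the old encoding, here is a rough, self-contained
sketch of the trick being removed (this is not kernel code; the push-side
helper and variable names are made up for illustration, mirroring the hunks
deleted below): entering a filtered (notrace'd) function used to push
curr_ret_stack far negative by FTRACE_NOTRACE_DEPTH, and every consumer of the
index then had to add the constant back before it could use the value.

	/* Illustrative sketch only -- not the kernel implementation. */
	#include <stdio.h>

	#define FTRACE_NOTRACE_DEPTH 65536

	static int curr_ret_stack;	/* stand-in for current->curr_ret_stack */

	/* Old scheme: mark a filtered frame by pushing the index far negative. */
	static void push_notrace_frame(void)
	{
		curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	}

	/* Old scheme: readers had to undo the offset before using the index. */
	static int recover_index(int index)
	{
		if (index < 0)
			index += FTRACE_NOTRACE_DEPTH;
		return index;
	}

	int main(void)
	{
		curr_ret_stack = 3;	/* three traced frames on the stack */
		push_notrace_frame();	/* a filtered function is entered */
		printf("encoded index:   %d\n", curr_ret_stack);		/* 3 - 65536 */
		printf("recovered index: %d\n", recover_index(curr_ret_stack));	/* 3 */
		return 0;
	}

Since the index no longer goes negative for filtered functions, all of the
recovery branches below can simply be deleted.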

Reviewed-by: Joel Fernandes (Google) <joel@xxxxxxxxxxxxxxxxx>
Signed-off-by: Steven Rostedt (VMware) <rostedt@xxxxxxxxxxx>
---
 include/linux/ftrace.h               |  1 -
 kernel/trace/fgraph.c                | 19 -------------------
 kernel/trace/trace_functions_graph.c | 11 -----------
 3 files changed, 31 deletions(-)

diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 10bd46434908..98625f10d982 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -790,7 +790,6 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
  */
 #define __notrace_funcgraph notrace
 
-#define FTRACE_NOTRACE_DEPTH 65536
 #define FTRACE_RETFUNC_DEPTH 50
 #define FTRACE_RETSTACK_ALLOC_SIZE 32
 extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index e852b69c0e64..de887a983ac7 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -112,16 +112,6 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,

 	index = current->curr_ret_stack;
 
-	/*
-	 * A negative index here means that it's just returned from a
-	 * notrace'd function. Recover index to get an original
-	 * return address. See ftrace_push_return_trace().
-	 *
-	 * TODO: Need to check whether the stack gets corrupted.
-	 */
-	if (index < 0)
-		index += FTRACE_NOTRACE_DEPTH;
-
 	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
 		ftrace_graph_stop();
 		WARN_ON(1);
@@ -190,15 +180,6 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 	 */
 	barrier();
 	current->curr_ret_stack--;
-	/*
-	 * The curr_ret_stack can be less than -1 only if it was
-	 * filtered out and it's about to return from the function.
-	 * Recover the index and continue to trace normal functions.
-	 */
-	if (current->curr_ret_stack < -1) {
-		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
-		return ret;
-	}
 
 	if (unlikely(!ret)) {
 		ftrace_graph_stop();
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index ecf543df943b..eaf9b1629956 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -115,9 +115,6 @@ unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
 	if (ret != (unsigned long)return_to_handler)
 		return ret;
 
-	if (index < -1)
-		index += FTRACE_NOTRACE_DEPTH;
-
 	if (index < 0)
 		return ret;
 
@@ -675,10 +672,6 @@ print_graph_entry_leaf(struct trace_iterator *iter,

 	cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 
-	/* If a graph tracer ignored set_graph_notrace */
-	if (call->depth < -1)
-		call->depth += FTRACE_NOTRACE_DEPTH;
-
 	/*
 	 * Comments display at + 1 to depth. Since
 	 * this is a leaf function, keep the comments
@@ -721,10 +714,6 @@ print_graph_entry_nested(struct trace_iterator *iter,
 	struct fgraph_cpu_data *cpu_data;
 	int cpu = iter->cpu;
 
-	/* If a graph tracer ignored set_graph_notrace */
-	if (call->depth < -1)
-		call->depth += FTRACE_NOTRACE_DEPTH;
-
 	cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 	cpu_data->depth = call->depth;
 
--
2.19.1