[PATCH 17/19] perf sched timehist: Add support for context-switch event

From: David Ahern
Date: Wed Aug 07 2013 - 22:52:04 EST


Context-switch software events are 64 bytes; sched_switch tracepoint events
are 136 bytes. Both indicate scheduling changes, so allow the user to
leverage the smaller event. If both events exist in a data file, the
context-switch event is ignored.
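
A possible workflow, sketched purely as an illustration (the record
options below are an assumption and are not dictated by this patch), is
to capture the lighter-weight software event instead of the sched_switch
tracepoint and analyze it with the timehist command introduced earlier
in this series:

    # capture every context switch via the "cs" software event, system-wide
    perf record -e cs -c 1 -a -- sleep 1

    # show scheduling history from the resulting perf.data
    perf sched timehist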

Signed-off-by: David Ahern <dsahern@xxxxxxxxx>
Cc: Frederic Weisbecker <fweisbec@xxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Jiri Olsa <jolsa@xxxxxxxxxx>
Cc: Mike Galbraith <efault@xxxxxx>
Cc: Namhyung Kim <namhyung@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Stephane Eranian <eranian@xxxxxxxxxx>
---
tools/perf/builtin-sched.c | 67 +++++++++++++++++++++++++++++++++++---------
1 file changed, 54 insertions(+), 13 deletions(-)

diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index a45a40f..f5e98f1 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -177,6 +177,7 @@ struct perf_sched {
 	bool no_callchain;
 	unsigned int max_stack_depth;
 	bool show_cpu_visual;
+	bool skip_cs;
 };
 
 /* per thread run time data */
@@ -1674,8 +1675,17 @@ static int timehist_check_attr(struct perf_sched *sched,
 {
 	struct perf_evsel *evsel;
 	struct evsel_runtime *er;
+	const char *evname;
+	bool have_cs_event = false, have_sched_event = false;
 
 	list_for_each_entry(evsel, &evlist->entries, node) {
+		evname = perf_evsel__name(evsel);
+		if (strcmp(evname, "cs") == 0 ||
+		    strcmp(evname, "context-switch") == 0)
+			have_cs_event = true;
+		else if (strcmp(evname, "sched:sched_switch") == 0)
+			have_sched_event = true;
+
 		er = perf_evsel__get_runtime(evsel);
 		if (er == NULL) {
 			pr_err("Failed to allocate memory for evsel runtime data\n");
@@ -1689,6 +1699,11 @@ static int timehist_check_attr(struct perf_sched *sched,
 		}
 	}
 
+	if (have_cs_event && have_sched_event) {
+		pr_debug("Both schedule change events exist. Ignoring context-switch event\n");
+		sched->skip_cs = true;
+	}
+
 	return 0;
 }
 
@@ -1926,6 +1941,30 @@ out:
 	return rc;
 }
 
+static int timehist_cs_event(struct perf_tool *tool __maybe_unused,
+			     struct perf_evsel *evsel,
+			     struct perf_sample *sample,
+			     struct machine *machine __maybe_unused)
+{
+	return timehist_sched_change_event(tool, evsel, sample, machine);
+}
+
+static void timehist_set_cs_handler(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel;
+	const char *evname;
+
+	list_for_each_entry(evsel, &evlist->entries, node) {
+		evname = perf_evsel__name(evsel);
+		if (strcmp(evname, "cs") == 0 ||
+		    strcmp(evname, "context-switch") == 0) {
+			evsel->handler.func = timehist_cs_event;
+		}
+	}
+
+	return;
+}
+
 static int timehist_sched_switch_event(struct perf_tool *tool,
 				       struct perf_evsel *evsel,
 				       struct perf_sample *sample,
@@ -2132,24 +2171,26 @@ static int perf_sched__timehist(struct perf_sched *sched)
 	}
 
 	/* setup per-evsel handlers */
-	if (perf_session__set_tracepoints_handlers(session, handlers))
-		goto out;
+	if (!sched->skip_cs)
+		timehist_set_cs_handler(session->evlist);
 
-	if (perf_session__has_traces(session, "record -R")) {
-		if (sched->show_events)
-			timehist_header(sched);
+	if (perf_session__has_traces(session, "sched record") &&
+	    perf_session__set_tracepoints_handlers(session, handlers))
+		goto out;
 
-		err = perf_session__process_events(session, &sched->tool);
-		if (err) {
-			pr_err("Failed to process events, error %d", err);
-			goto out;
-		}
+	if (sched->show_events)
+		timehist_header(sched);
 
-		sched->nr_events = session->stats.nr_events[0];
-		sched->nr_lost_events = session->stats.total_lost;
-		sched->nr_lost_chunks = session->stats.nr_events[PERF_RECORD_LOST];
+	err = perf_session__process_events(session, &sched->tool);
+	if (err) {
+		pr_err("Failed to process events, error %d", err);
+		goto out;
+	}
 
+	sched->nr_events = session->stats.nr_events[0];
+	sched->nr_lost_events = session->stats.total_lost;
+	sched->nr_lost_chunks = session->stats.nr_events[PERF_RECORD_LOST];
+
 	timehist_print_summary(session);
 
 out:
--
1.7.10.1
