tracing/fastboot: use sched switch tracer from boot tracer
Impact: enhance boot trace output with scheduling events
Use the sched_switch tracer from the boot tracer.
We can also trace scheduling events inside the initcalls.
Sched tracing is disabled after each initcall has finished and
then re-enabled before the next one is started.
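For illustration, a rough sketch of how the caller side is assumed to
bracket each initcall with these hooks. do_one_initcall() in init/main.c
is real, but its exact shape and the placement of the calls are
simplified here; timing/debug handling is omitted:

/*
 * Sketch only: assumes the initcall runner re-arms sched tracing right
 * before each initcall and stops it once the initcall returns, as
 * described above.
 */
int do_one_initcall(initcall_t fn)
{
	int result;

	enable_boot_trace();	/* starts cmdline/sched_switch recording,
				 * but only once pre_initcalls_finished
				 * has been set */
	result = fn();
	disable_boot_trace();	/* sched tracing stays off between
				 * initcalls */

	return result;
}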
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e4c40c8..50d7018 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3251,6 +3251,8 @@
register_tracer(&nop_trace);
#ifdef CONFIG_BOOT_TRACER
+ /* We don't want to launch sched_switch tracer yet */
+ global_trace.ctrl = 0;
register_tracer(&boot_tracer);
current_trace = &boot_tracer;
current_trace->init(&global_trace);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 8465ad0..9911277 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -49,6 +49,7 @@
unsigned long parent_ip;
};
extern struct tracer boot_tracer;
+extern struct tracer sched_switch_trace; /* Used by the boot tracer */
/*
* Context switch trace entry - which task (and prio) we switched from/to:
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index d104d5b4..6bbc879 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -27,10 +27,14 @@
void enable_boot_trace(void)
{
+ if (pre_initcalls_finished)
+ tracing_start_cmdline_record();
}
void disable_boot_trace(void)
{
+ if (pre_initcalls_finished)
+ tracing_stop_cmdline_record();
}
void reset_boot_trace(struct trace_array *tr)
@@ -45,6 +49,8 @@
for_each_cpu_mask(cpu, cpu_possible_map)
tracing_reset(tr, cpu);
+
+ sched_switch_trace.init(tr);
}
static void boot_trace_ctrl_update(struct trace_array *tr)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 96620c7..9d7bdac 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -127,6 +127,7 @@
long ref;
mutex_lock(&tracepoint_mutex);
+ tracer_enabled = 1;
ref = atomic_inc_return(&sched_ref);
if (ref == 1)
tracing_sched_register();
@@ -138,6 +139,7 @@
long ref;
mutex_lock(&tracepoint_mutex);
+ tracer_enabled = 0;
ref = atomic_dec_and_test(&sched_ref);
if (ref)
tracing_sched_unregister();
@@ -158,12 +160,10 @@
{
sched_switch_reset(tr);
tracing_start_cmdline_record();
- tracer_enabled = 1;
}
static void stop_sched_trace(struct trace_array *tr)
{
- tracer_enabled = 0;
tracing_stop_cmdline_record();
}
@@ -190,7 +190,7 @@
stop_sched_trace(tr);
}
-static struct tracer sched_switch_trace __read_mostly =
+struct tracer sched_switch_trace __read_mostly =
{
.name = "sched_switch",
.init = sched_switch_trace_init,