sched debug: BKL usage statistics
Add per-task and per-rq BKL usage statistics.

With CONFIG_SCHEDSTATS enabled, schedule() now checks whether the task
being switched out still holds the Big Kernel Lock (prev->lock_depth >= 0)
and, if so, increments both the runqueue's bkl_cnt and the task's
sched_info.bkl_cnt counter.
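As an illustration of the accounting pattern (not kernel code), here is
a minimal userspace sketch; the "task" and "rq" structures and the
account_bkl() helper are simplified stand-ins invented for this example:

#include <stdio.h>

struct task {
	int lock_depth;		/* -1: BKL not held; >= 0: held */
	unsigned long bkl_cnt;	/* stands in for sched_info.bkl_cnt */
};

struct rq {
	unsigned long bkl_cnt;	/* per-runqueue counter */
};

/* Mirrors the check the patch adds to schedule(). */
static void account_bkl(struct rq *rq, struct task *prev)
{
	if (prev->lock_depth >= 0) {
		rq->bkl_cnt++;
		prev->bkl_cnt++;
	}
}

int main(void)
{
	struct rq rq = { 0 };
	struct task a = { .lock_depth = -1 };	/* never holds the BKL */
	struct task b = { .lock_depth = 0 };	/* holds it while scheduling */

	account_bkl(&rq, &a);	/* not counted */
	account_bkl(&rq, &b);	/* counted */
	account_bkl(&rq, &b);	/* counted */

	/* prints: rq=2 a=0 b=2 */
	printf("rq=%lu a=%lu b=%lu\n", rq.bkl_cnt, a.bkl_cnt, b.bkl_cnt);
	return 0;
}

In the patch itself the increments go through schedstat_inc(), which
expands to a plain counter increment when CONFIG_SCHEDSTATS is set and
to a no-op otherwise; the surrounding #ifdef additionally compiles the
lock_depth test out entirely when statistics are disabled.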
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/kernel/sched.c b/kernel/sched.c
index f33608e..5004dff 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -356,6 +356,9 @@
/* try_to_wake_up() stats */
unsigned long ttwu_cnt;
unsigned long ttwu_local;
+
+ /* BKL stats */
+ unsigned long bkl_cnt;
#endif
struct lock_class_key rq_lock_key;
};
@@ -3414,6 +3417,13 @@
profile_hit(SCHED_PROFILING, __builtin_return_address(0));

schedstat_inc(this_rq(), sched_cnt);
+#ifdef CONFIG_SCHEDSTATS
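+ /* lock_depth >= 0 means prev still holds the BKL */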
+ if (unlikely(prev->lock_depth >= 0)) {
+ schedstat_inc(this_rq(), bkl_cnt);
+ schedstat_inc(prev, sched_info.bkl_cnt);
+ }
+#endif
}

/*