tracing/bkl: Add bkl ftrace events
authorFrederic Weisbecker <fweisbec@gmail.com>
Fri, 31 Jul 2009 23:34:24 +0000 (01:34 +0200)
committerFrederic Weisbecker <fweisbec@gmail.com>
Thu, 24 Sep 2009 13:16:31 +0000 (15:16 +0200)
Add two events, lock_kernel() and unlock_kernel(), to trace uses of the bkl.
This opens the door for userspace tools to gather statistics about
the callsites that use it, dependencies with other locks (by pairing
the trace with lock events), recursive use, and so on...

The {__reacquire,release}_kernel_lock() events are not traced because
these are called from schedule(), thus the sched events are sufficient
to trace them.

Example of a trace:

hald-addon-stor-4152  [000]   165.875501: unlock_kernel: depth: 0, fs/block_dev.c:1358 __blkdev_put()
hald-addon-stor-4152  [000]   167.832974: lock_kernel: depth: 0, fs/block_dev.c:1167 __blkdev_get()

How to get the callsites that acquire it recursively:

cd /debug/tracing/events/bkl
echo "lock_depth > 0" > filter

firefox-4951  [001]   206.276967: unlock_kernel: depth: 1, fs/reiserfs/super.c:575 reiserfs_dirty_inode()

You can also filter by file and/or line.

v2: Use of FILTER_PTR_STRING attribute for files and lines fields to
    make them traceable.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Li Zefan <lizf@cn.fujitsu.com>
include/linux/smp_lock.h
include/trace/events/bkl.h [new file with mode: 0644]
lib/kernel_lock.c

index 813be59bf3458785c00b3042645adb8a68a6b558..d48cc77ba70db09707444d42ffa12affa691a99f 100644 (file)
@@ -3,6 +3,7 @@
 
 #ifdef CONFIG_LOCK_KERNEL
 #include <linux/sched.h>
+#include <trace/events/bkl.h>
 
 #define kernel_locked()                (current->lock_depth >= 0)
 
@@ -24,8 +25,18 @@ static inline int reacquire_kernel_lock(struct task_struct *task)
        return 0;
 }
 
-extern void __lockfunc lock_kernel(void)       __acquires(kernel_lock);
-extern void __lockfunc unlock_kernel(void)     __releases(kernel_lock);
+extern void __lockfunc _lock_kernel(void)      __acquires(kernel_lock);
+extern void __lockfunc _unlock_kernel(void)    __releases(kernel_lock);
+
+#define lock_kernel()  {                                       \
+       trace_lock_kernel(__func__, __FILE__, __LINE__);        \
+       _lock_kernel();                                         \
+}
+
+#define unlock_kernel()        {                                       \
+       trace_unlock_kernel(__func__, __FILE__, __LINE__);      \
+       _unlock_kernel();                                       \
+}
 
 /*
  * Various legacy drivers don't really need the BKL in a specific
@@ -41,8 +52,8 @@ static inline void cycle_kernel_lock(void)
 
 #else
 
-#define lock_kernel()                          do { } while(0)
-#define unlock_kernel()                                do { } while(0)
+#define lock_kernel()     trace_lock_kernel(__func__, __FILE__, __LINE__);
+#define unlock_kernel()    trace_unlock_kernel(__func__, __FILE__, __LINE__);
 #define release_kernel_lock(task)              do { } while(0)
 #define cycle_kernel_lock()                    do { } while(0)
 #define reacquire_kernel_lock(task)            0
diff --git a/include/trace/events/bkl.h b/include/trace/events/bkl.h
new file mode 100644 (file)
index 0000000..8abd620
--- /dev/null
@@ -0,0 +1,61 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM bkl
+
+#if !defined(_TRACE_BKL_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_BKL_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(lock_kernel,
+
+       TP_PROTO(const char *func, const char *file, int line),
+
+       TP_ARGS(func, file, line),
+
+       TP_STRUCT__entry(
+               __field(        int,            lock_depth              )
+               __field_ext(    const char *,   func, FILTER_PTR_STRING )
+               __field_ext(    const char *,   file, FILTER_PTR_STRING )
+               __field(        int,            line                    )
+       ),
+
+       TP_fast_assign(
+               /* We want to record the lock_depth after lock is acquired */
+               __entry->lock_depth = current->lock_depth + 1;
+               __entry->func = func;
+               __entry->file = file;
+               __entry->line = line;
+       ),
+
+       TP_printk("depth: %d, %s:%d %s()", __entry->lock_depth,
+                 __entry->file, __entry->line, __entry->func)
+);
+
+TRACE_EVENT(unlock_kernel,
+
+       TP_PROTO(const char *func, const char *file, int line),
+
+       TP_ARGS(func, file, line),
+
+       TP_STRUCT__entry(
+               __field(int,            lock_depth)
+               __field(const char *,   func)
+               __field(const char *,   file)
+               __field(int,            line)
+       ),
+
+       TP_fast_assign(
+               __entry->lock_depth = current->lock_depth;
+               __entry->func = func;
+               __entry->file = file;
+               __entry->line = line;
+       ),
+
+       TP_printk("depth: %d, %s:%d %s()", __entry->lock_depth,
+                 __entry->file, __entry->line, __entry->func)
+);
+
+#endif /* _TRACE_BKL_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
index 39f1029e352586ca4640d3d0e6b1b4b28f120312..5c10b2e1fd0888826d52515da4b01ad966ed3615 100644 (file)
@@ -5,10 +5,11 @@
  * relegated to obsolescence, but used by various less
  * important (or lazy) subsystems.
  */
-#include <linux/smp_lock.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 #include <linux/semaphore.h>
+#define CREATE_TRACE_POINTS
+#include <linux/smp_lock.h>
 
 /*
  * The 'big kernel lock'
@@ -113,7 +114,7 @@ static inline void __unlock_kernel(void)
  * This cannot happen asynchronously, so we only need to
  * worry about other CPU's.
  */
-void __lockfunc lock_kernel(void)
+void __lockfunc _lock_kernel(void)
 {
        int depth = current->lock_depth+1;
        if (likely(!depth))
@@ -121,13 +122,13 @@ void __lockfunc lock_kernel(void)
        current->lock_depth = depth;
 }
 
-void __lockfunc unlock_kernel(void)
+void __lockfunc _unlock_kernel(void)
 {
        BUG_ON(current->lock_depth < 0);
        if (likely(--current->lock_depth < 0))
                __unlock_kernel();
 }
 
-EXPORT_SYMBOL(lock_kernel);
-EXPORT_SYMBOL(unlock_kernel);
+EXPORT_SYMBOL(_lock_kernel);
+EXPORT_SYMBOL(_unlock_kernel);