1 #include <linux/percpu.h>
2 #include <linux/jump_label.h>
/* Static key flipped by the tracepoint reg/unreg callbacks below. */
6 struct static_key opal_tracepoint_key = STATIC_KEY_INIT;
/*
 * Tracepoint registration callback: enable opal_tracepoint_key so the
 * instrumented OPAL call path is taken.
 * NOTE(review): enclosing braces are elided in this view of the file.
 */
8 void opal_tracepoint_regfunc(void)
10 static_key_slow_inc(&opal_tracepoint_key);
/*
 * Tracepoint unregistration callback: drop the reference taken in
 * opal_tracepoint_regfunc(); the key goes false on the last decrement.
 * NOTE(review): enclosing braces are elided in this view of the file.
 */
13 void opal_tracepoint_unregfunc(void)
15 static_key_slow_dec(&opal_tracepoint_key);
19 * We optimise OPAL calls by placing opal_tracepoint_refcount
20 * directly in the TOC so we can check if the opal tracepoints are
21 * enabled via a single load.
24 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
/*
 * Defined elsewhere (per the comment above, placed in the TOC so the
 * low-level OPAL call code can test it with a single load).
 */
25 extern long opal_tracepoint_refcount;
/*
 * Non-static-key variant of the registration callback: bump the plain
 * refcount that the OPAL call path tests directly.  Serialized by
 * tracepoints_mutex (see NB above), so no atomics are needed.
 * NOTE(review): presumably the #else branch of a jump-label #ifdef —
 * the preprocessor lines are elided in this view.
 */
27 void opal_tracepoint_regfunc(void)
29 opal_tracepoint_refcount++;
/*
 * Non-static-key variant of the unregistration callback: drop the
 * reference taken in opal_tracepoint_regfunc().  Serialized by
 * tracepoints_mutex (see NB above).
 */
32 void opal_tracepoint_unregfunc(void)
34 opal_tracepoint_refcount--;
39 * Since the tracing code might execute OPAL calls we need to guard against recursion.
/* Per-CPU recursion depth used by the __trace_opal_* hooks below. */
42 static DEFINE_PER_CPU(unsigned int, opal_trace_depth);
/*
 * Emit the opal_entry tracepoint for an OPAL call.
 * @opcode: OPAL call token being invoked.
 * @args:   pointer to the call's argument array.
 *
 * Runs with local interrupts disabled for the duration of the hook and
 * consults the per-CPU opal_trace_depth counter so that OPAL calls made
 * by the tracing code itself do not recurse into the tracepoint.
 * NOTE(review): the depth test/increment and local variable
 * declarations (flags, depth) are elided in this view of the file.
 */
44 void __trace_opal_entry(unsigned long opcode, unsigned long *args)
49 local_irq_save(flags);
51 depth = this_cpu_ptr(&opal_trace_depth);
58 trace_opal_entry(opcode, args);
62 local_irq_restore(flags);
/*
 * Emit the opal_exit tracepoint when an OPAL call returns.
 * @opcode: OPAL call token that was invoked.
 * @retval: the call's return value.
 *
 * Mirrors __trace_opal_entry(): interrupts are disabled around the hook
 * and the per-CPU opal_trace_depth counter guards against recursion from
 * OPAL calls made by the tracing code itself.
 * NOTE(review): the depth test/increment and local variable
 * declarations (flags, depth) are elided in this view of the file.
 */
65 void __trace_opal_exit(long opcode, unsigned long retval)
70 local_irq_save(flags);
72 depth = this_cpu_ptr(&opal_trace_depth);
78 trace_opal_exit(opcode, retval);
83 local_irq_restore(flags);