Skip to content

Commit 5f151b2

Browse files
committed
ftrace: Fix function_profiler and function tracer together
The latest rewrite of ftrace removed the separate ftrace_ops of the function tracer and the function graph tracer and had them share the same ftrace_ops. This simplified the accounting by removing the multiple layers of functions called, where the global_ops func would call a special list that would iterate over the other ops that were registered within it (like function and function graph), which itself was registered to the ftrace ops list of all functions currently active. If that sounds confusing, the code that implemented it was also confusing and its removal is a good thing. The problem with this change was that it assumed that the function and function graph tracer can never be used at the same time. This is mostly true, but there is an exception: when the function profiler uses the function graph tracer to profile. The function profiler can be activated at the same time as the function tracer, and this breaks the assumption; the result is that ftrace will crash (it detects the error and shuts itself down; it does not cause a kernel oops). To solve this issue, a previous change allowed the hash table for the functions traced by a ftrace_ops to be a pointer and let multiple ftrace_ops share the same hash. This allows the function and function graph tracers to have separate ftrace_ops but still share the hash, which is what is done here. Now the function and function graph tracers have separate ftrace_ops again, and the function tracer can be run while the function profiler is active. Cc: [email protected] # 3.16 (apply after 3.17-rc4 is out) Signed-off-by: Steven Rostedt <[email protected]>
1 parent bce0b6c commit 5f151b2

File tree

1 file changed

+38
-22
lines changed

1 file changed

+38
-22
lines changed

kernel/trace/ftrace.c

Lines changed: 38 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -68,8 +68,12 @@
6868
#define INIT_OPS_HASH(opsname) \
6969
.func_hash = &opsname.local_hash, \
7070
.local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
71+
#define ASSIGN_OPS_HASH(opsname, val) \
72+
.func_hash = val, \
73+
.local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
7174
#else
7275
#define INIT_OPS_HASH(opsname)
76+
#define ASSIGN_OPS_HASH(opsname, val)
7377
#endif
7478

7579
static struct ftrace_ops ftrace_list_end __read_mostly = {
@@ -4663,7 +4667,6 @@ void __init ftrace_init(void)
46634667
static struct ftrace_ops global_ops = {
46644668
.func = ftrace_stub,
46654669
.flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4666-
INIT_OPS_HASH(global_ops)
46674670
};
46684671

46694672
static int __init ftrace_nodyn_init(void)
@@ -5197,6 +5200,17 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
51975200

51985201
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
51995202

5203+
static struct ftrace_ops graph_ops = {
5204+
.func = ftrace_stub,
5205+
.flags = FTRACE_OPS_FL_RECURSION_SAFE |
5206+
FTRACE_OPS_FL_INITIALIZED |
5207+
FTRACE_OPS_FL_STUB,
5208+
#ifdef FTRACE_GRAPH_TRAMP_ADDR
5209+
.trampoline = FTRACE_GRAPH_TRAMP_ADDR,
5210+
#endif
5211+
ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
5212+
};
5213+
52005214
static int ftrace_graph_active;
52015215

52025216
int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
@@ -5359,12 +5373,28 @@ static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
53595373
*/
53605374
static void update_function_graph_func(void)
53615375
{
5362-
if (ftrace_ops_list == &ftrace_list_end ||
5363-
(ftrace_ops_list == &global_ops &&
5364-
global_ops.next == &ftrace_list_end))
5365-
ftrace_graph_entry = __ftrace_graph_entry;
5366-
else
5376+
struct ftrace_ops *op;
5377+
bool do_test = false;
5378+
5379+
/*
5380+
* The graph and global ops share the same set of functions
5381+
* to test. If any other ops is on the list, then
5382+
* the graph tracing needs to test if it's the function
5383+
* it should call.
5384+
*/
5385+
do_for_each_ftrace_op(op, ftrace_ops_list) {
5386+
if (op != &global_ops && op != &graph_ops &&
5387+
op != &ftrace_list_end) {
5388+
do_test = true;
5389+
/* in double loop, break out with goto */
5390+
goto out;
5391+
}
5392+
} while_for_each_ftrace_op(op);
5393+
out:
5394+
if (do_test)
53675395
ftrace_graph_entry = ftrace_graph_entry_test;
5396+
else
5397+
ftrace_graph_entry = __ftrace_graph_entry;
53685398
}
53695399

53705400
static struct notifier_block ftrace_suspend_notifier = {
@@ -5405,16 +5435,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
54055435
ftrace_graph_entry = ftrace_graph_entry_test;
54065436
update_function_graph_func();
54075437

5408-
/* Function graph doesn't use the .func field of global_ops */
5409-
global_ops.flags |= FTRACE_OPS_FL_STUB;
5410-
5411-
#ifdef CONFIG_DYNAMIC_FTRACE
5412-
/* Optimize function graph calling (if implemented by arch) */
5413-
if (FTRACE_GRAPH_TRAMP_ADDR != 0)
5414-
global_ops.trampoline = FTRACE_GRAPH_TRAMP_ADDR;
5415-
#endif
5416-
5417-
ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
5438+
ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
54185439

54195440
out:
54205441
mutex_unlock(&ftrace_lock);
@@ -5432,12 +5453,7 @@ void unregister_ftrace_graph(void)
54325453
ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
54335454
ftrace_graph_entry = ftrace_graph_entry_stub;
54345455
__ftrace_graph_entry = ftrace_graph_entry_stub;
5435-
ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
5436-
global_ops.flags &= ~FTRACE_OPS_FL_STUB;
5437-
#ifdef CONFIG_DYNAMIC_FTRACE
5438-
if (FTRACE_GRAPH_TRAMP_ADDR != 0)
5439-
global_ops.trampoline = 0;
5440-
#endif
5456+
ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
54415457
unregister_pm_notifier(&ftrace_suspend_notifier);
54425458
unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
54435459

0 commit comments

Comments
 (0)