8
8
#include <linux/efi.h>
9
9
#include <linux/export.h>
10
10
#include <linux/ftrace.h>
11
+ #include <linux/kprobes.h>
11
12
#include <linux/sched.h>
12
13
#include <linux/sched/debug.h>
13
14
#include <linux/sched/task_stack.h>
18
19
#include <asm/stack_pointer.h>
19
20
#include <asm/stacktrace.h>
20
21
22
+ /*
23
+ * Kernel unwind state
24
+ *
25
+ * @common: Common unwind state.
26
+ * @task: The task being unwound.
27
+ * @kr_cur: When KRETPROBES is selected, holds the kretprobe instance
28
+ * associated with the most recently encountered replacement lr
29
+ * value.
30
+ */
31
+ struct kunwind_state {
32
+ struct unwind_state common ;
33
+ struct task_struct * task ;
34
+ #ifdef CONFIG_KRETPROBES
35
+ struct llist_node * kr_cur ;
36
+ #endif
37
+ };
38
+
39
+ static __always_inline void
40
+ kunwind_init (struct kunwind_state * state ,
41
+ struct task_struct * task )
42
+ {
43
+ unwind_init_common (& state -> common );
44
+ state -> task = task ;
45
+ }
46
+
21
47
/*
22
48
* Start an unwind from a pt_regs.
23
49
*
26
52
* The regs must be on a stack currently owned by the calling task.
27
53
*/
28
54
static __always_inline void
29
- unwind_init_from_regs (struct unwind_state * state ,
30
- struct pt_regs * regs )
55
+ kunwind_init_from_regs (struct kunwind_state * state ,
56
+ struct pt_regs * regs )
31
57
{
32
- unwind_init_common (state , current );
58
+ kunwind_init (state , current );
33
59
34
- state -> fp = regs -> regs [29 ];
35
- state -> pc = regs -> pc ;
60
+ state -> common . fp = regs -> regs [29 ];
61
+ state -> common . pc = regs -> pc ;
36
62
}
37
63
38
64
/*
@@ -44,12 +70,12 @@ unwind_init_from_regs(struct unwind_state *state,
44
70
* The function which invokes this must be noinline.
45
71
*/
46
72
static __always_inline void
47
- unwind_init_from_caller (struct unwind_state * state )
73
+ kunwind_init_from_caller (struct kunwind_state * state )
48
74
{
49
- unwind_init_common (state , current );
75
+ kunwind_init (state , current );
50
76
51
- state -> fp = (unsigned long )__builtin_frame_address (1 );
52
- state -> pc = (unsigned long )__builtin_return_address (0 );
77
+ state -> common . fp = (unsigned long )__builtin_frame_address (1 );
78
+ state -> common . pc = (unsigned long )__builtin_return_address (0 );
53
79
}
54
80
55
81
/*
@@ -63,35 +89,38 @@ unwind_init_from_caller(struct unwind_state *state)
63
89
* call this for the current task.
64
90
*/
65
91
static __always_inline void
66
- unwind_init_from_task (struct unwind_state * state ,
67
- struct task_struct * task )
92
+ kunwind_init_from_task (struct kunwind_state * state ,
93
+ struct task_struct * task )
68
94
{
69
- unwind_init_common (state , task );
95
+ kunwind_init (state , task );
70
96
71
- state -> fp = thread_saved_fp (task );
72
- state -> pc = thread_saved_pc (task );
97
+ state -> common . fp = thread_saved_fp (task );
98
+ state -> common . pc = thread_saved_pc (task );
73
99
}
74
100
75
101
/*
 * Recover the original return address when the value on the stack has been
 * replaced by instrumentation (ftrace graph tracing or kretprobes), so that
 * the unwind reports the real caller rather than a trampoline.
 *
 * Returns 0 on success, -EINVAL if a replacement address cannot be resolved.
 */
static __always_inline int
kunwind_recover_return_address(struct kunwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (state->task->ret_stack &&
	    (state->common.pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		orig_pc = ftrace_graph_ret_addr(state->task, NULL,
						state->common.pc,
						(void *)state->common.fp);
		if (WARN_ON_ONCE(state->common.pc == orig_pc))
			return -EINVAL;
		state->common.pc = orig_pc;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->common.pc)) {
		unsigned long orig_pc;
		orig_pc = kretprobe_find_ret_addr(state->task,
						  (void *)state->common.fp,
						  &state->kr_cur);
		state->common.pc = orig_pc;
	}
#endif /* CONFIG_KRETPROBES */

	return 0;
}
97
126
@@ -106,38 +135,38 @@ unwind_recover_return_address(struct unwind_state *state)
106
135
* and the location (but not the fp value) of B.
107
136
*/
108
137
static __always_inline int
109
- unwind_next (struct unwind_state * state )
138
+ kunwind_next (struct kunwind_state * state )
110
139
{
111
140
struct task_struct * tsk = state -> task ;
112
- unsigned long fp = state -> fp ;
141
+ unsigned long fp = state -> common . fp ;
113
142
int err ;
114
143
115
144
/* Final frame; nothing to unwind */
116
145
if (fp == (unsigned long )task_pt_regs (tsk )-> stackframe )
117
146
return - ENOENT ;
118
147
119
- err = unwind_next_frame_record (state );
148
+ err = unwind_next_frame_record (& state -> common );
120
149
if (err )
121
150
return err ;
122
151
123
- state -> pc = ptrauth_strip_kernel_insn_pac (state -> pc );
152
+ state -> common . pc = ptrauth_strip_kernel_insn_pac (state -> common . pc );
124
153
125
- return unwind_recover_return_address (state );
154
+ return kunwind_recover_return_address (state );
126
155
}
127
156
128
157
static __always_inline void
129
- unwind (struct unwind_state * state , stack_trace_consume_fn consume_entry ,
130
- void * cookie )
158
+ do_kunwind (struct kunwind_state * state , stack_trace_consume_fn consume_entry ,
159
+ void * cookie )
131
160
{
132
- if (unwind_recover_return_address (state ))
161
+ if (kunwind_recover_return_address (state ))
133
162
return ;
134
163
135
164
while (1 ) {
136
165
int ret ;
137
166
138
- if (!consume_entry (cookie , state -> pc ))
167
+ if (!consume_entry (cookie , state -> common . pc ))
139
168
break ;
140
- ret = unwind_next (state );
169
+ ret = kunwind_next (state );
141
170
if (ret < 0 )
142
171
break ;
143
172
}
@@ -190,22 +219,24 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
190
219
STACKINFO_EFI ,
191
220
#endif
192
221
};
193
- struct unwind_state state = {
194
- .stacks = stacks ,
195
- .nr_stacks = ARRAY_SIZE (stacks ),
222
+ struct kunwind_state state = {
223
+ .common = {
224
+ .stacks = stacks ,
225
+ .nr_stacks = ARRAY_SIZE (stacks ),
226
+ },
196
227
};
197
228
198
229
if (regs ) {
199
230
if (task != current )
200
231
return ;
201
- unwind_init_from_regs (& state , regs );
232
+ kunwind_init_from_regs (& state , regs );
202
233
} else if (task == current ) {
203
- unwind_init_from_caller (& state );
234
+ kunwind_init_from_caller (& state );
204
235
} else {
205
- unwind_init_from_task (& state , task );
236
+ kunwind_init_from_task (& state , task );
206
237
}
207
238
208
- unwind (& state , consume_entry , cookie );
239
+ do_kunwind (& state , consume_entry , cookie );
209
240
}
210
241
211
242
static bool dump_backtrace_entry (void * arg , unsigned long where )
0 commit comments