@@ -76,6 +76,44 @@ void rt_thread_resume_sethook(void (*hook)(rt_thread_t thread))
 RT_OBJECT_HOOKLIST_DEFINE(rt_thread_inited);
 #endif /* defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR) */

+#ifdef RT_USING_MUTEX
+static void _thread_detach_from_mutex(rt_thread_t thread)
+{
+    rt_list_t *node;
+    rt_list_t *tmp_list;
+    struct rt_mutex *mutex;
+    rt_base_t level;
+
+    level = rt_spin_lock_irqsave(&thread->spinlock);
+
+    /* check if thread is waiting on a mutex */
+    if ((thread->pending_object) &&
+        (rt_object_get_type(thread->pending_object) == RT_Object_Class_Mutex))
+    {
+        /* remove it from its waiting list */
+        struct rt_mutex *mutex = (struct rt_mutex *)thread->pending_object;
+        rt_mutex_drop_thread(mutex, thread);
+        thread->pending_object = RT_NULL;
+    }
+
+    /* free taken mutex after detaching from waiting, so we don't lost mutex just got */
+    rt_list_for_each_safe(node, tmp_list, &(thread->taken_object_list))
+    {
+        mutex = rt_list_entry(node, struct rt_mutex, taken_list);
+        LOG_D("Thread [%s] exits while holding mutex [%s].\n", thread->parent.name, mutex->parent.parent.name);
+        /* recursively take */
+        mutex->hold = 1;
+        rt_mutex_release(mutex);
+    }
+
+    rt_spin_unlock_irqrestore(&thread->spinlock, level);
+}
+
+#else
+
+static void _thread_detach_from_mutex(rt_thread_t thread) {}
+#endif
+
 static void _thread_exit(void)
 {
     struct rt_thread *thread;
@@ -88,6 +126,8 @@ static void _thread_exit(void)

     rt_thread_close(thread);

+    _thread_detach_from_mutex(thread);
+
     /* insert to defunct thread list */
     rt_thread_defunct_enqueue(thread);

@@ -133,41 +173,6 @@ static void _thread_timeout(void *parameter)
     rt_sched_unlock_n_resched(slvl);
 }

-#ifdef RT_USING_MUTEX
-static void _thread_detach_from_mutex(rt_thread_t thread)
-{
-    rt_list_t *node;
-    rt_list_t *tmp_list;
-    struct rt_mutex *mutex;
-    rt_base_t level;
-
-    level = rt_spin_lock_irqsave(&thread->spinlock);
-
-    /* check if thread is waiting on a mutex */
-    if ((thread->pending_object) &&
-        (rt_object_get_type(thread->pending_object) == RT_Object_Class_Mutex))
-    {
-        /* remove it from its waiting list */
-        struct rt_mutex *mutex = (struct rt_mutex *)thread->pending_object;
-        rt_mutex_drop_thread(mutex, thread);
-        thread->pending_object = RT_NULL;
-    }
-
-    /* free taken mutex after detaching from waiting, so we don't lost mutex just got */
-    rt_list_for_each_safe(node, tmp_list, &(thread->taken_object_list))
-    {
-        mutex = rt_list_entry(node, struct rt_mutex, taken_list);
-        rt_mutex_release(mutex);
-    }
-
-    rt_spin_unlock_irqrestore(&thread->spinlock, level);
-}
-
-#else
-
-static void _thread_detach_from_mutex(rt_thread_t thread) {}
-#endif
-
 static rt_err_t _thread_init(struct rt_thread *thread,
                              const char       *name,
                              void (*entry)(void *parameter),
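
For illustration only, not part of the patch: a minimal sketch of how the exit-time cleanup added above can be observed. All names, the stack size, priority, tick and delay below are arbitrary assumptions; only standard RT-Thread APIs are used. A worker thread takes a mutex and returns from its entry function while still holding it; with _thread_detach_from_mutex() called from _thread_exit(), the mutex is released when the worker exits, so the second rt_mutex_take() below no longer blocks forever.

#include <rtthread.h>

/* hypothetical demo objects, not part of the patch */
static rt_mutex_t demo_lock;

static void worker_entry(void *parameter)
{
    rt_mutex_take(demo_lock, RT_WAITING_FOREVER);
    /* return while still holding demo_lock; the thread exit path runs next */
}

int mutex_exit_demo(void)
{
    rt_thread_t worker;

    demo_lock = rt_mutex_create("demo", RT_IPC_FLAG_PRIO);
    worker = rt_thread_create("worker", worker_entry, RT_NULL, 1024, 20, 10);
    rt_thread_startup(worker);

    rt_thread_mdelay(100);  /* let the worker run and exit */

    /* with the patch, the exiting worker released demo_lock, so this succeeds */
    rt_mutex_take(demo_lock, RT_WAITING_FOREVER);
    rt_mutex_release(demo_lock);
    rt_mutex_delete(demo_lock);
    return 0;
}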