@@ -47,7 +47,7 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
  * not.
  */
 #ifdef CONFIG_DYNAMIC_OBJECTS
-static struct k_spinlock lists_lock;       /* kobj rbtree/dlist */
+static struct k_spinlock lists_lock;       /* kobj dlist */
 static struct k_spinlock objfree_lock;     /* k_object_free */
 
 #ifdef CONFIG_GEN_PRIV_STACKS
@@ -167,8 +167,7 @@ uint8_t *z_priv_stack_find(k_thread_stack_t *stack)
 
 struct dyn_obj_base {
 	struct z_object kobj;
-	sys_dnode_t dobj_list;
-	struct rbnode node; /* must be immediately before data member */
+	sys_dnode_t dobj_list; /* must be immediately before data member */
 };
 
 struct dyn_obj {
@@ -192,25 +191,14 @@ extern struct z_object *z_object_gperf_find(const void *obj);
 extern void z_object_gperf_wordlist_foreach(_wordlist_cb_func_t func,
 					     void *context);
 
-static bool node_lessthan(struct rbnode *a, struct rbnode *b);
-
-/*
- * Red/black tree of allocated kernel objects, for reasonably fast lookups
- * based on object pointer values.
- */
-static struct rbtree obj_rb_tree = {
-	.lessthan_fn = node_lessthan
-};
-
 /*
  * Linked list of allocated kernel objects, for iteration over all allocated
  * objects (and potentially deleting them during iteration).
  */
 static sys_dlist_t obj_list = SYS_DLIST_STATIC_INIT(&obj_list);
 
 /*
- * TODO: Write some hash table code that will replace both obj_rb_tree
- * and obj_list.
+ * TODO: Write some hash table code that will replace obj_list.
  */
 
 static size_t obj_size_get(enum k_objects otype)
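For context, the sys_dlist that remains after this change is Zephyr's intrusive doubly linked list: the sys_dnode_t linkage is embedded in the tracked structure, and SYS_DLIST_FOR_EACH_CONTAINER recovers the enclosing container on each iteration. Below is a minimal sketch of that pattern; the names my_entry, my_list, my_entry_add and my_entry_find are hypothetical and not part of this patch, only the dlist API calls mirror the ones used above.

#include <zephyr/sys/dlist.h>

struct my_entry {
	int id;
	sys_dnode_t node;	/* intrusive linkage, embedded in the entry */
};

/* Statically initialized empty list, same idiom as obj_list above. */
static sys_dlist_t my_list = SYS_DLIST_STATIC_INIT(&my_list);

static void my_entry_add(struct my_entry *e)
{
	sys_dlist_append(&my_list, &e->node);
}

static struct my_entry *my_entry_find(int id)
{
	struct my_entry *e;

	/* Linear walk; the macro performs the container-of step via 'node'. */
	SYS_DLIST_FOR_EACH_CONTAINER(&my_list, e, node) {
		if (e->id == id) {
			return e;
		}
	}

	return NULL;
}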
@@ -250,15 +238,9 @@ static size_t obj_align_get(enum k_objects otype)
 	return ret;
 }
 
-static bool node_lessthan(struct rbnode *a, struct rbnode *b)
-{
-	return a < b;
-}
-
 static struct dyn_obj_base *dyn_object_find(void *obj)
 {
-	struct rbnode *node;
-	struct dyn_obj_base *ret;
+	struct dyn_obj_base *node;
 	k_spinlock_key_t key;
 
 	/* For any dynamically allocated kernel object, the object
@@ -268,20 +250,19 @@ static struct dyn_obj_base *dyn_object_find(void *obj)
 	 */
 	key = k_spin_lock(&lists_lock);
 
-	RB_FOR_EACH(&obj_rb_tree, node) {
-		ret = CONTAINER_OF(node, struct dyn_obj_base, node);
-		if (ret->kobj.name == obj) {
+	SYS_DLIST_FOR_EACH_CONTAINER(&obj_list, node, dobj_list) {
+		if (node->kobj.name == obj) {
 			goto end;
 		}
 	}
 
 	/* No object found */
-	ret = NULL;
+	node = NULL;
 
 end:
 	k_spin_unlock(&lists_lock, key);
 
-	return ret;
+	return node;
 }
 
 /**
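The net effect of the hunk above is that dyn_object_find() now walks obj_list linearly instead of searching the red/black tree. A rough open-coded equivalent of the new loop is sketched below; the _sketch suffix and the node-level iteration are illustrative only (not part of the patch), it assumes the obj_list, struct dyn_obj_base and lists_lock definitions from this file, and the caller is expected to hold lists_lock just as in the real function.

static struct dyn_obj_base *dyn_object_find_sketch(void *obj)
{
	sys_dnode_t *n;

	/* Walk the raw dnodes and recover each container explicitly;
	 * SYS_DLIST_FOR_EACH_CONTAINER in the patch hides this step.
	 */
	SYS_DLIST_FOR_EACH_NODE(&obj_list, n) {
		struct dyn_obj_base *d =
			CONTAINER_OF(n, struct dyn_obj_base, dobj_list);

		if (d->kobj.name == obj) {
			return d;
		}
	}

	return NULL;	/* no dynamically allocated object matched */
}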
@@ -407,7 +388,6 @@ static struct z_object *dynamic_object_create(enum k_objects otype, size_t align
 
 	k_spinlock_key_t key = k_spin_lock(&lists_lock);
 
-	rb_insert(&obj_rb_tree, &dyn->node);
 	sys_dlist_append(&obj_list, &dyn->dobj_list);
 	k_spin_unlock(&lists_lock, key);
 
@@ -502,7 +482,6 @@ void k_object_free(void *obj)
 
 	dyn = dyn_object_find(obj);
 	if (dyn != NULL) {
-		rb_remove(&obj_rb_tree, &dyn->node);
 		sys_dlist_remove(&dyn->dobj_list);
 
 		if (dyn->kobj.type == K_OBJ_THREAD) {
@@ -612,7 +591,6 @@ static void unref_check(struct z_object *ko, uintptr_t index)
 		break;
 	}
 
-	rb_remove(&obj_rb_tree, &dyn->node);
 	sys_dlist_remove(&dyn->dobj_list);
 	k_free(dyn);
 out: