Commit db5b0fd

[examples][testcases] Add slab memory management algorithm test case
1 parent e0b0ec1 commit db5b0fd

File tree: 3 files changed, +337 additions, 0 deletions

examples/utest/testcases/kernel/Kconfig (+5)

@@ -10,6 +10,11 @@ config UTEST_SMALL_MEM_TC
     default y
     depends on RT_USING_SMALL_MEM
 
+config UTEST_SLAB_TC
+    bool "slab test"
+    default n
+    depends on RT_USING_SLAB
+
 config UTEST_IRQ_TC
     bool "IRQ test"
     default n

examples/utest/testcases/kernel/SConscript (+3)

@@ -11,6 +11,9 @@ if GetDepend(['UTEST_MEMHEAP_TC']):
 if GetDepend(['UTEST_SMALL_MEM_TC']):
     src += ['mem_tc.c']
 
+if GetDepend(['UTEST_SLAB_TC']):
+    src += ['slab_tc.c']
+
 if GetDepend(['UTEST_IRQ_TC']):
     src += ['irq_tc.c']

examples/utest/testcases/kernel/slab_tc.c (new file, +329)

/*
 * Copyright (c) 2006-2019, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-10-14     tyx          the first version
 */

#include <rtthread.h>
#include <stdlib.h>
#include "utest.h"

#define TEST_SLAB_SIZE (1024 * 1024)

/* compare a memory region against a single fill byte; returns 0 on match */
static int _mem_cmp(void *ptr, rt_uint8_t v, rt_size_t size)
{
    while (size-- != 0)
    {
        if (*(rt_uint8_t *)ptr != v)
            return *(rt_uint8_t *)ptr - v;
        ptr = (rt_uint8_t *)ptr + 1;
    }
    return 0;
}

struct slab_alloc_context
{
    rt_list_t node;
    rt_size_t size;
    rt_uint8_t magic;
};

struct slab_alloc_head
{
    rt_list_t list;
    rt_size_t count;
    rt_tick_t start;
    rt_tick_t end;
    rt_tick_t interval;
};

#define SLAB_RANG_ALLOC_BLK_MIN    2
#define SLAB_RANG_ALLOC_BLK_MAX    5
#define SLAB_RANG_ALLOC_TEST_TIME  10

/* randomly allocate and free blocks from a slab heap for a fixed period,
 * verifying each block's fill pattern before it is freed */
static void slab_alloc_test(void)
{
    struct slab_alloc_head head;
    rt_uint8_t *begin, *buf;
    struct rt_slab *heap;
    rt_size_t buf_size, size;
    struct slab_alloc_context *ctx;

    /* init */
    rt_list_init(&head.list);
    head.count = 0;
    head.start = rt_tick_get();
    head.end = rt_tick_get() + rt_tick_from_millisecond(SLAB_RANG_ALLOC_TEST_TIME * 1000);
    head.interval = (head.end - head.start) / 20;
    buf = rt_malloc(TEST_SLAB_SIZE);
    uassert_not_null(buf);
    uassert_int_equal(RT_ALIGN((rt_ubase_t)buf, RT_ALIGN_SIZE), (rt_ubase_t)buf);
    heap = (struct rt_slab *)buf;
    begin = (rt_uint8_t *)&heap[1];
    buf_size = buf + TEST_SLAB_SIZE - begin;
    rt_memset(buf, 0xAA, TEST_SLAB_SIZE);
    rt_slab_init(heap, "slab_tc", begin, buf_size);
    // test run
    while (head.end - head.start < RT_TICK_MAX / 2)
    {
        if (rt_tick_get() - head.start >= head.interval)
        {
            head.start = rt_tick_get();
            rt_kprintf("#");
        }
        // 60% probability to perform an alloc operation
        if (rand() % 10 >= 4)
        {
            size = rand() % SLAB_RANG_ALLOC_BLK_MAX + SLAB_RANG_ALLOC_BLK_MIN;
            size *= sizeof(struct slab_alloc_context);
            ctx = rt_slab_alloc(heap, size);
            if (ctx == RT_NULL)
            {
                if (head.count == 0)
                {
                    break;
                }
                // out of memory: free half of the allocated blocks
                size = head.count / 2;
                while (size != head.count)
                {
                    ctx = rt_list_first_entry(&head.list, struct slab_alloc_context, node);
                    rt_list_remove(&ctx->node);
                    if (ctx->size > sizeof(*ctx))
                    {
                        if (_mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx)) != 0)
                        {
                            uassert_true(0);
                        }
                    }
                    rt_memset(ctx, 0xAA, ctx->size);
                    rt_slab_free(heap, ctx);
                    head.count--;
                }
                continue;
            }
            //if (RT_ALIGN((rt_ubase_t)ctx, RT_ALIGN_SIZE) != (rt_ubase_t)ctx)
            //{
            //    uassert_int_equal(RT_ALIGN((rt_ubase_t)ctx, RT_ALIGN_SIZE), (rt_ubase_t)ctx);
            //}
            rt_memset(ctx, 0, size);
            rt_list_init(&ctx->node);
            ctx->size = size;
            ctx->magic = rand() & 0xff;
            if (ctx->size > sizeof(*ctx))
            {
                rt_memset(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
            }
            rt_list_insert_after(&head.list, &ctx->node);
            head.count += 1;
        }
        else
        {
            if (!rt_list_isempty(&head.list))
            {
                ctx = rt_list_first_entry(&head.list, struct slab_alloc_context, node);
                rt_list_remove(&ctx->node);
                if (ctx->size > sizeof(*ctx))
                {
                    if (_mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx)) != 0)
                    {
                        uassert_true(0);
                    }
                }
                rt_memset(ctx, 0xAA, ctx->size);
                rt_slab_free(heap, ctx);
                head.count--;
            }
        }
    }
    // free all remaining blocks
    while (!rt_list_isempty(&head.list))
    {
        ctx = rt_list_first_entry(&head.list, struct slab_alloc_context, node);
        rt_list_remove(&ctx->node);
        if (ctx->size > sizeof(*ctx))
        {
            if (_mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx)) != 0)
            {
                uassert_true(0);
            }
        }
        rt_memset(ctx, 0xAA, ctx->size);
        rt_slab_free(heap, ctx);
        head.count--;
    }
    uassert_int_equal(head.count, 0);
    // slab heap deinit
    rt_slab_detach(heap);
    /* release test resources */
    rt_free(buf);
}

#define SLAB_RANG_REALLOC_BLK_MIN    0
#define SLAB_RANG_REALLOC_BLK_MAX    5
#define SLAB_RANG_REALLOC_TEST_TIME  10

struct slab_realloc_context
{
    rt_size_t size;
    rt_uint8_t magic;
};

struct slab_realloc_head
{
    struct slab_realloc_context **ctx_tab;
    rt_size_t count;
    rt_tick_t start;
    rt_tick_t end;
    rt_tick_t interval;
};

/* randomly grow, shrink, and release blocks through rt_slab_realloc() for a
 * fixed period, verifying that the preserved part of each block keeps its
 * fill pattern across reallocation */
static void slab_realloc_test(void)
{
    struct slab_realloc_head head;
    rt_uint8_t *begin, *buf;
    struct rt_slab *heap;
    rt_size_t buf_size, size, idx;
    struct slab_realloc_context *ctx;
    int res;

    size = RT_ALIGN(sizeof(struct slab_realloc_context), RT_ALIGN_SIZE) + RT_ALIGN_SIZE;
    size = TEST_SLAB_SIZE / size;
    /* init */
    head.ctx_tab = RT_NULL;
    head.count = size;
    head.start = rt_tick_get();
    head.end = rt_tick_get() + rt_tick_from_millisecond(SLAB_RANG_REALLOC_TEST_TIME * 1000);
    head.interval = (head.end - head.start) / 20;
    buf = rt_malloc(TEST_SLAB_SIZE);
    uassert_not_null(buf);
    uassert_int_equal(RT_ALIGN((rt_ubase_t)buf, RT_ALIGN_SIZE), (rt_ubase_t)buf);
    heap = (struct rt_slab *)buf;
    begin = (rt_uint8_t *)&heap[1];
    buf_size = buf + TEST_SLAB_SIZE - begin;
    rt_memset(buf, 0xAA, TEST_SLAB_SIZE);
    rt_slab_init(heap, "slab_tc", begin, buf_size);
    /* init ctx tab */
    size = head.count * sizeof(struct slab_realloc_context *);
    head.ctx_tab = rt_slab_alloc(heap, size);
    uassert_not_null(head.ctx_tab);
    rt_memset(head.ctx_tab, 0, size);
    // test run
    while (head.end - head.start < RT_TICK_MAX / 2)
    {
        if (rt_tick_get() - head.start >= head.interval)
        {
            head.start = rt_tick_get();
            rt_kprintf("#");
        }
        size = rand() % SLAB_RANG_REALLOC_BLK_MAX + SLAB_RANG_REALLOC_BLK_MIN;
        size *= sizeof(struct slab_realloc_context);
        idx = rand() % head.count;
        ctx = rt_slab_realloc(heap, head.ctx_tab[idx], size);
        if (ctx == RT_NULL)
        {
            if (size == 0)
            {
                // realloc to size 0 frees the block
                if (head.ctx_tab[idx])
                {
                    head.ctx_tab[idx] = RT_NULL;
                }
            }
            else
            {
                // out of memory: randomly release about half of the blocks
                for (idx = 0; idx < head.count; idx++)
                {
                    ctx = head.ctx_tab[idx];
                    if (rand() % 2 && ctx)
                    {
                        if (ctx->size > sizeof(*ctx))
                        {
                            res = _mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
                            if (res != 0)
                            {
                                uassert_int_equal(res, 0);
                            }
                        }
                        rt_memset(ctx, 0xAA, ctx->size);
                        rt_slab_realloc(heap, ctx, 0);
                        head.ctx_tab[idx] = RT_NULL;
                    }
                }
            }
            continue;
        }
        /* check slab */
        if (head.ctx_tab[idx] != RT_NULL)
        {
            res = 0;
            if (ctx->size < size)
            {
                if (ctx->size > sizeof(*ctx))
                {
                    res = _mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
                }
            }
            else
            {
                if (size > sizeof(*ctx))
                {
                    res = _mem_cmp(&ctx[1], ctx->magic, size - sizeof(*ctx));
                }
            }
            if (res != 0)
            {
                uassert_int_equal(res, 0);
            }
        }
        /* init slab */
        ctx->magic = rand() & 0xff;
        ctx->size = size;
        if (ctx->size > sizeof(*ctx))
        {
            rt_memset(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
        }
        head.ctx_tab[idx] = ctx;
    }
    // free all remaining blocks
    for (idx = 0; idx < head.count; idx++)
    {
        ctx = head.ctx_tab[idx];
        if (ctx == RT_NULL)
        {
            continue;
        }
        if (ctx->size > sizeof(*ctx))
        {
            res = _mem_cmp(&ctx[1], ctx->magic, ctx->size - sizeof(*ctx));
            if (res != 0)
            {
                uassert_int_equal(res, 0);
            }
        }
        rt_memset(ctx, 0xAA, ctx->size);
        rt_slab_realloc(heap, ctx, 0);
        head.ctx_tab[idx] = RT_NULL;
    }
    // slab heap deinit
    rt_slab_detach(heap);
    /* release test resources */
    rt_free(buf);
}

static rt_err_t utest_tc_init(void)
{
    return RT_EOK;
}

static rt_err_t utest_tc_cleanup(void)
{
    return RT_EOK;
}

static void testcase(void)
{
    UTEST_UNIT_RUN(slab_alloc_test);
    UTEST_UNIT_RUN(slab_realloc_test);
}
UTEST_TC_EXPORT(testcase, "testcases.kernel.slab_tc", utest_tc_init, utest_tc_cleanup, 10);
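
For readers new to the slab allocator, both test units follow the same standalone slab-heap lifecycle: a struct rt_slab object is placed at the start of an ordinary buffer, the rest of the buffer is handed to rt_slab_init(), blocks are obtained and released with rt_slab_alloc()/rt_slab_realloc()/rt_slab_free(), and the heap is torn down with rt_slab_detach() before the backing buffer is freed. The sketch below condenses that lifecycle under the assumption that the rt_slab API behaves exactly as used in the test case above; the function name slab_lifecycle_demo, the heap name "slab_demo", and the sizes (64 KiB heap, 128/256-byte blocks) are illustrative only.

#include <rtthread.h>

#define DEMO_HEAP_SIZE (64 * 1024)   /* illustrative backing-buffer size */

static void slab_lifecycle_demo(void)
{
    rt_uint8_t *buf, *begin;
    struct rt_slab *heap;
    rt_size_t begin_size;
    void *blk;

    /* back the slab heap with an ordinary heap buffer */
    buf = rt_malloc(DEMO_HEAP_SIZE);
    if (buf == RT_NULL)
        return;

    /* the struct rt_slab object lives at the head of the buffer;
     * the managed region starts right after it, as in the test case */
    heap = (struct rt_slab *)buf;
    begin = (rt_uint8_t *)&heap[1];
    begin_size = buf + DEMO_HEAP_SIZE - begin;
    rt_slab_init(heap, "slab_demo", begin, begin_size);

    /* allocate, grow, and release one block */
    blk = rt_slab_alloc(heap, 128);
    if (blk != RT_NULL)
    {
        blk = rt_slab_realloc(heap, blk, 256);  /* may move the data */
        if (blk != RT_NULL)
            rt_slab_free(heap, blk);
    }

    /* detach the slab heap, then release the backing buffer */
    rt_slab_detach(heap);
    rt_free(buf);
}

Once UTEST_SLAB_TC is enabled (it requires RT_USING_SLAB, per the Kconfig change above), UTEST_TC_EXPORT registers the case under the name "testcases.kernel.slab_tc"; with the utest framework built in, it can typically be started from the msh console via the framework's utest_run command.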
