Skip to content

Commit 2e0d00b

Browse files
committed
tests: kernel: timer: test delay for schedule in timer callback
This verifies that when a second timer is scheduled within a delayed timer callback the lateness of the callback (as reflected by unprocessed announced ticks) does not impact the timeout of the newly scheduled timer. Relates to issue #12332. Signed-off-by: Peter A. Bigot <[email protected]>
1 parent 2d971c7 commit 2e0d00b

File tree

6 files changed

+210
-0
lines changed

6 files changed

+210
-0
lines changed
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
# Build script for the kernel timer scheduling test application.
cmake_minimum_required(VERSION 3.13.1)

# Pull in the standard Zephyr application build machinery.
include($ENV{ZEPHYR_BASE}/cmake/app/boilerplate.cmake NO_POLICY_SCOPE)
project(timer_schedule)

# Compile every C file under src/ into the app target.
FILE(GLOB app_sources src/*.c)
target_sources(app PRIVATE ${app_sources})
+27
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
$ make run

[QEMU] CPU: qemu32
Running test suite test_timer_api
tc_start() - test_timer_duration_period
===================================================================
PASS - test_timer_duration_period.
tc_start() - test_timer_period_0
===================================================================
PASS - test_timer_period_0.
tc_start() - test_timer_expirefn_null
===================================================================
PASS - test_timer_expirefn_null.
tc_start() - test_timer_status_get
===================================================================
PASS - test_timer_status_get.
tc_start() - test_timer_status_get_anytime
===================================================================
PASS - test_timer_status_get_anytime.
tc_start() - test_timer_status_sync
===================================================================
PASS - test_timer_status_sync.
tc_start() - test_timer_k_define
===================================================================
PASS - test_timer_k_define.
===================================================================
PROJECT EXECUTION SUCCESSFUL
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
# Enable the ztest unit-test framework.
CONFIG_ZTEST=y
# Stabilize timing behavior when running under QEMU.
CONFIG_QEMU_TICKLESS_WORKAROUND=y
# 100 ticks/s => 10 ms per tick, so T2's 50 ms timeout spans 5 ticks.
CONFIG_SYS_CLOCK_TICKS_PER_SEC=100
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
# Enable the ztest unit-test framework.
CONFIG_ZTEST=y
# Power management is required to enable tickless operation.
CONFIG_SYS_POWER_MANAGEMENT=y
# Exercise the same test under the tickless kernel configuration.
CONFIG_TICKLESS_KERNEL=y
# 100 ticks/s => 10 ms per tick, so T2's 50 ms timeout spans 5 ticks.
CONFIG_SYS_CLOCK_TICKS_PER_SEC=100
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,161 @@
/*
 * Copyright (c) 2018 Peter Bigot Consulting, LLC
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/* T1 is a periodic timer with a T1_PERIOD (1000 ms) interval.  When
 * it fires, it schedules T2 as a one-shot timer due in T2_TIMEOUT
 * (50) ms.
 *
 * To produce the theoretical mis-handling we need to construct a
 * situation where tick processing is delayed such that when T1 fires
 * there is at least one tick remaining that is used to prematurely
 * reduce the delay of the T2 that gets scheduled when T1 is
 * processed.
 *
 * We do this by having the main loop wait until T2 fires the 3rd time,
 * indicated by a semaphore.  When it can take the semaphore it locks
 * interrupt handling for T1's period minus half of T2's timeout,
 * which means the next T1 will fire half T2's timeout late, and the
 * delay for T2 should be reduced by half.  It then waits for T2 to
 * run.  The delay for T2 will be shorter than in the non-blocking
 * case if the mis-handling occurs.  Relates to Zephyr issue #12332.
 */

#include <zephyr.h>
#include <ztest.h>

#define T1_PERIOD 1000		/* [ms] T1 repeat interval */
#define T2_TIMEOUT 50		/* [ms] T2 one-shot delay */

/* Hardware clock cycles per kernel tick. */
#define CYC_PER_TICK (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC \
		      / CONFIG_SYS_CLOCK_TICKS_PER_SEC)

/* T2's timeout expressed in kernel ticks.  NOTE(review): assumes
 * K_MSEC(ms) evaluates to a plain millisecond count in this kernel
 * version -- confirm against the kernel headers in use.
 */
#define T2_TIMEOUT_TICK (K_MSEC(T2_TIMEOUT) \
			 * CONFIG_SYS_CLOCK_TICKS_PER_SEC \
			 / MSEC_PER_SEC)

static struct k_timer timer1;		/* periodic T1 */
static struct k_timer timer2;		/* one-shot T2, started from T1's callback */
static struct k_timer sync_timer;	/* used once to align with a tick boundary */
static struct k_sem semaphore;		/* given by T2's callback each round */

/* Measurements shared between the timer callbacks (ISR context) and
 * the test thread.
 */
static struct state {
	/* Completed measurement rounds; advanced by the test thread. */
	unsigned int run;

	/** k_uptime_get_32() when T1 last expired */
	u32_t t1_exec_ut;
	/** k_cycle_get_32() when T1 last expired */
	u32_t t1_exec_ct;

	/** Difference in k_cycle_get() between most recent two T1 expires */
	s32_t t1_delay_ct;
	/** Difference in k_uptime_get() between most recent two T1 expires */
	s32_t t1_delay_ut;
	/** Difference in k_cycle_get() between T2 start and callback */
	s32_t t2_delay_ct;
	/** Difference in k_uptime_get() between T2 start and callback */
	s32_t t2_delay_ut;
	/** Tick-corrected measured realtime between T2 start and callback */
	s32_t t2_delay_us;
} state;
static void timer1_expire(struct k_timer *timer)
63+
{
64+
state.t1_exec_ut = k_uptime_get_32();
65+
state.t1_exec_ct = k_cycle_get_32();
66+
k_timer_start(&timer2, K_MSEC(T2_TIMEOUT), 0);
67+
}
68+
69+
static void timer2_expire(struct k_timer *timer)
70+
{
71+
static u32_t t1_prev_ct;
72+
static u32_t t1_prev_ut;
73+
u32_t now_ct = k_cycle_get_32();
74+
u32_t now_ut = k_uptime_get_32();
75+
76+
state.t1_delay_ct = state.t1_exec_ct - t1_prev_ct;
77+
state.t1_delay_ut = state.t1_exec_ut - t1_prev_ut;
78+
state.t2_delay_ct = now_ct - state.t1_exec_ct;
79+
state.t2_delay_ut = now_ut - state.t1_exec_ut;
80+
81+
if (USEC_PER_SEC < sys_clock_hw_cycles_per_sec()) {
82+
u32_t div = sys_clock_hw_cycles_per_sec()
83+
/ USEC_PER_SEC;
84+
state.t2_delay_us = state.t2_delay_ct / div;
85+
} else {
86+
state.t2_delay_us = state.t2_delay_ct
87+
* (u64_t)USEC_PER_SEC
88+
/ sys_clock_hw_cycles_per_sec();
89+
}
90+
t1_prev_ct = state.t1_exec_ct;
91+
t1_prev_ut = state.t1_exec_ut;
92+
93+
k_sem_give(&semaphore);
94+
}
95+
96+
97+
/* Drive the test: run T1 periodically for six rounds, each round
 * waiting for T2's callback and asserting T2's measured delay stays
 * within one tick of its nominal timeout.  On round 3 interrupts are
 * locked long enough that the next T1 expiry is late by half of T2's
 * timeout; the late T1 must NOT shorten T2's subsequent delay
 * (regression check for issue #12332).
 */
static void test_schedule(void)
{
	k_timer_init(&timer1, timer1_expire, NULL);
	k_timer_init(&timer2, timer2_expire, NULL);
	k_sem_init(&semaphore, 0, 1);

	TC_PRINT("T1 interval %u ms, T2 timeout %u ms, %u sysclock per tick\n",
		 T1_PERIOD, T2_TIMEOUT, sys_clock_hw_cycles_per_sec());

	/* Spin a throwaway 1-tick timer once so the test starts
	 * aligned with a tick boundary.
	 */
	k_timer_init(&sync_timer, NULL, NULL);
	k_timer_start(&sync_timer, 0, 1);
	k_timer_status_sync(&sync_timer);
	k_timer_stop(&sync_timer);

	k_timer_start(&timer1, K_MSEC(T1_PERIOD), K_MSEC(T1_PERIOD));

	while (state.run < 6) {
		/* Accept a measured delay within +/- one tick of nominal. */
		static s32_t t2_lower_tick = T2_TIMEOUT_TICK - 1;
		static s32_t t2_upper_tick = T2_TIMEOUT_TICK + 1;
		s32_t t2_delay_tick;

		/* Block until T2's callback signals a measurement. */
		k_sem_take(&semaphore, K_FOREVER);

		/* Round 0 has no previous-T1 baseline; skip its checks. */
		if (state.run > 0) {
			/* Convert measured microseconds to ticks via a
			 * 64-bit intermediate to avoid overflow.
			 */
			t2_delay_tick = state.t2_delay_us
				* (u64_t)CONFIG_SYS_CLOCK_TICKS_PER_SEC
				/ USEC_PER_SEC;

			TC_PRINT("Run %u timer1 last %u interval %d/%d; "
				 " timer2 delay %d/%d = %d us = %d tick\n",
				 state.run, state.t1_exec_ut,
				 state.t1_delay_ct, state.t1_delay_ut,
				 state.t2_delay_ct, state.t2_delay_ut,
				 state.t2_delay_us, t2_delay_tick);

			zassert_true(t2_delay_tick >= t2_lower_tick,
				     "expected delay %d >= %d",
				     t2_delay_tick, t2_lower_tick);
			zassert_true(t2_delay_tick <= t2_upper_tick,
				     "expected delay %d <= %d",
				     t2_delay_tick, t2_upper_tick);
		}

		if (state.run == 3) {
			unsigned int key;

			TC_PRINT("blocking\n");

			/* Lock interrupts past the next T1 expiry so it
			 * fires late by half of T2's timeout, setting up
			 * the delayed-announcement scenario under test.
			 */
			key = irq_lock();
			k_busy_wait(K_MSEC(T1_PERIOD - T2_TIMEOUT / 2)
				    * USEC_PER_MSEC);
			irq_unlock(key);
		}

		++state.run;
	}

	k_timer_stop(&timer1);
}
156+
157+
void test_main(void)
158+
{
159+
ztest_test_suite(timer_fn, ztest_unit_test(test_schedule));
160+
ztest_run_test_suite(timer_fn);
161+
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
tests:
  # Default configuration (prj.conf): built and executed.
  kernel.timer:
    tags: kernel
    arch_exclude: riscv32 nios2 posix
  # Tickless-kernel variant: build-only, using prj_tickless.conf.
  kernel.timer.tickless:
    build_only: true
    extra_args: CONF_FILE="prj_tickless.conf"
    arch_exclude: riscv32 nios2 posix
    tags: kernel

0 commit comments

Comments
 (0)