diff --git a/include/kernel.h b/include/kernel.h index faa03f6a3cfd..7687ea2128ad 100644 --- a/include/kernel.h +++ b/include/kernel.h @@ -389,7 +389,7 @@ struct _thread_base { /* this thread's entry in a ready/wait queue */ union { - sys_dlist_t qnode_dlist; + sys_dnode_t qnode_dnode; struct rbnode qnode_rb; }; @@ -874,9 +874,6 @@ __syscall void k_thread_start(k_tid_t thread); /* timeout has timed out and is not on _timeout_q anymore */ #define _EXPIRED (-2) -/* timeout is not in use */ -#define _INACTIVE (-1) - struct _static_thread_data { struct k_thread *init_thread; k_thread_stack_t *init_stack; @@ -1335,7 +1332,7 @@ struct k_timer { #define _K_TIMER_INITIALIZER(obj, expiry, stop) \ { \ - .timeout.dticks = _INACTIVE, \ + .timeout.dticks = 0, \ .timeout.fn = _timer_expiration_handler, \ .wait_q = _WAIT_Q_INIT(&obj.wait_q), \ .expiry_fn = expiry, \ diff --git a/include/misc/dlist.h b/include/misc/dlist.h index 937a55c28f18..5a8352f3f455 100644 --- a/include/misc/dlist.h +++ b/include/misc/dlist.h @@ -181,7 +181,7 @@ typedef struct _dnode sys_dnode_t; __cns = SYS_DLIST_PEEK_NEXT_CONTAINER(__dl, __cn, __n)) /** - * @brief initialize list + * @brief initialize list to its empty state * * @param list the doubly-linked list * @@ -196,6 +196,33 @@ static inline void sys_dlist_init(sys_dlist_t *list) #define SYS_DLIST_STATIC_INIT(ptr_to_list) { {(ptr_to_list)}, {(ptr_to_list)} } +/** + * @brief initialize node to its state when not in a list + * + * @param node the node + * + * @return N/A + */ + +static inline void sys_dnode_init(sys_dnode_t *node) +{ + node->next = NULL; + node->prev = NULL; +} + +/** + * @brief check if a node is a member of any list + * + * @param node the node + * + * @return true if node is linked into a list, false if it is not + */ + +static inline bool sys_dnode_is_linked(const sys_dnode_t *node) +{ + return node->next != NULL; +} + /** * @brief check if a node is the list's head * @@ -500,6 +527,7 @@ static inline void 
sys_dlist_remove(sys_dnode_t *node) { node->prev->next = node->next; node->next->prev = node->prev; + sys_dnode_init(node); } /** @@ -524,6 +552,68 @@ static inline sys_dnode_t *sys_dlist_get(sys_dlist_t *list) return node; } +/** + * @brief place the contents of one list at the end of another list. + * + * The @p to and @p from lists must be distinct. On completion @p from + * will be empty, all of its elements having been appended in original + * order to @p to. + * + * @param to a list, possibly non-empty, to which from will be appended + * @param from the list providing the elements to append + * + * @return N/A + */ +static inline void sys_dlist_join(sys_dlist_t *to, + sys_dlist_t *from) +{ + if (!sys_dlist_is_empty(from)) { + from->head->prev = to->tail; + to->tail->next = from->head; + + from->tail->next = to; + to->tail = from->tail; + + sys_dlist_init(from); + } +} + +/** + * @brief split a list at a node + * + * list will be updated to start at node. Any nodes before node will + * be appended to prefix. + * + * This and other sys_dlist_*() functions are not thread safe. 
+ * + * @param prefix a list to which items in @p list before @p node + * will be appended + * @param list a non-empty list + * @param node a node within @p list + * + * @return N/A + */ +static inline void sys_dlist_split(sys_dlist_t *prefix, + sys_dlist_t *list, + sys_dnode_t *node) +{ + sys_dnode_t *old_pfx_tail = prefix->tail; + sys_dnode_t *new_pfx_tail = node->prev; + + if (sys_dlist_peek_head(list) == node) { + return; + } + + list->head->prev = old_pfx_tail; + old_pfx_tail->next = list->head; + + prefix->tail = new_pfx_tail; + new_pfx_tail->next = prefix; + + list->head = node; + node->prev = list; +} + #ifdef __cplusplus } #endif diff --git a/kernel/include/ksched.h b/kernel/include/ksched.h index f44e88da2708..f44df94894d7 100644 --- a/kernel/include/ksched.h +++ b/kernel/include/ksched.h @@ -8,6 +8,7 @@ #define ZEPHYR_KERNEL_INCLUDE_KSCHED_H_ #include +#include #include #include @@ -83,11 +84,7 @@ static inline int _is_thread_prevented_from_running(struct k_thread *thread) static inline bool _is_thread_timeout_active(struct k_thread *thread) { -#ifdef CONFIG_SYS_CLOCK_EXISTS - return thread->base.timeout.dticks != _INACTIVE; -#else - return false; -#endif + return !_is_inactive_timeout(&thread->base.timeout); } static inline bool _is_thread_ready(struct k_thread *thread) diff --git a/kernel/include/timeout_q.h b/kernel/include/timeout_q.h index a8d6428c91f1..10e3b887eac6 100644 --- a/kernel/include/timeout_q.h +++ b/kernel/include/timeout_q.h @@ -22,13 +22,19 @@ extern "C" { static inline void _init_timeout(struct _timeout *t, _timeout_func_t fn) { - t->dticks = _INACTIVE; + sys_dnode_init(&t->node); + t->dticks = 0; } void _add_timeout(struct _timeout *to, _timeout_func_t fn, s32_t ticks); int _abort_timeout(struct _timeout *to); +static inline bool _is_inactive_timeout(struct _timeout *t) +{ + return !sys_dnode_is_linked(&t->node); +} + static inline void _init_thread_timeout(struct _thread_base *thread_base) { _init_timeout(&thread_base->timeout, 
NULL); @@ -58,6 +64,7 @@ s32_t z_timeout_remaining(struct _timeout *timeout); #define _init_thread_timeout(t) do {} while (0) #define _add_thread_timeout(th, to) do {} while (0 && (void *)to && (void *)th) #define _abort_thread_timeout(t) (0) +#define _is_inactive_timeout(t) 0 #define _get_next_timeout_expiry() (K_FOREVER) #define z_set_timeout_expiry(t, i) do {} while (0) diff --git a/kernel/include/wait_q.h b/kernel/include/wait_q.h index 302909ce5c9c..c41a12ab37e6 100644 --- a/kernel/include/wait_q.h +++ b/kernel/include/wait_q.h @@ -43,7 +43,7 @@ static inline struct k_thread *_waitq_head(_wait_q_t *w) #define _WAIT_Q_FOR_EACH(wq, thread_ptr) \ SYS_DLIST_FOR_EACH_CONTAINER(&((wq)->waitq), thread_ptr, \ - base.qnode_dlist) + base.qnode_dnode) static inline void _waitq_init(_wait_q_t *w) { diff --git a/kernel/pipes.c b/kernel/pipes.c index 0d84c85235a6..9d69b549856d 100644 --- a/kernel/pipes.c +++ b/kernel/pipes.c @@ -369,7 +369,7 @@ static bool pipe_xfer_prepare(sys_dlist_t *xfer_list, * Add it to the transfer list. */ _unpend_thread(thread); - sys_dlist_append(xfer_list, &thread->base.qnode_dlist); + sys_dlist_append(xfer_list, &thread->base.qnode_dnode); } *waiter = (num_bytes > bytes_to_xfer) ? 
thread : NULL; diff --git a/kernel/poll.c b/kernel/poll.c index 4ff876e67b79..6c0dff797b07 100644 --- a/kernel/poll.c +++ b/kernel/poll.c @@ -133,20 +133,22 @@ static inline int register_event(struct k_poll_event *event, /* must be called with interrupts locked */ static inline void clear_event_registration(struct k_poll_event *event) { + bool remove = false; + event->poller = NULL; switch (event->type) { case K_POLL_TYPE_SEM_AVAILABLE: __ASSERT(event->sem != NULL, "invalid semaphore\n"); - sys_dlist_remove(&event->_node); + remove = true; break; case K_POLL_TYPE_DATA_AVAILABLE: __ASSERT(event->queue != NULL, "invalid queue\n"); - sys_dlist_remove(&event->_node); + remove = true; break; case K_POLL_TYPE_SIGNAL: __ASSERT(event->signal != NULL, "invalid poll signal\n"); - sys_dlist_remove(&event->_node); + remove = true; break; case K_POLL_TYPE_IGNORE: /* nothing to do */ @@ -155,6 +157,9 @@ static inline void clear_event_registration(struct k_poll_event *event) __ASSERT(false, "invalid event type\n"); break; } + if (remove && sys_dnode_is_linked(&event->_node)) { + sys_dlist_remove(&event->_node); + } } /* must be called with interrupts locked */ diff --git a/kernel/sched.c b/kernel/sched.c index b3e4bd33563f..9a923173a9a0 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -571,29 +571,33 @@ void _priq_dumb_add(sys_dlist_t *pq, struct k_thread *thread) __ASSERT_NO_MSG(!_is_idle(thread)); - SYS_DLIST_FOR_EACH_CONTAINER(pq, t, base.qnode_dlist) { + SYS_DLIST_FOR_EACH_CONTAINER(pq, t, base.qnode_dnode) { if (_is_t1_higher_prio_than_t2(thread, t)) { - sys_dlist_insert_before(pq, &t->base.qnode_dlist, - &thread->base.qnode_dlist); + sys_dlist_insert_before(pq, &t->base.qnode_dnode, + &thread->base.qnode_dnode); return; } } - sys_dlist_append(pq, &thread->base.qnode_dlist); + sys_dlist_append(pq, &thread->base.qnode_dnode); } void _priq_dumb_remove(sys_dlist_t *pq, struct k_thread *thread) { __ASSERT_NO_MSG(!_is_idle(thread)); - 
sys_dlist_remove(&thread->base.qnode_dlist); + sys_dlist_remove(&thread->base.qnode_dnode); } struct k_thread *_priq_dumb_best(sys_dlist_t *pq) { + struct k_thread *t = NULL; sys_dnode_t *n = sys_dlist_peek_head(pq); - return CONTAINER_OF(n, struct k_thread, base.qnode_dlist); + if (n != NULL) { + t = CONTAINER_OF(n, struct k_thread, base.qnode_dnode); + } + return t; } bool _priq_rb_lessthan(struct rbnode *a, struct rbnode *b) @@ -648,9 +652,13 @@ void _priq_rb_remove(struct _priq_rb *pq, struct k_thread *thread) struct k_thread *_priq_rb_best(struct _priq_rb *pq) { + struct k_thread *t = NULL; struct rbnode *n = rb_get_min(&pq->tree); - return CONTAINER_OF(n, struct k_thread, base.qnode_rb); + if (n != NULL) { + t = CONTAINER_OF(n, struct k_thread, base.qnode_rb); + } + return t; } #ifdef CONFIG_SCHED_MULTIQ @@ -663,7 +671,7 @@ void _priq_mq_add(struct _priq_mq *pq, struct k_thread *thread) { int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO; - sys_dlist_append(&pq->queues[priority_bit], &thread->base.qnode_dlist); + sys_dlist_append(&pq->queues[priority_bit], &thread->base.qnode_dnode); pq->bitmask |= (1 << priority_bit); } @@ -671,7 +679,7 @@ void _priq_mq_remove(struct _priq_mq *pq, struct k_thread *thread) { int priority_bit = thread->base.prio - K_HIGHEST_THREAD_PRIO; - sys_dlist_remove(&thread->base.qnode_dlist); + sys_dlist_remove(&thread->base.qnode_dnode); if (sys_dlist_is_empty(&pq->queues[priority_bit])) { pq->bitmask &= ~(1 << priority_bit); } @@ -683,10 +691,14 @@ struct k_thread *_priq_mq_best(struct _priq_mq *pq) return NULL; } + struct k_thread *t = NULL; sys_dlist_t *l = &pq->queues[__builtin_ctz(pq->bitmask)]; sys_dnode_t *n = sys_dlist_peek_head(l); - return CONTAINER_OF(n, struct k_thread, base.qnode_dlist); + if (n != NULL) { + t = CONTAINER_OF(n, struct k_thread, base.qnode_dnode); + } + return t; } int _unpend_all(_wait_q_t *wait_q) @@ -884,7 +896,7 @@ void _impl_k_wakeup(k_tid_t thread) return; } - if 
(_abort_thread_timeout(thread) == _INACTIVE) { + if (_abort_thread_timeout(thread) < 0) { irq_unlock(key); return; } diff --git a/kernel/timeout.c b/kernel/timeout.c index e435acff4dc8..c2d1f5a2752a 100644 --- a/kernel/timeout.c +++ b/kernel/timeout.c @@ -23,9 +23,6 @@ static struct k_spinlock timeout_lock; static bool can_wait_forever; -/* Cycles left to process in the currently-executing z_clock_announce() */ -static int announce_remaining; - #if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME) int z_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC; #endif @@ -44,37 +41,25 @@ static struct _timeout *next(struct _timeout *t) return n == NULL ? NULL : CONTAINER_OF(n, struct _timeout, node); } -static void remove_timeout(struct _timeout *t) -{ - if (t->node.next != NULL && t->node.prev != NULL) { - if (next(t) != NULL) { - next(t)->dticks += t->dticks; - } - - sys_dlist_remove(&t->node); - } - t->node.next = t->node.prev = NULL; - t->dticks = _INACTIVE; -} - -static s32_t elapsed(void) -{ - return announce_remaining == 0 ? z_clock_elapsed() : 0; -} - void _add_timeout(struct _timeout *to, _timeout_func_t fn, s32_t ticks) { - __ASSERT(to->dticks < 0, ""); + __ASSERT(!sys_dnode_is_linked(&to->node), ""); to->fn = fn; + + /* @todo This really ought to be removed to allow scheduling + * with negative delays, since the floor operation fails to + * maintain correct periodicity for timers that are so late + * they missed more than an interval. But without it + * kernel/timer/timer_api:test_timer_periodicity fails. Is + * the test making incorrect assumptions about how to trick + * the system? 
*/ ticks = max(1, ticks); LOCKED(&timeout_lock) { struct _timeout *t; - to->dticks = ticks + elapsed(); + to->dticks = ticks + z_clock_elapsed(); for (t = first(); t != NULL; t = next(t)) { - __ASSERT(t->dticks >= 0, ""); - if (t->dticks > to->dticks) { t->dticks -= to->dticks; sys_dlist_insert_before(&timeout_list, @@ -96,14 +81,18 @@ void _add_timeout(struct _timeout *to, _timeout_func_t fn, s32_t ticks) int _abort_timeout(struct _timeout *to) { - int ret = _INACTIVE; + int ret = -EINVAL; LOCKED(&timeout_lock) { - if (to->dticks != _INACTIVE) { - remove_timeout(to); + if (sys_dnode_is_linked(&to->node)) { + if (next(to) != NULL) { + next(to)->dticks += to->dticks; + } + sys_dlist_remove(&to->node); ret = 0; } } + to->dticks = 0; return ret; } @@ -112,7 +101,7 @@ s32_t z_timeout_remaining(struct _timeout *timeout) { s32_t ticks = 0; - if (timeout->dticks == _INACTIVE) { + if (_is_inactive_timeout(timeout)) { return 0; } @@ -136,7 +125,7 @@ s32_t _get_next_timeout_expiry(void) LOCKED(&timeout_lock) { struct _timeout *to = first(); - ret = to == NULL ? maxw : max(0, to->dticks - elapsed()); + ret = to == NULL ? 
maxw : max(0, to->dticks - z_clock_elapsed()); } #ifdef CONFIG_TIMESLICING @@ -172,33 +161,51 @@ void z_clock_announce(s32_t ticks) z_time_slice(ticks); #endif + sys_dlist_t ready; + sys_dnode_t *node; + s32_t remaining_ticks = ticks; k_spinlock_key_t key = k_spin_lock(&timeout_lock); + struct _timeout *t = first(); - announce_remaining = ticks; - - while (first() != NULL && first()->dticks <= announce_remaining) { - struct _timeout *t = first(); - int dt = t->dticks; + curr_tick += ticks; - curr_tick += dt; - announce_remaining -= dt; - t->dticks = 0; - remove_timeout(t); + if (!t) { + /* Fast exit, no timeouts */ + goto out; + } - k_spin_unlock(&timeout_lock, key); - t->fn(t); - key = k_spin_lock(&timeout_lock); + /* Find the first timeout that isn't at/past its deadline */ + while ((t != NULL) && (t->dticks <= remaining_ticks)) { + remaining_ticks -= t->dticks; + t = next(t); } - if (first() != NULL) { - first()->dticks -= announce_remaining; + sys_dlist_init(&ready); + if (t == NULL) { + sys_dlist_join(&ready, &timeout_list); + } else { + sys_dlist_split(&ready, &timeout_list, &t->node); + t->dticks -= remaining_ticks; } - curr_tick += announce_remaining; - announce_remaining = 0; + /* Invoke the callback of each expired timeout */ + node = sys_dlist_peek_head(&ready); + if (node) { + k_spin_unlock(&timeout_lock, key); + do { + sys_dlist_remove(node); + t = CONTAINER_OF(node, struct _timeout, node); + t->dticks -= ticks; - z_clock_set_timeout(_get_next_timeout_expiry(), false); + t->fn(t); + node = sys_dlist_peek_head(&ready); + } while (node != NULL); + key = k_spin_lock(&timeout_lock); + } + +out: + z_clock_set_timeout(_get_next_timeout_expiry(), false); k_spin_unlock(&timeout_lock, key); } diff --git a/kernel/timer.c b/kernel/timer.c index 544c5b1731f8..b160af72f833 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -57,7 +57,7 @@ void _timer_expiration_handler(struct _timeout *t) if (timer->period > 0) { key = irq_lock(); _add_timeout(&timer->timeout, 
_timer_expiration_handler, - timer->period); + timer->timeout.dticks + timer->period); irq_unlock(key); } @@ -150,7 +150,7 @@ Z_SYSCALL_HANDLER(k_timer_start, timer, duration_p, period_p) void _impl_k_timer_stop(struct k_timer *timer) { unsigned int key = irq_lock(); - bool inactive = (_abort_timeout(&timer->timeout) == _INACTIVE); + int inactive = _abort_timeout(&timer->timeout) != 0; irq_unlock(key); @@ -203,7 +203,7 @@ u32_t _impl_k_timer_status_sync(struct k_timer *timer) u32_t result = timer->status; if (result == 0) { - if (timer->timeout.dticks != _INACTIVE) { + if (!_is_inactive_timeout(&timer->timeout)) { /* wait for timer to expire or stop */ (void)_pend_current_thread(key, &timer->wait_q, K_FOREVER); diff --git a/tests/kernel/common/src/dlist.c b/tests/kernel/common/src/dlist.c index 686f4396833f..64ef73b9f3b1 100644 --- a/tests/kernel/common/src/dlist.c +++ b/tests/kernel/common/src/dlist.c @@ -8,6 +8,7 @@ #include static sys_dlist_t test_list; +static sys_dlist_t test_list2; struct container_node { sys_dnode_t node; @@ -189,9 +190,13 @@ void test_dlist(void) "test_list head/tail are wrong"); /* Finding and removing node 1 */ + zassert_true(sys_dnode_is_linked(&test_node_1.node), + "node1 is not linked"); sys_dlist_remove(&test_node_1.node); zassert_true((verify_emptyness(&test_list)), "test_list should be empty"); + zassert_false(sys_dnode_is_linked(&test_node_1.node), + "node1 is still linked"); /* Prepending node 1 */ sys_dlist_prepend(&test_list, &test_node_1.node); @@ -277,6 +282,74 @@ void test_dlist(void) zassert_true((verify_emptyness(&test_list)), "test_list should be empty"); + + /* Catenate an empty list to a non-empty list */ + sys_dlist_append(&test_list, &test_node_1.node); + sys_dlist_init(&test_list2); + sys_dlist_join(&test_list, &test_list2); + zassert_true(sys_dlist_is_empty(&test_list2), + "list2 not empty"); + zassert_true((verify_tail_head(&test_list, &test_node_1.node, + &test_node_1.node, true)), + "test_list head/tail are 
wrong"); + + /* Catenate a non-empty list to an empty list moves elements. */ + sys_dlist_join(&test_list2, &test_list); + zassert_true(sys_dlist_is_empty(&test_list), + "list not empty"); + zassert_true((verify_tail_head(&test_list2, &test_node_1.node, + &test_node_1.node, true)), + "test_list2 head/tail are wrong"); + + /* Catenate a non-empty list to a non-empty list moves elements. */ + sys_dlist_append(&test_list, &test_node_2.node); + sys_dlist_append(&test_list, &test_node_3.node); + zassert_true((verify_tail_head(&test_list, &test_node_2.node, + &test_node_3.node, false)), + "test_list head/tail are wrong"); + sys_dlist_join(&test_list2, &test_list); + zassert_true(sys_dlist_is_empty(&test_list), + "list not empty"); + zassert_true((verify_tail_head(&test_list2, &test_node_1.node, + &test_node_3.node, false)), + "test_list2 head/tail are wrong"); + zassert_equal(test_node_1.node.next, &test_node_2.node, + "node2 not after node1"); + zassert_equal(test_node_2.node.prev, &test_node_1.node, + "node1 not before node2"); + + /* Split list at head does nothing */ + sys_dlist_split(&test_list, &test_list2, &test_node_1.node); + zassert_true(sys_dlist_is_empty(&test_list), + "list not empty"); + + /* Split list after head moves */ + sys_dlist_split(&test_list, &test_list2, &test_node_2.node); + zassert_true((verify_tail_head(&test_list, &test_node_1.node, + &test_node_1.node, true)), + "test_list head/tail are wrong"); + zassert_true((verify_tail_head(&test_list2, &test_node_2.node, + &test_node_3.node, false)), + "test_list2 head/tail are wrong"); + + /* Split list after head moves */ + sys_dlist_split(&test_list, &test_list2, &test_node_3.node); + zassert_true((verify_tail_head(&test_list, &test_node_1.node, + &test_node_2.node, false)), + "test_list head/tail are wrong"); + zassert_true((verify_tail_head(&test_list2, &test_node_3.node, + &test_node_3.node, true)), + "test_list2 head/tail are wrong"); + + sys_dlist_remove(&test_node_1.node); + 
sys_dlist_remove(&test_node_2.node); + zassert_true(sys_dlist_is_empty(&test_list), + "list not empty"); + + sys_dlist_remove(&test_node_3.node); + zassert_true(sys_dlist_is_empty(&test_list2), + "list2 not empty"); + /* test iterator from a node */ struct data_node { sys_dnode_t node; diff --git a/tests/kernel/timer/timer_schedule/CMakeLists.txt b/tests/kernel/timer/timer_schedule/CMakeLists.txt new file mode 100644 index 000000000000..a564f3e682e4 --- /dev/null +++ b/tests/kernel/timer/timer_schedule/CMakeLists.txt @@ -0,0 +1,6 @@ +cmake_minimum_required(VERSION 3.13.1) +include($ENV{ZEPHYR_BASE}/cmake/app/boilerplate.cmake NO_POLICY_SCOPE) +project(timer_schedule) + +FILE(GLOB app_sources src/*.c) +target_sources(app PRIVATE ${app_sources}) diff --git a/tests/kernel/timer/timer_schedule/README b/tests/kernel/timer/timer_schedule/README new file mode 100644 index 000000000000..15b34f29574b --- /dev/null +++ b/tests/kernel/timer/timer_schedule/README @@ -0,0 +1,27 @@ +$ make run + +[QEMU] CPU: qemu32 +Running test suite test_timer_api +tc_start() - test_timer_duration_period +=================================================================== +PASS - test_timer_duration_period. +tc_start() - test_timer_period_0 +=================================================================== +PASS - test_timer_period_0. +tc_start() - test_timer_expirefn_null +=================================================================== +PASS - test_timer_expirefn_null. +tc_start() - test_timer_status_get +=================================================================== +PASS - test_timer_status_get. +tc_start() - test_timer_status_get_anytime +=================================================================== +PASS - test_timer_status_get_anytime. +tc_start() - test_timer_status_sync +=================================================================== +PASS - test_timer_status_sync. 
+tc_start() - test_timer_k_define +=================================================================== +PASS - test_timer_k_define. +=================================================================== +PROJECT EXECUTION SUCCESSFUL diff --git a/tests/kernel/timer/timer_schedule/prj.conf b/tests/kernel/timer/timer_schedule/prj.conf new file mode 100644 index 000000000000..1c022a235763 --- /dev/null +++ b/tests/kernel/timer/timer_schedule/prj.conf @@ -0,0 +1,3 @@ +CONFIG_ZTEST=y +CONFIG_QEMU_TICKLESS_WORKAROUND=y +CONFIG_SYS_CLOCK_TICKS_PER_SEC=100 diff --git a/tests/kernel/timer/timer_schedule/prj_tickless.conf b/tests/kernel/timer/timer_schedule/prj_tickless.conf new file mode 100644 index 000000000000..51deb94360e2 --- /dev/null +++ b/tests/kernel/timer/timer_schedule/prj_tickless.conf @@ -0,0 +1,4 @@ +CONFIG_ZTEST=y +CONFIG_SYS_POWER_MANAGEMENT=y +CONFIG_TICKLESS_KERNEL=y +CONFIG_SYS_CLOCK_TICKS_PER_SEC=100 diff --git a/tests/kernel/timer/timer_schedule/src/main.c b/tests/kernel/timer/timer_schedule/src/main.c new file mode 100644 index 000000000000..907086569a79 --- /dev/null +++ b/tests/kernel/timer/timer_schedule/src/main.c @@ -0,0 +1,161 @@ +/* + * Copyright (c) 2018 Peter Bigot Consulting, LLC + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/* T1 is a periodic timer with a 1000 ms interval. When it fires, it + * schedules T2 as a one-shot timer due in 50 ms. + * + * To produce the theoretical mis-handling we need to construct a + * situation where tick processing is delayed such that when T1 fires + * there is at least one tick remaining that is used to prematurely + * reduce the delay of the T2 that gets scheduled when T1 is + * processed. + * + * We do this by having the main loop wait until T2 fires the 3rd time, + * indicated by a semaphore.
When it can take the semaphore it locks + * interrupt handling for T1's period minus half of T2's timeout, + * which means the next T1 will fire half T2's timeout late, and the + * delay for T2 should be reduced by half. It then waits for T2 to + * run. The delay for T2 will be shorter than in the non-blocking + * case if the mis-handling occurs. + */ + +#include +#include + +#define T1_PERIOD 1000 /* [ms] */ +#define T2_TIMEOUT 50 /* [ms] */ +#define CYC_PER_TICK (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC \ + / CONFIG_SYS_CLOCK_TICKS_PER_SEC) + +#define T2_TIMEOUT_TICK (K_MSEC(T2_TIMEOUT) \ + * CONFIG_SYS_CLOCK_TICKS_PER_SEC \ + / MSEC_PER_SEC) + +static struct k_timer timer1; +static struct k_timer timer2; +static struct k_timer sync_timer; +static struct k_sem semaphore; + +static struct state { + unsigned int run; + + /** k_uptime_get_32() when T1 last expired */ + u32_t t1_exec_ut; + /** k_cycle_get_32() when T1 last expired */ + u32_t t1_exec_ct; + + /** Difference in k_cycle_get() between most recent two T1 expires */ + s32_t t1_delay_ct; + /** Difference in k_uptime_get() between most recent two T1 expires */ + s32_t t1_delay_ut; + /** Difference in k_cycle_get() between T2 start and callback */ + s32_t t2_delay_ct; + /** Difference in k_uptime_get() between T2 start and callback */ + s32_t t2_delay_ut; + /** Tick-corrected measured realtime between T2 start and callback */ + s32_t t2_delay_us; +} state; + +static void timer1_expire(struct k_timer *timer) +{ + state.t1_exec_ut = k_uptime_get_32(); + state.t1_exec_ct = k_cycle_get_32(); + k_timer_start(&timer2, K_MSEC(T2_TIMEOUT), 0); +} + +static void timer2_expire(struct k_timer *timer) +{ + static u32_t t1_prev_ct; + static u32_t t1_prev_ut; + u32_t now_ct = k_cycle_get_32(); + u32_t now_ut = k_uptime_get_32(); + + state.t1_delay_ct = state.t1_exec_ct - t1_prev_ct; + state.t1_delay_ut = state.t1_exec_ut - t1_prev_ut; + state.t2_delay_ct = now_ct - state.t1_exec_ct; + state.t2_delay_ut = now_ut - 
state.t1_exec_ut; + + if (USEC_PER_SEC < sys_clock_hw_cycles_per_sec()) { + u32_t div = sys_clock_hw_cycles_per_sec() + / USEC_PER_SEC; + state.t2_delay_us = state.t2_delay_ct / div; + } else { + state.t2_delay_us = state.t2_delay_ct + * (u64_t)USEC_PER_SEC + / sys_clock_hw_cycles_per_sec(); + } + t1_prev_ct = state.t1_exec_ct; + t1_prev_ut = state.t1_exec_ut; + + k_sem_give(&semaphore); +} + + +static void test_schedule(void) +{ + k_timer_init(&timer1, timer1_expire, NULL); + k_timer_init(&timer2, timer2_expire, NULL); + k_sem_init(&semaphore, 0, 1); + + TC_PRINT("T1 interval %u ms, T2 timeout %u ms, %u sysclock per tick\n", + T1_PERIOD, T2_TIMEOUT, sys_clock_hw_cycles_per_sec()); + + k_timer_init(&sync_timer, NULL, NULL); + k_timer_start(&sync_timer, 0, 1); + k_timer_status_sync(&sync_timer); + k_timer_stop(&sync_timer); + + k_timer_start(&timer1, K_MSEC(T1_PERIOD), K_MSEC(T1_PERIOD)); + + while (state.run < 6) { + static s32_t t2_lower_tick = T2_TIMEOUT_TICK - 1; + static s32_t t2_upper_tick = T2_TIMEOUT_TICK + 1; + s32_t t2_delay_tick; + + k_sem_take(&semaphore, K_FOREVER); + + if (state.run > 0) { + t2_delay_tick = state.t2_delay_us + * (u64_t)CONFIG_SYS_CLOCK_TICKS_PER_SEC + / USEC_PER_SEC; + + TC_PRINT("Run %u timer1 last %u interval %d/%d; " + " timer2 delay %d/%d = %d us = %d tick\n", + state.run, state.t1_exec_ut, + state.t1_delay_ct, state.t1_delay_ut, + state.t2_delay_ct, state.t2_delay_ut, + state.t2_delay_us, t2_delay_tick); + + zassert_true(t2_delay_tick >= t2_lower_tick, + "expected delay %d >= %d", + t2_delay_tick, t2_lower_tick); + zassert_true(t2_delay_tick <= t2_upper_tick, + "expected delay %d <= %d", + t2_delay_tick, t2_upper_tick); + } + + if (state.run == 3) { + unsigned int key; + + TC_PRINT("blocking\n"); + + key = irq_lock(); + k_busy_wait(K_MSEC(T1_PERIOD - T2_TIMEOUT / 2) + * USEC_PER_MSEC); + irq_unlock(key); + } + + ++state.run; + } + + k_timer_stop(&timer1); +} + +void test_main(void) +{ + ztest_test_suite(timer_fn, 
ztest_unit_test(test_schedule)); + ztest_run_test_suite(timer_fn); +} diff --git a/tests/kernel/timer/timer_schedule/testcase.yaml b/tests/kernel/timer/timer_schedule/testcase.yaml new file mode 100644 index 000000000000..b3cb5211c90c --- /dev/null +++ b/tests/kernel/timer/timer_schedule/testcase.yaml @@ -0,0 +1,9 @@ +tests: + kernel.timer: + tags: kernel + arch_exclude: riscv32 nios2 posix + kernel.timer.tickless: + build_only: true + extra_args: CONF_FILE="prj_tickless.conf" + arch_exclude: riscv32 nios2 posix + tags: kernel