diff --git a/arch/arc/core/timestamp.c b/arch/arc/core/timestamp.c index 8d5f2a855b8f..d3d0a7635ff3 100644 --- a/arch/arc/core/timestamp.c +++ b/arch/arc/core/timestamp.c @@ -15,9 +15,6 @@ #include #include -extern volatile u64_t _sys_clock_tick_count; -extern int sys_clock_hw_cycles_per_tick; - /* * @brief Read 64-bit timestamp value * @@ -33,10 +30,10 @@ u64_t _tsc_read(void) u32_t count; key = irq_lock(); - t = (u64_t)_sys_clock_tick_count; + t = (u64_t)z_tick_get(); count = _arc_v2_aux_reg_read(_ARC_V2_TMR0_COUNT); irq_unlock(key); - t *= (u64_t)sys_clock_hw_cycles_per_tick; + t *= (u64_t)sys_clock_hw_cycles_per_tick(); t += (u64_t)count; return t; } diff --git a/arch/arm/core/cortex_m/vector_table.S b/arch/arm/core/cortex_m/vector_table.S index b3f039f1ba79..6a7fbe6f9f2d 100644 --- a/arch/arm/core/cortex_m/vector_table.S +++ b/arch/arm/core/cortex_m/vector_table.S @@ -19,7 +19,6 @@ #include #include #include -#include #include "vector_table.h" _ASM_FILE_PROLOGUE diff --git a/arch/arm/core/irq_relay.S b/arch/arm/core/irq_relay.S index e711ae9842f1..e337b0ee31b6 100644 --- a/arch/arm/core/irq_relay.S +++ b/arch/arm/core/irq_relay.S @@ -24,7 +24,6 @@ #include #include #include -#include _ASM_FILE_PROLOGUE diff --git a/drivers/adc/adc_ti_adc108s102.c b/drivers/adc/adc_ti_adc108s102.c index 4d483a1c0d3d..4dd63b04a338 100644 --- a/drivers/adc/adc_ti_adc108s102.c +++ b/drivers/adc/adc_ti_adc108s102.c @@ -185,7 +185,7 @@ static int ti_adc108s102_read(struct device *dev, /* convert to milliseconds */ delay = (s32_t)((MSEC_PER_SEC * (u64_t)delay) / - sys_clock_ticks_per_sec); + CONFIG_SYS_CLOCK_TICKS_PER_SEC); k_sleep(delay); diff --git a/drivers/sensor/dht/dht.c b/drivers/sensor/dht/dht.c index adf928fa15c8..781e79282f35 100644 --- a/drivers/sensor/dht/dht.c +++ b/drivers/sensor/dht/dht.c @@ -30,7 +30,7 @@ static s8_t dht_measure_signal_duration(struct dht_data *drv_data, u32_t elapsed_cycles; u32_t max_wait_cycles = (u32_t)( (u64_t)DHT_SIGNAL_MAX_WAIT_DURATION * - 
(u64_t)sys_clock_hw_cycles_per_sec / + (u64_t)sys_clock_hw_cycles_per_sec() / (u64_t)USEC_PER_SEC ); u32_t start_cycles = k_cycle_get_32(); @@ -46,7 +46,7 @@ static s8_t dht_measure_signal_duration(struct dht_data *drv_data, return (u64_t)elapsed_cycles * (u64_t)USEC_PER_SEC / - (u64_t)sys_clock_hw_cycles_per_sec; + (u64_t)sys_clock_hw_cycles_per_sec(); } static int dht_sample_fetch(struct device *dev, enum sensor_channel chan) diff --git a/drivers/timer/altera_avalon_timer_hal.c b/drivers/timer/altera_avalon_timer_hal.c index 822c71167a6a..78ee7f8b0d48 100644 --- a/drivers/timer/altera_avalon_timer_hal.c +++ b/drivers/timer/altera_avalon_timer_hal.c @@ -15,6 +15,8 @@ static u32_t accumulated_cycle_count; +static s32_t _sys_idle_elapsed_ticks = 1; + static void timer_irq_handler(void *unused) { ARG_UNUSED(unused); @@ -24,12 +26,12 @@ static void timer_irq_handler(void *unused) read_timer_start_of_tick_handler(); #endif - accumulated_cycle_count += sys_clock_hw_cycles_per_tick; + accumulated_cycle_count += sys_clock_hw_cycles_per_tick(); /* Clear the interrupt */ alt_handle_irq((void *)TIMER_0_BASE, TIMER_0_IRQ); - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); #ifdef CONFIG_EXECUTION_BENCHMARKING extern void read_timer_end_of_tick_handler(void); @@ -37,20 +39,20 @@ static void timer_irq_handler(void *unused) #endif } -int _sys_clock_driver_init(struct device *device) +int z_clock_driver_init(struct device *device) { ARG_UNUSED(device); IOWR_ALTERA_AVALON_TIMER_PERIODL(TIMER_0_BASE, - sys_clock_hw_cycles_per_tick & 0xFFFF); + sys_clock_hw_cycles_per_tick() & 0xFFFF); IOWR_ALTERA_AVALON_TIMER_PERIODH(TIMER_0_BASE, - (sys_clock_hw_cycles_per_tick >> 16) & 0xFFFF); + (sys_clock_hw_cycles_per_tick() >> 16) & 0xFFFF); IRQ_CONNECT(TIMER_0_IRQ, 0, timer_irq_handler, NULL, 0); irq_enable(TIMER_0_IRQ); alt_avalon_timer_sc_init((void *)TIMER_0_BASE, 0, - TIMER_0_IRQ, sys_clock_hw_cycles_per_tick); + TIMER_0_IRQ, sys_clock_hw_cycles_per_tick()); 
return 0; } diff --git a/drivers/timer/arcv2_timer0.c b/drivers/timer/arcv2_timer0.c index e487493f7630..4f33e1af7a72 100644 --- a/drivers/timer/arcv2_timer0.c +++ b/drivers/timer/arcv2_timer0.c @@ -15,7 +15,7 @@ * be programmed to wake the system in N >= TICKLESS_IDLE_THRESH ticks. The * kernel invokes _timer_idle_enter() to program the up counter to trigger an * interrupt in N ticks. When the timer expires (or when another interrupt is - * detected), the kernel's interrupt stub invokes _timer_idle_exit() to leave + * detected), the kernel's interrupt stub invokes z_clock_idle_exit() to leave * the tickless idle state. * * @internal @@ -28,13 +28,13 @@ * * 2. The act of entering tickless idle may potentially straddle a tick * boundary. This can be detected in _timer_idle_enter() after Timer0 is - * programmed with the new limit and acted upon in _timer_idle_exit(). + * programmed with the new limit and acted upon in z_clock_idle_exit(). * * 3. Tickless idle may be prematurely aborted due to a straddled tick. See * previous factor. * * 4. Tickless idle may end naturally. This is detected and handled in - * _timer_idle_exit(). + * z_clock_idle_exit(). * * 5. Tickless idle may be prematurely aborted due to a non-timer interrupt. * If this occurs, Timer0 is reprogrammed to trigger at the next tick. 
@@ -59,6 +59,8 @@ #include +#include "legacy_api.h" + #define _ARC_V2_TMR_CTRL_IE 0x1 /* interrupt enable */ #define _ARC_V2_TMR_CTRL_NH 0x2 /* count only while not halted */ #define _ARC_V2_TMR_CTRL_W 0x4 /* watchdog mode enable */ @@ -67,11 +69,11 @@ /* running total of timer count */ static u32_t __noinit cycles_per_tick; static volatile u32_t accumulated_cycle_count; +static s32_t _sys_idle_elapsed_ticks = 1; #ifdef CONFIG_TICKLESS_IDLE static u32_t __noinit max_system_ticks; static u32_t __noinit programmed_ticks; -extern s32_t _sys_idle_elapsed_ticks; #ifndef CONFIG_TICKLESS_KERNEL static u32_t __noinit programmed_limit; static int straddled_tick_on_idle_enter; @@ -196,7 +198,7 @@ void _timer_int_handler(void *unused) #ifdef CONFIG_TICKLESS_KERNEL if (!programmed_ticks) { if (_sys_clock_always_on) { - _sys_clock_tick_count = _get_elapsed_clock_time(); + z_tick_set(z_clock_uptime()); program_max_cycles(); } return; @@ -212,11 +214,11 @@ void _timer_int_handler(void *unused) programmed_ticks = 0; timer_expired = 1; - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); - /* _sys_clock_tick_announce() could cause new programming */ + /* z_clock_announce(_sys_idle_elapsed_ticks) could cause new programming */ if (!programmed_ticks && _sys_clock_always_on) { - _sys_clock_tick_count = _get_elapsed_clock_time(); + z_tick_set(z_clock_uptime()); program_max_cycles(); } #else @@ -227,9 +229,10 @@ void _timer_int_handler(void *unused) timer_count <= (cycles_per_tick - 1), "timer_count: %d, limit %d\n", timer_count, cycles_per_tick - 1); - _sys_clock_final_tick_announce(); + _sys_idle_elapsed_ticks = 1; + z_clock_announce(_sys_idle_elapsed_ticks); #else - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); #endif update_accumulated_count(); @@ -280,7 +283,7 @@ void _set_time(u32_t time) programmed_ticks = time > max_system_ticks ? 
max_system_ticks : time; - _sys_clock_tick_count = _get_elapsed_clock_time(); + z_tick_set(z_clock_uptime()); timer0_limit_register_set(programmed_ticks * cycles_per_tick); timer0_count_register_set(0); @@ -306,12 +309,12 @@ static inline u64_t get_elapsed_count(void) elapsed = timer0_count_register_get(); } - elapsed += _sys_clock_tick_count * cycles_per_tick; + elapsed += z_tick_get() * cycles_per_tick; return elapsed; } -u64_t _get_elapsed_clock_time(void) +u64_t z_clock_uptime(void) { return get_elapsed_count() / cycles_per_tick; } @@ -398,7 +401,7 @@ void _timer_idle_enter(s32_t ticks) * RETURNS: N/A */ -void _timer_idle_exit(void) +void z_clock_idle_exit(void) { #ifdef CONFIG_TICKLESS_KERNEL if (!programmed_ticks && _sys_clock_always_on) { @@ -433,7 +436,7 @@ void _timer_idle_exit(void) _sys_idle_elapsed_ticks = programmed_ticks - 1; update_accumulated_count(); - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); __ASSERT_EVAL({}, u32_t timer_count = timer0_count_register_get(), @@ -449,7 +452,7 @@ void _timer_idle_exit(void) _sys_idle_elapsed_ticks = current_count / cycles_per_tick; if (_sys_idle_elapsed_ticks > 0) { update_accumulated_count(); - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); } /* @@ -479,7 +482,7 @@ static void tickless_idle_init(void) {} * * @return 0 */ -int _sys_clock_driver_init(struct device *device) +int z_clock_driver_init(struct device *device) { ARG_UNUSED(device); @@ -487,7 +490,7 @@ int _sys_clock_driver_init(struct device *device) timer0_control_register_set(0); timer0_count_register_set(0); - cycles_per_tick = sys_clock_hw_cycles_per_tick; + cycles_per_tick = sys_clock_hw_cycles_per_tick(); IRQ_CONNECT(IRQ_TIMER0, CONFIG_ARCV2_TIMER_IRQ_PRIORITY, _timer_int_handler, NULL, 0); @@ -544,7 +547,7 @@ static int sys_clock_resume(struct device *dev) * Implements the driver control management functionality * the *context may include IN data or/and OUT data */ -int 
sys_clock_device_ctrl(struct device *port, u32_t ctrl_command, +int z_clock_device_ctrl(struct device *port, u32_t ctrl_command, void *context) { if (ctrl_command == DEVICE_PM_SET_POWER_STATE) { diff --git a/drivers/timer/cortex_m_systick.c b/drivers/timer/cortex_m_systick.c index 7f709500e990..ac0a79031978 100644 --- a/drivers/timer/cortex_m_systick.c +++ b/drivers/timer/cortex_m_systick.c @@ -42,6 +42,8 @@ static volatile u32_t clock_accumulated_count; #include +#include "legacy_api.h" + #ifdef CONFIG_TICKLESS_IDLE #define TIMER_MODE_PERIODIC 0 /* normal running mode */ #define TIMER_MODE_ONE_SHOT 1 /* emulated, since sysTick has 1 mode */ @@ -57,9 +59,7 @@ extern void _NanoIdleValClear(void); extern void _sys_power_save_idle_exit(s32_t ticks); #endif -#ifdef CONFIG_TICKLESS_IDLE -extern s32_t _sys_idle_elapsed_ticks; -#endif +static s32_t _sys_idle_elapsed_ticks = 1; #ifdef CONFIG_TICKLESS_IDLE static u32_t __noinit default_load_value; /* default count */ @@ -242,7 +242,7 @@ void _timer_int_handler(void *unused) #if defined(CONFIG_TICKLESS_KERNEL) if (!idle_original_ticks) { if (_sys_clock_always_on) { - _sys_clock_tick_count = _get_elapsed_clock_time(); + z_tick_set(z_clock_uptime()); /* clear overflow tracking flag as it is accounted */ timer_overflow = 0; sysTickStop(); @@ -268,11 +268,11 @@ void _timer_int_handler(void *unused) */ idle_original_ticks = 0; - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); - /* _sys_clock_tick_announce() could cause new programming */ + /* z_clock_announce(_sys_idle_elapsed_ticks) could cause new programming */ if (!idle_original_ticks && _sys_clock_always_on) { - _sys_clock_tick_count = _get_elapsed_clock_time(); + z_tick_set(z_clock_uptime()); /* clear overflow tracking flag as it is accounted */ timer_overflow = 0; sysTickStop(); @@ -283,7 +283,7 @@ void _timer_int_handler(void *unused) #else /* * If this a wakeup from a completed tickless idle or after - * _timer_idle_exit has processed a partial 
idle, return + * z_clock_idle_exit has processed a partial idle, return * to the normal tick cycle. */ if (timer_mode == TIMER_MODE_ONE_SHOT) { @@ -300,9 +300,10 @@ void _timer_int_handler(void *unused) idle_mode = IDLE_NOT_TICKLESS; _sys_idle_elapsed_ticks = idle_original_ticks + 1; /* actual # of idle ticks */ - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); } else { - _sys_clock_final_tick_announce(); + _sys_idle_elapsed_ticks = 1; + z_clock_announce(_sys_idle_elapsed_ticks); } /* accumulate total counter value */ @@ -313,9 +314,9 @@ void _timer_int_handler(void *unused) * No tickless idle: * Update the total tick count and announce this tick to the kernel. */ - clock_accumulated_count += sys_clock_hw_cycles_per_tick; + clock_accumulated_count += sys_clock_hw_cycles_per_tick(); - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); #endif /* CONFIG_TICKLESS_IDLE */ numIdleTicks = _NanoIdleValGet(); /* get # of idle ticks requested */ @@ -326,7 +327,7 @@ void _timer_int_handler(void *unused) /* * Complete idle processing. * Note that for tickless idle, nothing will be done in - * _timer_idle_exit. + * z_clock_idle_exit. */ _sys_power_save_idle_exit(numIdleTicks); } @@ -336,13 +337,13 @@ void _timer_int_handler(void *unused) #else /* !CONFIG_SYS_POWER_MANAGEMENT */ /* accumulate total counter value */ - clock_accumulated_count += sys_clock_hw_cycles_per_tick; + clock_accumulated_count += sys_clock_hw_cycles_per_tick(); /* * one more tick has occurred -- don't need to do anything special since * timer is already configured to interrupt on the following tick */ - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); #endif /* CONFIG_SYS_POWER_MANAGEMENT */ @@ -389,7 +390,7 @@ void _set_time(u32_t time) idle_original_ticks = time > max_system_ticks ? 
max_system_ticks : time; - _sys_clock_tick_count = _get_elapsed_clock_time(); + z_tick_set(z_clock_uptime()); /* clear overflow tracking flag as it is accounted */ timer_overflow = 0; @@ -415,19 +416,19 @@ static inline u64_t get_elapsed_count(void) if ((SysTick->CTRL & SysTick_CTRL_COUNTFLAG_Msk) || (timer_overflow)) { elapsed = SysTick->LOAD; /* Keep track of overflow till it is accounted in - * _sys_clock_tick_count as COUNTFLAG bit is clear on read + * z_tick_get() as COUNTFLAG bit is clear on read */ timer_overflow = 1; } else { elapsed = (SysTick->LOAD - SysTick->VAL); } - elapsed += (_sys_clock_tick_count * default_load_value); + elapsed += (z_tick_get() * default_load_value); return elapsed; } -u64_t _get_elapsed_clock_time(void) +u64_t z_clock_uptime(void) { return get_elapsed_count() / default_load_value; } @@ -598,13 +599,13 @@ void _timer_idle_enter(s32_t ticks /* system ticks */ * * @return N/A */ -void _timer_idle_exit(void) +void z_clock_idle_exit(void) { #ifdef CONFIG_TICKLESS_KERNEL if (idle_mode == IDLE_TICKLESS) { idle_mode = IDLE_NOT_TICKLESS; if (!idle_original_ticks && _sys_clock_always_on) { - _sys_clock_tick_count = _get_elapsed_clock_time(); + z_tick_set(z_clock_uptime()); timer_overflow = 0; sysTickReloadSet(max_load_value); sysTickStart(); @@ -644,7 +645,7 @@ void _timer_idle_exit(void) * for it. 
*/ _sys_idle_elapsed_ticks = idle_original_ticks - 1; - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); } else { u32_t elapsed; /* elapsed "counter time" */ u32_t remaining; /* remaining "counter time" */ @@ -674,7 +675,7 @@ void _timer_idle_exit(void) _sys_idle_elapsed_ticks = elapsed / default_load_value; if (_sys_idle_elapsed_ticks) { - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); } } @@ -696,7 +697,7 @@ void _timer_idle_exit(void) * * @return 0 */ -int _sys_clock_driver_init(struct device *device) +int z_clock_driver_init(struct device *device) { /* enable counter, interrupt and set clock src to system clock */ u32_t ctrl = SysTick_CTRL_ENABLE_Msk | SysTick_CTRL_TICKINT_Msk | @@ -709,9 +710,9 @@ int _sys_clock_driver_init(struct device *device) */ /* systick supports 24-bit H/W counter */ - __ASSERT(sys_clock_hw_cycles_per_tick <= (1 << 24), - "sys_clock_hw_cycles_per_tick too large"); - sysTickReloadSet(sys_clock_hw_cycles_per_tick - 1); + __ASSERT(sys_clock_hw_cycles_per_tick() <= (1 << 24), + "sys_clock_hw_cycles_per_tick() too large"); + sysTickReloadSet(sys_clock_hw_cycles_per_tick() - 1); #ifdef CONFIG_TICKLESS_IDLE @@ -755,7 +756,7 @@ return (u32_t) get_elapsed_count(); #ifdef CONFIG_TICKLESS_IDLE /* When we leave a tickless period the reload value of the timer * can be set to a remaining value to wait until end of tick. - * (see _timer_idle_exit). The remaining value is always smaller + * (see z_clock_idle_exit). The remaining value is always smaller * than default_load_value. In this case the time elapsed until * the timer restart was not yet added to * clock_accumulated_count. 
To retrieve a correct cycle count diff --git a/drivers/timer/hpet.c b/drivers/timer/hpet.c index ef364d99e5d8..d5b4748c1f63 100644 --- a/drivers/timer/hpet.c +++ b/drivers/timer/hpet.c @@ -51,6 +51,8 @@ #include +#include "legacy_api.h" + /* HPET register offsets */ #define GENERAL_CAPS_REG 0 /* 64-bit register */ @@ -159,6 +161,7 @@ #define HPET_IOAPIC_FLAGS (IOAPIC_LEVEL | IOAPIC_LOW) #endif +extern int z_clock_hw_cycles_per_sec; #ifdef CONFIG_INT_LATENCY_BENCHMARK static u32_t main_count_first_irq_value; @@ -173,12 +176,12 @@ extern u32_t _hw_irq_to_c_handler_latency; #define DBG(...) #endif +static s32_t _sys_idle_elapsed_ticks = 1; + #ifdef CONFIG_TICKLESS_IDLE /* additional globals, locals, and forward declarations */ -extern s32_t _sys_idle_elapsed_ticks; - /* main counter units per system tick */ static u32_t __noinit counter_load_value; /* counter value for most recent tick */ @@ -261,7 +264,7 @@ void _timer_int_handler(void *unused) * timer is already configured to interrupt on the following tick */ - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); #else @@ -271,7 +274,7 @@ void _timer_int_handler(void *unused) /* If timer not programmed or already consumed exit */ if (!programmed_ticks) { if (_sys_clock_always_on) { - _sys_clock_tick_count = _get_elapsed_clock_time(); + z_tick_set(z_clock_uptime()); program_max_cycles(); } return; @@ -296,11 +299,11 @@ void _timer_int_handler(void *unused) * announce already consumed elapsed time */ programmed_ticks = 0; - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); - /* _sys_clock_tick_announce() could cause new programming */ + /* z_clock_announce(_sys_idle_elapsed_ticks) could cause new programming */ if (!programmed_ticks && _sys_clock_always_on) { - _sys_clock_tick_count = _get_elapsed_clock_time(); + z_tick_set(z_clock_uptime()); program_max_cycles(); } #else @@ -308,7 +311,8 @@ void _timer_int_handler(void *unused) *_HPET_TIMER0_CONFIG_CAPS |= 
HPET_Tn_VAL_SET_CNF; *_HPET_TIMER0_COMPARATOR = counter_last_value + counter_load_value; programmed_ticks = 1; - _sys_clock_final_tick_announce(); + _sys_idle_elapsed_ticks = 1; + z_clock_announce(_sys_idle_elapsed_ticks); #endif #endif /* !CONFIG_TICKLESS_IDLE */ @@ -353,7 +357,7 @@ void _set_time(u32_t time) programmed_ticks = time; - _sys_clock_tick_count = _get_elapsed_clock_time(); + z_tick_set(z_clock_uptime()); stale_irq_check = 1; @@ -370,11 +374,11 @@ void _enable_sys_clock(void) } } -u64_t _get_elapsed_clock_time(void) +u64_t z_clock_uptime(void) { u64_t elapsed; - elapsed = _sys_clock_tick_count; + elapsed = z_tick_get(); elapsed += ((s64_t)(_hpetMainCounterAtomic() - counter_last_value) / counter_load_value); @@ -452,7 +456,7 @@ void _timer_idle_enter(s32_t ticks /* system ticks */ * */ -void _timer_idle_exit(void) +void z_clock_idle_exit(void) { #ifdef CONFIG_TICKLESS_KERNEL if (!programmed_ticks && _sys_clock_always_on) { @@ -480,7 +484,7 @@ void _timer_idle_exit(void) * that the timer ISR will execute first before the tick event * is serviced. */ - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); /* timer interrupt handler reprograms the timer for the next * tick @@ -531,7 +535,7 @@ void _timer_idle_exit(void) if (_sys_idle_elapsed_ticks) { /* Announce elapsed ticks to the kernel */ - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); } /* @@ -556,7 +560,7 @@ void _timer_idle_exit(void) * @return 0 */ -int _sys_clock_driver_init(struct device *device) +int z_clock_driver_init(struct device *device) { u64_t hpetClockPeriod; u64_t tickFempto; @@ -583,12 +587,12 @@ int _sys_clock_driver_init(struct device *device) * Get tick time (in femptoseconds). 
*/ - tickFempto = 1000000000000000ull / sys_clock_ticks_per_sec; + tickFempto = 1000000000000000ull / CONFIG_SYS_CLOCK_TICKS_PER_SEC; /* * This driver shall read the COUNTER_CLK_PERIOD value from the general * capabilities register rather than rely on a board.h provide macro - * (or the global variable 'sys_clock_hw_cycles_per_tick') + * (or the global variable 'sys_clock_hw_cycles_per_tick()') * to determine the frequency of clock applied to the HPET device. */ @@ -610,11 +614,10 @@ int _sys_clock_driver_init(struct device *device) DBG("HPET: timer0: available interrupts mask 0x%x\n", (u32_t)(*_HPET_TIMER0_CONFIG_CAPS >> 32)); - /* Initialize sys_clock_hw_cycles_per_tick/sec */ + /* Initialize sys_clock_hw_cycles_per_sec */ - sys_clock_hw_cycles_per_tick = counter_load_value; - sys_clock_hw_cycles_per_sec = sys_clock_hw_cycles_per_tick * - sys_clock_ticks_per_sec; + z_clock_hw_cycles_per_sec = counter_load_value * + CONFIG_SYS_CLOCK_TICKS_PER_SEC; #ifdef CONFIG_INT_LATENCY_BENCHMARK diff --git a/drivers/timer/legacy_api.h b/drivers/timer/legacy_api.h new file mode 100644 index 000000000000..de1990c76bcc --- /dev/null +++ b/drivers/timer/legacy_api.h @@ -0,0 +1,34 @@ +#ifndef ZEPHYR_LEGACY_SET_TIME_H__ +#define ZEPHYR_LEGACY_SET_TIME_H__ + +/* Stub implementation of z_clock_set_timeout() in terms of the + * original APIs. Used by older timer drivers. Should be replaced. + * + * Yes, this "header" includes a function definition and must be + * included only once in a single compilation. 
+ */ + +#ifdef CONFIG_TICKLESS_IDLE +void _timer_idle_enter(s32_t ticks); +void z_clock_idle_exit(void); +#endif + +#ifdef CONFIG_TICKLESS_KERNEL +void _set_time(u32_t time); +extern u32_t _get_program_time(void); +extern u32_t _get_remaining_program_time(void); +extern u32_t _get_elapsed_program_time(void); +#endif + +extern void z_clock_set_timeout(s32_t ticks, bool idle) +{ +#ifdef CONFIG_TICKLESS_KERNEL + if (idle) { + _timer_idle_enter(ticks); + } else { + _set_time(ticks == K_FOREVER ? 0 : ticks); + } +#endif +} + +#endif /* ZEPHYR_LEGACY_SET_TIME_H__ */ diff --git a/drivers/timer/loapic_timer.c b/drivers/timer/loapic_timer.c index 6365c4238adb..6d2a33e6ddca 100644 --- a/drivers/timer/loapic_timer.c +++ b/drivers/timer/loapic_timer.c @@ -26,7 +26,7 @@ * kernel invokes _timer_idle_enter() to program the down counter in one-shot * mode to trigger an interrupt in N ticks. When the timer expires or when * another interrupt is detected, the kernel's interrupt stub invokes - * _timer_idle_exit() to leave the tickless idle state. + * z_clock_idle_exit() to leave the tickless idle state. 
* * @internal * Factors that increase the driver's complexity: @@ -76,6 +76,8 @@ #include #include +#include "legacy_api.h" + /* Local APIC Timer Bits */ #define LOAPIC_TIMER_DIVBY_2 0x0 /* Divide by 2 */ @@ -124,9 +126,8 @@ do {/* nothing */ \ } while (0) #endif /* !CONFIG_TICKLESS_IDLE */ -#if defined(CONFIG_TICKLESS_IDLE) -extern s32_t _sys_idle_elapsed_ticks; -#endif /* CONFIG_TICKLESS_IDLE */ + +static s32_t _sys_idle_elapsed_ticks = 1; /* computed counter 0 initial count value */ static u32_t __noinit cycles_per_tick; @@ -295,7 +296,7 @@ void _timer_int_handler(void *unused /* parameter is not used */ #if defined(CONFIG_TICKLESS_KERNEL) if (!programmed_full_ticks) { if (_sys_clock_always_on) { - _sys_clock_tick_count = _get_elapsed_clock_time(); + z_tick_set(z_clock_uptime()); program_max_cycles(); } return; @@ -317,11 +318,11 @@ void _timer_int_handler(void *unused /* parameter is not used */ */ programmed_full_ticks = 0; - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); - /* _sys_clock_tick_announce() could cause new programming */ + /* z_clock_announce(_sys_idle_elapsed_ticks) could cause new programming */ if (!programmed_full_ticks && _sys_clock_always_on) { - _sys_clock_tick_count = _get_elapsed_clock_time(); + z_tick_set(z_clock_uptime()); program_max_cycles(); } #else @@ -334,7 +335,7 @@ void _timer_int_handler(void *unused /* parameter is not used */ * The timer fired unexpectedly. This is due to one of two cases: * 1. Entering tickless idle straddled a tick. * 2. Leaving tickless idle straddled the final tick. - * Due to the timer reprogramming in _timer_idle_exit(), case #2 + * Due to the timer reprogramming in z_clock_idle_exit(), case #2 * can be handled as a fall-through. 
* * NOTE: Although the cycle count is supposed to stop decrementing @@ -357,9 +358,10 @@ void _timer_int_handler(void *unused /* parameter is not used */ timer_mode = TIMER_MODE_PERIODIC; } - _sys_clock_final_tick_announce(); + _sys_idle_elapsed_ticks = 1; + z_clock_announce(_sys_idle_elapsed_ticks); #else - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); #endif /*CONFIG_TICKLESS_IDLE*/ #endif #ifdef CONFIG_EXECUTION_BENCHMARKING @@ -409,7 +411,7 @@ void _set_time(u32_t time) programmed_full_ticks = time > max_system_ticks ? max_system_ticks : time; - _sys_clock_tick_count = _get_elapsed_clock_time(); + z_tick_set(z_clock_uptime()); programmed_cycles = programmed_full_ticks * cycles_per_tick; initial_count_register_set(programmed_cycles); @@ -422,11 +424,11 @@ void _enable_sys_clock(void) } } -u64_t _get_elapsed_clock_time(void) +u64_t z_clock_uptime(void) { u64_t elapsed; - elapsed = _sys_clock_tick_count; + elapsed = z_tick_get(); if (programmed_cycles) { elapsed += (programmed_cycles - @@ -540,7 +542,7 @@ void _timer_idle_enter(s32_t ticks /* system ticks */ * * @return N/A */ -void _timer_idle_exit(void) +void z_clock_idle_exit(void) { #ifdef CONFIG_TICKLESS_KERNEL if (!programmed_full_ticks && _sys_clock_always_on) { @@ -584,7 +586,7 @@ void _timer_idle_exit(void) * (The timer ISR reprograms the timer for the next tick.) */ - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); timer_known_to_have_expired = true; @@ -602,7 +604,7 @@ void _timer_idle_exit(void) * * NOTE #1: In the case of a straddled tick, the '_sys_idle_elapsed_ticks' * calculation below may result in either 0 or 1. If 1, then this may - * result in a harmless extra call to _sys_clock_tick_announce(). + * result in a harmless extra call to z_clock_announce(_sys_idle_elapsed_ticks). 
* * NOTE #2: In the case of a straddled tick, it is assumed that when the * timer is reprogrammed, it will be reprogrammed with a cycle count @@ -615,7 +617,7 @@ void _timer_idle_exit(void) _sys_idle_elapsed_ticks = programmed_full_ticks - remaining_full_ticks; if (_sys_idle_elapsed_ticks > 0) { - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); } if (remaining_full_ticks > 0) { @@ -641,14 +643,14 @@ void _timer_idle_exit(void) * * @return 0 */ -int _sys_clock_driver_init(struct device *device) +int z_clock_driver_init(struct device *device) { ARG_UNUSED(device); /* determine the timer counter value (in timer clock cycles/system tick) */ - cycles_per_tick = sys_clock_hw_cycles_per_tick; + cycles_per_tick = sys_clock_hw_cycles_per_tick(); tickless_idle_init(); @@ -731,7 +733,7 @@ static int sys_clock_resume(struct device *dev) * Implements the driver control management functionality * the *context may include IN data or/and OUT data */ -int sys_clock_device_ctrl(struct device *port, u32_t ctrl_command, +int z_clock_device_ctrl(struct device *port, u32_t ctrl_command, void *context) { if (ctrl_command == DEVICE_PM_SET_POWER_STATE) { @@ -765,7 +767,7 @@ u32_t _timer_cycle_get_32(void) u64_t tsc; /* 64-bit math to avoid overflows */ - tsc = _tsc_read() * (u64_t)sys_clock_hw_cycles_per_sec / + tsc = _tsc_read() * (u64_t)sys_clock_hw_cycles_per_sec() / (u64_t) CONFIG_TSC_CYCLES_PER_SEC; return (u32_t)tsc; #else diff --git a/drivers/timer/native_posix_timer.c b/drivers/timer/native_posix_timer.c index bc70222abaa2..cea8d9e66605 100644 --- a/drivers/timer/native_posix_timer.c +++ b/drivers/timer/native_posix_timer.c @@ -20,8 +20,11 @@ #include "soc.h" #include "posix_trace.h" +#include "legacy_api.h" + static u64_t tick_period; /* System tick period in number of hw cycles */ static s64_t silent_ticks; +static s32_t _sys_idle_elapsed_ticks = 1; /** * Return the current HW cycle counter @@ -48,7 +51,7 @@ void _timer_idle_enter(s32_t sys_ticks) 
posix_print_warning("native timer: Re-entering idle mode with " "%i ticks pending\n", silent_ticks); - _timer_idle_exit(); + z_clock_idle_exit(); /* LCOV_EXCL_STOP */ } if (sys_ticks < 0) { @@ -72,12 +75,12 @@ void _timer_idle_enter(s32_t sys_ticks) * Note that we do not assume this function is called before the interrupt is * raised (the interrupt can handle it announcing all ticks) */ -void _timer_idle_exit(void) +void z_clock_idle_exit(void) { silent_ticks -= hwtimer_get_pending_silent_ticks(); if (silent_ticks > 0) { _sys_idle_elapsed_ticks = silent_ticks; - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); } silent_ticks = 0; hwtimer_set_silent_ticks(0); @@ -93,17 +96,17 @@ static void sp_timer_isr(void *arg) ARG_UNUSED(arg); _sys_idle_elapsed_ticks = silent_ticks + 1; silent_ticks = 0; - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); } /* * Enable the hw timer, setting its tick period, and setup its interrupt */ -int _sys_clock_driver_init(struct device *device) +int z_clock_driver_init(struct device *device) { ARG_UNUSED(device); - tick_period = 1000000ul / sys_clock_ticks_per_sec; + tick_period = 1000000ul / CONFIG_SYS_CLOCK_TICKS_PER_SEC; hwtimer_enable(tick_period); diff --git a/drivers/timer/nrf_rtc_timer.c b/drivers/timer/nrf_rtc_timer.c index 02d27a01ecb6..572e40d57369 100644 --- a/drivers/timer/nrf_rtc_timer.c +++ b/drivers/timer/nrf_rtc_timer.c @@ -11,6 +11,8 @@ #include #include "nrf_rtc.h" +#include "legacy_api.h" + /* * Convenience defines. */ @@ -38,14 +40,16 @@ /* * rtc_past holds the value of RTC_COUNTER at the time the last sys tick was * announced, in RTC ticks. It is therefore always a multiple of - * sys_clock_hw_cycles_per_tick. + * sys_clock_hw_cycles_per_tick(). */ static u32_t rtc_past; +static s32_t _sys_idle_elapsed_ticks = 1; + #ifdef CONFIG_TICKLESS_IDLE /* * Holds the maximum sys ticks the kernel expects to see in the next - * _sys_clock_tick_announce(). 
+ * z_clock_announce(_sys_idle_elapsed_ticks). */ static u32_t expected_sys_ticks; #endif /* CONFIG_TICKLESS_IDLE */ @@ -89,10 +93,10 @@ static void rtc_compare_set(u32_t rtc_ticks) * * This function is not reentrant. It is called from: * - * * _timer_idle_exit(), which in turn is called with interrupts disabled when + * * z_clock_idle_exit(), which in turn is called with interrupts disabled when * an interrupt fires. * * rtc1_nrf5_isr(), which runs with interrupts enabled but at that time the - * device cannot be idle and hence _timer_idle_exit() cannot be called. + * device cannot be idle and hence z_clock_idle_exit() cannot be called. * * Since this function can be preempted, we need to take some provisions to * announce all expected sys ticks that have passed. @@ -114,12 +118,12 @@ static void rtc_announce_set_next(void) /* If no sys ticks have elapsed, there is no point in incrementing the * counters or announcing it. */ - if (rtc_elapsed >= sys_clock_hw_cycles_per_tick) { + if (rtc_elapsed >= sys_clock_hw_cycles_per_tick()) { #ifdef CONFIG_TICKLESS_IDLE /* Calculate how many sys ticks elapsed since the last sys tick * and notify the kernel if necessary. */ - sys_elapsed = rtc_elapsed / sys_clock_hw_cycles_per_tick; + sys_elapsed = rtc_elapsed / sys_clock_hw_cycles_per_tick(); if (sys_elapsed > expected_sys_ticks) { /* Never announce more sys ticks than the kernel asked @@ -141,15 +145,15 @@ static void rtc_announce_set_next(void) * has passed. 
*/ rtc_past = (rtc_past + - (sys_elapsed * sys_clock_hw_cycles_per_tick) + (sys_elapsed * sys_clock_hw_cycles_per_tick()) ) & RTC_MASK; _sys_idle_elapsed_ticks = sys_elapsed; - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); } /* Set the RTC to the next sys tick */ - rtc_compare_set(rtc_past + sys_clock_hw_cycles_per_tick); + rtc_compare_set(rtc_past + sys_clock_hw_cycles_per_tick()); } #endif @@ -196,8 +200,8 @@ void _timer_idle_enter(s32_t sys_ticks) #else /* Restrict ticks to max supported by RTC without risking overflow*/ if ((sys_ticks < 0) || - (sys_ticks > (RTC_HALF / sys_clock_hw_cycles_per_tick))) { - sys_ticks = RTC_HALF / sys_clock_hw_cycles_per_tick; + (sys_ticks > (RTC_HALF / sys_clock_hw_cycles_per_tick()))) { + sys_ticks = RTC_HALF / sys_clock_hw_cycles_per_tick(); } expected_sys_ticks = sys_ticks; @@ -205,7 +209,7 @@ void _timer_idle_enter(s32_t sys_ticks) /* If ticks is 0, the RTC interrupt handler will be set pending * immediately, meaning that we will not go to sleep. */ - rtc_compare_set(rtc_past + (sys_ticks * sys_clock_hw_cycles_per_tick)); + rtc_compare_set(rtc_past + (sys_ticks * sys_clock_hw_cycles_per_tick())); #endif } @@ -213,20 +217,20 @@ void _timer_idle_enter(s32_t sys_ticks) #ifdef CONFIG_TICKLESS_KERNEL /* * Set RTC Counter Compare (CC) register to max value - * and update the _sys_clock_tick_count. + * and update the z_tick_get(). 
*/ static inline void program_max_cycles(void) { u32_t max_cycles = _get_max_clock_time(); - _sys_clock_tick_count = _get_elapsed_clock_time(); + z_tick_set(z_clock_uptime()); /* Update rtc_past to track rtc timer count*/ - rtc_past = (_sys_clock_tick_count * - sys_clock_hw_cycles_per_tick) & RTC_MASK; + rtc_past = (z_tick_get() * + sys_clock_hw_cycles_per_tick()) & RTC_MASK; /* Programe RTC compare register to generate interrupt*/ rtc_compare_set(rtc_past + - (max_cycles * sys_clock_hw_cycles_per_tick)); + (max_cycles * sys_clock_hw_cycles_per_tick())); } @@ -283,7 +287,7 @@ u32_t _get_elapsed_program_time(void) rtc_elapsed = (RTC_COUNTER - rtc_past_copy) & RTC_MASK; /* Convert number of Machine cycles to SYS_TICKS */ - return (rtc_elapsed / sys_clock_hw_cycles_per_tick); + return (rtc_elapsed / sys_clock_hw_cycles_per_tick()); } @@ -305,16 +309,16 @@ void _set_time(u32_t time) /* Update expected_sys_ticls to time to programe*/ expected_sys_ticks = time; - _sys_clock_tick_count = _get_elapsed_clock_time(); + z_tick_set(z_clock_uptime()); /* Update rtc_past to track rtc timer count*/ - rtc_past = (_sys_clock_tick_count * sys_clock_hw_cycles_per_tick) & RTC_MASK; + rtc_past = (z_tick_get() * sys_clock_hw_cycles_per_tick()) & RTC_MASK; expected_sys_ticks = expected_sys_ticks > _get_max_clock_time() ? _get_max_clock_time() : expected_sys_ticks; /* Programe RTC compare register to generate interrupt*/ rtc_compare_set(rtc_past + - (expected_sys_ticks * sys_clock_hw_cycles_per_tick)); + (expected_sys_ticks * sys_clock_hw_cycles_per_tick())); } @@ -336,8 +340,8 @@ s32_t _get_max_clock_time(void) rtc_away = rtc_away > RTC_HALF ? 
RTC_HALF : rtc_away; /* Convert RTC Ticks to SYS TICKS*/ - if (rtc_away >= sys_clock_hw_cycles_per_tick) { - sys_away = rtc_away / sys_clock_hw_cycles_per_tick; + if (rtc_away >= sys_clock_hw_cycles_per_tick()) { + sys_away = rtc_away / sys_clock_hw_cycles_per_tick(); } return sys_away; @@ -363,13 +367,13 @@ void _enable_sys_clock(void) * returns : total number of sys ticks passed since device bootup. */ -u64_t _get_elapsed_clock_time(void) +u64_t z_clock_uptime(void) { u64_t elapsed; u32_t rtc_elapsed, rtc_past_copy; - /* Read _sys_clock_tick_count and rtc_past before RTC_COUNTER */ - elapsed = _sys_clock_tick_count; + /* Read z_tick_get() and rtc_past before RTC_COUNTER */ + elapsed = z_tick_get(); rtc_past_copy = rtc_past; /* Make sure that compiler will not reverse access to RTC and @@ -378,9 +382,9 @@ u64_t _get_elapsed_clock_time(void) compiler_barrier(); rtc_elapsed = (RTC_COUNTER - rtc_past_copy) & RTC_MASK; - if (rtc_elapsed >= sys_clock_hw_cycles_per_tick) { + if (rtc_elapsed >= sys_clock_hw_cycles_per_tick()) { /* Update total number of SYS_TICKS passed */ - elapsed += (rtc_elapsed / sys_clock_hw_cycles_per_tick); + elapsed += (rtc_elapsed / sys_clock_hw_cycles_per_tick()); } return elapsed; @@ -414,7 +418,7 @@ u64_t _get_elapsed_clock_time(void) * b) Schedule next sys tick at 400. * */ -void _timer_idle_exit(void) +void z_clock_idle_exit(void) { #ifdef CONFIG_TICKLESS_KERNEL if (!expected_sys_ticks && _sys_clock_always_on) { @@ -431,7 +435,7 @@ void _timer_idle_exit(void) rtc_announce_set_next(); /* After exiting idle, the kernel no longer expects more than one sys - * ticks to have passed when _sys_clock_tick_announce() is called. + * ticks to have passed when z_clock_announce(_sys_idle_elapsed_ticks) is called. 
*/ expected_sys_ticks = 1; #endif @@ -487,9 +491,9 @@ void rtc1_nrf5_isr(void *arg) */ expected_sys_ticks = 0; /* Anounce elapsed of _sys_idle_elapsed_ticks systicks*/ - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); - /* _sys_clock_tick_announce() could cause new programming */ + /* z_clock_announce(_sys_idle_elapsed_ticks) could cause new programming */ if (!expected_sys_ticks && _sys_clock_always_on) { program_max_cycles(); } @@ -505,7 +509,7 @@ void rtc1_nrf5_isr(void *arg) } -int _sys_clock_driver_init(struct device *device) +int z_clock_driver_init(struct device *device) { struct device *clock; @@ -526,7 +530,7 @@ int _sys_clock_driver_init(struct device *device) /* TODO: replace with counter driver to access RTC */ SYS_CLOCK_RTC->PRESCALER = 0; - nrf_rtc_cc_set(SYS_CLOCK_RTC, RTC_CC_IDX, sys_clock_hw_cycles_per_tick); + nrf_rtc_cc_set(SYS_CLOCK_RTC, RTC_CC_IDX, sys_clock_hw_cycles_per_tick()); nrf_rtc_event_enable(SYS_CLOCK_RTC, RTC_EVTENSET_COMPARE0_Msk); nrf_rtc_int_enable(SYS_CLOCK_RTC, RTC_INTENSET_COMPARE0_Msk); @@ -549,17 +553,17 @@ u32_t _timer_cycle_get_32(void) u32_t elapsed_cycles; /* Number of timer cycles announced as ticks so far. */ - ticked_cycles = _sys_clock_tick_count * sys_clock_hw_cycles_per_tick; + ticked_cycles = z_tick_get() * sys_clock_hw_cycles_per_tick(); /* Make sure that compiler will not reverse access to RTC and - * _sys_clock_tick_count. + * z_tick_get(). */ compiler_barrier(); /* Number of timer cycles since last announced tick we know about. * * The value of RTC_COUNTER is not reset on tick, so it will - * compensate potentialy missed update of _sys_clock_tick_count + * compensate potentialy missed update of z_tick_get() * which could have happen between the ticked_cycles calculation * and the code below. 
*/ diff --git a/drivers/timer/pulpino_timer.c b/drivers/timer/pulpino_timer.c index d9dd6d6bbd8b..effa0da2e128 100644 --- a/drivers/timer/pulpino_timer.c +++ b/drivers/timer/pulpino_timer.c @@ -31,16 +31,16 @@ static void pulpino_timer_irq_handler(void *unused) /* Reset counter */ timer->val = 0; - accumulated_cycle_count += sys_clock_hw_cycles_per_tick; + accumulated_cycle_count += sys_clock_hw_cycles_per_tick(); - _sys_clock_tick_announce(); + z_clock_announce(1); } #ifdef CONFIG_TICKLESS_IDLE #error "Tickless idle not yet implemented for pulpino timer" #endif -int _sys_clock_driver_init(struct device *device) +int z_clock_driver_init(struct device *device) { ARG_UNUSED(device); IRQ_CONNECT(PULP_TIMER_A_CMP_IRQ, 0, @@ -50,10 +50,10 @@ int _sys_clock_driver_init(struct device *device) /* * Initialize timer. * Reset counter and set timer to generate interrupt - * every sys_clock_hw_cycles_per_tick + * every sys_clock_hw_cycles_per_tick() */ timer->val = 0; - timer->cmp = sys_clock_hw_cycles_per_tick; + timer->cmp = sys_clock_hw_cycles_per_tick(); timer->ctrl = TIMER_CTRL_EN; return 0; diff --git a/drivers/timer/riscv_machine_timer.c b/drivers/timer/riscv_machine_timer.c index 6b9f5949f61d..167ceb12b2db 100644 --- a/drivers/timer/riscv_machine_timer.c +++ b/drivers/timer/riscv_machine_timer.c @@ -51,9 +51,9 @@ static ALWAYS_INLINE void riscv_machine_rearm_timer(void) /* * Rearm timer to generate an interrupt after - * sys_clock_hw_cycles_per_tick + * sys_clock_hw_cycles_per_tick() */ - rtc += sys_clock_hw_cycles_per_tick; + rtc += sys_clock_hw_cycles_per_tick(); mtimecmp->val_low = (u32_t)(rtc & 0xffffffff); mtimecmp->val_high = (u32_t)((rtc >> 32) & 0xffffffff); @@ -69,7 +69,7 @@ static void riscv_machine_timer_irq_handler(void *unused) read_timer_start_of_tick_handler(); #endif - _sys_clock_tick_announce(); + z_clock_announce(1); /* Rearm timer */ riscv_machine_rearm_timer(); @@ -84,7 +84,7 @@ static void riscv_machine_timer_irq_handler(void *unused) #error 
"Tickless idle not yet implemented for riscv-machine timer" #endif -int _sys_clock_driver_init(struct device *device) +int z_clock_driver_init(struct device *device) { ARG_UNUSED(device); diff --git a/drivers/timer/sys_clock_init.c b/drivers/timer/sys_clock_init.c index f6139ddf20aa..3dda77782045 100644 --- a/drivers/timer/sys_clock_init.c +++ b/drivers/timer/sys_clock_init.c @@ -16,5 +16,16 @@ #include #include -SYS_DEVICE_DEFINE("sys_clock", _sys_clock_driver_init, sys_clock_device_ctrl, +SYS_DEVICE_DEFINE("sys_clock", z_clock_driver_init, z_clock_device_ctrl, PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY); + +int __weak z_clock_driver_init(struct device *device) +{ + return 0; +} + +int __weak z_clock_device_ctrl(struct device *device, + u32_t ctrl_command, void *context) +{ + return 0; +} diff --git a/drivers/timer/xtensa_sys_timer.c b/drivers/timer/xtensa_sys_timer.c index 1ac83daf9ce6..880b81f9cb51 100644 --- a/drivers/timer/xtensa_sys_timer.c +++ b/drivers/timer/xtensa_sys_timer.c @@ -14,6 +14,8 @@ #include "xtensa_rtos.h" +#include "legacy_api.h" + /* * This device driver can be also used with an extenal timer instead of * the internal one that may simply not exist. 
@@ -96,6 +98,8 @@ #define MIN_TIMER_PROG_DELAY 50 /* TODO: Update this value */ #endif /* CONFIG_XTENSA_INTERNAL_TIMER || (CONFIG_XTENSA_TIMER_IRQ < 0) */ +static s32_t _sys_idle_elapsed_ticks = 1; + #ifdef CONFIG_TICKLESS_IDLE #define TIMER_MODE_PERIODIC 0 /* normal running mode */ #define TIMER_MODE_ONE_SHOT 1 /* emulated, since sysTick has 1 mode */ @@ -103,8 +107,6 @@ #define IDLE_NOT_TICKLESS 0 /* non-tickless idle mode */ #define IDLE_TICKLESS 1 /* tickless idle mode */ -extern s32_t _sys_idle_elapsed_ticks; - static u32_t __noinit cycles_per_tick; static u32_t __noinit max_system_ticks; static u32_t idle_original_ticks; @@ -165,7 +167,7 @@ static inline void _set_max_clock_time(void) unsigned int key; key = irq_lock(); - _sys_clock_tick_count = _get_elapsed_clock_time(); + z_tick_set(z_clock_uptime()); last_timer_value = GET_TIMER_CURRENT_TIME(); irq_unlock(key); SET_TIMER_FIRE_TIME(MAX_TIMER_CYCLES); /* Program timer to max value */ @@ -189,7 +191,7 @@ void _set_time(u32_t time) } key = irq_lock(); /* Update System Level Ticks Time Keeping */ - _sys_clock_tick_count = _get_elapsed_clock_time(); + z_tick_set(z_clock_uptime()); C = GET_TIMER_CURRENT_TIME(); last_timer_value = C; irq_unlock(key); @@ -222,7 +224,7 @@ void _enable_sys_clock(void) } /* Total number of ticks passed since device bootup. */ -u64_t _get_elapsed_clock_time(void) +u64_t z_clock_uptime(void) { u32_t C; unsigned int key; @@ -233,7 +235,7 @@ u64_t _get_elapsed_clock_time(void) C = GET_TIMER_CURRENT_TIME(); elapsed = (last_timer_value <= C) ? 
(C - last_timer_value) : (MAX_TIMER_CYCLES - last_timer_value) + C; - total = (_sys_clock_tick_count + (elapsed / cycles_per_tick)); + total = (z_tick_get() + (elapsed / cycles_per_tick)); irq_unlock(key); return total; @@ -242,7 +244,7 @@ u64_t _get_elapsed_clock_time(void) static ALWAYS_INLINE void tickless_idle_init(void) { - cycles_per_tick = sys_clock_hw_cycles_per_tick; + cycles_per_tick = sys_clock_hw_cycles_per_tick(); /* calculate the max number of ticks with this 32-bit H/W counter */ max_system_ticks = MAX_TIMER_CYCLES / cycles_per_tick; max_load_value = max_system_ticks * cycles_per_tick; @@ -327,7 +329,7 @@ void _timer_idle_enter(s32_t ticks) * * @return N/A */ -void _timer_idle_exit(void) +void z_clock_idle_exit(void) { #ifdef CONFIG_TICKLESS_KERNEL if (!idle_original_ticks) { @@ -409,7 +411,7 @@ void _timer_idle_exit(void) SET_TIMER_FIRE_TIME(F); } if (_sys_idle_elapsed_ticks) { - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); } /* Exit timer idle mode */ @@ -527,7 +529,7 @@ void _timer_int_handler(void *params) _sys_idle_elapsed_ticks = idle_original_ticks; idle_original_ticks = 0; /* Anounce elapsed of _sys_idle_elapsed_ticks systicks */ - _sys_clock_tick_announce(); + z_clock_announce(_sys_idle_elapsed_ticks); /* Program timer incase it is not Prgrammed */ if (!idle_original_ticks) { @@ -536,7 +538,8 @@ void _timer_int_handler(void *params) } #else /* Announce the tick event to the kernel. 
*/ - _sys_clock_final_tick_announce(); + _sys_idle_elapsed_ticks = 1; + z_clock_announce(_sys_idle_elapsed_ticks); #endif /* CONFIG_TICKLESS_KERNEL */ #ifdef CONFIG_EXECUTION_BENCHMARKING @@ -555,7 +558,7 @@ void _timer_int_handler(void *params) * * @return 0 */ -int _sys_clock_driver_init(struct device *device) +int z_clock_driver_init(struct device *device) { IRQ_CONNECT(TIMER_IRQ, 0, _timer_int_handler, 0, 0); diff --git a/ext/hal/libmetal/libmetal/lib/system/zephyr/time.c b/ext/hal/libmetal/libmetal/lib/system/zephyr/time.c index 3458e841316a..67e040ac2277 100644 --- a/ext/hal/libmetal/libmetal/lib/system/zephyr/time.c +++ b/ext/hal/libmetal/libmetal/lib/system/zephyr/time.c @@ -12,10 +12,8 @@ #include #include -extern volatile u64_t _sys_clock_tick_count; - unsigned long long metal_get_timestamp(void) { - return (unsigned long long)_sys_clock_tick_count; + return (unsigned long long)z_tick_get(); } diff --git a/include/drivers/system_timer.h b/include/drivers/system_timer.h index 9fa129a1817d..0b1d47f21efa 100644 --- a/include/drivers/system_timer.h +++ b/include/drivers/system_timer.h @@ -19,70 +19,94 @@ extern "C" { #endif -#ifdef _ASMLANGUAGE - -GTEXT(_timer_int_handler) - -#else /* _ASMLANGUAGE */ - +#include #include -extern int _sys_clock_driver_init(struct device *device); - -extern void _timer_int_handler(void *arg); - -#ifdef CONFIG_SYSTEM_CLOCK_DISABLE -extern void sys_clock_disable(void); -#endif - -#ifdef CONFIG_TICKLESS_IDLE -extern void _timer_idle_enter(s32_t ticks); -extern void _timer_idle_exit(void); -#else -#define _timer_idle_enter(ticks) do { } while ((0)) -#define _timer_idle_exit() do { } while ((0)) -#endif /* CONFIG_TICKLESS_IDLE */ - -extern void _nano_sys_clock_tick_announce(s32_t ticks); -#ifdef CONFIG_TICKLESS_KERNEL -extern void _set_time(u32_t time); -extern u32_t _get_program_time(void); -extern u32_t _get_remaining_program_time(void); -extern u32_t _get_elapsed_program_time(void); -extern u64_t _get_elapsed_clock_time(void); 
-#endif +/** + * @brief Initialize system clock driver + * + * The system clock is a Zephyr device created globally. This is its + * initialization callback. It is a weak symbol that will be + * implemented as a noop if undefined in the clock driver. + */ +extern int z_clock_driver_init(struct device *device); -extern int sys_clock_device_ctrl(struct device *device, +/** + * @brief System clock device control + * + * The system clock is a Zephyr device created globally. This is its + * device control callback, used in a few devices for power + * management. It is a weak symbol that will be implemented as a noop + * if undefined in the clock driver. + */ +extern int z_clock_device_ctrl(struct device *device, u32_t ctrl_command, void *context); -/* - * Currently regarding timers, only loapic timer and arcv2_timer0 implements - * device pm functionality. For other timers, use default handler in case - * the app enables CONFIG_DEVICE_POWER_MANAGEMENT. +/** + * @brief Set system clock timeout + * + * Informs the system clock driver that the next needed call to + * z_clock_announce() will not be until the specified number of ticks + * from the current time have elapsed. Note that spurious calls + * to z_clock_announce() are allowed (i.e. it's legal to announce + * every tick and implement this function as a noop), the requirement + * is that one tick announcement should occur within one tick after + * the specified expiration. + * + * Note that ticks can also be passed the special value K_FOREVER, + * indicating that no future timer interrupts are expected or required + * and that the system is permitted to enter an indefinite sleep even + * if this could cause rollover of the internal counter (i.e. the + * system uptime counter is allowed to be wrong, see + * k_enable_sys_clock_always_on()). 
+ * + * Note also that it is conventional for the kernel to pass INT_MAX + * for ticks if it wants to preserve the uptime tick count but doesn't + * have a specific event to await. The intent here is that the driver + * will schedule any needed timeout as far into the future as + * possible. For the specific case of INT_MAX, the next call to + * z_clock_announce() may occur at any point in the future, not just + * at INT_MAX ticks. But the correspondence between the announced + * ticks and real-world time must be correct. + * + * @param ticks Timeout in tick units + * @param idle Hint to the driver that the system is about to enter + * the idle state immediately after setting the timeout */ -#if !defined(CONFIG_LOAPIC_TIMER) && !defined(CONFIG_ARCV2_TIMER) -#define sys_clock_device_ctrl device_pm_control_nop -#endif +extern void z_clock_set_timeout(s32_t ticks, bool idle); + +/** + * @brief Timer idle exit notification + * + * This notifies the timer driver that the system is exiting idle + * and allows it to do whatever bookkeeping is needed to restore timer + * operation and compute elapsed ticks. + * + * @note Legacy timer drivers also use this opportunity to call back + * into z_clock_announce() to notify the kernel of expired ticks. + * This is allowed for compatibility, but not recommended. The kernel + * will figure that out on its own. + */ +extern void z_clock_idle_exit(void); -extern s32_t _sys_idle_elapsed_ticks; -#define _sys_clock_tick_announce() \ - _nano_sys_clock_tick_announce(_sys_idle_elapsed_ticks) +/** + * @brief Announce time progress to the kernel + * + * Informs the kernel that the specified number of ticks have elapsed + * since the last call to z_clock_announce() (or system startup for + * the first call). 
+ * + * @param ticks Elapsed time, in ticks + */ +extern void z_clock_announce(s32_t ticks); /** - * @brief Account for the tick due to the timer interrupt + * @brief System uptime in ticks * - * @return N/A + * Queries the clock driver for the current time elapsed since system + * bootup in ticks. */ -static inline void _sys_clock_final_tick_announce(void) -{ - /* - * Ticks are both announced and immediately processed at interrupt - * level. Thus there is only one tick left to announce (and process). - */ - _sys_idle_elapsed_ticks = 1; - _sys_clock_tick_announce(); -} -#endif /* _ASMLANGUAGE */ +extern u64_t z_clock_uptime(void); #ifdef __cplusplus } diff --git a/include/kernel.h b/include/kernel.h index cd97e486a8d3..5a15b4fbff4b 100644 --- a/include/kernel.h +++ b/include/kernel.h @@ -354,21 +354,6 @@ struct __packed _k_thread_stack_element { }; typedef struct _k_thread_stack_element k_thread_stack_t; -/* timeouts */ - -struct _timeout; -typedef void (*_timeout_func_t)(struct _timeout *t); - -struct _timeout { - sys_dnode_t node; - struct k_thread *thread; - sys_dlist_t *wait_q; - s32_t delta_ticks_from_prev; - _timeout_func_t func; -}; - -extern s32_t _timeout_remaining_get(struct _timeout *timeout); - /** * @typedef k_thread_entry_t * @brief Thread entry point function type. @@ -1227,8 +1212,6 @@ __syscall void *k_thread_custom_data_get(void); * @} */ -#include - /** * @addtogroup clock_apis * @{ @@ -1310,81 +1293,6 @@ __syscall void *k_thread_custom_data_get(void); * @cond INTERNAL_HIDDEN */ -/* kernel clocks */ - -#ifdef CONFIG_SYS_CLOCK_EXISTS - -/* - * If timer frequency is known at compile time, a simple (32-bit) - * tick <-> ms conversion could be used for some combinations of - * hardware timer frequency and tick rate. Otherwise precise - * (64-bit) calculations are used. 
- */ - -#if !defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME) -#if (sys_clock_hw_cycles_per_sec % sys_clock_ticks_per_sec) != 0 - #define _NEED_PRECISE_TICK_MS_CONVERSION -#elif (MSEC_PER_SEC % sys_clock_ticks_per_sec) != 0 - #define _NON_OPTIMIZED_TICKS_PER_SEC -#endif -#endif - -#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME) || \ - defined(_NON_OPTIMIZED_TICKS_PER_SEC) - #define _NEED_PRECISE_TICK_MS_CONVERSION -#endif -#endif - -static ALWAYS_INLINE s32_t _ms_to_ticks(s32_t ms) -{ -#ifdef CONFIG_SYS_CLOCK_EXISTS - -#ifdef _NEED_PRECISE_TICK_MS_CONVERSION - /* use 64-bit math to keep precision */ - return (s32_t)ceiling_fraction( - (s64_t)ms * sys_clock_hw_cycles_per_sec, - ((s64_t)MSEC_PER_SEC * sys_clock_hw_cycles_per_sec) / - sys_clock_ticks_per_sec); -#else - /* simple division keeps precision */ - s32_t ms_per_tick = MSEC_PER_SEC / sys_clock_ticks_per_sec; - - return (s32_t)ceiling_fraction(ms, ms_per_tick); -#endif - -#else - __ASSERT(ms == 0, "ms not zero"); - return 0; -#endif -} - -static inline s64_t __ticks_to_ms(s64_t ticks) -{ -#ifdef CONFIG_SYS_CLOCK_EXISTS - -#ifdef _NEED_PRECISE_TICK_MS_CONVERSION - /* use 64-bit math to keep precision */ - return (u64_t)ticks * MSEC_PER_SEC / sys_clock_ticks_per_sec; -#else - /* simple multiplication keeps precision */ - u32_t ms_per_tick = MSEC_PER_SEC / sys_clock_ticks_per_sec; - - return (u64_t)ticks * ms_per_tick; -#endif - -#else - __ASSERT(ticks == 0, "ticks not zero"); - return 0; -#endif -} - -/* added tick needed to account for tick in progress */ -#ifdef CONFIG_TICKLESS_KERNEL -#define _TICK_ALIGN 0 -#else -#define _TICK_ALIGN 1 -#endif - struct k_timer { /* * _timeout structure must be first here if we want to use @@ -1646,26 +1554,16 @@ __syscall s64_t k_uptime_get(void); /** * @brief Enable clock always on in tickless kernel * - * This routine enables keeping the clock running when - * there are no timer events programmed in tickless kernel - * scheduling. 
This is necessary if the clock is used to track - * passage of time. + * This routine enables keeping the clock running (that is, it always + * keeps an active timer interrupt scheduled) when there are no timer + * events programmed in tickless kernel scheduling. This is necessary + * if the clock is used to track passage of time (e.g. via + * k_uptime_get_32()), otherwise the internal hardware counter may + * roll over between interrupts. * * @retval prev_status Previous status of always on flag */ -static inline int k_enable_sys_clock_always_on(void) -{ -#ifdef CONFIG_TICKLESS_KERNEL - int prev_status = _sys_clock_always_on; - - _sys_clock_always_on = 1; - _enable_sys_clock(); - - return prev_status; -#else - return -ENOTSUP; -#endif -} +int k_enable_sys_clock_always_on(void); /** * @brief Disable clock always on in tickless kernel @@ -1675,12 +1573,7 @@ static inline int k_enable_sys_clock_always_on(void) * scheduling. To save power, this routine should be called * immediately when clock is not used to track time. */ -static inline void k_disable_sys_clock_always_on(void) -{ -#ifdef CONFIG_TICKLESS_KERNEL - _sys_clock_always_on = 0; -#endif -} +void k_disable_sys_clock_always_on(void); /** * @brief Get system uptime (32-bit version). 
diff --git a/include/kernel_includes.h b/include/kernel_includes.h index c701f1d37903..283a9f19412e 100644 --- a/include/kernel_includes.h +++ b/include/kernel_includes.h @@ -33,5 +33,6 @@ #include #include #include +#include #endif /* ZEPHYR_INCLUDE_KERNEL_INCLUDES_H_ */ diff --git a/include/sys_clock.h b/include/sys_clock.h index 326f1a959b29..abcda46504de 100644 --- a/include/sys_clock.h +++ b/include/sys_clock.h @@ -16,49 +16,50 @@ #ifndef ZEPHYR_INCLUDE_SYS_CLOCK_H_ #define ZEPHYR_INCLUDE_SYS_CLOCK_H_ +#include +#include + #ifdef __cplusplus extern "C" { #endif -#ifndef _ASMLANGUAGE #include #include -#if defined(CONFIG_SYS_CLOCK_EXISTS) && \ - (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC == 0) -#error "SYS_CLOCK_HW_CYCLES_PER_SEC must be non-zero!" -#endif - #ifdef CONFIG_TICKLESS_KERNEL -#define sys_clock_ticks_per_sec \ - (1000000 / (CONFIG_TICKLESS_KERNEL_TIME_UNIT_IN_MICRO_SECS)) extern int _sys_clock_always_on; extern void _enable_sys_clock(void); -#else -#define sys_clock_ticks_per_sec CONFIG_SYS_CLOCK_TICKS_PER_SEC #endif +static inline int sys_clock_hw_cycles_per_sec(void) +{ #if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME) -extern int sys_clock_hw_cycles_per_sec; + extern int z_clock_hw_cycles_per_sec; + + return z_clock_hw_cycles_per_sec; #else -#define sys_clock_hw_cycles_per_sec CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC + return CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC; #endif +} -/* - * sys_clock_us_per_tick global variable represents a number - * of microseconds in one OS timer tick - * - * Note: This variable is deprecated and will be removed soon! +/* Note that some systems with comparatively slow cycle counters + * experience precision loss when doing math like this. In the + * general case it is not correct that "cycles" are much faster than + * "ticks". 
*/ -__deprecated extern int sys_clock_us_per_tick; +static inline int sys_clock_hw_cycles_per_tick(void) +{ +#ifdef CONFIG_SYS_CLOCK_EXISTS + return sys_clock_hw_cycles_per_sec() / CONFIG_SYS_CLOCK_TICKS_PER_SEC; +#else + return 1; /* Just to avoid a division by zero */ +#endif +} -/* - * sys_clock_hw_cycles_per_tick global variable represents a number - * of platform clock ticks in one OS timer tick. - * sys_clock_hw_cycles_per_tick often represents a value of divider - * of the board clock frequency - */ -extern int sys_clock_hw_cycles_per_tick; +#if defined(CONFIG_SYS_CLOCK_EXISTS) && \ + (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC == 0) +#error "SYS_CLOCK_HW_CYCLES_PER_SEC must be non-zero!" +#endif /* number of nsec per usec */ #define NSEC_PER_USEC 1000 @@ -76,9 +77,84 @@ extern int sys_clock_hw_cycles_per_tick; #define NSEC_PER_SEC ((NSEC_PER_USEC) * (USEC_PER_MSEC) * (MSEC_PER_SEC)) +/* kernel clocks */ + +#ifdef CONFIG_SYS_CLOCK_EXISTS + +/* + * If timer frequency is known at compile time, a simple (32-bit) + * tick <-> ms conversion could be used for some combinations of + * hardware timer frequency and tick rate. Otherwise precise + * (64-bit) calculations are used. 
+ */ + +#if !defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME) +#if (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC % CONFIG_SYS_CLOCK_TICKS_PER_SEC) != 0 + #define _NEED_PRECISE_TICK_MS_CONVERSION +#elif (MSEC_PER_SEC % CONFIG_SYS_CLOCK_TICKS_PER_SEC) != 0 + #define _NON_OPTIMIZED_TICKS_PER_SEC +#endif +#endif + +#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME) || \ + defined(_NON_OPTIMIZED_TICKS_PER_SEC) + #define _NEED_PRECISE_TICK_MS_CONVERSION +#endif +#endif + +static ALWAYS_INLINE s32_t _ms_to_ticks(s32_t ms) +{ +#ifdef CONFIG_SYS_CLOCK_EXISTS + +#ifdef _NEED_PRECISE_TICK_MS_CONVERSION + /* use 64-bit math to keep precision */ + return (s32_t)ceiling_fraction( + (s64_t)ms * sys_clock_hw_cycles_per_sec(), + ((s64_t)MSEC_PER_SEC * sys_clock_hw_cycles_per_sec()) / + CONFIG_SYS_CLOCK_TICKS_PER_SEC); +#else + /* simple division keeps precision */ + s32_t ms_per_tick = MSEC_PER_SEC / CONFIG_SYS_CLOCK_TICKS_PER_SEC; + + return (s32_t)ceiling_fraction(ms, ms_per_tick); +#endif + +#else + __ASSERT(ms == 0, "ms not zero"); + return 0; +#endif +} + +static inline s64_t __ticks_to_ms(s64_t ticks) +{ +#ifdef CONFIG_SYS_CLOCK_EXISTS + +#ifdef _NEED_PRECISE_TICK_MS_CONVERSION + /* use 64-bit math to keep precision */ + return (u64_t)ticks * MSEC_PER_SEC / CONFIG_SYS_CLOCK_TICKS_PER_SEC; +#else + /* simple multiplication keeps precision */ + u32_t ms_per_tick = MSEC_PER_SEC / CONFIG_SYS_CLOCK_TICKS_PER_SEC; + + return (u64_t)ticks * ms_per_tick; +#endif + +#else + __ASSERT(ticks == 0, "ticks not zero"); + return 0; +#endif +} + +/* added tick needed to account for tick in progress */ +#ifdef CONFIG_TICKLESS_KERNEL +#define _TICK_ALIGN 0 +#else +#define _TICK_ALIGN 1 +#endif + /* SYS_CLOCK_HW_CYCLES_TO_NS64 converts CPU clock cycles to nanoseconds */ #define SYS_CLOCK_HW_CYCLES_TO_NS64(X) \ - (((u64_t)(X) * NSEC_PER_SEC) / sys_clock_hw_cycles_per_sec) + (((u64_t)(X) * NSEC_PER_SEC) / sys_clock_hw_cycles_per_sec()) /* * SYS_CLOCK_HW_CYCLES_TO_NS_AVG converts CPU clock cycles 
to nanoseconds @@ -109,21 +185,59 @@ extern int sys_clock_hw_cycles_per_tick; * @} end defgroup clock_apis */ -extern volatile u64_t _sys_clock_tick_count; +/** + * + * @brief Return the lower part of the current system tick count + * + * @return the current system tick count + * + */ +u32_t z_tick_get_32(void); + +/** + * + * @brief Return the current system tick count + * + * @return the current system tick count + * + */ +s64_t z_tick_get(void); + +/** + * + * @brief Sets the current system tick count + * + * @param ticks Ticks since system start + * + */ +void z_tick_set(s64_t ticks); + +/* timeouts */ + +struct _timeout; +typedef void (*_timeout_func_t)(struct _timeout *t); + +struct _timeout { + sys_dnode_t node; + struct k_thread *thread; + sys_dlist_t *wait_q; + s32_t delta_ticks_from_prev; + _timeout_func_t func; +}; + +extern s32_t _timeout_remaining_get(struct _timeout *timeout); /* * Number of ticks for x seconds. NOTE: With MSEC() or USEC(), * since it does an integer division, x must be greater or equal to - * 1000/sys_clock_ticks_per_sec to get a non-zero value. + * 1000/CONFIG_SYS_CLOCK_TICKS_PER_SEC to get a non-zero value. * You may want to raise CONFIG_SYS_CLOCK_TICKS_PER_SEC depending on * your requirements. */ -#define SECONDS(x) ((x) * sys_clock_ticks_per_sec) +#define SECONDS(x) ((x) * CONFIG_SYS_CLOCK_TICKS_PER_SEC) #define MSEC(x) (SECONDS(x) / MSEC_PER_SEC) #define USEC(x) (MSEC(x) / USEC_PER_MSEC) -#endif /* !_ASMLANGUAGE */ - #ifdef __cplusplus } #endif diff --git a/kernel/Kconfig b/kernel/Kconfig index bd1172542daa..f9a081357bd8 100644 --- a/kernel/Kconfig +++ b/kernel/Kconfig @@ -516,6 +516,11 @@ config SYS_CLOCK_TICKS_PER_SEC can require a non-trivial extra amount of stack space (e.g. around 80 bytes on x86). + Note that when available and enabled, in "tickless" mode + this config variable specifies the minimum available timing + granularity, not necessarily the number or frequency of + interrupts delivered to the kernel. 
+ config SYS_CLOCK_HW_CYCLES_PER_SEC int "System clock's h/w timer frequency" help diff --git a/kernel/Kconfig.power_mgmt b/kernel/Kconfig.power_mgmt index 3f0cf0a75125..dcc53d86ea22 100644 --- a/kernel/Kconfig.power_mgmt +++ b/kernel/Kconfig.power_mgmt @@ -97,18 +97,6 @@ config TICKLESS_KERNEL clock interrupt generation would be stopped at all times. This option requires Tickless Idle option to be enabled. -config TICKLESS_KERNEL_TIME_UNIT_IN_MICRO_SECS - int "Tickless kernel time unit in micro seconds" - default 1000 - depends on TICKLESS_KERNEL - help - This option makes the system clock and scheduling granularity. - The default will be one millisecond. This option also determines - the time unit passed in functions like _sys_soc_suspend. The - value should be determined based what the timer hardware and driver - can support. Specifying too small a time unit than what the overall - system speed can support would cause scheduling errors. - config BUSY_WAIT_USES_ALTERNATE_CLOCK bool "Busy wait uses alternate clock in tickless kernel mode" help diff --git a/kernel/idle.c b/kernel/idle.c index 2afd7ac40158..1174705ad6a6 100644 --- a/kernel/idle.c +++ b/kernel/idle.c @@ -13,6 +13,8 @@ #include #include +extern u64_t z_last_tick_announced; + #if defined(CONFIG_TICKLESS_IDLE) /* * Idle time must be this value or higher for timer to go into tickless idle @@ -72,7 +74,7 @@ static void sys_power_save_idle(s32_t ticks) { #ifdef CONFIG_TICKLESS_KERNEL if (ticks != K_FOREVER) { - ticks -= _get_elapsed_program_time(); + ticks -= (int)(z_clock_uptime() - z_last_tick_announced); if (!ticks) { /* * Timer has expired or about to expire @@ -97,7 +99,7 @@ static void sys_power_save_idle(s32_t ticks) * reprogram timer only if the currently programmed time * duration is smaller than the idle time. 
*/ - _timer_idle_enter(ticks); + z_clock_set_timeout(ticks, true); } set_kernel_idle_time_in_ticks(ticks); @@ -145,7 +147,7 @@ void _sys_power_save_idle_exit(s32_t ticks) if (_must_enter_tickless_idle(ticks)) { /* Resume normal periodic system timer interrupts */ - _timer_idle_exit(); + z_clock_idle_exit(); } } diff --git a/kernel/include/timeout_q.h b/kernel/include/timeout_q.h index e40234e12db8..5d01ce15036b 100644 --- a/kernel/include/timeout_q.h +++ b/kernel/include/timeout_q.h @@ -21,6 +21,8 @@ extern "C" { #endif +extern u64_t z_last_tick_announced; + /* initialize the timeouts part of k_thread when enabled in the kernel */ static inline void _init_timeout(struct _timeout *t, _timeout_func_t func) @@ -172,6 +174,16 @@ static inline void _dump_timeout_q(void) #endif } +/* find the closest deadline in the timeout queue */ + +static inline s32_t _get_next_timeout_expiry(void) +{ + struct _timeout *t = (struct _timeout *) + sys_dlist_peek_head(&_timeout_q); + + return t ? t->delta_ticks_from_prev : K_FOREVER; +} + /* * Add timeout to timeout queue. Record waiting thread and wait queue if any. * @@ -229,11 +241,9 @@ static inline void _add_timeout(struct k_thread *thread, * This is like adding this timout back in history. 
*/ u32_t adjusted_timeout; - u32_t program_time = _get_program_time(); - if (program_time > 0) { - *delta += _get_elapsed_program_time(); - } + *delta += (int)(z_clock_uptime() - z_last_tick_announced); + adjusted_timeout = *delta; #endif SYS_DLIST_FOR_EACH_CONTAINER(&_timeout_q, in_q, node) { @@ -255,8 +265,8 @@ static inline void _add_timeout(struct k_thread *thread, _dump_timeout_q(); #ifdef CONFIG_TICKLESS_KERNEL - if (!program_time || (adjusted_timeout < program_time)) { - _set_time(adjusted_timeout); + if (adjusted_timeout < _get_next_timeout_expiry()) { + z_clock_set_timeout(adjusted_timeout, false); } #endif } @@ -276,16 +286,6 @@ static inline void _add_thread_timeout(struct k_thread *thread, _add_timeout(thread, &thread->base.timeout, wait_q, timeout_in_ticks); } -/* find the closest deadline in the timeout queue */ - -static inline s32_t _get_next_timeout_expiry(void) -{ - struct _timeout *t = (struct _timeout *) - sys_dlist_peek_head(&_timeout_q); - - return t ? t->delta_ticks_from_prev : K_FOREVER; -} - #ifdef __cplusplus } #endif diff --git a/kernel/mempool.c b/kernel/mempool.c index 6b6a140bf621..7e3341c02fc0 100644 --- a/kernel/mempool.c +++ b/kernel/mempool.c @@ -16,8 +16,6 @@ extern struct k_mem_pool _k_mem_pool_list_start[]; extern struct k_mem_pool _k_mem_pool_list_end[]; -s64_t _tick_get(void); - static struct k_mem_pool *get_pool(int id) { return &_k_mem_pool_list_start[id]; @@ -57,7 +55,7 @@ int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block, __ASSERT(!(_is_in_isr() && timeout != K_NO_WAIT), ""); if (timeout > 0) { - end = _tick_get() + _ms_to_ticks(timeout); + end = z_tick_get() + _ms_to_ticks(timeout); } while (true) { @@ -95,7 +93,7 @@ int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block, (void)_pend_current_thread(irq_lock(), &p->wait_q, timeout); if (timeout != K_FOREVER) { - timeout = end - _tick_get(); + timeout = end - z_tick_get(); if (timeout < 0) { break; diff --git a/kernel/sched.c 
b/kernel/sched.c index 37d4cfc3fe93..a7724e8c02e0 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -634,7 +634,7 @@ int _is_thread_time_slicing(struct k_thread *thread) void z_reset_timeslice(void) { if (_is_thread_time_slicing(_get_next_ready_thread())) { - _set_time(_time_slice_duration); + z_clock_set_timeout(_time_slice_duration, false); } } #endif @@ -648,17 +648,11 @@ void _update_time_slice_before_swap(void) return; } - u32_t remaining = _get_remaining_program_time(); - - if (!remaining || (_time_slice_duration < remaining)) { - _set_time(_time_slice_duration); - } else { - /* Account previous elapsed time and reprogram - * timer with remaining time - */ - _set_time(remaining); - } + int elapsed = (int)(z_clock_uptime() - z_last_tick_announced); + int next_timeout = _get_next_timeout_expiry() - elapsed; + int t = min(_time_slice_duration, next_timeout); + z_clock_set_timeout(t, false); #endif /* Restart time slice count at new thread switch */ _time_slice_elapsed = 0; diff --git a/kernel/sys_clock.c b/kernel/sys_clock.c index ef46b2915e0c..400ee657dd8f 100644 --- a/kernel/sys_clock.c +++ b/kernel/sys_clock.c @@ -21,25 +21,22 @@ #endif #ifdef CONFIG_SYS_CLOCK_EXISTS -int sys_clock_us_per_tick = 1000000 / sys_clock_ticks_per_sec; -int sys_clock_hw_cycles_per_tick = - CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC / sys_clock_ticks_per_sec; #if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME) -int sys_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC; +int z_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC; #endif #else -/* don't initialize to avoid division-by-zero error */ -int sys_clock_us_per_tick; -int sys_clock_hw_cycles_per_tick; #if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME) -int sys_clock_hw_cycles_per_sec; +int z_clock_hw_cycles_per_sec; #endif #endif -/* updated by timer driver for tickless, stays at 1 for non-tickless */ -s32_t _sys_idle_elapsed_ticks = 1; +/* Note that this value is 64 bits, and thus non-atomic on almost 
all + * Zephyr architectures. And of course it's routinely updated inside + * timer interrupts. Access to it must be locked. + */ +static volatile u64_t tick_count; -volatile u64_t _sys_clock_tick_count; +u64_t z_last_tick_announced; #ifdef CONFIG_TICKLESS_KERNEL /* @@ -48,26 +45,19 @@ volatile u64_t _sys_clock_tick_count; * system clock to track passage of time without interruption. * To save power, this should be turned on only when required. */ -int _sys_clock_always_on; +int _sys_clock_always_on = 1; static u32_t next_ts; #endif -/** - * - * @brief Return the lower part of the current system tick count - * - * @return the current system tick count - * - */ -u32_t _tick_get_32(void) + +u32_t z_tick_get_32(void) { #ifdef CONFIG_TICKLESS_KERNEL - return (u32_t)_get_elapsed_clock_time(); + return (u32_t)z_clock_uptime(); #else - return (u32_t)_sys_clock_tick_count; + return (u32_t)tick_count; #endif } -FUNC_ALIAS(_tick_get_32, sys_tick_get_32, u32_t); u32_t _impl_k_uptime_get_32(void) { @@ -75,7 +65,7 @@ __ASSERT(_sys_clock_always_on, "Call k_enable_sys_clock_always_on to use clock API"); #endif - return __ticks_to_ms(_tick_get_32()); + return __ticks_to_ms(z_tick_get_32()); } #ifdef CONFIG_USERSPACE @@ -88,33 +78,26 @@ Z_SYSCALL_HANDLER(k_uptime_get_32) } #endif -/** - * - * @brief Return the current system tick count - * - * @return the current system tick count - * - */ -s64_t _tick_get(void) +s64_t z_tick_get(void) { - s64_t tmp_sys_clock_tick_count; - /* - * Lock the interrupts when reading _sys_clock_tick_count 64-bit - * variable.
Some architectures (x86) do not handle 64-bit atomically, - * so we have to lock the timer interrupt that causes change of - * _sys_clock_tick_count - */ - unsigned int imask = irq_lock(); - #ifdef CONFIG_TICKLESS_KERNEL - tmp_sys_clock_tick_count = _get_elapsed_clock_time(); + return z_clock_uptime(); #else - tmp_sys_clock_tick_count = _sys_clock_tick_count; + unsigned int key = irq_lock(); + s64_t ret = tick_count; + + irq_unlock(key); + return ret; #endif - irq_unlock(imask); - return tmp_sys_clock_tick_count; } -FUNC_ALIAS(_tick_get, sys_tick_get, s64_t); + +void z_tick_set(s64_t val) +{ + unsigned int key = irq_lock(); + + tick_count = val; + irq_unlock(key); +} s64_t _impl_k_uptime_get(void) { @@ -122,7 +105,7 @@ s64_t _impl_k_uptime_get(void) __ASSERT(_sys_clock_always_on, "Call k_enable_sys_clock_always_on to use clock API"); #endif - return __ticks_to_ms(_tick_get()); + return __ticks_to_ms(z_tick_get()); } #ifdef CONFIG_USERSPACE @@ -300,7 +283,7 @@ static void handle_time_slicing(s32_t ticks) /** * - * @brief Announce a tick to the kernel + * @brief Announce ticks to the kernel * * This function is only to be called by the system clock timer driver when a * tick is to be announced to the kernel. It takes care of dequeuing the @@ -308,8 +291,10 @@ static void handle_time_slicing(s32_t ticks) * * @return N/A */ -void _nano_sys_clock_tick_announce(s32_t ticks) +void z_clock_announce(s32_t ticks) { + z_last_tick_announced += ticks; + #ifdef CONFIG_SMP /* sys_clock timekeeping happens only on the main CPU */ if (_arch_curr_cpu()->id) { @@ -322,9 +307,8 @@ void _nano_sys_clock_tick_announce(s32_t ticks) K_DEBUG("ticks: %d\n", ticks); - /* 64-bit value, ensure atomic access with irq lock */ key = irq_lock(); - _sys_clock_tick_count += ticks; + tick_count += ticks; irq_unlock(key); #endif handle_timeouts(ticks); @@ -339,11 +323,31 @@ void _nano_sys_clock_tick_announce(s32_t ticks) next_to = !next_to || (next_ts && next_to) > next_ts ? 
next_ts : next_to; - u32_t remaining = _get_remaining_program_time(); - - if ((!remaining && next_to) || (next_to < remaining)) { + if (next_to) { /* Clears current program if next_to = 0 and remaining > 0 */ - _set_time(next_to); + int dt = next_to ? next_to : (_sys_clock_always_on ? INT_MAX : K_FOREVER); + z_clock_set_timeout(dt, false); } #endif } + +int k_enable_sys_clock_always_on(void) +{ +#ifdef CONFIG_TICKLESS_KERNEL + int prev_status = _sys_clock_always_on; + + _sys_clock_always_on = 1; + _enable_sys_clock(); + + return prev_status; +#else + return -ENOTSUP; +#endif +} + +void k_disable_sys_clock_always_on(void) +{ +#ifdef CONFIG_TICKLESS_KERNEL + _sys_clock_always_on = 0; +#endif +} diff --git a/kernel/thread.c b/kernel/thread.c index 7c6a34ee74ab..1996b4d3d5d0 100644 --- a/kernel/thread.c +++ b/kernel/thread.c @@ -104,7 +104,7 @@ int saved_always_on = k_enable_sys_clock_always_on(); /* use 64-bit math to prevent overflow when multiplying */ u32_t cycles_to_wait = (u32_t)( (u64_t)usec_to_wait * - (u64_t)sys_clock_hw_cycles_per_sec / + (u64_t)sys_clock_hw_cycles_per_sec() / (u64_t)USEC_PER_SEC ); u32_t start_cycles = k_cycle_get_32(); diff --git a/misc/reboot.c b/misc/reboot.c index 864aa0dd5ba7..5e72237b61bf 100644 --- a/misc/reboot.c +++ b/misc/reboot.c @@ -16,6 +16,7 @@ #include extern void sys_arch_reboot(int type); +extern void sys_clock_disable(void); void sys_reboot(int type) { diff --git a/samples/net/zperf/src/zperf_internal.h b/samples/net/zperf/src/zperf_internal.h index 93d34e842849..4abeef2e62c1 100644 --- a/samples/net/zperf/src/zperf_internal.h +++ b/samples/net/zperf/src/zperf_internal.h @@ -31,7 +31,7 @@ #define HW_CYCLES_TO_USEC(__hw_cycle__) \ ( \ ((u64_t)(__hw_cycle__) * (u64_t)USEC_PER_SEC) / \ - ((u64_t)sys_clock_hw_cycles_per_sec) \ + ((u64_t)sys_clock_hw_cycles_per_sec()) \ ) #define HW_CYCLES_TO_SEC(__hw_cycle__) \ @@ -42,7 +42,7 @@ #define USEC_TO_HW_CYCLES(__usec__) \ ( \ - ((u64_t)(__usec__) * (u64_t)sys_clock_hw_cycles_per_sec) 
/ \ + ((u64_t)(__usec__) * (u64_t)sys_clock_hw_cycles_per_sec()) / \ ((u64_t)USEC_PER_SEC) \ ) diff --git a/samples/philosophers/src/main.c b/samples/philosophers/src/main.c index b9d77e120943..bd3e03d01eb9 100644 --- a/samples/philosophers/src/main.c +++ b/samples/philosophers/src/main.c @@ -141,9 +141,7 @@ static s32_t get_random_delay(int id, int period_in_ms) * and the current uptime to create some pseudo-randomness. It produces * a value between 0 and 31. */ - k_enable_sys_clock_always_on(); s32_t delay = (k_uptime_get_32()/100 * (id + 1)) & 0x1f; - k_disable_sys_clock_always_on(); /* add 1 to not generate a delay of 0 */ s32_t ms = (delay + 1) * period_in_ms; diff --git a/subsys/net/lib/sntp/sntp.c b/subsys/net/lib/sntp/sntp.c index cd44d1a1c84e..3614a29711a4 100644 --- a/subsys/net/lib/sntp/sntp.c +++ b/subsys/net/lib/sntp/sntp.c @@ -152,9 +152,7 @@ static u32_t get_uptime_in_sec(void) { u64_t time; - k_enable_sys_clock_always_on(); time = k_uptime_get_32(); - k_disable_sys_clock_always_on(); return time / MSEC_PER_SEC; } diff --git a/subsys/power/policy/policy_residency.c b/subsys/power/policy/policy_residency.c index 5bcdfc45e663..47a78043c7b3 100644 --- a/subsys/power/policy/policy_residency.c +++ b/subsys/power/policy/policy_residency.c @@ -13,11 +13,7 @@ #include LOG_MODULE_DECLARE(power); -#ifdef CONFIG_TICKLESS_KERNEL -#define SECS_TO_TICKS CONFIG_TICKLESS_KERNEL_TIME_UNIT_IN_MICRO_SECS -#else #define SECS_TO_TICKS CONFIG_SYS_CLOCK_TICKS_PER_SEC -#endif #if !(defined(CONFIG_SYS_POWER_STATE_CPU_LPS_SUPPORTED) || \ defined(CONFIG_SYS_POWER_STATE_CPU_LPS_1_SUPPORTED) || \ diff --git a/tests/benchmarks/app_kernel/src/master.h b/tests/benchmarks/app_kernel/src/master.h index 53148373e23e..81d86c21dabe 100644 --- a/tests/benchmarks/app_kernel/src/master.h +++ b/tests/benchmarks/app_kernel/src/master.h @@ -35,10 +35,10 @@ /* length of the output line */ #define SLINE_LEN 256 -#define SLEEP_TIME ((sys_clock_ticks_per_sec / 4) > 0 ? 
\ - sys_clock_ticks_per_sec / 4 : 1) -#define WAIT_TIME ((sys_clock_ticks_per_sec / 10) > 0 ? \ - sys_clock_ticks_per_sec / 10 : 1) +#define SLEEP_TIME ((CONFIG_SYS_CLOCK_TICKS_PER_SEC / 4) > 0 ? \ + CONFIG_SYS_CLOCK_TICKS_PER_SEC / 4 : 1) +#define WAIT_TIME ((CONFIG_SYS_CLOCK_TICKS_PER_SEC / 10) > 0 ? \ + CONFIG_SYS_CLOCK_TICKS_PER_SEC / 10 : 1) #define NR_OF_NOP_RUNS 10000 #define NR_OF_FIFO_RUNS 500 #define NR_OF_SEMA_RUNS 500 @@ -48,7 +48,7 @@ #define NR_OF_EVENT_RUNS 1000 #define NR_OF_MBOX_RUNS 128 #define NR_OF_PIPE_RUNS 256 -/* #define SEMA_WAIT_TIME (5 * sys_clock_ticks_per_sec) */ +/* #define SEMA_WAIT_TIME (5 * CONFIG_SYS_CLOCK_TICKS_PER_SEC) */ #define SEMA_WAIT_TIME (5000) /* global data */ extern char msg[MAX_MSG]; diff --git a/tests/benchmarks/footprint/README.txt b/tests/benchmarks/footprint/README.txt index 1ef71bce8a62..90f07f9e60e5 100644 --- a/tests/benchmarks/footprint/README.txt +++ b/tests/benchmarks/footprint/README.txt @@ -79,9 +79,9 @@ minimal ------- This configuration does NOT produce any output. To observe its operation, invoke it using gdb and observe that: -- the kernel's timer ISR & main thread increment "_sys_clock_tick_count" on a +- the kernel's timer ISR & main thread increment "z_tick_get()" on a regular basis -- k_cpu_idle() is invoked by the idle task each time _sys_clock_tick_count +- k_cpu_idle() is invoked by the idle task each time z_tick_get() is incremented regular diff --git a/tests/benchmarks/sys_kernel/src/syskernel.c b/tests/benchmarks/sys_kernel/src/syskernel.c index 344a8026be05..a2dd95bef919 100644 --- a/tests/benchmarks/sys_kernel/src/syskernel.c +++ b/tests/benchmarks/sys_kernel/src/syskernel.c @@ -147,11 +147,11 @@ void main(void) /* The following code is needed to make the benchmakring run on * slower platforms. 
*/ - u64_t time_stamp = _sys_clock_tick_count; + u64_t time_stamp = z_tick_get(); k_sleep(1); - u64_t time_stamp_2 = _sys_clock_tick_count; + u64_t time_stamp_2 = z_tick_get(); if (time_stamp_2 - time_stamp > 1) { number_of_loops = 10; diff --git a/tests/kernel/common/src/clock.c b/tests/kernel/common/src/clock.c index 9fba11f11afc..cf189a6479fc 100644 --- a/tests/kernel/common/src/clock.c +++ b/tests/kernel/common/src/clock.c @@ -101,7 +101,7 @@ void test_clock_cycle(void) c32 = k_cycle_get_32(); /*break if cycle counter wrap around*/ while (k_cycle_get_32() > c32 && - k_cycle_get_32() < (c32 + sys_clock_hw_cycles_per_tick)) + k_cycle_get_32() < (c32 + sys_clock_hw_cycles_per_tick())) #if defined(CONFIG_ARCH_POSIX) posix_halt_cpu(); #else @@ -124,7 +124,7 @@ void test_clock_cycle(void) if (c1 > c0) { /* delta cycle should be greater than 1 milli-second*/ zassert_true((c1 - c0) > - (sys_clock_hw_cycles_per_sec / MSEC_PER_SEC), NULL); + (sys_clock_hw_cycles_per_sec() / MSEC_PER_SEC), NULL); /* delta NS should be greater than 1 milli-second */ zassert_true(SYS_CLOCK_HW_CYCLES_TO_NS(c1 - c0) > (NSEC_PER_SEC / MSEC_PER_SEC), NULL); diff --git a/tests/kernel/context/src/main.c b/tests/kernel/context/src/main.c index 6c0f540287fc..08b61e4d0d0b 100644 --- a/tests/kernel/context/src/main.c +++ b/tests/kernel/context/src/main.c @@ -24,6 +24,7 @@ #include #include #include +#include /* * Include board.h from platform to get IRQ number. 
@@ -95,9 +96,6 @@ -extern u32_t _tick_get_32(void); -extern s64_t _tick_get(void); - typedef struct { int command; /* command to process */ int error; /* error value (if any) */ @@ -310,15 +308,15 @@ static void _test_kernel_interrupts(disable_int_func disable_int, int imask; /* Align to a "tick boundary" */ - tick = _tick_get_32(); - while (_tick_get_32() == tick) { + tick = z_tick_get_32(); + while (z_tick_get_32() == tick) { #if defined(CONFIG_ARCH_POSIX) k_busy_wait(1000); #endif } tick++; - while (_tick_get_32() == tick) { + while (z_tick_get_32() == tick) { #if defined(CONFIG_ARCH_POSIX) k_busy_wait(1000); #endif @@ -335,15 +333,15 @@ static void _test_kernel_interrupts(disable_int_func disable_int, count <<= 4; imask = disable_int(irq); - tick = _tick_get_32(); + tick = z_tick_get_32(); for (i = 0; i < count; i++) { - _tick_get_32(); + z_tick_get_32(); #if defined(CONFIG_ARCH_POSIX) k_busy_wait(1000); #endif } - tick2 = _tick_get_32(); + tick2 = z_tick_get_32(); /* * Re-enable interrupts before returning (for both success and failure @@ -356,13 +354,13 @@ static void _test_kernel_interrupts(disable_int_func disable_int, /* Now repeat with interrupts unlocked. 
*/ for (i = 0; i < count; i++) { - _tick_get_32(); + z_tick_get_32(); #if defined(CONFIG_ARCH_POSIX) k_busy_wait(1000); #endif } - tick2 = _tick_get_32(); + tick2 = z_tick_get_32(); zassert_not_equal(tick, tick2, "tick didn't advance as expected"); } diff --git a/tests/kernel/early_sleep/src/main.c b/tests/kernel/early_sleep/src/main.c index 590547d705a7..ba4eeeb9a366 100644 --- a/tests/kernel/early_sleep/src/main.c +++ b/tests/kernel/early_sleep/src/main.c @@ -61,7 +61,7 @@ static int ticks_to_sleep(int ticks) k_sleep(__ticks_to_ms(ticks)); stop_time = k_cycle_get_32(); - return (stop_time - start_time) / sys_clock_hw_cycles_per_tick; + return (stop_time - start_time) / sys_clock_hw_cycles_per_tick(); } diff --git a/tests/kernel/fp_sharing/src/main.c b/tests/kernel/fp_sharing/src/main.c index a9d5b2fce03a..aaf00590a76a 100644 --- a/tests/kernel/fp_sharing/src/main.c +++ b/tests/kernel/fp_sharing/src/main.c @@ -97,7 +97,6 @@ int fpu_sharing_error; static volatile unsigned int load_store_low_count; static volatile unsigned int load_store_high_count; -extern u32_t _tick_get_32(void); extern void calculate_pi_low(void); extern void calculate_pi_high(void); @@ -169,11 +168,11 @@ void load_store_low(void) * thread an opportunity to run when the low priority thread is * using the floating point registers. * - * IMPORTANT: This logic requires that sys_tick_get_32() not + * IMPORTANT: This logic requires that z_tick_get_32() not * perform any floating point operations! */ - while ((_tick_get_32() % 5) != 0) { + while ((z_tick_get_32() % 5) != 0) { /* * Use a volatile variable to prevent compiler * optimizing out the spin loop. 
diff --git a/tests/kernel/mem_pool/mem_pool/src/main.c b/tests/kernel/mem_pool/mem_pool/src/main.c index 3741adfdc6fd..4c1f47cf370a 100644 --- a/tests/kernel/mem_pool/mem_pool/src/main.c +++ b/tests/kernel/mem_pool/mem_pool/src/main.c @@ -22,8 +22,8 @@ #include #include -#define ONE_SECOND (sys_clock_ticks_per_sec) -#define TENTH_SECOND (sys_clock_ticks_per_sec / 10) +#define ONE_SECOND (CONFIG_SYS_CLOCK_TICKS_PER_SEC) +#define TENTH_SECOND (CONFIG_SYS_CLOCK_TICKS_PER_SEC / 10) #define NUM_BLOCKS 64 diff --git a/tests/kernel/mem_protect/app_memory/src/main.c b/tests/kernel/mem_protect/app_memory/src/main.c index a289c1be8324..61c4d77f97b2 100644 --- a/tests/kernel/mem_protect/app_memory/src/main.c +++ b/tests/kernel/mem_protect/app_memory/src/main.c @@ -8,6 +8,7 @@ #include #include #include +#include /** * @brief Memory protection tests @@ -28,7 +29,7 @@ struct test_struct __kernel_bss kernel_bss; struct test_struct __kernel_noinit kernel_noinit; /* Real kernel variable, check it is in the right place */ -extern volatile u64_t _sys_clock_tick_count; +extern struct _kernel _kernel; struct test_struct app_data = {3, 4, NULL}; struct test_struct app_bss; @@ -73,7 +74,7 @@ void test_app_memory(void) zassert_true(kernel_loc(&kernel_bss), "not in kernel memory"); zassert_true(kernel_loc(&kernel_noinit), "not in kernel memory"); - zassert_true(kernel_loc((void *)&_sys_clock_tick_count), + zassert_true(kernel_loc((void *)&_kernel), "not in kernel memory"); } diff --git a/tests/kernel/timer/timer_monotonic/src/main.c b/tests/kernel/timer/timer_monotonic/src/main.c index f49a78087b73..895fc7f805fc 100644 --- a/tests/kernel/timer/timer_monotonic/src/main.c +++ b/tests/kernel/timer/timer_monotonic/src/main.c @@ -19,10 +19,10 @@ int test_frequency(void) end = k_cycle_get_32(); delta = end - start; - pct = (u64_t)delta * 100 / sys_clock_hw_cycles_per_sec; + pct = (u64_t)delta * 100 / sys_clock_hw_cycles_per_sec(); printk("delta: %u expected: %u %u%%\n", delta, - 
sys_clock_hw_cycles_per_sec, pct); + sys_clock_hw_cycles_per_sec(), pct); /* Heuristic: if we're more than 10% off, throw an error */ if (pct < 90 || pct > 110) { @@ -45,7 +45,7 @@ int test_frequency(void) * * @ingroup kernel_timer_tests * - * @see k_cycle_get_32(), sys_clock_hw_cycles_per_sec + * @see k_cycle_get_32(), sys_clock_hw_cycles_per_sec() */ void test_timer(void) { @@ -54,10 +54,10 @@ void test_timer(void) errors = 0; - TC_PRINT("sys_clock_hw_cycles_per_tick = %d\n", - sys_clock_hw_cycles_per_tick); - TC_PRINT("sys_clock_hw_cycles_per_sec = %d\n", - sys_clock_hw_cycles_per_sec); + TC_PRINT("sys_clock_hw_cycles_per_tick() = %d\n", + sys_clock_hw_cycles_per_tick()); + TC_PRINT("sys_clock_hw_cycles_per_sec() = %d\n", + sys_clock_hw_cycles_per_sec()); TC_START("test monotonic timer"); diff --git a/tests/ztest/include/ztest.h b/tests/ztest/include/ztest.h index 24254e112249..ff7594294e6a 100644 --- a/tests/ztest/include/ztest.h +++ b/tests/ztest/include/ztest.h @@ -39,6 +39,8 @@ extern "C" { #define CONFIG_NUM_COOP_PRIORITIES 16 #define CONFIG_COOP_ENABLED 1 #define CONFIG_PREEMPT_ENABLED 1 +#define CONFIG_SYS_CLOCK_TICKS_PER_SEC 100 +#define CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC 10000000 /* FIXME: Properly integrate with Zephyr's arch specific code */ #define CONFIG_X86 1 #define PRINT printf