 * SPDX-License-Identifier: Apache-2.0
 */

- #include "posix_clock.h"
-
#include <zephyr/kernel.h>
#include <errno.h>
#include <zephyr/posix/time.h>
#include <zephyr/posix/sys/time.h>
#include <zephyr/posix/unistd.h>
- #include <zephyr/internal/syscall_handler.h>
- #include <zephyr/spinlock.h>
+ #include <zephyr/sys/realtime.h>
+
+ static bool __posix_clock_validate_timespec(const struct timespec *ts)
+ {
+ 	return ts->tv_sec >= 0 && ts->tv_nsec >= 0 && ts->tv_nsec < NSEC_PER_SEC;
+ }
+
+ static void __posix_clock_k_ticks_to_timespec(struct timespec *ts, int64_t ticks)
+ {
+ 	uint64_t elapsed_secs = ticks / CONFIG_SYS_CLOCK_TICKS_PER_SEC;
+ 	uint64_t nremainder = ticks - elapsed_secs * CONFIG_SYS_CLOCK_TICKS_PER_SEC;
+
+ 	ts->tv_sec = (time_t) elapsed_secs;
+ 	/* For ns 32 bit conversion can be used since its smaller than 1sec. */
+ 	ts->tv_nsec = (int32_t) k_ticks_to_ns_floor32(nremainder);
+ }
+
+ static void __posix_clock_get_monotonic(struct timespec *ts)
+ {
+ 	__posix_clock_k_ticks_to_timespec(ts, k_uptime_ticks());
+ }
+
+ static void __posix_clock_msec_to_timespec(struct timespec *ts, const int64_t *ms)
+ {
+ 	ts->tv_sec = *ms / MSEC_PER_SEC;
+ 	ts->tv_nsec = (*ms % MSEC_PER_SEC) * NSEC_PER_MSEC;
+ }
+
+ static uint64_t __posix_clock_timespec_to_usec(const struct timespec *ts)
+ {
+ 	uint64_t usec;
+
+ 	usec = ts->tv_sec;
+ 	usec *= USEC_PER_SEC;
+ 	usec += DIV_ROUND_UP(ts->tv_nsec, NSEC_PER_USEC);
+ 	return usec;
+ }
+
+ static uint64_t __posix_clock_timespec_to_msec(const struct timespec *ts)
+ {
+ 	uint64_t msec;
+
+ 	msec = ts->tv_sec;
+ 	msec *= MSEC_PER_SEC;
+ 	msec += DIV_ROUND_UP(ts->tv_nsec, NSEC_PER_MSEC);
+ 	return msec;
+ }
+
+ /* Check if a_ts is less than b_ts (a_ts < b_ts) */
+ static bool __posix_clock_timespec_less_than(const struct timespec *a_ts,
+ 					     const struct timespec *b_ts)
+ {
+ 	return (a_ts->tv_sec < b_ts->tv_sec) ||
+ 	       (a_ts->tv_sec == b_ts->tv_sec && a_ts->tv_nsec < b_ts->tv_nsec);
+ }

/*
- * `k_uptime_get` returns a timestamp based on an always increasing
- * value from the system start. To support the `CLOCK_REALTIME`
- * clock, this `rt_clock_base` records the time that the system was
- * started. This can either be set via 'clock_settime', or could be
- * set from a real time clock, if such hardware is present.
+ * Subtract b_ts from a_ts placing result in res_ts (ret_ts = a_ts - b_ts)
+ * Presumes a_ts >= b_ts
 */
- static struct timespec rt_clock_base;
- static struct k_spinlock rt_clock_base_lock;
+ static void __posix_clock_timespec_subtract(struct timespec *res_ts,
+ 					    const struct timespec *a_ts,
+ 					    const struct timespec *b_ts)
+ {
+ 	res_ts->tv_sec = a_ts->tv_sec - b_ts->tv_sec;

- /**
- * @brief Get clock time specified by clock_id.
- *
- * See IEEE 1003.1
- */
- int z_impl___posix_clock_get_base(clockid_t clock_id, struct timespec *base)
+ 	if (b_ts->tv_nsec <= a_ts->tv_nsec) {
+ 		res_ts->tv_nsec = a_ts->tv_nsec - b_ts->tv_nsec;
+ 	} else {
+ 		res_ts->tv_sec--;
+ 		res_ts->tv_nsec = a_ts->tv_nsec + NSEC_PER_SEC - b_ts->tv_nsec;
+ 	}
+ }
+
+ /* Add b_ts to a_ts placing result in res_ts (ret_ts = a_ts + b_ts) */
+ static void __posix_clock_timespec_add(struct timespec *res_ts,
+ 				       const struct timespec *a_ts,
+ 				       const struct timespec *b_ts)
{
- 	switch (clock_id) {
- 	case CLOCK_MONOTONIC:
- 		base->tv_sec = 0;
- 		base->tv_nsec = 0;
- 		break;
+ 	res_ts->tv_sec = a_ts->tv_sec + b_ts->tv_sec;
+ 	res_ts->tv_nsec = a_ts->tv_nsec + b_ts->tv_nsec;

- 	case CLOCK_REALTIME:
- 		K_SPINLOCK(&rt_clock_base_lock) {
- 			*base = rt_clock_base;
- 		}
- 		break;
+ 	if (res_ts->tv_nsec >= NSEC_PER_SEC) {
+ 		res_ts->tv_sec++;
+ 		res_ts->tv_nsec -= NSEC_PER_SEC;
+ 	}
+ }

- 	default:
- 		errno = EINVAL;
- 		return -1;
+ static void __posix_clock_timespec_copy(struct timespec *des_ts, const struct timespec *src_ts)
+ {
+ 	des_ts->tv_sec = src_ts->tv_sec;
+ 	des_ts->tv_nsec = src_ts->tv_nsec;
+ }
+
+ static void __posix_clock_get_realtime(struct timespec *ts)
+ {
+ 	int res;
+ 	int64_t timestamp_ms;
+
+ 	res = sys_realtime_get_timestamp(&timestamp_ms);
+ 	if (timestamp_ms < 0) {
+ 		/* timespec can't be negative */
+ 		ts->tv_sec = 0;
+ 		ts->tv_nsec = 0;
	}

- 	return 0;
+ 	__posix_clock_msec_to_timespec(ts, &timestamp_ms);
}

- #ifdef CONFIG_USERSPACE
- int z_vrfy___posix_clock_get_base(clockid_t clock_id, struct timespec *ts)
+ static int __posix_clock_set_realtime(const struct timespec *ts)
{
- 	K_OOPS(K_SYSCALL_MEMORY_WRITE(ts, sizeof(*ts)));
- 	return z_impl___posix_clock_get_base(clock_id, ts);
+ 	int64_t timestamp_ms;
+ 	int res;
+
+ 	timestamp_ms = (int64_t)__posix_clock_timespec_to_msec(ts);
+
+ 	res = sys_realtime_set_timestamp(&timestamp_ms);
+ 	if (res < 0) {
+ 		errno = EINVAL;
+ 		return -1;
+ 	}
+
+ 	return 0;
}
- #include <zephyr/syscalls/__posix_clock_get_base_mrsh.c>
- #endif

int clock_gettime(clockid_t clock_id, struct timespec *ts)
{
- 	struct timespec base;
+ 	int res;

	switch (clock_id) {
	case CLOCK_MONOTONIC:
- 		base.tv_sec = 0;
- 		base.tv_nsec = 0;
+ 		__posix_clock_get_monotonic(ts);
+ 		res = 0;
		break;

	case CLOCK_REALTIME:
- 		(void)__posix_clock_get_base(clock_id, &base);
+ 		__posix_clock_get_realtime(ts);
+ 		res = 0;
		break;

	default:
		errno = EINVAL;
- 		return -1;
+ 		res = -1;
	}

- 	uint64_t ticks = k_uptime_ticks();
- 	uint64_t elapsed_secs = ticks / CONFIG_SYS_CLOCK_TICKS_PER_SEC;
- 	uint64_t nremainder = ticks - elapsed_secs * CONFIG_SYS_CLOCK_TICKS_PER_SEC;
-
- 	ts->tv_sec = (time_t) elapsed_secs;
- 	/* For ns 32 bit conversion can be used since its smaller than 1sec. */
- 	ts->tv_nsec = (int32_t) k_ticks_to_ns_floor32(nremainder);
-
- 	ts->tv_sec += base.tv_sec;
- 	ts->tv_nsec += base.tv_nsec;
- 	if (ts->tv_nsec >= NSEC_PER_SEC) {
- 		ts->tv_sec++;
- 		ts->tv_nsec -= NSEC_PER_SEC;
- 	}
-
- 	return 0;
+ 	return res;
}

int clock_getres(clockid_t clock_id, struct timespec *res)
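
The helpers added above are internal to this file; applications reach them only through the standard POSIX entry points. Below is a minimal usage sketch, not part of this change, showing how `clock_gettime()` and `clock_settime()` would exercise the new `sys_realtime`-backed `CLOCK_REALTIME` path. It assumes a Zephyr application with the POSIX clock option enabled; the function name and the one-hour offset are purely illustrative.

```c
#include <errno.h>
#include <stdio.h>
#include <time.h>

/* Illustrative only: round-trips CLOCK_REALTIME through the reworked code path. */
static void realtime_roundtrip_example(void)
{
	struct timespec now;

	/* Read the wall-clock time; with this patch it derives from
	 * sys_realtime_get_timestamp().
	 */
	if (clock_gettime(CLOCK_REALTIME, &now) == 0) {
		printf("realtime: %lld.%09ld\n", (long long)now.tv_sec, now.tv_nsec);
	}

	/* Step the wall clock forward by one hour; clock_settime() now forwards
	 * the value to sys_realtime_set_timestamp() after validating the timespec.
	 */
	now.tv_sec += 3600;
	if (clock_settime(CLOCK_REALTIME, &now) != 0) {
		/* EINVAL is reported for a bad clock id or an unnormalized timespec. */
		printf("clock_settime failed: %d\n", errno);
	}
}
```
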
@@ -130,31 +190,12 @@ int clock_getres(clockid_t clock_id, struct timespec *res)
 */
int clock_settime(clockid_t clock_id, const struct timespec *tp)
{
- 	struct timespec base;
- 	k_spinlock_key_t key;
-
- 	if (clock_id != CLOCK_REALTIME) {
+ 	if (clock_id != CLOCK_REALTIME || !__posix_clock_validate_timespec(tp)) {
		errno = EINVAL;
		return -1;
	}

- 	if (tp->tv_nsec < 0 || tp->tv_nsec >= NSEC_PER_SEC) {
- 		errno = EINVAL;
- 		return -1;
- 	}
-
- 	uint64_t elapsed_nsecs = k_ticks_to_ns_floor64(k_uptime_ticks());
- 	int64_t delta = (int64_t)NSEC_PER_SEC * tp->tv_sec + tp->tv_nsec
- 		- elapsed_nsecs;
-
- 	base.tv_sec = delta / NSEC_PER_SEC;
- 	base.tv_nsec = delta % NSEC_PER_SEC;
-
- 	key = k_spin_lock(&rt_clock_base_lock);
- 	rt_clock_base = base;
- 	k_spin_unlock(&rt_clock_base_lock, key);
-
- 	return 0;
+ 	return __posix_clock_set_realtime(tp);
}

/*
@@ -195,10 +236,10 @@ int usleep(useconds_t useconds)
static int __z_clock_nanosleep(clockid_t clock_id, int flags, const struct timespec *rqtp,
			       struct timespec *rmtp)
{
- 	uint64_t ns;
- 	uint64_t us;
- 	uint64_t uptime_ns;
- 	k_spinlock_key_t key;
+ 	volatile uint64_t usec;
+ 	struct timespec clock_ts;
+ 	struct timespec rel_ts;
+ 	struct timespec abs_ts;
	const bool update_rmtp = rmtp != NULL;

	if (!(clock_id == CLOCK_REALTIME || clock_id == CLOCK_MONOTONIC)) {
@@ -211,42 +252,35 @@ static int __z_clock_nanosleep(clockid_t clock_id, int flags, const struct times
		return -1;
	}

- 	if (rqtp->tv_sec < 0 || rqtp->tv_nsec < 0 || rqtp->tv_nsec >= NSEC_PER_SEC) {
+ 	if (!__posix_clock_validate_timespec(rqtp)) {
		errno = EINVAL;
		return -1;
	}

- 	if ((flags & TIMER_ABSTIME) == 0 &&
- 	    unlikely(rqtp->tv_sec >= ULLONG_MAX / NSEC_PER_SEC)) {
-
- 		ns = rqtp->tv_nsec + NSEC_PER_SEC
- 			+ k_sleep(K_SECONDS(rqtp->tv_sec - 1)) * NSEC_PER_MSEC;
- 	} else {
- 		ns = rqtp->tv_sec * NSEC_PER_SEC + rqtp->tv_nsec;
- 	}
-
- 	uptime_ns = k_cyc_to_ns_ceil64(k_cycle_get_32());
+ 	if ((flags & TIMER_ABSTIME) && clock_id == CLOCK_REALTIME) {
+ 		__posix_clock_get_realtime(&clock_ts);

- 	if (flags & TIMER_ABSTIME && clock_id == CLOCK_REALTIME) {
- 		key = k_spin_lock(&rt_clock_base_lock);
- 		ns -= rt_clock_base.tv_sec * NSEC_PER_SEC + rt_clock_base.tv_nsec;
- 		k_spin_unlock(&rt_clock_base_lock, key);
- 	}
+ 		if (__posix_clock_timespec_less_than(rqtp, &clock_ts)) {
+ 			goto post_sleep;
+ 		}

- 	if ((flags & TIMER_ABSTIME) == 0) {
- 		ns += uptime_ns;
+ 		__posix_clock_timespec_subtract(&rel_ts, rqtp, &clock_ts);
+ 		__posix_clock_get_monotonic(&clock_ts);
+ 		__posix_clock_timespec_add(&abs_ts, &rel_ts, &clock_ts);
+ 	} else if (flags & TIMER_ABSTIME) {
+ 		__posix_clock_timespec_copy(&abs_ts, rqtp);
+ 	} else {
+ 		__posix_clock_get_monotonic(&clock_ts);
+ 		__posix_clock_timespec_add(&abs_ts, rqtp, &clock_ts);
	}

- 	if (ns <= uptime_ns) {
- 		goto do_rmtp_update;
- 	}
+ 	usec = __posix_clock_timespec_to_usec(&abs_ts);

- 	us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
	do {
- 		us = k_sleep(K_TIMEOUT_ABS_US(us)) * 1000;
- 	} while (us != 0);
+ 		usec = k_sleep(K_TIMEOUT_ABS_US(usec)) * USEC_PER_MSEC;
+ 	} while (usec != 0);

- do_rmtp_update:
+ post_sleep:
	if (update_rmtp) {
		rmtp->tv_sec = 0;
		rmtp->tv_nsec = 0;
@@ -298,23 +332,3 @@ int clock_getcpuclockid(pid_t pid, clockid_t *clock_id)

	return 0;
}
-
- #ifdef CONFIG_ZTEST
- #include <zephyr/ztest.h>
- static void reset_clock_base(void)
- {
- 	K_SPINLOCK(&rt_clock_base_lock) {
- 		rt_clock_base = (struct timespec){0};
- 	}
- }
-
- static void clock_base_reset_rule_after(const struct ztest_unit_test *test, void *data)
- {
- 	ARG_UNUSED(test);
- 	ARG_UNUSED(data);
-
- 	reset_clock_base();
- }
-
- ZTEST_RULE(clock_base_reset_rule, NULL, clock_base_reset_rule_after);
- #endif /* CONFIG_ZTEST */
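
As a usage note rather than part of the diff: the rewritten `__z_clock_nanosleep()` converts an absolute `CLOCK_REALTIME` deadline into a relative interval and then into a monotonic absolute timeout before sleeping, and it returns immediately for deadlines already in the past. Below is a minimal caller sketch, assuming the POSIX API is enabled; the 500 ms deadline and the function name are chosen purely for illustration.

```c
#include <time.h>

/* Sleep until an absolute CLOCK_REALTIME deadline 500 ms from now.
 * Illustrative only: exercises the TIMER_ABSTIME + CLOCK_REALTIME branch above.
 */
static int sleep_until_example(void)
{
	struct timespec deadline;

	if (clock_gettime(CLOCK_REALTIME, &deadline) != 0) {
		return -1;
	}

	deadline.tv_nsec += 500000000L;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	/* A deadline already in the past makes the implementation skip the sleep
	 * (the goto post_sleep case) and return without blocking.
	 */
	return clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &deadline, NULL);
}
```
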