@@ -172,11 +172,11 @@ static inline void clear_event_registration(struct k_poll_event *event)
 
 /* must be called with interrupts locked */
 static inline void clear_event_registrations(struct k_poll_event *events,
-					     int last_registered,
+					     int num_events,
 					     k_spinlock_key_t key)
 {
-	for (; last_registered >= 0; last_registered--) {
-		clear_event_registration(&events[last_registered]);
+	while (num_events--) {
+		clear_event_registration(&events[num_events]);
 		k_spin_unlock(&lock, key);
 		key = k_spin_lock(&lock);
 	}
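Review note: the countdown rewrite is behavior-preserving. The old loop walked an inclusive highest index (last_registered, with -1 meaning nothing registered); the new one takes a plain count (num_events, with 0 meaning the same thing), so callers now pass a count rather than a last index. A minimal standalone sketch of the equivalence, using a hypothetical size N and a plain array instead of kernel objects:

#include <assert.h>

int main(void)
{
	enum { N = 4 };
	int old_order[N], new_order[N];
	int pos = 0;

	/* old style: inclusive last index, -1 sentinel */
	for (int last_registered = N - 1; last_registered >= 0; last_registered--) {
		old_order[pos++] = last_registered;
	}

	/* new style: count, 0 sentinel */
	pos = 0;
	int num_events = N;
	while (num_events--) {
		new_order[pos++] = num_events;
	}

	/* both visit indices N-1 .. 0 in the same order */
	for (int i = 0; i < N; i++) {
		assert(old_order[i] == new_order[i]);
	}
	return 0;
}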
@@ -188,36 +188,77 @@ static inline void set_event_ready(struct k_poll_event *event, u32_t state)
 	event->state |= state;
 }
 
-int z_impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
+static inline int register_events(struct k_poll_event *events,
+				  int num_events,
+				  struct _poller *poller,
+				  bool just_check)
 {
-	__ASSERT(!z_arch_is_in_isr(), "");
-	__ASSERT(events != NULL, "NULL events\n");
-	__ASSERT(num_events > 0, "zero events\n");
-
-	int last_registered = -1, rc;
-	k_spinlock_key_t key;
-
-	struct _poller poller = { .thread = _current, .is_polling = true, };
+	int events_registered = 0;
 
-	/* find events whose condition is already fulfilled */
 	for (int ii = 0; ii < num_events; ii++) {
+		k_spinlock_key_t key;
 		u32_t state;
 
 		key = k_spin_lock(&lock);
 		if (is_condition_met(&events[ii], &state)) {
 			set_event_ready(&events[ii], state);
-			poller.is_polling = false;
-		} else if (timeout != K_NO_WAIT && poller.is_polling) {
-			rc = register_event(&events[ii], &poller);
+			poller->is_polling = false;
+		} else if (!just_check && poller->is_polling) {
+			int rc = register_event(&events[ii], poller);
 			if (rc == 0) {
-				++last_registered;
+				events_registered += 1;
 			} else {
 				__ASSERT(false, "unexpected return code\n");
 			}
 		}
 		k_spin_unlock(&lock, key);
 	}
 
+	return events_registered;
+}
+
+static int k_poll_poller_cb(struct k_poll_event *event, u32_t state)
+{
+	struct k_thread *thread = event->poller->thread;
+
+	__ASSERT(thread != NULL, "poller should have a thread\n");
+
+	if (!z_is_thread_pending(thread)) {
+		return 0;
+	}
+
+	if (z_is_thread_timeout_expired(thread)) {
+		return -EAGAIN;
+	}
+
+	z_unpend_thread(thread);
+	z_arch_thread_return_value_set(thread,
+		state == K_POLL_STATE_CANCELLED ? -EINTR : 0);
+
+	if (!z_is_thread_ready(thread)) {
+		return 0;
+	}
+
+	z_ready_thread(thread);
+
+	return 0;
+}
+
+int z_impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
+{
+	int events_registered;
+	k_spinlock_key_t key;
+	struct _poller poller = { .is_polling = true,
+				  .thread = _current,
+				  .cb = k_poll_poller_cb };
+
+	__ASSERT(!z_is_in_isr(), "");
+	__ASSERT(events != NULL, "NULL events\n");
+	__ASSERT(num_events > 0, "zero events\n");
+
+	events_registered = register_events(events, num_events, &poller,
+					    (timeout == K_NO_WAIT));
+
 	key = k_spin_lock(&lock);
 
 	/*
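Review note: after this hunk, k_poll() no longer embeds the thread-wakeup logic itself; it installs k_poll_poller_cb in the poller, and signal_poll_event() (last hunk below) dispatches through that callback. A usage sketch of the unchanged public API from this era that exercises the new path; the names my_sig, poller_thread and producer are illustrative only, and it assumes the post-1.14 k_poll_signal_raise() spelling:

#include <zephyr.h>

static struct k_poll_signal my_sig;

void poller_thread(void)
{
	struct k_poll_event evt = K_POLL_EVENT_INITIALIZER(
		K_POLL_TYPE_SIGNAL, K_POLL_MODE_NOTIFY_ONLY, &my_sig);

	k_poll_signal_init(&my_sig);

	/* register_events() pends this thread; when the signal fires,
	 * signal_poll_event() runs k_poll_poller_cb(), which unpends
	 * and readies it again.
	 */
	if (k_poll(&evt, 1, K_FOREVER) == 0 &&
	    evt.state == K_POLL_STATE_SIGNALED) {
		/* evt.signal->result now holds the raised value (42) */
	}
}

void producer(void)
{
	k_poll_signal_raise(&my_sig, 42);
}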
@@ -226,7 +267,7 @@ int z_impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 	 * because one of the events registered has had its state changed.
 	 */
 	if (!poller.is_polling) {
-		clear_event_registrations(events, last_registered, key);
+		clear_event_registrations(events, events_registered, key);
 		k_spin_unlock(&lock, key);
 		return 0;
 	}
@@ -252,7 +293,7 @@ int z_impl_k_poll(struct k_poll_event *events, int num_events, s32_t timeout)
 	 * return code first, which invalidates the whole list of event states.
 	 */
 	key = k_spin_lock(&lock);
-	clear_event_registrations(events, last_registered, key);
+	clear_event_registrations(events, events_registered, key);
 	k_spin_unlock(&lock, key);
 
 	return swap_rc;
@@ -338,38 +379,23 @@ static inline int z_vrfy_k_poll(struct k_poll_event *events,
 /* must be called with interrupts locked */
 static int signal_poll_event(struct k_poll_event *event, u32_t state)
 {
-	if (!event->poller) {
-		goto ready_event;
-	}
-
-	struct k_thread *thread = event->poller->thread;
-
-	__ASSERT(event->poller->thread != NULL,
-		 "poller should have a thread\n");
-
-	event->poller->is_polling = false;
+	struct _poller *poller = event->poller;
+	int retcode = 0;
 
-	if (!z_is_thread_pending(thread)) {
-		goto ready_event;
-	}
-
-	if (z_is_thread_timeout_expired(thread)) {
-		return -EAGAIN;
-	}
+	if (poller) {
+		if (poller->cb != NULL) {
+			retcode = poller->cb(event, state);
+		}
 
-	z_unpend_thread(thread);
-	z_arch_thread_return_value_set(thread,
-				       state == K_POLL_STATE_CANCELLED ? -EINTR : 0);
+		poller->is_polling = false;
 
-	if (!z_is_thread_ready(thread)) {
-		goto ready_event;
+		if (retcode < 0) {
+			return retcode;
+		}
 	}
 
-	z_ready_thread(thread);
-
-ready_event:
 	set_event_ready(event, state);
-	return 0;
+	return retcode;
 }
 
 void z_handle_obj_poll_events(sys_dlist_t *events, u32_t state)
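Review note: with the thread handling moved into k_poll_poller_cb, signal_poll_event() reduces to a generic dispatcher: run the poller's callback if there is one, clear is_polling, and mark the event ready unless the callback reported an error. A condensed standalone model of that rule, with hypothetical stand-in types rather than the kernel's own:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-ins for the kernel types, to show the rule only. */
struct poller {
	bool is_polling;
	int (*cb)(void *event, unsigned int state);
};

struct event {
	struct poller *poller;
	unsigned int state;
};

/* Mirrors signal_poll_event(): a negative cb return suppresses the
 * set_event_ready() step; 0 (or no poller/cb at all) lets it proceed.
 */
static int model_signal_poll_event(struct event *event, unsigned int state)
{
	struct poller *poller = event->poller;
	int retcode = 0;

	if (poller) {
		if (poller->cb != NULL) {
			retcode = poller->cb(event, state);
		}
		poller->is_polling = false;
		if (retcode < 0) {
			return retcode; /* e.g. -EAGAIN: a timeout raced us */
		}
	}

	event->state |= state; /* the set_event_ready() step */
	return retcode;
}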