@@ -289,13 +289,18 @@ static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
 
 static DEFINE_PER_CPU(unsigned long, irqsave_flags);
 
-notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
+static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
 {
 	unsigned long flags;
 
 	local_irq_save(flags);
 	__bpf_spin_lock(lock);
 	__this_cpu_write(irqsave_flags, flags);
+}
+
+notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
+{
+	__bpf_spin_lock_irqsave(lock);
 	return 0;
 }
 
@@ -306,13 +311,18 @@ const struct bpf_func_proto bpf_spin_lock_proto = {
 	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
 };
 
-notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
+static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
 {
 	unsigned long flags;
 
 	flags = __this_cpu_read(irqsave_flags);
 	__bpf_spin_unlock(lock);
 	local_irq_restore(flags);
+}
+
+notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
+{
+	__bpf_spin_unlock_irqrestore(lock);
 	return 0;
 }
 
@@ -333,9 +343,9 @@ void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
 	else
 		lock = dst + map->spin_lock_off;
 	preempt_disable();
-	____bpf_spin_lock(lock);
+	__bpf_spin_lock_irqsave(lock);
 	copy_map_value(map, dst, src);
-	____bpf_spin_unlock(lock);
+	__bpf_spin_unlock_irqrestore(lock);
 	preempt_enable();
 }