@@ -500,14 +500,17 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {
 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
 };
 
-static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
-static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd);
+static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
+struct bpf_nested_pt_regs {
+	struct pt_regs regs[3];
+};
+static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
+static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
 
 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
 {
-	struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd);
-	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
+	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
 	struct perf_raw_frag frag = {
 		.copy		= ctx_copy,
 		.size		= ctx_size,
@@ -522,12 +525,25 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 			.data	= meta,
 		},
 	};
+	struct perf_sample_data *sd;
+	struct pt_regs *regs;
+	u64 ret;
+
+	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
+		ret = -EBUSY;
+		goto out;
+	}
+	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
+	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
 
 	perf_fetch_caller_regs(regs);
 	perf_sample_data_init(sd, 0, 0);
 	sd->raw = &raw;
 
-	return __bpf_perf_event_output(regs, map, flags, sd);
+	ret = __bpf_perf_event_output(regs, map, flags, sd);
+out:
+	this_cpu_dec(bpf_event_output_nest_level);
+	return ret;
 }
 
 BPF_CALL_0(bpf_get_current_task)
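
This hunk guards bpf_event_output() against re-entrancy: instead of a single per-CPU pt_regs/perf_sample_data scratch buffer, it keeps a small array of them and indexes it with a per-CPU nesting counter, so a nested invocation (for instance from an NMI or a nested tracepoint firing while another BPF program is mid-call on the same CPU) gets its own slot rather than clobbering the outer call's state. Below is a minimal userspace sketch of the same increment/check/decrement pattern; it assumes C11 _Thread_local as a stand-in for DEFINE_PER_CPU, and event_output() and struct scratch are hypothetical names. Only the depth of 3 and the control flow are taken from the diff.

#include <stdio.h>

#define MAX_NEST 3            /* matches regs[3] in the diff */

struct scratch { int data; };   /* hypothetical per-level scratch buffer */

/* _Thread_local plays the role of DEFINE_PER_CPU in this sketch. */
static _Thread_local int nest_level;
static _Thread_local struct scratch bufs[MAX_NEST];

/* Emit an event using a scratch buffer private to the current
 * nesting depth, so re-entrant calls cannot clobber it. */
static int event_output(int payload)
{
	struct scratch *s;
	int ret;

	/* Claim the next nesting level (this_cpu_inc_return() analogue). */
	if (++nest_level > MAX_NEST) {
		ret = -1;                 /* the kernel returns -EBUSY here */
		goto out;
	}
	s = &bufs[nest_level - 1];        /* level N uses slot N - 1 */
	s->data = payload;

	/* ... deliver s->data; a nested event_output() call made from
	 * here would safely land in the next slot ... */
	printf("level %d: %d\n", nest_level, s->data);
	ret = 0;
out:
	/* Always release the level, even on the error path, because the
	 * counter was already incremented before the bounds check. */
	--nest_level;
	return ret;
}

int main(void)
{
	return event_output(42);
}

Note the asymmetry this explains in the patch itself: this_cpu_inc_return() has already bumped the level before the ARRAY_SIZE() check, so the -EBUSY exit must still go through the out label to decrement it.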