@@ -193,8 +193,8 @@ struct perf_sched {
 	 * weird events, such as a task being switched away that is not current.
 	 */
 	struct perf_cpu	 max_cpu;
-	u32		 curr_pid[MAX_CPUS];
-	struct thread	 *curr_thread[MAX_CPUS];
+	u32		 *curr_pid;
+	struct thread	 **curr_thread;
 	char		 next_shortname1;
 	char		 next_shortname2;
 	unsigned int	 replay_repeat;
@@ -224,7 +224,7 @@ struct perf_sched {
 	u64		 run_avg;
 	u64		 all_runtime;
 	u64		 all_count;
-	u64		 cpu_last_switched[MAX_CPUS];
+	u64		 *cpu_last_switched;
 	struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
 	struct list_head sort_list, cmp_pid;
 	bool		 force;
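Note on the two struct hunks above: the per-CPU state moves from fixed-size arrays embedded in struct perf_sched to plain pointers. Assuming MAX_CPUS is 4096 (its value in builtin-sched.c) and 8-byte pointers, the three arrays previously added roughly 80 KB to the struct; after this change the storage lives on the heap, allocated once in cmd_sched() and released at the out: label, as the later hunks show.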
@@ -3590,7 +3590,22 @@ int cmd_sched(int argc, const char **argv)
 
 	mutex_init(&sched.start_work_mutex);
 	mutex_init(&sched.work_done_wait_mutex);
-	for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
+	sched.curr_thread = calloc(MAX_CPUS, sizeof(*sched.curr_thread));
+	if (!sched.curr_thread) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	sched.cpu_last_switched = calloc(MAX_CPUS, sizeof(*sched.cpu_last_switched));
+	if (!sched.cpu_last_switched) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	sched.curr_pid = malloc(MAX_CPUS * sizeof(*sched.curr_pid));
+	if (!sched.curr_pid) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	for (i = 0; i < MAX_CPUS; i++)
 		sched.curr_pid[i] = -1;
 
 	argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
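The allocation order above follows a common C idiom: calloc() zero-fills, which is already the right sentinel for curr_thread (NULL) and cpu_last_switched (0), while curr_pid needs every slot set to -1, so it is allocated with malloc() and filled explicitly. A minimal standalone sketch of the same pattern, using a hypothetical sched_state struct rather than the real struct perf_sched:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct sched_state {
	uint32_t *curr_pid;          /* -1 means "no task seen on this CPU yet" */
	uint64_t *cpu_last_switched; /* 0 means "never switched", so calloc suffices */
};

static int sched_state_init(struct sched_state *s, int max_cpus)
{
	/* calloc() zero-fills, which is already the wanted sentinel here */
	s->cpu_last_switched = calloc(max_cpus, sizeof(*s->cpu_last_switched));
	if (!s->cpu_last_switched)
		return -ENOMEM;

	/* the sentinel is -1, not 0, so malloc() plus an explicit fill loop */
	s->curr_pid = malloc(max_cpus * sizeof(*s->curr_pid));
	if (!s->curr_pid)
		return -ENOMEM;
	for (int i = 0; i < max_cpus; i++)
		s->curr_pid[i] = (uint32_t)-1;

	return 0;
}

static void sched_state_exit(struct sched_state *s)
{
	/* free(NULL) is a no-op, so this is safe after a partial init failure */
	free(s->curr_pid);
	free(s->cpu_last_switched);
}

int main(void)
{
	struct sched_state s = { 0 }; /* zeroed pointers keep cleanup safe */
	int ret = sched_state_init(&s, 4096);

	if (ret)
		fprintf(stderr, "init failed: %d\n", ret);
	sched_state_exit(&s);
	return ret ? 1 : 0;
}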
@@ -3659,6 +3674,9 @@ int cmd_sched(int argc, const char **argv)
 	}
 
 out:
+	free(sched.curr_pid);
+	free(sched.cpu_last_switched);
+	free(sched.curr_thread);
 	mutex_destroy(&sched.start_work_mutex);
 	mutex_destroy(&sched.work_done_wait_mutex);
 
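Freeing all three pointers unconditionally at the shared out: label is safe even when one of the earlier allocations failed: sched is defined with designated initializers in builtin-sched.c, so members that were never assigned remain NULL, and free(NULL) is defined by C to be a no-op.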