@@ -3199,14 +3199,44 @@ static void perf_sched__merge_lat(struct perf_sched *sched)
 	}
 }
 
+static int setup_cpus_switch_event(struct perf_sched *sched)
+{
+	unsigned int i;
+
+	sched->cpu_last_switched = calloc(MAX_CPUS, sizeof(*(sched->cpu_last_switched)));
+	if (!sched->cpu_last_switched)
+		return -1;
+
+	sched->curr_pid = malloc(MAX_CPUS * sizeof(*(sched->curr_pid)));
+	if (!sched->curr_pid) {
+		zfree(&sched->cpu_last_switched);
+		return -1;
+	}
+
+	for (i = 0; i < MAX_CPUS; i++)
+		sched->curr_pid[i] = -1;
+
+	return 0;
+}
+
+static void free_cpus_switch_event(struct perf_sched *sched)
+{
+	zfree(&sched->curr_pid);
+	zfree(&sched->cpu_last_switched);
+}
+
 static int perf_sched__lat(struct perf_sched *sched)
 {
+	int rc = -1;
 	struct rb_node *next;
 
 	setup_pager();
 
+	if (setup_cpus_switch_event(sched))
+		return rc;
+
 	if (perf_sched__read_events(sched))
-		return -1;
+		goto out_free_cpus_switch_event;
 
 	perf_sched__merge_lat(sched);
 	perf_sched__sort_lat(sched);
@@ -3235,7 +3265,11 @@ static int perf_sched__lat(struct perf_sched *sched)
 	print_bad_events(sched);
 	printf("\n");
 
-	return 0;
+	rc = 0;
+
+out_free_cpus_switch_event:
+	free_cpus_switch_event(sched);
+	return rc;
 }
 
 static int setup_map_cpus(struct perf_sched *sched)
@@ -3302,9 +3336,12 @@ static int perf_sched__map(struct perf_sched *sched)
 	if (!sched->curr_thread)
 		return rc;
 
-	if (setup_map_cpus(sched))
+	if (setup_cpus_switch_event(sched))
 		goto out_free_curr_thread;
 
+	if (setup_map_cpus(sched))
+		goto out_free_cpus_switch_event;
+
 	if (setup_color_pids(sched))
 		goto out_put_map_cpus;
 
@@ -3328,6 +3365,9 @@ static int perf_sched__map(struct perf_sched *sched)
 	zfree(&sched->map.comp_cpus);
 	perf_cpu_map__put(sched->map.cpus);
 
+out_free_cpus_switch_event:
+	free_cpus_switch_event(sched);
+
 out_free_curr_thread:
 	zfree(&sched->curr_thread);
 	return rc;
@@ -3341,14 +3381,18 @@ static int perf_sched__replay(struct perf_sched *sched)
 	mutex_init(&sched->start_work_mutex);
 	mutex_init(&sched->work_done_wait_mutex);
 
+	ret = setup_cpus_switch_event(sched);
+	if (ret)
+		goto out_mutex_destroy;
+
 	calibrate_run_measurement_overhead(sched);
 	calibrate_sleep_measurement_overhead(sched);
 
 	test_calibrations(sched);
 
 	ret = perf_sched__read_events(sched);
 	if (ret)
-		goto out_mutex_destroy;
+		goto out_free_cpus_switch_event;
 
 	printf("nr_run_events: %ld\n", sched->nr_run_events);
 	printf("nr_sleep_events: %ld\n", sched->nr_sleep_events);
@@ -3374,6 +3418,9 @@ static int perf_sched__replay(struct perf_sched *sched)
 	sched->thread_funcs_exit = true;
 	destroy_tasks(sched);
 
+out_free_cpus_switch_event:
+	free_cpus_switch_event(sched);
+
 out_mutex_destroy:
 	mutex_destroy(&sched->start_work_mutex);
 	mutex_destroy(&sched->work_done_wait_mutex);
@@ -3612,21 +3659,7 @@ int cmd_sched(int argc, const char **argv)
 		.switch_event	= replay_switch_event,
 		.fork_event	= replay_fork_event,
 	};
-	unsigned int i;
-	int ret = 0;
-
-	sched.cpu_last_switched = calloc(MAX_CPUS, sizeof(*sched.cpu_last_switched));
-	if (!sched.cpu_last_switched) {
-		ret = -ENOMEM;
-		goto out;
-	}
-	sched.curr_pid = malloc(MAX_CPUS * sizeof(*sched.curr_pid));
-	if (!sched.curr_pid) {
-		ret = -ENOMEM;
-		goto out;
-	}
-	for (i = 0; i < MAX_CPUS; i++)
-		sched.curr_pid[i] = -1;
+	int ret;
 
 	argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
 					sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
@@ -3637,9 +3670,9 @@ int cmd_sched(int argc, const char **argv)
 	 * Aliased to 'perf script' for now:
 	 */
 	if (!strcmp(argv[0], "script")) {
-		ret = cmd_script(argc, argv);
+		return cmd_script(argc, argv);
 	} else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
-		ret = __cmd_record(argc, argv);
+		return __cmd_record(argc, argv);
 	} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
 		sched.tp_handler = &lat_ops;
 		if (argc > 1) {
@@ -3648,7 +3681,7 @@ int cmd_sched(int argc, const char **argv)
 			usage_with_options(latency_usage, latency_options);
 		}
 		setup_sorting(&sched, latency_options, latency_usage);
-		ret = perf_sched__lat(&sched);
+		return perf_sched__lat(&sched);
 	} else if (!strcmp(argv[0], "map")) {
 		if (argc) {
 			argc = parse_options(argc, argv, map_options, map_usage, 0);
@@ -3657,15 +3690,15 @@ int cmd_sched(int argc, const char **argv)
 		}
 		sched.tp_handler = &map_ops;
 		setup_sorting(&sched, latency_options, latency_usage);
-		ret = perf_sched__map(&sched);
+		return perf_sched__map(&sched);
 	} else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) {
 		sched.tp_handler = &replay_ops;
 		if (argc) {
 			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
 			if (argc)
 				usage_with_options(replay_usage, replay_options);
 		}
-		ret = perf_sched__replay(&sched);
+		return perf_sched__replay(&sched);
 	} else if (!strcmp(argv[0], "timehist")) {
 		if (argc) {
 			argc = parse_options(argc, argv, timehist_options,
@@ -3681,21 +3714,16 @@ int cmd_sched(int argc, const char **argv)
 				parse_options_usage(NULL, timehist_options, "w", true);
 			if (sched.show_next)
 				parse_options_usage(NULL, timehist_options, "n", true);
-			ret = -EINVAL;
-			goto out;
+			return -EINVAL;
 		}
 		ret = symbol__validate_sym_arguments();
 		if (ret)
-			goto out;
+			return ret;
 
-		ret = perf_sched__timehist(&sched);
+		return perf_sched__timehist(&sched);
 	} else {
 		usage_with_options(sched_usage, sched_options);
 	}
 
-out:
-	free(sched.curr_pid);
-	free(sched.cpu_last_switched);
-
-	return ret;
+	return 0;
 }
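
For readers skimming the diff, the change moves the curr_pid/cpu_last_switched allocations out of cmd_sched() and into a setup/free helper pair that each subcommand calls itself, unwinding through goto labels on error. Below is a minimal, self-contained sketch of that pattern; the toy_* names and the MAX_CPUS value are illustrative stand-ins, not part of the kernel source.

#include <stdio.h>
#include <stdlib.h>

#define MAX_CPUS 4096	/* illustrative; perf defines its own limit */

/* Toy stand-ins for the two per-CPU arrays the patch manages. */
struct toy_sched {
	unsigned long long *cpu_last_switched;
	int *curr_pid;
};

/* Paired allocator: on any failure, release whatever was already
 * taken and return -1, so callers need only one error check. */
static int toy_setup(struct toy_sched *s)
{
	unsigned int i;

	s->cpu_last_switched = calloc(MAX_CPUS, sizeof(*s->cpu_last_switched));
	if (!s->cpu_last_switched)
		return -1;

	s->curr_pid = malloc(MAX_CPUS * sizeof(*s->curr_pid));
	if (!s->curr_pid) {
		free(s->cpu_last_switched);
		s->cpu_last_switched = NULL;
		return -1;
	}

	for (i = 0; i < MAX_CPUS; i++)
		s->curr_pid[i] = -1;	/* -1 == no task seen on this CPU yet */

	return 0;
}

/* Paired teardown: releases exactly what toy_setup() acquired. */
static void toy_free(struct toy_sched *s)
{
	free(s->curr_pid);
	free(s->cpu_last_switched);
	s->curr_pid = NULL;
	s->cpu_last_switched = NULL;
}

static int toy_work(void)
{
	return 0;	/* pretend the event processing succeeded */
}

/* Caller shape mirroring perf_sched__lat() after the patch:
 * acquire, run, then funnel every exit through one cleanup label. */
static int toy_run(struct toy_sched *s)
{
	int rc = -1;

	if (toy_setup(s))
		return rc;

	if (toy_work())
		goto out_free;	/* mirrors "goto out_free_cpus_switch_event" */

	rc = 0;
out_free:
	toy_free(s);
	return rc;
}

int main(void)
{
	struct toy_sched s = { 0 };

	printf("toy_run: %d\n", toy_run(&s));
	return 0;
}

The payoff of the pattern is ownership: each subcommand that needs the arrays acquires and releases them itself, while subcommands that never touch them (script, record) no longer pay for the allocation, which is why cmd_sched() can simply return from each branch instead of routing everything through a shared out: label.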