@@ -3211,14 +3211,44 @@ static void perf_sched__merge_lat(struct perf_sched *sched)
 	}
 }
 
+static int setup_cpus_switch_event(struct perf_sched *sched)
+{
+	unsigned int i;
+
+	sched->cpu_last_switched = calloc(MAX_CPUS, sizeof(*(sched->cpu_last_switched)));
+	if (!sched->cpu_last_switched)
+		return -1;
+
+	sched->curr_pid = malloc(MAX_CPUS * sizeof(*(sched->curr_pid)));
+	if (!sched->curr_pid) {
+		zfree(&sched->cpu_last_switched);
+		return -1;
+	}
+
+	for (i = 0; i < MAX_CPUS; i++)
+		sched->curr_pid[i] = -1;
+
+	return 0;
+}
+
+static void free_cpus_switch_event(struct perf_sched *sched)
+{
+	zfree(&sched->curr_pid);
+	zfree(&sched->cpu_last_switched);
+}
+
 static int perf_sched__lat(struct perf_sched *sched)
 {
+	int rc = -1;
 	struct rb_node *next;
 
 	setup_pager();
 
+	if (setup_cpus_switch_event(sched))
+		return rc;
+
 	if (perf_sched__read_events(sched))
-		return -1;
+		goto out_free_cpus_switch_event;
 
 	perf_sched__merge_lat(sched);
 	perf_sched__sort_lat(sched);
@@ -3247,7 +3277,11 @@ static int perf_sched__lat(struct perf_sched *sched)
 	print_bad_events(sched);
 	printf("\n");
 
-	return 0;
+	rc = 0;
+
+out_free_cpus_switch_event:
+	free_cpus_switch_event(sched);
+	return rc;
 }
 
 static int setup_map_cpus(struct perf_sched *sched)
@@ -3314,9 +3348,12 @@ static int perf_sched__map(struct perf_sched *sched)
 	if (!sched->curr_thread)
 		return rc;
 
-	if (setup_map_cpus(sched))
+	if (setup_cpus_switch_event(sched))
 		goto out_free_curr_thread;
 
+	if (setup_map_cpus(sched))
+		goto out_free_cpus_switch_event;
+
 	if (setup_color_pids(sched))
 		goto out_put_map_cpus;
 
@@ -3340,6 +3377,9 @@ static int perf_sched__map(struct perf_sched *sched)
 	zfree(&sched->map.comp_cpus);
 	perf_cpu_map__put(sched->map.cpus);
 
+out_free_cpus_switch_event:
+	free_cpus_switch_event(sched);
+
 out_free_curr_thread:
 	zfree(&sched->curr_thread);
 	return rc;
@@ -3353,14 +3393,18 @@ static int perf_sched__replay(struct perf_sched *sched)
 	mutex_init(&sched->start_work_mutex);
 	mutex_init(&sched->work_done_wait_mutex);
 
+	ret = setup_cpus_switch_event(sched);
+	if (ret)
+		goto out_mutex_destroy;
+
 	calibrate_run_measurement_overhead(sched);
 	calibrate_sleep_measurement_overhead(sched);
 
 	test_calibrations(sched);
 
 	ret = perf_sched__read_events(sched);
 	if (ret)
-		goto out_mutex_destroy;
+		goto out_free_cpus_switch_event;
 
 	printf("nr_run_events: %ld\n", sched->nr_run_events);
 	printf("nr_sleep_events: %ld\n", sched->nr_sleep_events);
@@ -3386,6 +3430,9 @@ static int perf_sched__replay(struct perf_sched *sched)
 	sched->thread_funcs_exit = true;
 	destroy_tasks(sched);
 
+out_free_cpus_switch_event:
+	free_cpus_switch_event(sched);
+
 out_mutex_destroy:
 	mutex_destroy(&sched->start_work_mutex);
 	mutex_destroy(&sched->work_done_wait_mutex);
@@ -3624,21 +3671,7 @@ int cmd_sched(int argc, const char **argv)
 		.switch_event = replay_switch_event,
 		.fork_event = replay_fork_event,
 	};
-	unsigned int i;
-	int ret = 0;
-
-	sched.cpu_last_switched = calloc(MAX_CPUS, sizeof(*sched.cpu_last_switched));
-	if (!sched.cpu_last_switched) {
-		ret = -ENOMEM;
-		goto out;
-	}
-	sched.curr_pid = malloc(MAX_CPUS * sizeof(*sched.curr_pid));
-	if (!sched.curr_pid) {
-		ret = -ENOMEM;
-		goto out;
-	}
-	for (i = 0; i < MAX_CPUS; i++)
-		sched.curr_pid[i] = -1;
+	int ret;
 
 	argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
 					sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
@@ -3649,9 +3682,9 @@ int cmd_sched(int argc, const char **argv)
 	 * Aliased to 'perf script' for now:
 	 */
 	if (!strcmp(argv[0], "script")) {
-		ret = cmd_script(argc, argv);
+		return cmd_script(argc, argv);
 	} else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
-		ret = __cmd_record(argc, argv);
+		return __cmd_record(argc, argv);
 	} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
 		sched.tp_handler = &lat_ops;
 		if (argc > 1) {
@@ -3660,7 +3693,7 @@ int cmd_sched(int argc, const char **argv)
 			usage_with_options(latency_usage, latency_options);
 		}
 		setup_sorting(&sched, latency_options, latency_usage);
-		ret = perf_sched__lat(&sched);
+		return perf_sched__lat(&sched);
 	} else if (!strcmp(argv[0], "map")) {
 		if (argc) {
 			argc = parse_options(argc, argv, map_options, map_usage, 0);
@@ -3669,15 +3702,15 @@ int cmd_sched(int argc, const char **argv)
 		}
 		sched.tp_handler = &map_ops;
 		setup_sorting(&sched, latency_options, latency_usage);
-		ret = perf_sched__map(&sched);
+		return perf_sched__map(&sched);
 	} else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) {
 		sched.tp_handler = &replay_ops;
 		if (argc) {
 			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
 			if (argc)
 				usage_with_options(replay_usage, replay_options);
 		}
-		ret = perf_sched__replay(&sched);
+		return perf_sched__replay(&sched);
 	} else if (!strcmp(argv[0], "timehist")) {
 		if (argc) {
 			argc = parse_options(argc, argv, timehist_options,
@@ -3693,21 +3726,16 @@ int cmd_sched(int argc, const char **argv)
 				parse_options_usage(NULL, timehist_options, "w", true);
 			if (sched.show_next)
 				parse_options_usage(NULL, timehist_options, "n", true);
-			ret = -EINVAL;
-			goto out;
+			return -EINVAL;
 		}
 		ret = symbol__validate_sym_arguments();
 		if (ret)
-			goto out;
+			return ret;
 
-		ret = perf_sched__timehist(&sched);
+		return perf_sched__timehist(&sched);
 	} else {
 		usage_with_options(sched_usage, sched_options);
 	}
 
-out:
-	free(sched.curr_pid);
-	free(sched.cpu_last_switched);
-
-	return ret;
+	return 0;
 }
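
The hunks above all share one shape: a setup_cpus_switch_event() that owns both allocations and unwinds its own partial failure, a matching free_cpus_switch_event(), and goto-based cleanup in each caller, so cmd_sched() can return subcommand results directly instead of funneling everything through a shared out: label. Below is a minimal standalone sketch of that pattern, not the perf code itself; the MAX_CPUS value, the zfree() macro, and the do_work() stand-in for perf_sched__read_events() are illustrative assumptions.

/*
 * Sketch only: mirrors the setup/free helpers and the goto-unwind caller
 * introduced by this patch, using stand-in types and values.
 */
#include <stdlib.h>

#define MAX_CPUS 4096	/* assumption: placeholder, not perf's actual limit */

/* free *pp and NULL the pointer, in the spirit of perf's zfree() helper */
#define zfree(pp) do { free(*(pp)); *(pp) = NULL; } while (0)

struct sched_state {
	unsigned long *cpu_last_switched;	/* last switch timestamp per CPU */
	int *curr_pid;				/* pid on each CPU, -1 = unknown */
};

static int setup_cpus_switch_event(struct sched_state *s)
{
	s->cpu_last_switched = calloc(MAX_CPUS, sizeof(*s->cpu_last_switched));
	if (!s->cpu_last_switched)
		return -1;

	s->curr_pid = malloc(MAX_CPUS * sizeof(*s->curr_pid));
	if (!s->curr_pid) {
		/* second allocation failed: unwind the first before returning */
		zfree(&s->cpu_last_switched);
		return -1;
	}

	for (unsigned int i = 0; i < MAX_CPUS; i++)
		s->curr_pid[i] = -1;

	return 0;
}

static void free_cpus_switch_event(struct sched_state *s)
{
	zfree(&s->curr_pid);
	zfree(&s->cpu_last_switched);
}

/* stand-in for perf_sched__read_events(); always succeeds here */
static int do_work(struct sched_state *s)
{
	(void)s;
	return 0;
}

/* caller shaped like the patched perf_sched__lat() */
static int run(void)
{
	struct sched_state s = { 0 };
	int rc = -1;

	if (setup_cpus_switch_event(&s))
		return rc;		/* nothing allocated yet, plain return */

	if (do_work(&s))
		goto out_free_cpus_switch_event;

	rc = 0;

out_free_cpus_switch_event:
	free_cpus_switch_event(&s);	/* single exit path frees both arrays */
	return rc;
}

int main(void)
{
	return run() ? EXIT_FAILURE : EXIT_SUCCESS;
}

The design payoff is that each subcommand allocates the per-CPU arrays only if it actually uses them, and every exit path, success or failure, releases exactly what was set up.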