@@ -5940,6 +5940,25 @@ static __always_inline void intel_pmu_init_glc(struct pmu *pmu)
 	hybrid(pmu, pebs_constraints) = intel_glc_pebs_event_constraints;
 }
 
+static __always_inline void intel_pmu_init_grt(struct pmu *pmu)
+{
+	x86_pmu.mid_ack = true;
+	x86_pmu.limit_period = glc_limit_period;
+	x86_pmu.pebs_aliases = NULL;
+	x86_pmu.pebs_prec_dist = true;
+	x86_pmu.pebs_block = true;
+	x86_pmu.lbr_pt_coexist = true;
+	x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+	x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
+
+	memcpy(hybrid_var(pmu, hw_cache_event_ids), glp_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+	memcpy(hybrid_var(pmu, hw_cache_extra_regs), tnt_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+	hybrid_var(pmu, hw_cache_event_ids)[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
+	hybrid(pmu, event_constraints) = intel_slm_event_constraints;
+	hybrid(pmu, pebs_constraints) = intel_grt_pebs_event_constraints;
+	hybrid(pmu, extra_regs) = intel_grt_extra_regs;
+}
+
 __init int intel_pmu_init(void)
 {
 	struct attribute **extra_skl_attr = &empty_attrs;
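
Note: the new helper assigns per-PMU state through the hybrid()/hybrid_var() accessors rather than writing the fields directly, which is what lets one routine serve both non-hybrid parts (pmu == NULL) and an individual hybrid PMU. A minimal sketch of the dispatch shape, simplified from the macros in arch/x86/events/perf_event.h (treat the body as an approximation, not a verbatim copy):

/*
 * Sketch: hybrid(pmu, field) yields an lvalue. With a NULL pmu (or on a
 * non-hybrid system) it resolves to the global x86_pmu.field; with a
 * hybrid PMU pointer it resolves to that PMU's own copy of the field.
 */
#define hybrid(_pmu, _field)						\
(*({									\
	typeof(&x86_pmu._field) __Fp = &x86_pmu._field;			\
									\
	if (is_hybrid() && (_pmu))					\
		__Fp = &hybrid_pmu(_pmu)->_field;			\
									\
	__Fp;								\
}))
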
@@ -6218,28 +6237,10 @@ __init int intel_pmu_init(void)
 		break;
 
 	case INTEL_FAM6_ATOM_GRACEMONT:
-		x86_pmu.mid_ack = true;
-		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
-		       sizeof(hw_cache_event_ids));
-		memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
-		       sizeof(hw_cache_extra_regs));
-		hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
-
-		x86_pmu.event_constraints = intel_slm_event_constraints;
-		x86_pmu.pebs_constraints = intel_grt_pebs_event_constraints;
-		x86_pmu.extra_regs = intel_grt_extra_regs;
-
-		x86_pmu.pebs_aliases = NULL;
-		x86_pmu.pebs_prec_dist = true;
-		x86_pmu.pebs_block = true;
-		x86_pmu.lbr_pt_coexist = true;
-		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
-		x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
-
+		intel_pmu_init_grt(NULL);
 		intel_pmu_pebs_data_source_grt();
 		x86_pmu.pebs_latency_data = adl_latency_data_small;
 		x86_pmu.get_event_constraints = tnt_get_event_constraints;
-		x86_pmu.limit_period = glc_limit_period;
 		td_attr = tnt_events_attrs;
 		mem_attr = grt_mem_attrs;
 		extra_attr = nhm_format_attr;
@@ -6249,28 +6250,11 @@ __init int intel_pmu_init(void)
 
 	case INTEL_FAM6_ATOM_CRESTMONT:
 	case INTEL_FAM6_ATOM_CRESTMONT_X:
-		x86_pmu.mid_ack = true;
-		memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
-		       sizeof(hw_cache_event_ids));
-		memcpy(hw_cache_extra_regs, tnt_hw_cache_extra_regs,
-		       sizeof(hw_cache_extra_regs));
-		hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
-
-		x86_pmu.event_constraints = intel_slm_event_constraints;
-		x86_pmu.pebs_constraints = intel_grt_pebs_event_constraints;
+		intel_pmu_init_grt(NULL);
 		x86_pmu.extra_regs = intel_cmt_extra_regs;
-
-		x86_pmu.pebs_aliases = NULL;
-		x86_pmu.pebs_prec_dist = true;
-		x86_pmu.lbr_pt_coexist = true;
-		x86_pmu.pebs_block = true;
-		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
-		x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
-
 		intel_pmu_pebs_data_source_cmt();
 		x86_pmu.pebs_latency_data = mtl_latency_data_small;
 		x86_pmu.get_event_constraints = cmt_get_event_constraints;
-		x86_pmu.limit_period = glc_limit_period;
 		td_attr = cmt_events_attrs;
 		mem_attr = grt_mem_attrs;
 		extra_attr = cmt_format_attr;
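
With pmu == NULL, every hybrid()/hybrid_var() write in intel_pmu_init_grt() lands in the global x86_pmu, so both rewritten case blocks keep their previous behavior; the Crestmont case then simply overrides the extra_regs value the helper installed. On a hybrid part the same helper could instead be pointed at one PMU instance. A hypothetical hybrid-path call, mirroring how intel_pmu_init_glc() takes a struct pmu pointer above (the function and variable names here are illustrative, not from this diff):

/*
 * Hypothetical usage sketch: initialize the e-core PMU of a hybrid
 * model by passing its struct pmu, so the hybrid() writes target
 * that PMU's fields instead of the global x86_pmu.
 */
static void example_init_hybrid_ecore(struct x86_hybrid_pmu *pmu)
{
	intel_pmu_init_grt(&pmu->pmu);
	/* ...followed by any per-model overrides, as in the cases above. */
}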