Skip to content

Commit 0ba0c03

Browse files
Kan Liang and Ingo Molnar
Kan Liang
authored and
Ingo Molnar
committed
perf/x86/intel: Factor out the initialization code for SPR
The SPR and ADL p-core have a similar uarch. Most of the initialization code can be shared. Factor out intel_pmu_init_glc() for the common initialization code. The common part of the ADL p-core will be replaced by the later patch. Signed-off-by: Kan Liang <[email protected]> Signed-off-by: Ingo Molnar <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent d4b5694 commit 0ba0c03

File tree

1 file changed

+26
-23
lines changed

1 file changed

+26
-23
lines changed

arch/x86/events/intel/core.c

+26-23
Original file line numberDiff line numberDiff line change
@@ -5916,6 +5916,30 @@ static __always_inline bool is_mtl(u8 x86_model)
59165916
(x86_model == INTEL_FAM6_METEORLAKE_L);
59175917
}
59185918

5919+
static __always_inline void intel_pmu_init_glc(struct pmu *pmu)
5920+
{
5921+
x86_pmu.late_ack = true;
5922+
x86_pmu.limit_period = glc_limit_period;
5923+
x86_pmu.pebs_aliases = NULL;
5924+
x86_pmu.pebs_prec_dist = true;
5925+
x86_pmu.pebs_block = true;
5926+
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
5927+
x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
5928+
x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
5929+
x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
5930+
x86_pmu.lbr_pt_coexist = true;
5931+
x86_pmu.num_topdown_events = 8;
5932+
static_call_update(intel_pmu_update_topdown_event,
5933+
&icl_update_topdown_event);
5934+
static_call_update(intel_pmu_set_topdown_event_period,
5935+
&icl_set_topdown_event_period);
5936+
5937+
memcpy(hybrid_var(pmu, hw_cache_event_ids), glc_hw_cache_event_ids, sizeof(hw_cache_event_ids));
5938+
memcpy(hybrid_var(pmu, hw_cache_extra_regs), glc_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
5939+
hybrid(pmu, event_constraints) = intel_glc_event_constraints;
5940+
hybrid(pmu, pebs_constraints) = intel_glc_pebs_event_constraints;
5941+
}
5942+
59195943
__init int intel_pmu_init(void)
59205944
{
59215945
struct attribute **extra_skl_attr = &empty_attrs;
@@ -6567,24 +6591,10 @@ __init int intel_pmu_init(void)
65676591
fallthrough;
65686592
case INTEL_FAM6_GRANITERAPIDS_X:
65696593
case INTEL_FAM6_GRANITERAPIDS_D:
6570-
pmem = true;
6571-
x86_pmu.late_ack = true;
6572-
memcpy(hw_cache_event_ids, glc_hw_cache_event_ids, sizeof(hw_cache_event_ids));
6573-
memcpy(hw_cache_extra_regs, glc_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
6574-
6575-
x86_pmu.event_constraints = intel_glc_event_constraints;
6576-
x86_pmu.pebs_constraints = intel_glc_pebs_event_constraints;
6594+
intel_pmu_init_glc(NULL);
65776595
if (!x86_pmu.extra_regs)
65786596
x86_pmu.extra_regs = intel_rwc_extra_regs;
6579-
x86_pmu.limit_period = glc_limit_period;
65806597
x86_pmu.pebs_ept = 1;
6581-
x86_pmu.pebs_aliases = NULL;
6582-
x86_pmu.pebs_prec_dist = true;
6583-
x86_pmu.pebs_block = true;
6584-
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
6585-
x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
6586-
x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
6587-
65886598
x86_pmu.hw_config = hsw_hw_config;
65896599
x86_pmu.get_event_constraints = glc_get_event_constraints;
65906600
extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
@@ -6593,14 +6603,7 @@ __init int intel_pmu_init(void)
65936603
mem_attr = glc_events_attrs;
65946604
td_attr = glc_td_events_attrs;
65956605
tsx_attr = glc_tsx_events_attrs;
6596-
x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
6597-
x86_pmu.lbr_pt_coexist = true;
6598-
intel_pmu_pebs_data_source_skl(pmem);
6599-
x86_pmu.num_topdown_events = 8;
6600-
static_call_update(intel_pmu_update_topdown_event,
6601-
&icl_update_topdown_event);
6602-
static_call_update(intel_pmu_set_topdown_event_period,
6603-
&icl_set_topdown_event_period);
6606+
intel_pmu_pebs_data_source_skl(true);
66046607
pr_cont("Sapphire Rapids events, ");
66056608
name = "sapphire_rapids";
66066609
break;

0 commit comments

Comments
 (0)