
Commit 37f1883

cpufreq: scmi: Use .register_em() to register with energy model
Set the newly added .register_em() callback to register with the EM after the
cpufreq policy is properly initialized.

Acked-by: Sudeep Holla <[email protected]>
Signed-off-by: Viresh Kumar <[email protected]>
1 parent 3fd2311 commit 37f1883
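
For context, here is a minimal sketch of the ordering the new callback relies on: the cpufreq core is assumed to invoke ->register_em() only after ->init() has returned, so policy->driver_data (the scmi_data allocated in scmi_cpufreq_init()) and policy->cpus are already valid when EM registration runs. The helper below is hypothetical and only illustrates that ordering; it is not the actual cpufreq core code.

    #include <linux/cpufreq.h>

    /* Hypothetical helper: illustrates the assumed core-side call order. */
    static int example_policy_online(struct cpufreq_policy *policy,
                                     struct cpufreq_driver *drv)
    {
            int ret;

            /* ->init() fills in policy->cpus and policy->driver_data. */
            ret = drv->init(policy);
            if (ret)
                    return ret;

            /* EM registration happens only once the policy is fully set up. */
            if (drv->register_em)
                    drv->register_em(policy);

            return 0;
    }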

File tree

1 file changed: +42 -23 lines changed


drivers/cpufreq/scmi-cpufreq.c

Lines changed: 42 additions & 23 deletions
@@ -22,7 +22,9 @@

 struct scmi_data {
         int domain_id;
+        int nr_opp;
         struct device *cpu_dev;
+        cpumask_var_t opp_shared_cpus;
 };

 static struct scmi_protocol_handle *ph;
@@ -123,19 +125,22 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
         struct device *cpu_dev;
         struct scmi_data *priv;
         struct cpufreq_frequency_table *freq_table;
-        struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
-        cpumask_var_t opp_shared_cpus;
-        bool power_scale_mw;

         cpu_dev = get_cpu_device(policy->cpu);
         if (!cpu_dev) {
                 pr_err("failed to get cpu%d device\n", policy->cpu);
                 return -ENODEV;
         }

-        if (!zalloc_cpumask_var(&opp_shared_cpus, GFP_KERNEL))
+        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+        if (!priv)
                 return -ENOMEM;

+        if (!zalloc_cpumask_var(&priv->opp_shared_cpus, GFP_KERNEL)) {
+                ret = -ENOMEM;
+                goto out_free_priv;
+        }
+
         /* Obtain CPUs that share SCMI performance controls */
         ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
         if (ret) {
@@ -148,14 +153,14 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
          * The OPP 'sharing cpus' info may come from DT through an empty opp
          * table and opp-shared.
          */
-        ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, opp_shared_cpus);
-        if (ret || !cpumask_weight(opp_shared_cpus)) {
+        ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
+        if (ret || !cpumask_weight(priv->opp_shared_cpus)) {
                 /*
                  * Either opp-table is not set or no opp-shared was found.
                  * Use the CPU mask from SCMI to designate CPUs sharing an OPP
                  * table.
                  */
-                cpumask_copy(opp_shared_cpus, policy->cpus);
+                cpumask_copy(priv->opp_shared_cpus, policy->cpus);
         }

         /*
@@ -180,29 +185,21 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
                         goto out_free_opp;
                 }

-                ret = dev_pm_opp_set_sharing_cpus(cpu_dev, opp_shared_cpus);
+                ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
                 if (ret) {
                         dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
                                 __func__, ret);

                         goto out_free_opp;
                 }

-                power_scale_mw = perf_ops->power_scale_mw_get(ph);
-                em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb,
-                                            opp_shared_cpus, power_scale_mw);
-        }
-
-        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-        if (!priv) {
-                ret = -ENOMEM;
-                goto out_free_opp;
+                priv->nr_opp = nr_opp;
         }

         ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
         if (ret) {
                 dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
-                goto out_free_priv;
+                goto out_free_opp;
         }

         priv->cpu_dev = cpu_dev;
@@ -223,17 +220,16 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
         policy->fast_switch_possible =
                 perf_ops->fast_switch_possible(ph, cpu_dev);

-        free_cpumask_var(opp_shared_cpus);
         return 0;

-out_free_priv:
-        kfree(priv);
-
 out_free_opp:
         dev_pm_opp_remove_all_dynamic(cpu_dev);

 out_free_cpumask:
-        free_cpumask_var(opp_shared_cpus);
+        free_cpumask_var(priv->opp_shared_cpus);
+
+out_free_priv:
+        kfree(priv);

         return ret;
 }
@@ -244,11 +240,33 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)

         dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
         dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
+        free_cpumask_var(priv->opp_shared_cpus);
         kfree(priv);

         return 0;
 }

+static void scmi_cpufreq_register_em(struct cpufreq_policy *policy)
+{
+        struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
+        bool power_scale_mw = perf_ops->power_scale_mw_get(ph);
+        struct scmi_data *priv = policy->driver_data;
+
+        /*
+         * This callback will be called for each policy, but we don't need to
+         * register with EM every time. Despite not being part of the same
+         * policy, some CPUs may still share their perf-domains, and a CPU from
+         * another policy may already have registered with EM on behalf of CPUs
+         * of this policy.
+         */
+        if (!priv->nr_opp)
+                return;
+
+        em_dev_register_perf_domain(get_cpu_device(policy->cpu), priv->nr_opp,
+                                    &em_cb, priv->opp_shared_cpus,
+                                    power_scale_mw);
+}
+
 static struct cpufreq_driver scmi_cpufreq_driver = {
         .name = "scmi",
         .flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
@@ -261,6 +279,7 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
         .get = scmi_cpufreq_get_rate,
         .init = scmi_cpufreq_init,
         .exit = scmi_cpufreq_exit,
+        .register_em = scmi_cpufreq_register_em,
 };

 static int scmi_cpufreq_probe(struct scmi_device *sdev)
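
For reference, EM_DATA_CB(scmi_get_cpu_power) wraps an active-power callback that the energy model core invokes for each OPP during em_dev_register_perf_domain(). A hedged sketch of that callback shape around the time of this commit follows; the prototype has changed in later kernels, and the example_* name and placeholder body are illustrative, not the driver's actual scmi_get_cpu_power():

    #include <linux/device.h>
    #include <linux/energy_model.h>

    /*
     * Illustrative active-power callback: *freq is a requested frequency in
     * kHz that the callback rounds up to a supported OPP, and *power reports
     * the active power at that OPP. The estimate below is a placeholder only.
     */
    static int example_get_cpu_power(unsigned long *power, unsigned long *freq,
                                     struct device *cpu_dev)
    {
            /* A real driver would look up firmware/OPP data for cpu_dev here. */
            *power = *freq / 1000;

            return 0;
    }

    static struct em_data_callback example_em_cb = EM_DATA_CB(example_get_cpu_power);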
