22
22
23
23
struct scmi_data {
24
24
int domain_id ;
25
+ int nr_opp ;
25
26
struct device * cpu_dev ;
27
+ cpumask_var_t opp_shared_cpus ;
26
28
};
27
29
28
30
/* SCMI perf protocol handle shared by all callbacks; NOTE(review): presumably assigned once at probe time — confirm in scmi_cpufreq_probe(). */
static struct scmi_protocol_handle * ph ;
@@ -123,19 +125,22 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
123
125
struct device * cpu_dev ;
124
126
struct scmi_data * priv ;
125
127
struct cpufreq_frequency_table * freq_table ;
126
- struct em_data_callback em_cb = EM_DATA_CB (scmi_get_cpu_power );
127
- cpumask_var_t opp_shared_cpus ;
128
- bool power_scale_mw ;
129
128
130
129
cpu_dev = get_cpu_device (policy -> cpu );
131
130
if (!cpu_dev ) {
132
131
pr_err ("failed to get cpu%d device\n" , policy -> cpu );
133
132
return - ENODEV ;
134
133
}
135
134
136
- if (!zalloc_cpumask_var (& opp_shared_cpus , GFP_KERNEL ))
135
+ priv = kzalloc (sizeof (* priv ), GFP_KERNEL );
136
+ if (!priv )
137
137
return - ENOMEM ;
138
138
139
+ if (!zalloc_cpumask_var (& priv -> opp_shared_cpus , GFP_KERNEL )) {
140
+ ret = - ENOMEM ;
141
+ goto out_free_priv ;
142
+ }
143
+
139
144
/* Obtain CPUs that share SCMI performance controls */
140
145
ret = scmi_get_sharing_cpus (cpu_dev , policy -> cpus );
141
146
if (ret ) {
@@ -148,14 +153,14 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
148
153
* The OPP 'sharing cpus' info may come from DT through an empty opp
149
154
* table and opp-shared.
150
155
*/
151
- ret = dev_pm_opp_of_get_sharing_cpus (cpu_dev , opp_shared_cpus );
152
- if (ret || !cpumask_weight (opp_shared_cpus )) {
156
+ ret = dev_pm_opp_of_get_sharing_cpus (cpu_dev , priv -> opp_shared_cpus );
157
+ if (ret || !cpumask_weight (priv -> opp_shared_cpus )) {
153
158
/*
154
159
* Either opp-table is not set or no opp-shared was found.
155
160
* Use the CPU mask from SCMI to designate CPUs sharing an OPP
156
161
* table.
157
162
*/
158
- cpumask_copy (opp_shared_cpus , policy -> cpus );
163
+ cpumask_copy (priv -> opp_shared_cpus , policy -> cpus );
159
164
}
160
165
161
166
/*
@@ -180,29 +185,21 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
180
185
goto out_free_opp ;
181
186
}
182
187
183
- ret = dev_pm_opp_set_sharing_cpus (cpu_dev , opp_shared_cpus );
188
+ ret = dev_pm_opp_set_sharing_cpus (cpu_dev , priv -> opp_shared_cpus );
184
189
if (ret ) {
185
190
dev_err (cpu_dev , "%s: failed to mark OPPs as shared: %d\n" ,
186
191
__func__ , ret );
187
192
188
193
goto out_free_opp ;
189
194
}
190
195
191
- power_scale_mw = perf_ops -> power_scale_mw_get (ph );
192
- em_dev_register_perf_domain (cpu_dev , nr_opp , & em_cb ,
193
- opp_shared_cpus , power_scale_mw );
194
- }
195
-
196
- priv = kzalloc (sizeof (* priv ), GFP_KERNEL );
197
- if (!priv ) {
198
- ret = - ENOMEM ;
199
- goto out_free_opp ;
196
+ priv -> nr_opp = nr_opp ;
200
197
}
201
198
202
199
ret = dev_pm_opp_init_cpufreq_table (cpu_dev , & freq_table );
203
200
if (ret ) {
204
201
dev_err (cpu_dev , "failed to init cpufreq table: %d\n" , ret );
205
- goto out_free_priv ;
202
+ goto out_free_opp ;
206
203
}
207
204
208
205
priv -> cpu_dev = cpu_dev ;
@@ -223,17 +220,16 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
223
220
policy -> fast_switch_possible =
224
221
perf_ops -> fast_switch_possible (ph , cpu_dev );
225
222
226
- free_cpumask_var (opp_shared_cpus );
227
223
return 0 ;
228
224
229
- out_free_priv :
230
- kfree (priv );
231
-
232
225
out_free_opp :
233
226
dev_pm_opp_remove_all_dynamic (cpu_dev );
234
227
235
228
out_free_cpumask :
236
- free_cpumask_var (opp_shared_cpus );
229
+ free_cpumask_var (priv -> opp_shared_cpus );
230
+
231
+ out_free_priv :
232
+ kfree (priv );
237
233
238
234
return ret ;
239
235
}
@@ -244,11 +240,33 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
244
240
245
241
dev_pm_opp_free_cpufreq_table (priv -> cpu_dev , & policy -> freq_table );
246
242
dev_pm_opp_remove_all_dynamic (priv -> cpu_dev );
243
+ free_cpumask_var (priv -> opp_shared_cpus );
247
244
kfree (priv );
248
245
249
246
return 0 ;
250
247
}
251
248
249
+ static void scmi_cpufreq_register_em (struct cpufreq_policy * policy )
250
+ {
251
+ struct em_data_callback em_cb = EM_DATA_CB (scmi_get_cpu_power );
252
+ bool power_scale_mw = perf_ops -> power_scale_mw_get (ph );
253
+ struct scmi_data * priv = policy -> driver_data ;
254
+
255
+ /*
256
+ * This callback will be called for each policy, but we don't need to
257
+ * register with EM every time. Despite not being part of the same
258
+ * policy, some CPUs may still share their perf-domains, and a CPU from
259
+ * another policy may already have registered with EM on behalf of CPUs
260
+ * of this policy.
261
+ */
262
+ if (!priv -> nr_opp )
263
+ return ;
264
+
265
+ em_dev_register_perf_domain (get_cpu_device (policy -> cpu ), priv -> nr_opp ,
266
+ & em_cb , priv -> opp_shared_cpus ,
267
+ power_scale_mw );
268
+ }
269
+
252
270
static struct cpufreq_driver scmi_cpufreq_driver = {
253
271
.name = "scmi" ,
254
272
.flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
@@ -261,6 +279,7 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
261
279
.get = scmi_cpufreq_get_rate ,
262
280
.init = scmi_cpufreq_init ,
263
281
.exit = scmi_cpufreq_exit ,
282
+ .register_em = scmi_cpufreq_register_em ,
264
283
};
265
284
266
285
static int scmi_cpufreq_probe (struct scmi_device * sdev )
0 commit comments