Commit 62a89c4

Author/committer: Marc Zyngier
arm64: KVM: 32bit handling of coprocessor traps

Provide the necessary infrastructure to trap coprocessor accesses that
occur when running 32bit guests. Also wire SMC and HVC trapped in 32bit
mode while we're at it.

Reviewed-by: Christopher Covington <[email protected]>
Reviewed-by: Catalin Marinas <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>

1 parent 27b190b · commit 62a89c4
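For a concrete picture of what this commit starts trapping, here is a minimal, purely illustrative 32-bit guest-side sketch. The helper name and the set/way value are invented; only the MCR encoding matters, and it corresponds to the access_dcsw entry added to cp15_regs below.

/* Illustrative only: an AArch32 guest CP15 access that now traps to EL2
 * and is emulated (set/way data-cache clean, CRn=c7, CRm=c10, Op2=2).
 * Builds with a 32-bit ARM compiler.
 */
static inline void guest_dc_csw(unsigned long setway)
{
	asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (setway) : "memory");
}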

File tree

3 files changed: +186 -7 lines changed

arch/arm64/include/asm/kvm_coproc.h

Lines changed: 5 additions & 0 deletions
@@ -32,11 +32,16 @@ struct kvm_sys_reg_table {
 
 struct kvm_sys_reg_target_table {
 	struct kvm_sys_reg_table table64;
+	struct kvm_sys_reg_table table32;
 };
 
 void kvm_register_target_sys_reg_table(unsigned int target,
 				       struct kvm_sys_reg_target_table *table);
 
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
 #define kvm_coproc_table_init kvm_sys_reg_table_init

arch/arm64/kvm/handle_exit.c

Lines changed: 7 additions & 0 deletions
@@ -62,6 +62,13 @@ static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 static exit_handle_fn arm_exit_handlers[] = {
 	[ESR_EL2_EC_WFI]	= kvm_handle_wfi,
+	[ESR_EL2_EC_CP15_32]	= kvm_handle_cp15_32,
+	[ESR_EL2_EC_CP15_64]	= kvm_handle_cp15_64,
+	[ESR_EL2_EC_CP14_MR]	= kvm_handle_cp14_access,
+	[ESR_EL2_EC_CP14_LS]	= kvm_handle_cp14_load_store,
+	[ESR_EL2_EC_CP14_64]	= kvm_handle_cp14_access,
+	[ESR_EL2_EC_HVC32]	= handle_hvc,
+	[ESR_EL2_EC_SMC32]	= handle_smc,
 	[ESR_EL2_EC_HVC64]	= handle_hvc,
 	[ESR_EL2_EC_SMC64]	= handle_smc,
 	[ESR_EL2_EC_SYS64]	= kvm_handle_sys_reg,

arch/arm64/kvm/sys_regs.c

Lines changed: 174 additions & 7 deletions
@@ -38,6 +38,10 @@
  * types are different. My gut feeling is that it should be pretty
  * easy to merge, but that would be an ABI breakage -- again. VFP
  * would also need to be abstracted.
+ *
+ * For AArch32, we only take care of what is being trapped. Anything
+ * that has to do with init and userspace access has to go via the
+ * 64bit interface.
  */
 
 /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
@@ -166,6 +170,16 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
 	  access_dcsw },
 
+	/* TEECR32_EL1 */
+	{ Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
+	  NULL, reset_val, TEECR32_EL1, 0 },
+	/* TEEHBR32_EL1 */
+	{ Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
+	  NULL, reset_val, TEEHBR32_EL1, 0 },
+	/* DBGVCR32_EL2 */
+	{ Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
+	  NULL, reset_val, DBGVCR32_EL2, 0 },
+
 	/* MPIDR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
 	  NULL, reset_mpidr, MPIDR_EL1 },
@@ -276,6 +290,39 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	/* TPIDRRO_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
 	  NULL, reset_unknown, TPIDRRO_EL0 },
+
+	/* DACR32_EL2 */
+	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
+	  NULL, reset_unknown, DACR32_EL2 },
+	/* IFSR32_EL2 */
+	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
+	  NULL, reset_unknown, IFSR32_EL2 },
+	/* FPEXC32_EL2 */
+	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
+	  NULL, reset_val, FPEXC32_EL2, 0x70 },
+};
+
+/* Trapped cp15 registers */
+static const struct sys_reg_desc cp15_regs[] = {
+	/*
+	 * DC{C,I,CI}SW operations:
+	 */
+	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
+	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
+	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake },
 };
 
 /* Target specific emulation tables */
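The cp15_regs table above identifies a trapped 32-bit access purely by its Op1/CRn/CRm/Op2 coordinates (Op0 is unused for CP15), and emulate_cp15 later in this patch searches the target-specific table first, then this generic one, taking the first match. A simplified stand-alone sketch of that lookup follows; the struct and function names are invented, and the real descriptors also carry access and reset callbacks.

#include <stddef.h>

/* Simplified stand-in for sys_reg_params/sys_reg_desc: just the fields
 * that identify a CP15 register. */
struct cp15_key {
	unsigned Op1, CRn, CRm, Op2;
};

/* Linear search; first match wins, so a target-specific table consulted
 * before the generic one can override individual entries. */
static const struct cp15_key *find_key(const struct cp15_key *want,
				       const struct cp15_key *table, size_t num)
{
	size_t i;

	for (i = 0; i < num; i++) {
		const struct cp15_key *k = &table[i];

		if (k->Op1 == want->Op1 && k->CRn == want->CRn &&
		    k->CRm == want->CRm && k->Op2 == want->Op2)
			return k;
	}
	return NULL;	/* unknown register: the caller injects #UNDEF */
}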
@@ -288,13 +335,20 @@ void kvm_register_target_sys_reg_table(unsigned int target,
 }
 
 /* Get specific register table for this target. */
-static const struct sys_reg_desc *get_target_table(unsigned target, size_t *num)
+static const struct sys_reg_desc *get_target_table(unsigned target,
+						   bool mode_is_64,
+						   size_t *num)
 {
 	struct kvm_sys_reg_target_table *table;
 
 	table = target_tables[target];
-	*num = table->table64.num;
-	return table->table64.table;
+	if (mode_is_64) {
+		*num = table->table64.num;
+		return table->table64.table;
+	} else {
+		*num = table->table32.num;
+		return table->table32.table;
+	}
 }
 
 static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
@@ -322,13 +376,126 @@ static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
 	return NULL;
 }
 
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+static void emulate_cp15(struct kvm_vcpu *vcpu,
+			 const struct sys_reg_params *params)
+{
+	size_t num;
+	const struct sys_reg_desc *table, *r;
+
+	table = get_target_table(vcpu->arch.target, false, &num);
+
+	/* Search target-specific then generic table. */
+	r = find_reg(params, table, num);
+	if (!r)
+		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));
+
+	if (likely(r)) {
+		/*
+		 * Not having an accessor means that we have
+		 * configured a trap that we don't know how to
+		 * handle. This certainly qualifies as a gross bug
+		 * that should be fixed right away.
+		 */
+		BUG_ON(!r->access);
+
+		if (likely(r->access(vcpu, params, r))) {
+			/* Skip instruction, since it was emulated */
+			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+			return;
+		}
+		/* If access function fails, it should complain. */
+	}
+
+	kvm_err("Unsupported guest CP15 access at: %08lx\n", *vcpu_pc(vcpu));
+	print_sys_reg_instr(params);
+	kvm_inject_undefined(vcpu);
+}
+
+/**
+ * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct sys_reg_params params;
+	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	int Rt2 = (hsr >> 10) & 0xf;
+
+	params.CRm = (hsr >> 1) & 0xf;
+	params.Rt = (hsr >> 5) & 0xf;
+	params.is_write = ((hsr & 1) == 0);
+
+	params.Op0 = 0;
+	params.Op1 = (hsr >> 16) & 0xf;
+	params.Op2 = 0;
+	params.CRn = 0;
+
+	/*
+	 * Massive hack here. Store Rt2 in the top 32bits so we only
+	 * have one register to deal with. As we use the same trap
+	 * backends between AArch32 and AArch64, we get away with it.
+	 */
+	if (params.is_write) {
+		u64 val = *vcpu_reg(vcpu, params.Rt);
+		val &= 0xffffffff;
+		val |= *vcpu_reg(vcpu, Rt2) << 32;
+		*vcpu_reg(vcpu, params.Rt) = val;
+	}
+
+	emulate_cp15(vcpu, &params);
+
+	/* Do the opposite hack for the read side */
+	if (!params.is_write) {
+		u64 val = *vcpu_reg(vcpu, params.Rt);
+		val >>= 32;
+		*vcpu_reg(vcpu, Rt2) = val;
+	}
+
+	return 1;
+}
+
+/**
+ * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct sys_reg_params params;
+	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+	params.CRm = (hsr >> 1) & 0xf;
+	params.Rt = (hsr >> 5) & 0xf;
+	params.is_write = ((hsr & 1) == 0);
+	params.CRn = (hsr >> 10) & 0xf;
+	params.Op0 = 0;
+	params.Op1 = (hsr >> 14) & 0x7;
+	params.Op2 = (hsr >> 17) & 0x7;
+
+	emulate_cp15(vcpu, &params);
+	return 1;
+}
+
 static int emulate_sys_reg(struct kvm_vcpu *vcpu,
 			   const struct sys_reg_params *params)
 {
 	size_t num;
 	const struct sys_reg_desc *table, *r;
 
-	table = get_target_table(vcpu->arch.target, &num);
+	table = get_target_table(vcpu->arch.target, true, &num);
 
 	/* Search target-specific then generic table. */
 	r = find_reg(params, table, num);
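The "massive hack" in kvm_handle_cp15_64 above packs the second 32-bit guest register into the top half of a single 64-bit value on the write (mcrr) path, and splits it back out on the read (mrrc) path, so the shared backend only ever sees one register. A small stand-alone illustration of that packing, with plain variables standing in for *vcpu_reg():

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Stand-ins for *vcpu_reg(vcpu, Rt) and *vcpu_reg(vcpu, Rt2). */
	uint64_t rt  = 0x11111111;
	uint64_t rt2 = 0x22222222;

	/* Write path (mcrr): fold Rt2 into the top 32 bits of Rt. */
	uint64_t packed = (rt & 0xffffffff) | (rt2 << 32);
	assert(packed == 0x2222222211111111ULL);

	/* Read path (mrrc): the backend leaves a 64-bit result in Rt;
	 * the top half is moved back into Rt2. */
	rt  = packed;
	rt2 = packed >> 32;
	assert((uint32_t)rt == 0x11111111 && rt2 == 0x22222222);

	return 0;
}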
@@ -438,7 +605,7 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
 	if (!index_to_params(id, &params))
 		return NULL;
 
-	table = get_target_table(vcpu->arch.target, &num);
+	table = get_target_table(vcpu->arch.target, true, &num);
 	r = find_reg(&params, table, num);
 	if (!r)
 		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
@@ -762,7 +929,7 @@ static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
 	size_t num;
 
 	/* We check for duplicates here, to allow arch-specific overrides. */
-	i1 = get_target_table(vcpu->arch.target, &num);
+	i1 = get_target_table(vcpu->arch.target, true, &num);
 	end1 = i1 + num;
 	i2 = sys_reg_descs;
 	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
@@ -874,7 +1041,7 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
 	/* Generic chip reset first (so target could override). */
 	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
 
-	table = get_target_table(vcpu->arch.target, &num);
+	table = get_target_table(vcpu->arch.target, true, &num);
 	reset_sys_reg_descs(vcpu, table, num);
 
 	for (num = 1; num < NR_SYS_REGS; num++)
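As a worked example of the decode in kvm_handle_cp15_32, the snippet below hand-builds an ISS value for a hypothetical trapped "mcr p15, 0, r2, c7, c10, 2" and recovers Op2=2, Op1=0, CRn=7, Rt=2, CRm=10 and a write direction. The example value is fabricated; the field positions are taken directly from the handler's own shifts.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t hsr = (2u << 17)	/* Op2  = 2   */
		     | (0u << 14)	/* Op1  = 0   */
		     | (7u << 10)	/* CRn  = c7  */
		     | (2u << 5)	/* Rt   = r2  */
		     | (10u << 1)	/* CRm  = c10 */
		     | 0u;		/* bit 0 = 0 -> MCR, i.e. a write */

	assert(((hsr >> 17) & 0x7) == 2);	/* Op2      */
	assert(((hsr >> 14) & 0x7) == 0);	/* Op1      */
	assert(((hsr >> 10) & 0xf) == 7);	/* CRn      */
	assert(((hsr >> 5)  & 0xf) == 2);	/* Rt       */
	assert(((hsr >> 1)  & 0xf) == 10);	/* CRm      */
	assert((hsr & 1) == 0);			/* is_write */
	return 0;
}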
