  * types are different. My gut feeling is that it should be pretty
  * easy to merge, but that would be an ABI breakage -- again. VFP
  * would also need to be abstracted.
+ *
+ * For AArch32, we only take care of what is being trapped. Anything
+ * that has to do with init and userspace access has to go via the
+ * 64bit interface.
  */
 
 /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
@@ -166,6 +170,16 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
 	  access_dcsw },
 
+	/* TEECR32_EL1 */
+	{ Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
+	  NULL, reset_val, TEECR32_EL1, 0 },
+	/* TEEHBR32_EL1 */
+	{ Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
+	  NULL, reset_val, TEEHBR32_EL1, 0 },
+	/* DBGVCR32_EL2 */
+	{ Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
+	  NULL, reset_val, DBGVCR32_EL2, 0 },
+
 	/* MPIDR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
 	  NULL, reset_mpidr, MPIDR_EL1 },
@@ -276,6 +290,39 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	/* TPIDRRO_EL0 */
 	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
 	  NULL, reset_unknown, TPIDRRO_EL0 },
+
+	/* DACR32_EL2 */
+	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
+	  NULL, reset_unknown, DACR32_EL2 },
+	/* IFSR32_EL2 */
+	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
+	  NULL, reset_unknown, IFSR32_EL2 },
+	/* FPEXC32_EL2 */
+	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
+	  NULL, reset_val, FPEXC32_EL2, 0x70 },
+};
+
+/* Trapped cp15 registers */
+static const struct sys_reg_desc cp15_regs[] = {
+	/*
+	 * DC{C,I,CI}SW operations:
+	 */
+	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
+	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
+	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake },
+	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake },
 };
 
 /* Target specific emulation tables */
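
To make the role of the new cp15_regs[] table concrete, here is a minimal userspace sketch (not kernel code: the struct names, the lookup() helper and the table contents are simplified stand-ins) of how a trapped 32-bit coprocessor access is matched against such a table by its Op1/CRn/CRm/Op2 encoding, which is what find_reg() does on behalf of emulate_cp15() further down in this patch:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical, trimmed-down stand-ins for the kernel structures. */
struct params { int Op1, CRn, CRm, Op2; };
struct desc   { int Op1, CRn, CRm, Op2; const char *handler; };

/* Same shape as the new cp15_regs[] table: keyed by Op1/CRn/CRm/Op2. */
static const struct desc cp15_demo[] = {
	{ 0, 7,  6, 2, "access_dcsw" },	/* DC ISW */
	{ 0, 7, 10, 2, "access_dcsw" },	/* DC CSW */
	{ 0, 7, 14, 2, "access_dcsw" },	/* DC CISW */
	{ 0, 9, 12, 0, "pm_fake" },	/* PMCR */
};

static const struct desc *lookup(const struct params *p,
				 const struct desc *tbl, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		if (tbl[i].Op1 == p->Op1 && tbl[i].CRn == p->CRn &&
		    tbl[i].CRm == p->CRm && tbl[i].Op2 == p->Op2)
			return &tbl[i];
	}
	return NULL;	/* the kernel would inject an UNDEF here */
}

int main(void)
{
	/* "mcr p15, 0, r0, c7, c10, 2" (DC CSW) as seen by the trap handler */
	struct params p = { .Op1 = 0, .CRn = 7, .CRm = 10, .Op2 = 2 };
	const struct desc *r = lookup(&p, cp15_demo,
				      sizeof(cp15_demo) / sizeof(cp15_demo[0]));

	printf("handler: %s\n", r ? r->handler : "none (inject undefined)");
	return 0;
}

A miss in both the target-specific and the generic table is how unhandled 32-bit accesses end up as an undefined-instruction exception in the guest, mirroring what emulate_cp15() does below.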
@@ -288,13 +335,20 @@ void kvm_register_target_sys_reg_table(unsigned int target,
 }
 
 /* Get specific register table for this target. */
-static const struct sys_reg_desc *get_target_table(unsigned target, size_t *num)
+static const struct sys_reg_desc *get_target_table(unsigned target,
+						   bool mode_is_64,
+						   size_t *num)
 {
 	struct kvm_sys_reg_target_table *table;
 
 	table = target_tables[target];
-	*num = table->table64.num;
-	return table->table64.table;
+	if (mode_is_64) {
+		*num = table->table64.num;
+		return table->table64.table;
+	} else {
+		*num = table->table32.num;
+		return table->table32.table;
+	}
 }
 
 static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
@@ -322,13 +376,126 @@ static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
 	return NULL;
 }
 
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+static void emulate_cp15(struct kvm_vcpu *vcpu,
+			 const struct sys_reg_params *params)
+{
+	size_t num;
+	const struct sys_reg_desc *table, *r;
+
+	table = get_target_table(vcpu->arch.target, false, &num);
+
+	/* Search target-specific then generic table. */
+	r = find_reg(params, table, num);
+	if (!r)
+		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));
+
+	if (likely(r)) {
+		/*
+		 * Not having an accessor means that we have
+		 * configured a trap that we don't know how to
+		 * handle. This certainly qualifies as a gross bug
+		 * that should be fixed right away.
+		 */
+		BUG_ON(!r->access);
+
+		if (likely(r->access(vcpu, params, r))) {
+			/* Skip instruction, since it was emulated */
+			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+			return;
+		}
+		/* If access function fails, it should complain. */
+	}
+
+	kvm_err("Unsupported guest CP15 access at: %08lx\n", *vcpu_pc(vcpu));
+	print_sys_reg_instr(params);
+	kvm_inject_undefined(vcpu);
+}
+
+/**
+ * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct sys_reg_params params;
+	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	int Rt2 = (hsr >> 10) & 0xf;
+
+	params.CRm = (hsr >> 1) & 0xf;
+	params.Rt = (hsr >> 5) & 0xf;
+	params.is_write = ((hsr & 1) == 0);
+
+	params.Op0 = 0;
+	params.Op1 = (hsr >> 16) & 0xf;
+	params.Op2 = 0;
+	params.CRn = 0;
+
+	/*
+	 * Massive hack here. Store Rt2 in the top 32bits so we only
+	 * have one register to deal with. As we use the same trap
+	 * backends between AArch32 and AArch64, we get away with it.
+	 */
+	if (params.is_write) {
+		u64 val = *vcpu_reg(vcpu, params.Rt);
+		val &= 0xffffffff;
+		val |= *vcpu_reg(vcpu, Rt2) << 32;
+		*vcpu_reg(vcpu, params.Rt) = val;
+	}
+
+	emulate_cp15(vcpu, &params);
+
+	/* Do the opposite hack for the read side */
+	if (!params.is_write) {
+		u64 val = *vcpu_reg(vcpu, params.Rt);
+		val >>= 32;
+		*vcpu_reg(vcpu, Rt2) = val;
+	}
+
+	return 1;
+}
+
+/**
+ * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct sys_reg_params params;
+	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+
+	params.CRm = (hsr >> 1) & 0xf;
+	params.Rt = (hsr >> 5) & 0xf;
+	params.is_write = ((hsr & 1) == 0);
+	params.CRn = (hsr >> 10) & 0xf;
+	params.Op0 = 0;
+	params.Op1 = (hsr >> 14) & 0x7;
+	params.Op2 = (hsr >> 17) & 0x7;
+
+	emulate_cp15(vcpu, &params);
+	return 1;
+}
+
 static int emulate_sys_reg(struct kvm_vcpu *vcpu,
 			   const struct sys_reg_params *params)
 {
 	size_t num;
 	const struct sys_reg_desc *table, *r;
 
-	table = get_target_table(vcpu->arch.target, &num);
+	table = get_target_table(vcpu->arch.target, true, &num);
 
 	/* Search target-specific then generic table. */
 	r = find_reg(params, table, num);
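
The "massive hack" in kvm_handle_cp15_64() is easiest to see with concrete numbers. Below is a hedged, standalone sketch (plain userspace C, with a made-up regs[] array standing in for the guest general-purpose registers) of the same packing and unpacking: on a write the second register of the mcrr/mrrc pair is folded into the upper 32 bits of the first, so the shared AArch64 backend only ever handles one value, and on a read the upper half of the emulated result is split back out into Rt2.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Demonstration only; values and the regs[] array are made up. */
int main(void)
{
	uint64_t regs[16] = { 0 };	/* stand-in for the guest GP registers */
	int Rt = 0, Rt2 = 1;
	bool is_write = true;

	regs[Rt]  = 0x11111111;		/* low word of the 64-bit transfer */
	regs[Rt2] = 0x22222222;		/* high word of the 64-bit transfer */

	if (is_write) {
		/* Write path: fold Rt2 into the top 32 bits of Rt. */
		uint64_t val = regs[Rt] & 0xffffffff;
		val |= regs[Rt2] << 32;
		regs[Rt] = val;
	}

	/* ... emulate_cp15() would consume/produce regs[Rt] here ... */

	if (!is_write) {
		/* Read path: split the 64-bit result back into Rt/Rt2. */
		regs[Rt2] = regs[Rt] >> 32;
	}

	printf("packed value: 0x%016llx\n",
	       (unsigned long long)regs[Rt]);	/* 0x2222222211111111 */
	return 0;
}

This works precisely because the trap backends are shared between AArch32 and AArch64, as the in-code comment notes: the 64-bit accessor never needs to know the value originated from two 32-bit registers.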
@@ -438,7 +605,7 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
 	if (!index_to_params(id, &params))
 		return NULL;
 
-	table = get_target_table(vcpu->arch.target, &num);
+	table = get_target_table(vcpu->arch.target, true, &num);
 	r = find_reg(&params, table, num);
 	if (!r)
 		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
@@ -762,7 +929,7 @@ static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
 	size_t num;
 
 	/* We check for duplicates here, to allow arch-specific overrides. */
-	i1 = get_target_table(vcpu->arch.target, &num);
+	i1 = get_target_table(vcpu->arch.target, true, &num);
 	end1 = i1 + num;
 	i2 = sys_reg_descs;
 	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
@@ -874,7 +1041,7 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
 	/* Generic chip reset first (so target could override). */
 	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
 
-	table = get_target_table(vcpu->arch.target, &num);
+	table = get_target_table(vcpu->arch.target, true, &num);
 	reset_sys_reg_descs(vcpu, table, num);
 
 	for (num = 1; num < NR_SYS_REGS; num++)