@@ -451,7 +451,6 @@ static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
-#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
 typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_info *info);
 
 static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
@@ -541,50 +540,24 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn
 
 	return ret;
 }
-#endif /* KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS */
-
 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 					struct mm_struct *mm,
 					unsigned long address,
 					pte_t pte)
 {
-#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
-	kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
-#else
-	struct kvm *kvm = mmu_notifier_to_kvm(mn);
-	int idx;
-
 	trace_kvm_set_spte_hva(address);
 
-	idx = srcu_read_lock(&kvm->srcu);
-
-	KVM_MMU_LOCK(kvm);
-
-	kvm->mmu_notifier_seq++;
-
-	if (kvm_set_spte_hva(kvm, address, pte))
-		kvm_flush_remote_tlbs(kvm);
-
-	KVM_MMU_UNLOCK(kvm);
-	srcu_read_unlock(&kvm->srcu, idx);
-#endif
+	kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
 }
 
 static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 					const struct mmu_notifier_range *range)
 {
-#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
 	bool blockable = mmu_notifier_range_blockable(range);
-#endif
 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
-#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
-	int need_tlb_flush = 0, idx;
 
 	trace_kvm_unmap_hva_range(range->start, range->end);
 
-	idx = srcu_read_lock(&kvm->srcu);
-#endif
-
 	KVM_MMU_LOCK(kvm);
 	/*
 	 * The count increase must become visible at unlock time as no
@@ -611,21 +584,10 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 			max(kvm->mmu_notifier_range_end, range->end);
 	}
 
-#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
 	__kvm_handle_hva_range(kvm, range->start, range->end, __pte(0),
 			       kvm_unmap_gfn_range, true, blockable);
-#else
-	need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end,
-					     range->flags);
-	/* we've to flush the tlb before the pages can be freed */
-	if (need_tlb_flush || kvm->tlbs_dirty)
-		kvm_flush_remote_tlbs(kvm);
-#endif
 
 	KVM_MMU_UNLOCK(kvm);
-#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
-	srcu_read_unlock(&kvm->srcu, idx);
-#endif
 
 	return 0;
 }
@@ -659,42 +621,18 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
 					      unsigned long start,
 					      unsigned long end)
 {
-#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
-	return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn);
-#else
-	struct kvm *kvm = mmu_notifier_to_kvm(mn);
-	int young, idx;
-
 	trace_kvm_age_hva(start, end);
 
-	idx = srcu_read_lock(&kvm->srcu);
-	KVM_MMU_LOCK(kvm);
-
-	young = kvm_age_hva(kvm, start, end);
-	if (young)
-		kvm_flush_remote_tlbs(kvm);
-
-	KVM_MMU_UNLOCK(kvm);
-	srcu_read_unlock(&kvm->srcu, idx);
-
-	return young;
-#endif
+	return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn);
 }
 
 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
 					struct mm_struct *mm,
 					unsigned long start,
 					unsigned long end)
 {
-#ifndef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
-	struct kvm *kvm = mmu_notifier_to_kvm(mn);
-	int young, idx;
-
 	trace_kvm_age_hva(start, end);
 
-	idx = srcu_read_lock(&kvm->srcu);
-	KVM_MMU_LOCK(kvm);
-#endif
 	/*
 	 * Even though we do not flush TLB, this will still adversely
 	 * affect performance on pre-Haswell Intel EPT, where there is
@@ -708,38 +646,17 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
 	 * cadence. If we find this inaccurate, we might come up with a
 	 * more sophisticated heuristic later.
 	 */
-#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
 	return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
-#else
-	young = kvm_age_hva(kvm, start, end);
-	KVM_MMU_UNLOCK(kvm);
-	srcu_read_unlock(&kvm->srcu, idx);
-
-	return young;
-#endif
 }
 
 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
 				       struct mm_struct *mm,
 				       unsigned long address)
 {
-#ifdef KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS
-	return kvm_handle_hva_range_no_flush(mn, address, address + 1,
-					     kvm_test_age_gfn);
-#else
-	struct kvm *kvm = mmu_notifier_to_kvm(mn);
-	int young, idx;
-
 	trace_kvm_test_age_hva(address);
 
-	idx = srcu_read_lock(&kvm->srcu);
-	KVM_MMU_LOCK(kvm);
-	young = kvm_test_age_hva(kvm, address);
-	KVM_MMU_UNLOCK(kvm);
-	srcu_read_unlock(&kvm->srcu, idx);
-
-	return young;
-#endif
+	return kvm_handle_hva_range_no_flush(mn, address, address + 1,
+					     kvm_test_age_gfn);
 }
 
 static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
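For context, here is a minimal sketch (not part of the diff above) of what an architecture-side callback matching the new hva_handler_t signature might look like. The struct kvm_gfn_info field names (slot, start, end) and the helper arch_gfn_is_accessed() are assumptions made for illustration; the real per-arch implementations live outside this file.

/*
 * Illustrative sketch only, not part of the patch: a hypothetical
 * arch callback with the hva_handler_t shape used by the generic
 * walkers above.  Field names of struct kvm_gfn_info and the helper
 * arch_gfn_is_accessed() are assumptions, not kernel APIs.
 */
static bool example_test_age_gfn(struct kvm *kvm, struct kvm_gfn_info *info)
{
	gfn_t gfn;

	/* Walk the gfn range the generic code derived from the hva range. */
	for (gfn = info->start; gfn < info->end; gfn++) {
		if (arch_gfn_is_accessed(kvm, info->slot, gfn))
			return true;	/* reported back through kvm_handle_hva_range_no_flush() */
	}

	return false;	/* nothing young; no TLB flush is requested on this path */
}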