@@ -942,20 +942,20 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
942
942
/*
 * NOTE(review): this span is a mangled capture of a unified diff (the
 * bare "NNN" lines are the diff viewer's old/new line numbers, "+"/"-"
 * prefixes mark the patch's added/removed lines, and "@@" lines are
 * hunk headers that elide context).  It is annotated in place rather
 * than rewritten, because the hunk boundaries hide parts of the body.
 *
 * kvmppc_set_lpcr() - update the LPCR value cached in @vcpu's virtual
 * core (vc->lpcr), changing only the bits permitted by "mask".
 * @vcpu:           vcpu whose vcore's LPCR is being updated
 * @new_lpcr:       requested new LPCR contents
 * @preserve_top32: when true the update mask is narrowed to the low
 *                  32 bits ("mask &= 0xFFFFFFFF" below), preserving
 *                  the top half of the existing LPCR
 *
 * What this patch does: it widens the scope of kvm->lock so it is held
 * for the whole function and is taken *before* the vc->lock spinlock.
 * In the old code (the "-" lines) mutex_lock(&kvm->lock) was called
 * inside the ILE-changed branch while vc->lock was already held;
 * mutex_lock() may sleep, and sleeping while holding a spinlock is
 * forbidden in the kernel, so the old placement was a bug — presumably
 * the motivation for this commit; confirm against the commit message.
 */
static void kvmppc_set_lpcr (struct kvm_vcpu * vcpu , u64 new_lpcr ,
943
943
bool preserve_top32 )
944
944
{
945
/* Added: "kvm" hoisted to function scope so the early lock below can use it. */
+ struct kvm * kvm = vcpu -> kvm ;
945
946
struct kvmppc_vcore * vc = vcpu -> arch .vcore ;
946
947
u64 mask ;
947
948
949
/* Added: take the sleeping mutex first, then the spinlock — legal lock order. */
+ mutex_lock (& kvm -> lock );
948
950
spin_lock (& vc -> lock );
949
951
/*
950
952
* If ILE (interrupt little-endian) has changed, update the
951
953
* MSR_LE bit in the intr_msr for each vcpu in this vcore.
952
954
*/
953
955
if ((new_lpcr & LPCR_ILE ) != (vc -> lpcr & LPCR_ILE )) {
954
/* Removed: old block-scope declaration of "kvm", now hoisted to the top. */
- struct kvm * kvm = vcpu -> kvm ;
955
956
/* NOTE(review): this inner "vcpu" shadows the function parameter of the
 * same name for the rest of this block. */
struct kvm_vcpu * vcpu ;
956
957
int i ;
957
958
958
/* Removed: old mutex_lock() here ran with vc->lock (a spinlock) already
 * held, i.e. a potentially sleeping call in atomic context. */
- mutex_lock (& kvm -> lock );
959
959
kvm_for_each_vcpu (i , vcpu , kvm ) {
960
960
if (vcpu -> arch .vcore != vc )
961
961
continue ;
/* Hunk boundary: the "if" arm paired with the "else" below (which
 * presumably sets MSR_LE in intr_msr) is elided here — not visible. */
@@ -964,7 +964,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
964
964
else
965
965
vcpu -> arch .intr_msr &= ~MSR_LE ;
966
966
}
967
/* Removed: unlock no longer happens inside the ILE branch (still under
 * the spinlock in the old code). */
- mutex_unlock (& kvm -> lock );
968
967
}
969
968
970
969
/* The comment starting here, and the computation of "mask" that follows
 * it, are elided by the next hunk boundary. */
/*
@@ -981,6 +980,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
981
980
mask &= 0xFFFFFFFF ;
982
981
vc -> lpcr = (vc -> lpcr & ~mask ) | (new_lpcr & mask );
983
982
spin_unlock (& vc -> lock );
983
/* Added: release the mutex last, after the spinlock is dropped —
 * mirror order of acquisition at the top of the function. */
+ mutex_unlock (& kvm -> lock );
984
984
}
985
985
986
986
static int kvmppc_get_one_reg_hv (struct kvm_vcpu * vcpu , u64 id ,
0 commit comments