KVM: SVM: Hide SEV migration lockdep goo behind CONFIG_PROVE_LOCKING
author    Sean Christopherson <seanjc@google.com>
          Mon, 13 Jun 2022 21:42:37 +0000 (21:42 +0000)
committer Paolo Bonzini <pbonzini@redhat.com>
          Wed, 15 Jun 2022 12:07:22 +0000 (08:07 -0400)
Wrap the manipulation of @role and the manual mutex_{release,acquire}()
invocations in CONFIG_PROVE_LOCKING=y to squash a clang-15 warning.  When
building with -Wunused-but-set-parameter and CONFIG_DEBUG_LOCK_ALLOC=n,
clang-15 sees there's no usage of @role in mutex_lock_killable_nested()
and yells.  PROVE_LOCKING selects DEBUG_LOCK_ALLOC, and the only reason
KVM manipulates @role is to make PROVE_LOCKING happy.
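
For context, the warning exists because the _nested() variant loses its
subclass argument entirely when lockdep is out of the picture.
Paraphrased (and simplified) from include/linux/mutex.h:

    #ifdef CONFIG_DEBUG_LOCK_ALLOC
    extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
                                                       unsigned int subclass);
    #else
    /* Subclass is dropped, so a write-only @role trips the warning. */
    #define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
    #endif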

To avoid true ugliness, use "i" and "j" to detect the first pass in the
loops; the "idx" field that's used by kvm_for_each_vcpu() is guaranteed
to be '0' on the first pass as it's simply the first entry in the vCPUs
XArray, which is fully KVM controlled.  kvm_for_each_vcpu() passes '0'
for xa_for_each_range()'s "start", and xa_for_each_range() will not enter
the loop if there's no entry at '0'.
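
The "idx is '0' on the first pass" guarantee can be read straight off the
iterator; roughly, from include/linux/kvm_host.h:

    #define kvm_for_each_vcpu(idx, vcpup, kvm)                          \
            xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0,          \
                              atomic_read(&kvm->online_vcpus) - 1)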

Fixes: 54d62b683926 ("KVM: SEV: Mark nested locking of vcpu->lock")
Reported-by: kernel test robot <lkp@intel.com>
Cc: Peter Gonda <pgonda@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220613214237.2538266-1-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/svm/sev.c

index 51fd985cf21dde4c51885c743d3080d3a3891694..309bcdb2f929e684c3deba0808229cf4c6964d5d 100644
@@ -1606,38 +1606,35 @@ static int sev_lock_vcpus_for_migration(struct kvm *kvm,
 {
        struct kvm_vcpu *vcpu;
        unsigned long i, j;
-       bool first = true;
 
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (mutex_lock_killable_nested(&vcpu->mutex, role))
                        goto out_unlock;
 
-               if (first) {
+#ifdef CONFIG_PROVE_LOCKING
+               if (!i)
                        /*
                         * Reset the role to one that avoids colliding with
                         * the role used for the first vcpu mutex.
                         */
                        role = SEV_NR_MIGRATION_ROLES;
-                       first = false;
-               } else {
+               else
                        mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
-               }
+#endif
        }
 
        return 0;
 
 out_unlock:
 
-       first = true;
        kvm_for_each_vcpu(j, vcpu, kvm) {
                if (i == j)
                        break;
 
-               if (first)
-                       first = false;
-               else
+#ifdef CONFIG_PROVE_LOCKING
+               if (j)
                        mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
-
+#endif
 
                mutex_unlock(&vcpu->mutex);
        }
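
For readability, here is the locking loop as it reads with the hunk applied
(pieced together from the diff above; the unwind loop under out_unlock:
follows the same pattern with "j" in place of "i"):

    kvm_for_each_vcpu(i, vcpu, kvm) {
            if (mutex_lock_killable_nested(&vcpu->mutex, role))
                    goto out_unlock;

    #ifdef CONFIG_PROVE_LOCKING
            if (!i)
                    /*
                     * Reset the role to one that avoids colliding with
                     * the role used for the first vcpu mutex.
                     */
                    role = SEV_NR_MIGRATION_ROLES;
            else
                    mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
    #endif
    }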