KVM: mmu: Add slots_arch_lock for memslot arch fields
Add a new lock to protect the arch-specific fields of memslots if they
need to be modified in a kvm->srcu read critical section. A future
commit will use this lock to lazily allocate memslot rmaps for x86.

Signed-off-by: Ben Gardon <[email protected]>
Message-Id: <[email protected]>
[Add Documentation/ hunk. - Paolo]
Signed-off-by: Paolo Bonzini <[email protected]>
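
To illustrate the pattern this lock is meant to enable, here is a minimal sketch (not part of this commit) of arch code that populates an arch-specific memslot field from inside a kvm->srcu read-side critical section. The function name kvm_arch_ensure_rmap() and the helper alloc_memslot_rmap() are placeholders standing in for the lazy rmap allocation the changelog says a later commit will add.

/*
 * Hypothetical sketch only: kvm_arch_ensure_rmap() and alloc_memslot_rmap()
 * are placeholder names, not functions added by this commit.  The caller is
 * assumed to run inside srcu_read_lock(&kvm->srcu).
 */
static int kvm_arch_ensure_rmap(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	int r = 0;

	mutex_lock(&kvm->slots_arch_lock);
	if (!slot->arch.rmap[0])		/* arch-specific field (x86) */
		r = alloc_memslot_rmap(slot);
	mutex_unlock(&kvm->slots_arch_lock);

	return r;
}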
Ben Gardon authored and bonzini committed Jun 17, 2021
1 parent ddc12f2 commit b10a038
Showing 3 changed files with 62 additions and 6 deletions.
5 changes: 5 additions & 0 deletions Documentation/virt/kvm/locking.rst
@@ -16,6 +16,11 @@ The acquisition orders for mutexes are as follows:
- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
them together is quite rare.

- Unlike kvm->slots_lock, kvm->slots_arch_lock is released before
synchronize_srcu(&kvm->srcu). Therefore kvm->slots_arch_lock
can be taken inside a kvm->srcu read-side critical section,
while kvm->slots_lock cannot.

On x86:

- vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock
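
The ordering rule documented in the hunk above can be illustrated with a simplified sketch (not code from the tree): the writer drops slots_arch_lock before it waits for srcu readers, so a reader inside a kvm->srcu read-side critical section can safely sleep on it, whereas sleeping on slots_lock there would deadlock against a writer that holds slots_lock across synchronize_srcu().

/* Simplified illustration; function names and structure are assumptions. */
static void memslot_writer(struct kvm *kvm, int as_id, struct kvm_memslots *slots)
{
	mutex_lock(&kvm->slots_arch_lock);
	rcu_assign_pointer(kvm->memslots[as_id], slots);
	mutex_unlock(&kvm->slots_arch_lock);	/* released before waiting */
	synchronize_srcu(&kvm->srcu);		/* waits for all srcu readers */
}

static void srcu_reader(struct kvm *kvm)
{
	int idx = srcu_read_lock(&kvm->srcu);

	/*
	 * Safe: a writer sleeping in synchronize_srcu() has already dropped
	 * slots_arch_lock.  Taking kvm->slots_lock here instead could block
	 * forever on a writer that still holds it while waiting for this
	 * read-side critical section to end.
	 */
	mutex_lock(&kvm->slots_arch_lock);
	/* ... modify arch-specific memslot fields ... */
	mutex_unlock(&kvm->slots_arch_lock);

	srcu_read_unlock(&kvm->srcu, idx);
}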
9 changes: 9 additions & 0 deletions include/linux/kvm_host.h
@@ -523,6 +523,15 @@ struct kvm {
#endif /* KVM_HAVE_MMU_RWLOCK */

struct mutex slots_lock;

/*
* Protects the arch-specific fields of struct kvm_memory_slots in
* use by the VM. To be used under the slots_lock (above) or in a
* kvm->srcu critical section where acquiring the slots_lock would
* lead to deadlock with the synchronize_srcu in
* install_new_memslots.
*/
struct mutex slots_arch_lock;
struct mm_struct *mm; /* userspace tied to this vm */
struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
54 changes: 48 additions & 6 deletions virt/kvm/kvm_main.c
@@ -909,6 +909,7 @@ static struct kvm *kvm_create_vm(unsigned long type)
mutex_init(&kvm->lock);
mutex_init(&kvm->irq_lock);
mutex_init(&kvm->slots_lock);
mutex_init(&kvm->slots_arch_lock);
INIT_LIST_HEAD(&kvm->devices);

BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
@@ -1281,6 +1282,14 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;

rcu_assign_pointer(kvm->memslots[as_id], slots);

/*
* Acquired in kvm_set_memslot. Must be released before synchronize
* SRCU below in order to avoid deadlock with another thread
* acquiring the slots_arch_lock in an srcu critical section.
*/
mutex_unlock(&kvm->slots_arch_lock);

synchronize_srcu_expedited(&kvm->srcu);

/*
@@ -1352,9 +1361,27 @@ static int kvm_set_memslot(struct kvm *kvm,
struct kvm_memslots *slots;
int r;

/*
* Released in install_new_memslots.
*
* Must be held from before the current memslots are copied until
* after the new memslots are installed with rcu_assign_pointer,
* then released before the synchronize srcu in install_new_memslots.
*
* When modifying memslots outside of the slots_lock, must be held
* before reading the pointer to the current memslots until after all
* changes to those memslots are complete.
*
* These rules ensure that installing new memslots does not lose
* changes made to the previous memslots.
*/
mutex_lock(&kvm->slots_arch_lock);

slots = kvm_dup_memslots(__kvm_memslots(kvm, as_id), change);
-	if (!slots)
+	if (!slots) {
+		mutex_unlock(&kvm->slots_arch_lock);
 		return -ENOMEM;
+	}

if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
/*
@@ -1365,10 +1392,9 @@ static int kvm_set_memslot(struct kvm *kvm,
slot->flags |= KVM_MEMSLOT_INVALID;

 	/*
-	 * We can re-use the old memslots, the only difference from the
-	 * newly installed memslots is the invalid flag, which will get
-	 * dropped by update_memslots anyway. We'll also revert to the
-	 * old memslots if preparing the new memory region fails.
+	 * We can re-use the memory from the old memslots.
+	 * It will be overwritten with a copy of the new memslots
+	 * after reacquiring the slots_arch_lock below.
 	 */
slots = install_new_memslots(kvm, as_id, slots);

@@ -1380,6 +1406,17 @@ static int kvm_set_memslot(struct kvm *kvm,
* - kvm_is_visible_gfn (mmu_check_root)
*/
kvm_arch_flush_shadow_memslot(kvm, slot);

/* Released in install_new_memslots. */
mutex_lock(&kvm->slots_arch_lock);

/*
* The arch-specific fields of the memslots could have changed
* between releasing the slots_arch_lock in
* install_new_memslots and here, so get a fresh copy of the
* slots.
*/
kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id));
}

r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
@@ -1395,8 +1432,13 @@ static int kvm_set_memslot(struct kvm *kvm,
return 0;

out_slots:
-	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
+	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
+		slot = id_to_memslot(slots, old->id);
+		slot->flags &= ~KVM_MEMSLOT_INVALID;
 		slots = install_new_memslots(kvm, as_id, slots);
+	} else {
+		mutex_unlock(&kvm->slots_arch_lock);
+	}
kvfree(slots);
return r;
}
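
As a reading aid (not content from the commit itself), the lock pairing that kvm_set_memslot ends up with for a DELETE or MOVE can be condensed into the following outline:

/*
 * Outline of kvm_set_memslot() for KVM_MR_DELETE / KVM_MR_MOVE after this
 * patch (error handling condensed):
 *
 *   mutex_lock(&kvm->slots_arch_lock)
 *   kvm_dup_memslots()                copy the current memslots
 *   install_new_memslots()            publish the copy with the slot marked
 *                                     KVM_MEMSLOT_INVALID; drops
 *                                     slots_arch_lock, then synchronize_srcu
 *   kvm_arch_flush_shadow_memslot()
 *   mutex_lock(&kvm->slots_arch_lock) re-taken before touching slots again
 *   kvm_copy_memslots()               refresh from the live memslots, since
 *                                     arch fields may have changed meanwhile
 *   kvm_arch_prepare_memory_region()
 *   install_new_memslots()            publish the final memslots; drops
 *                                     slots_arch_lock again
 *
 * On failure, the error path either reinstalls the slot with
 * KVM_MEMSLOT_INVALID cleared (DELETE/MOVE) or simply drops
 * slots_arch_lock, so the lock is never left held.
 */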