KVM: Allow arch code to track number of memslot address spaces per VM
Let x86 track the number of address spaces on a per-VM basis so that KVM
can disallow SMM memslots for confidential VMs.  Confidential VMs are
fundamentally incompatible with emulating SMM, which as the name suggests
requires being able to read and write guest memory and register state.

Disallowing SMM will simplify support for guest private memory, as KVM
will not need to worry about tracking memory attributes for multiple
address spaces (SMM is the only "non-default" address space across all
architectures).

Signed-off-by: Sean Christopherson <[email protected]>
Reviewed-by: Paolo Bonzini <[email protected]>
Reviewed-by: Fuad Tabba <[email protected]>
Tested-by: Fuad Tabba <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
sean-jc authored and bonzini committed Nov 14, 2023
1 parent 2333afa commit eed52e4
Showing 8 changed files with 39 additions and 26 deletions.
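
The heart of the change is that the address-space count becomes a per-VM query instead of a compile-time constant. In this commit the x86 helper still returns the fixed maximum of 2; the payoff comes when a later change makes the answer depend on the VM, along the lines of the following sketch (hypothetical; kvm_is_confidential_vm() is an invented placeholder, not an existing kernel predicate):

static inline int kvm_arch_nr_memslot_as_ids(struct kvm *kvm)
{
	/*
	 * Hypothetical follow-up, not part of this commit: confidential
	 * VMs cannot use SMM, so expose only the single default address
	 * space to them.
	 */
	return kvm_is_confidential_vm(kvm) ? 1 : KVM_MAX_NR_ADDRESS_SPACES;
}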
2 changes: 1 addition & 1 deletion arch/powerpc/kvm/book3s_hv.c
@@ -6084,7 +6084,7 @@ static int kvmhv_svm_off(struct kvm *kvm)
 	}
 
 	srcu_idx = srcu_read_lock(&kvm->srcu);
-	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
 		struct kvm_memory_slot *memslot;
 		struct kvm_memslots *slots = __kvm_memslots(kvm, i);
 		int bkt;
8 changes: 7 additions & 1 deletion arch/x86/include/asm/kvm_host.h
@@ -2136,9 +2136,15 @@ enum {
 #define HF_SMM_MASK		(1 << 1)
 #define HF_SMM_INSIDE_NMI_MASK	(1 << 2)
 
-# define KVM_ADDRESS_SPACE_NUM 2
+# define KVM_MAX_NR_ADDRESS_SPACES 2
 # define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
 # define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
+
+static inline int kvm_arch_nr_memslot_as_ids(struct kvm *kvm)
+{
+	return KVM_MAX_NR_ADDRESS_SPACES;
+}
+
 #else
 # define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, 0)
 #endif
2 changes: 1 addition & 1 deletion arch/x86/kvm/debugfs.c
@@ -111,7 +111,7 @@ static int kvm_mmu_rmaps_stat_show(struct seq_file *m, void *v)
 	mutex_lock(&kvm->slots_lock);
 	write_lock(&kvm->mmu_lock);
 
-	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
 		int bkt;
 
 		slots = __kvm_memslots(kvm, i);
6 changes: 3 additions & 3 deletions arch/x86/kvm/mmu/mmu.c
@@ -3763,7 +3763,7 @@ static int mmu_first_shadow_root_alloc(struct kvm *kvm)
 	    kvm_page_track_write_tracking_enabled(kvm))
 		goto out_success;
 
-	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
 		slots = __kvm_memslots(kvm, i);
 		kvm_for_each_memslot(slot, bkt, slots) {
 			/*
@@ -6309,7 +6309,7 @@ static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 	if (!kvm_memslots_have_rmaps(kvm))
 		return flush;
 
-	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
 		slots = __kvm_memslots(kvm, i);
 
 		kvm_for_each_memslot_in_gfn_range(&iter, slots, gfn_start, gfn_end) {
@@ -6806,7 +6806,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
 	 * modifier prior to checking for a wrap of the MMIO generation so
 	 * that a wrap in any address space is detected.
 	 */
-	gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);
+	gen &= ~((u64)kvm_arch_nr_memslot_as_ids(kvm) - 1);
 
 	/*
	 * The very rare case: if the MMIO generation number has wrapped,
2 changes: 1 addition & 1 deletion arch/x86/kvm/x86.c
@@ -12577,7 +12577,7 @@ void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
 		hva = slot->userspace_addr;
 	}
 
-	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
 		struct kvm_userspace_memory_region2 m;
 
 		m.slot = id | (i << 16);
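
The m.slot = id | (i << 16) line above uses KVM's memslot id encoding: the low 16 bits carry the slot number and the bits above carry the address-space id, which is exactly what kvm_reset_dirty_gfn() and the dirty-log paths further down decode with slot >> 16 and (u16)slot. A minimal sketch of the encoding (illustrative helper names, not kernel functions):

static inline u32 memslot_encode(u16 as_id, u16 id)
{
	/* Address space in the high bits, slot number in the low 16. */
	return ((u32)as_id << 16) | id;
}

static inline void memslot_decode(u32 slot, u16 *as_id, u16 *id)
{
	*as_id = slot >> 16;	/* must be < kvm_arch_nr_memslot_as_ids() */
	*id = (u16)slot;	/* must be < KVM_USER_MEM_SLOTS */
}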
17 changes: 11 additions & 6 deletions include/linux/kvm_host.h
@@ -80,8 +80,8 @@
 /* Two fragments for cross MMIO pages. */
 #define KVM_MAX_MMIO_FRAGMENTS 2
 
-#ifndef KVM_ADDRESS_SPACE_NUM
-#define KVM_ADDRESS_SPACE_NUM 1
+#ifndef KVM_MAX_NR_ADDRESS_SPACES
+#define KVM_MAX_NR_ADDRESS_SPACES 1
 #endif
 
 /*
@@ -690,7 +690,12 @@ bool kvm_arch_irqchip_in_kernel(struct kvm *kvm);
 #define KVM_MEM_SLOTS_NUM SHRT_MAX
 #define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS)
 
-#if KVM_ADDRESS_SPACE_NUM == 1
+#if KVM_MAX_NR_ADDRESS_SPACES == 1
+static inline int kvm_arch_nr_memslot_as_ids(struct kvm *kvm)
+{
+	return KVM_MAX_NR_ADDRESS_SPACES;
+}
+
 static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
 {
 	return 0;
@@ -745,9 +750,9 @@ struct kvm {
 	struct mm_struct *mm; /* userspace tied to this vm */
 	unsigned long nr_memslot_pages;
 	/* The two memslot sets - active and inactive (per address space) */
-	struct kvm_memslots __memslots[KVM_ADDRESS_SPACE_NUM][2];
+	struct kvm_memslots __memslots[KVM_MAX_NR_ADDRESS_SPACES][2];
 	/* The current active memslot set for each address space */
-	struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
+	struct kvm_memslots __rcu *memslots[KVM_MAX_NR_ADDRESS_SPACES];
 	struct xarray vcpu_array;
 	/*
	 * Protected by slots_lock, but can be read outside if an
@@ -1017,7 +1022,7 @@ void kvm_put_kvm_no_destroy(struct kvm *kvm);
 
 static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
 {
-	as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
+	as_id = array_index_nospec(as_id, KVM_MAX_NR_ADDRESS_SPACES);
 	return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
 			lockdep_is_held(&kvm->slots_lock) ||
			!refcount_read(&kvm->users_count));
2 changes: 1 addition & 1 deletion virt/kvm/dirty_ring.c
@@ -58,7 +58,7 @@ static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
 	as_id = slot >> 16;
 	id = (u16)slot;
 
-	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
+	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
 		return;
 
 	memslot = id_to_memslot(__kvm_memslots(kvm, as_id), id);
26 changes: 14 additions & 12 deletions virt/kvm/kvm_main.c
@@ -615,7 +615,7 @@ static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
 
 	idx = srcu_read_lock(&kvm->srcu);
 
-	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
 		struct interval_tree_node *node;
 
 		slots = __kvm_memslots(kvm, i);
@@ -1241,7 +1241,7 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
 		goto out_err_no_irq_srcu;
 
 	refcount_set(&kvm->users_count, 1);
-	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
 		for (j = 0; j < 2; j++) {
 			slots = &kvm->__memslots[i][j];
 
@@ -1391,7 +1391,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
 #endif
 	kvm_arch_destroy_vm(kvm);
 	kvm_destroy_devices(kvm);
-	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
 		kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
 		kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
 	}
@@ -1682,7 +1682,7 @@ static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
 	 * space 0 will use generations 0, 2, 4, ... while address space 1 will
	 * use generations 1, 3, 5, ...
	 */
-	gen += KVM_ADDRESS_SPACE_NUM;
+	gen += kvm_arch_nr_memslot_as_ids(kvm);
 
 	kvm_arch_memslots_updated(kvm, gen);
 
@@ -2052,7 +2052,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	    (mem->guest_memfd_offset & (PAGE_SIZE - 1) ||
	     mem->guest_memfd_offset + mem->memory_size < mem->guest_memfd_offset))
		return -EINVAL;
-	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
+	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_MEM_SLOTS_NUM)
		return -EINVAL;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		return -EINVAL;
@@ -2188,7 +2188,7 @@ int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
 
 	as_id = log->slot >> 16;
	id = (u16)log->slot;
-	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
+	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;
 
	slots = __kvm_memslots(kvm, as_id);
@@ -2250,7 +2250,7 @@ static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
 
 	as_id = log->slot >> 16;
	id = (u16)log->slot;
-	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
+	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;
 
	slots = __kvm_memslots(kvm, as_id);
@@ -2362,7 +2362,7 @@ static int kvm_clear_dirty_log_protect(struct kvm *kvm,
 
 	as_id = log->slot >> 16;
	id = (u16)log->slot;
-	if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
+	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
		return -EINVAL;
 
	if (log->first_page & 63)
@@ -2493,7 +2493,7 @@ static __always_inline void kvm_handle_gfn_range(struct kvm *kvm,
 	gfn_range.arg = range->arg;
	gfn_range.may_block = range->may_block;
 
-	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
		slots = __kvm_memslots(kvm, i);
 
		kvm_for_each_memslot_in_gfn_range(&iter, slots, range->start, range->end) {
@@ -4848,9 +4848,11 @@ static int kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
 	case KVM_CAP_IRQ_ROUTING:
		return KVM_MAX_IRQ_ROUTES;
 #endif
-#if KVM_ADDRESS_SPACE_NUM > 1
+#if KVM_MAX_NR_ADDRESS_SPACES > 1
	case KVM_CAP_MULTI_ADDRESS_SPACE:
-		return KVM_ADDRESS_SPACE_NUM;
+		if (kvm)
+			return kvm_arch_nr_memslot_as_ids(kvm);
+		return KVM_MAX_NR_ADDRESS_SPACES;
 #endif
	case KVM_CAP_NR_MEMSLOTS:
		return KVM_USER_MEM_SLOTS;
@@ -4958,7 +4960,7 @@ bool kvm_are_all_memslots_empty(struct kvm *kvm)
 
 	lockdep_assert_held(&kvm->slots_lock);
 
-	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+	for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
		if (!kvm_memslots_empty(__kvm_memslots(kvm, i)))
			return false;
	}
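
A user-visible consequence of the KVM_CAP_MULTI_ADDRESS_SPACE change above: when the capability is queried on a VM file descriptor, KVM now reports that VM's own address-space count, while a query on the system fd still reports the compile-time maximum. A minimal userspace sketch (assumes a kernel with KVM_CAP_CHECK_EXTENSION_VM, which allows KVM_CHECK_EXTENSION on a VM fd; error handling omitted):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);

	/* System-wide: the compile-time maximum (2 on x86 with SMM). */
	int max = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MULTI_ADDRESS_SPACE);

	/* Per-VM: what kvm_arch_nr_memslot_as_ids() returns for this VM. */
	int nr = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MULTI_ADDRESS_SPACE);

	printf("max address spaces: %d, this VM: %d\n", max, nr);
	return 0;
}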
