Skip to content

Commit

Permalink
KVM: sort memslots by its size and use line search
Browse files Browse the repository at this point in the history
Sort memslots based on their size and use linear search to find them, so that
the larger memslots get a better fit

The idea is from Avi

Signed-off-by: Xiao Guangrong <[email protected]>
Signed-off-by: Avi Kivity <[email protected]>
  • Loading branch information
Xiao Guangrong authored and avikivity committed Dec 27, 2011
1 parent 28a3754 commit bf3e05b
Show file tree
Hide file tree
Showing 2 changed files with 72 additions and 25 deletions.
18 changes: 15 additions & 3 deletions include/linux/kvm_host.h
Original file line number Diff line number Diff line change
Expand Up @@ -231,8 +231,12 @@ struct kvm_irq_routing_table {};
#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

/*
 * Note:
 * memslots are not sorted by id anymore, please use id_to_memslot()
 * to get the memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
};
Expand Down Expand Up @@ -310,7 +314,8 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)

/*
 * Iterate over all populated memslots.  The array is kept sorted by
 * descending size, so empty slots (npages == 0) cluster at the tail
 * and the walk stops at the first one.
 */
#define kvm_for_each_memslot(memslot, slots) \
	for (memslot = &slots->memslots[0]; \
	     memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
		memslot++)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
Expand All @@ -336,7 +341,14 @@ static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
/*
 * Look up a memslot by its id.
 *
 * The array is sorted by size, not by id, so a linear scan is needed;
 * each slot's id field is seeded at VM creation and is stable across
 * re-sorts.  Returns NULL (after warning) if the id is not found,
 * which indicates a bug since every id is pre-assigned.
 */
static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
	int i;

	for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
		if (slots->memslots[i].id == id)
			return &slots->memslots[i];

	WARN_ON(1);
	return NULL;
}

#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
Expand Down
79 changes: 57 additions & 22 deletions virt/kvm/kvm_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -440,6 +440,15 @@ static int kvm_init_mmu_notifier(struct kvm *kvm)

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

/*
 * Seed every entry of the freshly zeroed memslot array with its id, so
 * that id_to_memslot() can still locate a slot after the array has been
 * re-sorted by size.
 */
static void kvm_init_memslots_id(struct kvm *kvm)
{
	struct kvm_memslots *slots = kvm->memslots;
	int idx = 0;

	while (idx < KVM_MEM_SLOTS_NUM) {
		slots->memslots[idx].id = idx;
		idx++;
	}
}

static struct kvm *kvm_create_vm(void)
{
int r, i;
Expand All @@ -465,6 +474,7 @@ static struct kvm *kvm_create_vm(void)
kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
if (!kvm->memslots)
goto out_err_nosrcu;
kvm_init_memslots_id(kvm);
if (init_srcu_struct(&kvm->srcu))
goto out_err_nosrcu;
for (i = 0; i < KVM_NR_BUSES; i++) {
Expand Down Expand Up @@ -630,15 +640,54 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
}
#endif /* !CONFIG_S390 */

/*
 * Find the memslot containing @gfn, or NULL if no slot covers it.
 * Walks the size-sorted slot array, so larger slots are tried first.
 */
static struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	kvm_for_each_memslot(slot, slots) {
		if (gfn < slot->base_gfn)
			continue;
		if (gfn < slot->base_gfn + slot->npages)
			return slot;
	}

	return NULL;
}

static int cmp_memslot(const void *slot1, const void *slot2)
{
struct kvm_memory_slot *s1, *s2;

s1 = (struct kvm_memory_slot *)slot1;
s2 = (struct kvm_memory_slot *)slot2;

if (s1->npages < s2->npages)
return 1;
if (s1->npages > s2->npages)
return -1;

return 0;
}

/*
 * Sort the memslots based on their size, so the larger slots
 * will get a better fit (they are searched first by search_memslots()).
 */
static void sort_memslots(struct kvm_memslots *slots)
{
	/* sizeof the element, not the type name, stays correct if the
	 * array's element type ever changes. */
	sort(slots->memslots, KVM_MEM_SLOTS_NUM,
	      sizeof(slots->memslots[0]), cmp_memslot, NULL);
}

void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new)
{
if (new) {
int id = new->id;
struct kvm_memory_slot *old = id_to_memslot(slots, id);
unsigned long npages = old->npages;

*old = *new;
if (id >= slots->nmemslots)
slots->nmemslots = id + 1;
if (new->npages != npages)
sort_memslots(slots);
}

slots->generation++;
Expand Down Expand Up @@ -980,14 +1029,7 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva);
/*
 * Find the memslot containing @gfn, or NULL if none covers it.
 * Thin wrapper kept for existing callers; the lookup itself lives in
 * search_memslots().
 */
static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
						gfn_t gfn)
{
	return search_memslots(slots, gfn);
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
Expand All @@ -998,20 +1040,13 @@ EXPORT_SYMBOL_GPL(gfn_to_memslot);

/*
 * Return 1 if @gfn lies in a userspace-visible, valid memslot,
 * 0 otherwise.  Private slots (id >= KVM_MEMORY_SLOTS) and slots
 * flagged KVM_MEMSLOT_INVALID do not count as visible.
 */
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

	if (!memslot || memslot->id >= KVM_MEMORY_SLOTS ||
	      memslot->flags & KVM_MEMSLOT_INVALID)
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

Expand Down

0 comments on commit bf3e05b

Please sign in to comment.