Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "One NULL pointer dereference, and two fixes for regressions introduced
  during the merge window.

  The rest are fixes for MIPS, s390 and nested VMX"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  kvm: x86: Check memopp before dereference (CVE-2016-8630)
  kvm: nVMX: VMCLEAR an active shadow VMCS after last use
  KVM: x86: drop TSC offsetting kvm_x86_ops to fix KVM_GET/SET_CLOCK
  KVM: x86: fix wbinvd_dirty_mask use-after-free
  kvm/x86: Show WRMSR data is in hex
  kvm: nVMX: Fix kernel panics induced by illegal INVEPT/INVVPID types
  KVM: document lock orders
  KVM: fix OOPS on flush_work
  KVM: s390: Fix STHYI buffer alignment for diag224
  KVM: MIPS: Precalculate MMIO load resume PC
  KVM: MIPS: Make ERET handle ERL before EXL
  KVM: MIPS: Fix lazy user ASID regenerate for SMP
torvalds committed Nov 4, 2016
2 parents 34c510b + d9092f5 commit 66cecb6
Showing 13 changed files with 95 additions and 106 deletions.
12 changes: 11 additions & 1 deletion Documentation/virtual/kvm/locking.txt
@@ -4,7 +4,17 @@ KVM Lock Overview
 1. Acquisition Orders
 ---------------------
 
-(to be written)
+The acquisition orders for mutexes are as follows:
+
+- kvm->lock is taken outside vcpu->mutex
+
+- kvm->lock is taken outside kvm->slots_lock and kvm->irq_lock
+
+- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
+  them together is quite rare.
+
+For spinlocks, kvm_lock is taken outside kvm->mmu_lock. Everything
+else is a leaf: no other lock is taken inside the critical sections.
 
 2: Exception
 ------------
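A quick illustration of what the documented mutex order means in practice: a minimal sketch, not part of this commit, assuming only the types from <linux/kvm_host.h> and a hypothetical helper name.

#include <linux/kvm_host.h>

/* Hypothetical helper: when both locks are needed, the documented
 * order says kvm->lock is taken outside (i.e. before) vcpu->mutex. */
static void example_update_vm_and_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
	mutex_lock(&kvm->lock);		/* outer lock first */
	mutex_lock(&vcpu->mutex);	/* then the per-vCPU mutex */

	/* ... touch state protected by both locks ... */

	mutex_unlock(&vcpu->mutex);	/* release in reverse order */
	mutex_unlock(&kvm->lock);
}

Taking them in the opposite order on any one path is exactly the kind of inversion lockdep's circular-dependency checking would flag.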
7 changes: 4 additions & 3 deletions arch/mips/include/asm/kvm_host.h
@@ -293,7 +293,10 @@ struct kvm_vcpu_arch {
 	/* Host KSEG0 address of the EI/DI offset */
 	void *kseg0_commpage;
 
-	u32 io_gpr;		/* GPR used as IO source/target */
+	/* Resume PC after MMIO completion */
+	unsigned long io_pc;
+	/* GPR used as IO source/target */
+	u32 io_gpr;
 
 	struct hrtimer comparecount_timer;
 	/* Count timer control KVM register */
@@ -315,8 +318,6 @@ struct kvm_vcpu_arch {
 	/* Bitmask of pending exceptions to be cleared */
 	unsigned long pending_exceptions_clr;
 
-	u32 pending_load_cause;
-
 	/* Save/Restore the entryhi register when are are preempted/scheduled back in */
 	unsigned long preempt_entryhi;
 
32 changes: 19 additions & 13 deletions arch/mips/kvm/emulate.c
@@ -790,15 +790,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
 	enum emulation_result er = EMULATE_DONE;
 
-	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+	if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+		kvm_clear_c0_guest_status(cop0, ST0_ERL);
+		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+	} else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
 		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
 			  kvm_read_c0_guest_epc(cop0));
 		kvm_clear_c0_guest_status(cop0, ST0_EXL);
 		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
 
-	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
-		kvm_clear_c0_guest_status(cop0, ST0_ERL);
-		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
 	} else {
 		kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
 			vcpu->arch.pc);
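For context, the architectural rule this reordering restores, sketched here from the MIPS privileged architecture rather than taken from the patch: when Status.ERL and Status.EXL are both set, ERL has priority, so ERET must return to ErrorEPC and clear ERL.

/* Sketch of ERET target selection (ST0_ERL/ST0_EXL as in asm/mipsregs.h).
 * Testing EXL first, as the old code did, sends a guest that has both
 * bits set (ERL is set on reset, NMI and cache error) to the wrong PC. */
static unsigned long eret_target(unsigned long status,
				 unsigned long errorepc, unsigned long epc)
{
	if (status & ST0_ERL)		/* error level has priority */
		return errorepc;
	if (status & ST0_EXL)
		return epc;
	return 0;			/* neither set: ERET is unpredictable */
}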
@@ -1528,13 +1528,25 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
 					    struct kvm_vcpu *vcpu)
 {
 	enum emulation_result er = EMULATE_DO_MMIO;
+	unsigned long curr_pc;
 	u32 op, rt;
 	u32 bytes;
 
 	rt = inst.i_format.rt;
 	op = inst.i_format.opcode;
 
-	vcpu->arch.pending_load_cause = cause;
+	/*
+	 * Find the resume PC now while we have safe and easy access to the
+	 * prior branch instruction, and save it for
+	 * kvm_mips_complete_mmio_load() to restore later.
+	 */
+	curr_pc = vcpu->arch.pc;
+	er = update_pc(vcpu, cause);
+	if (er == EMULATE_FAIL)
+		return er;
+	vcpu->arch.io_pc = vcpu->arch.pc;
+	vcpu->arch.pc = curr_pc;
+
 	vcpu->arch.io_gpr = rt;
 
 	switch (op) {
@@ -2494,9 +2506,8 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
 		goto done;
 	}
 
-	er = update_pc(vcpu, vcpu->arch.pending_load_cause);
-	if (er == EMULATE_FAIL)
-		return er;
+	/* Restore saved resume PC */
+	vcpu->arch.pc = vcpu->arch.io_pc;
 
 	switch (run->mmio.len) {
 	case 4:
@@ -2518,11 +2529,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
 		break;
 	}
 
-	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
-		kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
-			  vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
-			  vcpu->mmio_needed);
-
 done:
 	return er;
 }
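The two halves of this emulate.c change form a common pattern: precompute a value while its inputs are still trustworthy, stash it, and consume it later instead of recomputing from possibly stale state. Condensed below into hypothetical helpers (update_pc() is the real function; it advances the PC, following any branch whose delay slot faulted, based on 'cause'):

static int mmio_load_start(struct kvm_vcpu *vcpu, u32 cause)
{
	unsigned long curr_pc = vcpu->arch.pc;

	/* Compute the resume PC now, while the prior branch
	 * instruction is still safely readable. */
	if (update_pc(vcpu, cause) == EMULATE_FAIL)
		return -EFAULT;
	vcpu->arch.io_pc = vcpu->arch.pc;	/* stash resume PC */
	vcpu->arch.pc = curr_pc;		/* don't advance yet */
	return 0;
}

static void mmio_load_complete(struct kvm_vcpu *vcpu)
{
	/* Consume the stashed PC instead of re-running update_pc()
	 * against state that may have changed while the MMIO was
	 * in flight. */
	vcpu->arch.pc = vcpu->arch.io_pc;
}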
5 changes: 4 additions & 1 deletion arch/mips/kvm/mips.c
@@ -426,7 +426,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
 {
 	struct mips_coproc *cop0 = vcpu->arch.cop0;
-	int cpu = smp_processor_id();
+	int i, cpu = smp_processor_id();
 	unsigned int gasid;
 
 	/*
@@ -442,6 +442,9 @@ static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
 					vcpu);
 		vcpu->arch.guest_user_asid[cpu] =
 			vcpu->arch.guest_user_mm.context.asid[cpu];
+		for_each_possible_cpu(i)
+			if (i != cpu)
+				vcpu->arch.guest_user_asid[i] = 0;
 		vcpu->arch.last_user_gasid = gasid;
 	}
 }
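A note on why zeroing the other CPUs' entries suffices: kvm_arch_vcpu_load() treats a guest_user_asid[] entry as valid only if its version bits match that CPU's asid_cache(), and 0 never carries a current version, so a zeroed entry forces a fresh ASID on next load. Simplified consumer side, paraphrasing the check visible in the mmu.c hunk just below:

/* Sketch: a zeroed guest_user_asid[cpu] can never match the current
 * ASID version, so the vCPU is guaranteed a regenerated ASID here. */
if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
					asid_version_mask(cpu)) {
	/* regenerate via kvm_get_new_mmu_context() */
}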
4 changes: 0 additions & 4 deletions arch/mips/kvm/mmu.c
@@ -260,13 +260,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
 						asid_version_mask(cpu)) {
-		u32 gasid = kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
-				KVM_ENTRYHI_ASID;
-
 		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
 		vcpu->arch.guest_user_asid[cpu] =
 			vcpu->arch.guest_user_mm.context.asid[cpu];
-		vcpu->arch.last_user_gasid = gasid;
 		newasid++;
 
 		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
4 changes: 2 additions & 2 deletions arch/s390/kvm/sthyi.c
@@ -315,7 +315,7 @@ static void fill_diag(struct sthyi_sctns *sctns)
 	if (r < 0)
 		goto out;
 
-	diag224_buf = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);
+	diag224_buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
 	if (!diag224_buf || diag224(diag224_buf))
 		goto out;
 
@@ -378,7 +378,7 @@ static void fill_diag(struct sthyi_sctns *sctns)
 	sctns->par.infpval1 |= PAR_WGHT_VLD;
 
 out:
-	kfree(diag224_buf);
+	free_page((unsigned long)diag224_buf);
 	vfree(diag204_buf);
 }
 
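The rationale for the swap, per the commit subject a buffer-alignment fix: kmalloc(PAGE_SIZE, ...) makes no page-alignment promise (slab debugging in particular can pad allocations), while DIAGNOSE 224 needs a page-aligned buffer the diagnose call can address, hence GFP_DMA. A sketch of the corrected pairing under those assumptions:

#include <linux/gfp.h>

static void *diag_buf_alloc(void)
{
	/* page-aligned by construction; GFP_DMA keeps the buffer low
	 * enough in memory for the s390 diagnose call to address */
	return (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
}

static void diag_buf_free(void *buf)
{
	free_page((unsigned long)buf);	/* pairs with __get_free_page() */
}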
3 changes: 0 additions & 3 deletions arch/x86/include/asm/kvm_host.h
@@ -948,7 +948,6 @@ struct kvm_x86_ops {
 	int (*get_lpage_level)(void);
 	bool (*rdtscp_supported)(void);
 	bool (*invpcid_supported)(void);
-	void (*adjust_tsc_offset_guest)(struct kvm_vcpu *vcpu, s64 adjustment);
 
 	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
 
@@ -958,8 +957,6 @@ struct kvm_x86_ops {
 
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
-	u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
-
 	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
 
 	int (*check_intercept)(struct kvm_vcpu *vcpu,
2 changes: 1 addition & 1 deletion arch/x86/kvm/emulate.c
@@ -5045,7 +5045,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
 	/* Decode and fetch the destination operand: register or memory. */
 	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
 
-	if (ctxt->rip_relative)
+	if (ctxt->rip_relative && likely(ctxt->memopp))
 		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
 					ctxt->memopp->addr.mem.ea + ctxt->_eip);
 
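This one-line guard is the CVE-2016-8630 fix called out in the summary above: when decode marks an instruction undefined, no memory operand is ever bound, so ctxt->memopp can still be NULL by the time the RIP-relative fixup runs, and a guest could oops the host with a crafted instruction. The shape of the fix in isolation, with the reasoning spelled out as comments (names all from the hunk above):

	/* Only apply the RIP-relative fixup when a memory operand was
	 * actually decoded; memopp stays NULL for undefined opcodes. */
	if (ctxt->rip_relative && likely(ctxt->memopp))
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
				ctxt->memopp->addr.mem.ea + ctxt->_eip);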
23 changes: 0 additions & 23 deletions arch/x86/kvm/svm.c
@@ -1138,21 +1138,6 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
-static void svm_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-
-	svm->vmcb->control.tsc_offset += adjustment;
-	if (is_guest_mode(vcpu))
-		svm->nested.hsave->control.tsc_offset += adjustment;
-	else
-		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
-					   svm->vmcb->control.tsc_offset - adjustment,
-					   svm->vmcb->control.tsc_offset);
-
-	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
-}
-
 static void avic_init_vmcb(struct vcpu_svm *svm)
 {
 	struct vmcb *vmcb = svm->vmcb;
@@ -3449,12 +3434,6 @@ static int cr8_write_interception(struct vcpu_svm *svm)
 	return 0;
 }
 
-static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
-{
-	struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
-	return vmcb->control.tsc_offset + host_tsc;
-}
-
 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -5422,8 +5401,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.has_wbinvd_exit = svm_has_wbinvd_exit,
 
 	.write_tsc_offset = svm_write_tsc_offset,
-	.adjust_tsc_offset_guest = svm_adjust_tsc_offset_guest,
-	.read_l1_tsc = svm_read_l1_tsc,
 
 	.set_tdp_cr3 = set_tdp_cr3,
 
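With both callbacks gone, the series (per its subject, "drop TSC offsetting kvm_x86_ops to fix KVM_GET/SET_CLOCK") moves TSC-offset tracking into common x86 code. A hedged sketch of what replaces them, assuming the series caches the last programmed offset in vcpu->arch.tsc_offset and ignoring TSC scaling for brevity:

/* Assumption: common code remembers the offset it last wrote, so
 * reads and adjustments no longer need per-vendor callbacks. */
static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	kvm_x86_ops->write_tsc_offset(vcpu, offset);
	vcpu->arch.tsc_offset = offset;
}

static u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
	return vcpu->arch.tsc_offset + host_tsc;
}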
[Diffs for the remaining 4 of the 13 changed files were not loaded on this page.]
