Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
(synced 2025-08-05 16:54:27 +00:00)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:

 - fix for module unload vs deferred jump labels (note: there might be
   other buggy modules!)

 - two NULL pointer dereferences from syzkaller

 - also syzkaller: fix emulation of fxsave/fxrstor/sgdt/sidt, problem
   made worse during this merge window, "just" kernel memory leak on
   releases

 - fix emulation of "mov ss" - somewhat serious on AMD, less so on
   Intel

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: fix emulation of "MOV SS, null selector"
  KVM: x86: fix NULL deref in vcpu_scan_ioapic
  KVM: eventfd: fix NULL deref irqbypass consumer
  KVM: x86: Introduce segmented_write_std
  KVM: x86: flush pending lapic jump label updates on module unload
  jump_labels: API for flushing deferred jump label updates
commit 406732c932
7 changed files with 80 additions and 16 deletions
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -818,6 +818,20 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
 }
 
+static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
+			       struct segmented_address addr,
+			       void *data,
+			       unsigned int size)
+{
+	int rc;
+	ulong linear;
+
+	rc = linearize(ctxt, addr, size, true, &linear);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
+}
+
 /*
  * Prefetch the remaining bytes of the instruction without crossing page
  * boundary if they are not in fetch_cache yet.
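Reviewer's note: segmented_write_std mirrors segmented_read_std above it — linearize the segmented address, then go through ops->write_std, the "system" accessor, instead of the emulated write path used by segmented_write. A minimal userspace sketch of that linearize-then-access shape (the flat demo model, struct names and limits are mine, not the kernel's):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical flat model of a segmented address: base, limit, offset.
 * linearize() validates the access against the limit and produces the
 * linear address, like the emulator's linearize(). */
struct seg {
    uint32_t base;
    uint32_t limit;   /* highest valid offset */
};

static int linearize(const struct seg *s, uint32_t offset, uint32_t size,
                     uint32_t *linear)
{
    /* Reject zero-size and out-of-limit accesses (the emulator would
     * raise #GP/#SS here instead of returning an error code). */
    if (size == 0 || offset > s->limit || size - 1 > s->limit - offset)
        return -1;
    *linear = s->base + offset;
    return 0;
}

static uint8_t guest_mem[1 << 16];   /* stand-in for guest memory */

static int write_std(uint32_t linear, const void *data, uint32_t size)
{
    if (linear + size > sizeof(guest_mem))
        return -1;
    memcpy(guest_mem + linear, data, size);   /* plain, non-MMIO write */
    return 0;
}

int main(void)
{
    struct seg ss = { .base = 0x1000, .limit = 0xffff };
    uint32_t linear;
    uint16_t gdt_limit = 0x27;

    if (linearize(&ss, 0x10, sizeof(gdt_limit), &linear) == 0)
        write_std(linear, &gdt_limit, sizeof(gdt_limit));
    printf("wrote at linear 0x%x\n", (unsigned)linear);
    return 0;
}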
@@ -1571,7 +1585,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 				    &ctxt->exception);
 }
 
-/* Does not support long mode */
 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 				     u16 selector, int seg, u8 cpl,
 				     enum x86_transfer_type transfer,
@@ -1608,20 +1621,34 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 
 	rpl = selector & 3;
 
-	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
-	if ((seg == VCPU_SREG_CS
-	     || (seg == VCPU_SREG_SS
-		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
-	     || seg == VCPU_SREG_TR)
-	    && null_selector)
-		goto exception;
-
 	/* TR should be in GDT only */
 	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
 		goto exception;
 
-	if (null_selector) /* for NULL selector skip all following checks */
+	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
+	if (null_selector) {
+		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
+			goto exception;
+
+		if (seg == VCPU_SREG_SS) {
+			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
+				goto exception;
+
+			/*
+			 * ctxt->ops->set_segment expects the CPL to be in
+			 * SS.DPL, so fake an expand-up 32-bit data segment.
+			 */
+			seg_desc.type = 3;
+			seg_desc.p = 1;
+			seg_desc.s = 1;
+			seg_desc.dpl = cpl;
+			seg_desc.d = 1;
+			seg_desc.g = 1;
+		}
+
+		/* Skip all following checks */
 		goto load;
+	}
 
 	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
 	if (ret != X86EMUL_CONTINUE)
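For reference: null_selector in this function is derived from the selector's upper bits, so a selector is "null" when its descriptor index and TI bit are all zero — the RPL bits are ignored for that test, which is why selector 3 still counts as null. A standalone sketch of the decoding (illustrative helpers, not kernel code):

#include <stdio.h>
#include <stdint.h>

/* Illustrative decode of an x86 segment selector. */
static unsigned rpl(uint16_t sel)      { return sel & 3; }        /* bits 1:0 */
static unsigned ti(uint16_t sel)       { return (sel >> 2) & 1; } /* 0=GDT, 1=LDT */
static unsigned index_of(uint16_t sel) { return sel >> 3; }       /* descriptor index */
static int is_null(uint16_t sel)       { return (sel & ~3u) == 0; }

int main(void)
{
    uint16_t sels[] = { 0, 3, 0x08, 0x0b };
    for (unsigned i = 0; i < sizeof(sels) / sizeof(sels[0]); i++)
        printf("sel=%#06x index=%u ti=%u rpl=%u null=%d\n",
               sels[i], index_of(sels[i]), ti(sels[i]),
               rpl(sels[i]), is_null(sels[i]));
    return 0;
}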
@@ -1737,6 +1764,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 				   u16 selector, int seg)
 {
 	u8 cpl = ctxt->ops->cpl(ctxt);
+
+	/*
+	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
+	 * they can load it at CPL<3 (Intel's manual says only LSS can,
+	 * but it's wrong).
+	 *
+	 * However, the Intel manual says that putting IST=1/DPL=3 in
+	 * an interrupt gate will result in SS=3 (the AMD manual instead
+	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
+	 * and only forbid it here.
+	 */
+	if (seg == VCPU_SREG_SS && selector == 3 &&
+	    ctxt->mode == X86EMUL_MODE_PROT64)
+		return emulate_exception(ctxt, GP_VECTOR, 0, true);
+
 	return __load_segment_descriptor(ctxt, selector, seg, cpl,
 					 X86_TRANSFER_NONE, NULL);
 }
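Taken together with the previous hunk, the rule for explicit SS loads collapses to a single predicate: a null selector is accepted only in 64-bit mode with RPL == CPL, and selector 3 is refused here even though the interrupt-gate delivery path may still produce SS=3. A hedged restatement in C (the function and names are mine, not the kernel's):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Illustrative predicate for the check added above: does an explicit
 * MOV/POP/LSS to SS with this selector raise #GP? */
static bool mov_to_ss_null_raises_gp(bool long_mode, uint16_t selector, int cpl)
{
    bool null_sel = (selector & ~3u) == 0;

    if (!null_sel)
        return false;   /* not a null selector: normal descriptor checks apply */
    if (!long_mode)
        return true;    /* null SS is never legal outside long mode */
    /* In long mode a null SS needs RPL == CPL; selector 3 (null, RPL=3)
     * is additionally rejected for explicit loads by this hunk. */
    return (int)(selector & 3) != cpl || selector == 3;
}

int main(void)
{
    for (int cpl = 0; cpl <= 3; cpl++)
        printf("long mode, cpl=%d, mov ss,3 -> #GP=%d\n",
               cpl, mov_to_ss_null_raises_gp(true, 3, cpl));
    return 0;
}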
@@ -3685,8 +3727,8 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
 	}
 	/* Disable writeback. */
 	ctxt->dst.type = OP_NONE;
-	return segmented_write(ctxt, ctxt->dst.addr.mem,
+	return segmented_write_std(ctxt, ctxt->dst.addr.mem,
 			       &desc_ptr, 2 + ctxt->op_bytes);
 }
 
 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
@@ -3932,7 +3974,7 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
 	else
 		size = offsetof(struct fxregs_state, xmm_space[0]);
 
-	return segmented_write(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
 }
 
 static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
@@ -3974,7 +4016,7 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	rc = segmented_read(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
+	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
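These three hunks are the sgdt/sidt/fxsave/fxrstor part of the pull message: the descriptor-table stores and the FX state transfers now use the *_std accessors, which access guest virtual memory directly rather than the emulated read/write path. The offsetof() trick in em_fxsave clips the copy just before the XMM area when the full 512-byte image must not be written; a toy illustration of that size selection (field layout simplified, not the real fxregs_state):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Simplified stand-in for a 512-byte FXSAVE area whose tail holds the
 * XMM registers.  Offsets are illustrative only. */
struct fxregs_state_demo {
    uint8_t  legacy[160];       /* FPU control/status + ST/MMX state */
    uint32_t xmm_space[64];     /* 16 XMM registers, 16 bytes each */
    uint8_t  reserved[96];
};

int main(void)
{
    /* When XMM state must not be touched, only the bytes before
     * xmm_space are copied -- the same offsetof() idiom as em_fxsave. */
    size_t partial = offsetof(struct fxregs_state_demo, xmm_space[0]);
    printf("full=%zu partial=%zu\n",
           sizeof(struct fxregs_state_demo), partial);
    return 0;
}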
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2426,3 +2426,9 @@ void kvm_lapic_init(void)
 	jump_label_rate_limit(&apic_hw_disabled, HZ);
 	jump_label_rate_limit(&apic_sw_disabled, HZ);
 }
+
+void kvm_lapic_exit(void)
+{
+	static_key_deferred_flush(&apic_hw_disabled);
+	static_key_deferred_flush(&apic_sw_disabled);
+}
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -110,6 +110,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
 
 int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
 void kvm_lapic_init(void);
+void kvm_lapic_exit(void);
 
 #define VEC_POS(v) ((v) & (32 - 1))
 #define REG_POS(v) (((v) >> 5) << 4)
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3342,6 +3342,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 
 	switch (cap->cap) {
 	case KVM_CAP_HYPERV_SYNIC:
+		if (!irqchip_in_kernel(vcpu->kvm))
+			return -EINVAL;
 		return kvm_hv_activate_synic(vcpu);
 	default:
 		return -EINVAL;
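The guard makes KVM_CAP_HYPERV_SYNIC fail up front when there is no in-kernel irqchip, instead of leaving vcpu_scan_ioapic() to dereference a NULL ioapic later. Seen from userspace, the vcpu-level KVM_ENABLE_CAP now returns EINVAL unless KVM_CREATE_IRQCHIP was issued first — a sketch using the standard KVM ioctls (error handling elided):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
    int kvm = open("/dev/kvm", O_RDWR);
    int vm  = ioctl(kvm, KVM_CREATE_VM, 0);

    /* Without this call, enabling SYNIC below now fails with EINVAL
     * instead of arming a NULL-ioapic dereference for later. */
    ioctl(vm, KVM_CREATE_IRQCHIP, 0);

    int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
    struct kvm_enable_cap cap = { .cap = KVM_CAP_HYPERV_SYNIC };
    int ret = ioctl(vcpu, KVM_ENABLE_CAP, &cap);
    printf("KVM_ENABLE_CAP(HYPERV_SYNIC) = %d\n", ret);
    return 0;
}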
@@ -6045,6 +6047,7 @@ out:
 
 void kvm_arch_exit(void)
 {
+	kvm_lapic_exit();
 	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
 
 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
--- a/include/linux/jump_label_ratelimit.h
+++ b/include/linux/jump_label_ratelimit.h
@@ -14,6 +14,7 @@ struct static_key_deferred {
 
 #ifdef HAVE_JUMP_LABEL
 extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
+extern void static_key_deferred_flush(struct static_key_deferred *key);
 extern void
 jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
 
@@ -26,6 +27,10 @@ static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
 	STATIC_KEY_CHECK_USE();
 	static_key_slow_dec(&key->key);
 }
+static inline void static_key_deferred_flush(struct static_key_deferred *key)
+{
+	STATIC_KEY_CHECK_USE();
+}
 static inline void
 jump_label_rate_limit(struct static_key_deferred *key,
 		unsigned long rl)
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -182,6 +182,13 @@ void static_key_slow_dec_deferred(struct static_key_deferred *key)
 }
 EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
 
+void static_key_deferred_flush(struct static_key_deferred *key)
+{
+	STATIC_KEY_CHECK_USE();
+	flush_delayed_work(&key->work);
+}
+EXPORT_SYMBOL_GPL(static_key_deferred_flush);
+
 void jump_label_rate_limit(struct static_key_deferred *key,
 		unsigned long rl)
 {
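This is the new API from the "jump_labels: API for flushing deferred jump label updates" patch: a rate-limited static key defers its slow_dec through delayed work, so a module that owns such a key must flush that work before its text is unmapped, or the work item fires on freed code. A hypothetical module showing the intended call pattern (it mirrors what kvm_lapic_exit() now does; the demo_* names are made up):

#include <linux/module.h>
#include <linux/jump_label.h>
#include <linux/jump_label_ratelimit.h>

/* Zero-initialized deferred key, as kvm's apic_hw_disabled is. */
static struct static_key_deferred demo_key;

static int __init demo_init(void)
{
	jump_label_rate_limit(&demo_key, HZ);  /* defer decrements up to 1s */
	static_key_slow_inc(&demo_key.key);
	return 0;
}

static void __exit demo_exit(void)
{
	/* May only queue delayed work instead of patching immediately... */
	static_key_slow_dec_deferred(&demo_key);
	/* ...so wait for it here, while this module's code still exists. */
	static_key_deferred_flush(&demo_key);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");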
--- a/virt/lib/irqbypass.c
+++ b/virt/lib/irqbypass.c
@@ -195,7 +195,7 @@ int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer)
 	mutex_lock(&lock);
 
 	list_for_each_entry(tmp, &consumers, node) {
-		if (tmp->token == consumer->token) {
+		if (tmp->token == consumer->token || tmp == consumer) {
 			mutex_unlock(&lock);
 			module_put(THIS_MODULE);
 			return -EBUSY;
@@ -245,7 +245,7 @@ void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer)
 	mutex_lock(&lock);
 
 	list_for_each_entry(tmp, &consumers, node) {
-		if (tmp->token != consumer->token)
+		if (tmp != consumer)
 			continue;
 
 		list_for_each_entry(producer, &producers, node) {
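Both irqbypass hunks replace token comparison with pointer identity: registration still refuses a duplicate token, but now also refuses re-registering the same consumer, and unregistration can only remove the caller's own node — two consumers that happened to share a token can no longer unregister each other out from under the list. The identity-vs-key distinction in a self-contained sketch (toy singly linked list, not the kernel's list API):

#include <stdio.h>
#include <stddef.h>

/* Toy consumer list illustrating the fix: remove by node identity,
 * never by a (possibly shared) token. */
struct consumer {
    void *token;
    struct consumer *next;
};

static struct consumer *head;

static int unregister_consumer(struct consumer *c)
{
    for (struct consumer **p = &head; *p; p = &(*p)->next) {
        if (*p != c)            /* was: (*p)->token != c->token */
            continue;
        *p = c->next;           /* unlink exactly this node */
        return 0;
    }
    return -1;                  /* not registered: caller bug, but no UAF */
}

int main(void)
{
    int tok;
    struct consumer a = { &tok, NULL }, b = { &tok, NULL };  /* same token! */
    a.next = &b;
    head = &a;

    unregister_consumer(&b);    /* removes b itself, never a by accident */
    printf("a still registered: %d\n", head == &a);
    return 0;
}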