Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-09-18 22:14:16 +00:00)
Merge branch 'kvm-lockdep-common' into HEAD
Introduce new mutex locking functions mutex_trylock_nest_lock() and mutex_lock_killable_nest_lock() and use them to clean up locking of all vCPUs for a VM.

For x86, this removes some complex code that was used instead of lockdep's "nest_lock" feature.

For ARM and RISC-V, this removes a lockdep warning when the VM is configured to have more than MAX_LOCK_DEPTH vCPUs, and removes a fair amount of duplicate code by sharing the logic across all architectures.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
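The pattern every architecture converges on after this merge is sketched below: take kvm->lock, then lock every vCPU with kvm_trylock_all_vcpus() (or the sleeping, killable kvm_lock_all_vcpus()), both of which tell lockdep that kvm->lock serializes the vcpu->mutex acquisitions via the new nest_lock primitives. The helper names and return conventions come from this diff; the surrounding ioctl handler is a hypothetical illustration, not code from the patch.

/* Illustrative sketch only; example_vm_ioctl_quiesce() is not part of this patch. */
#include <linux/kvm_host.h>

static int example_vm_ioctl_quiesce(struct kvm *kvm)
{
        int ret = -EBUSY;

        mutex_lock(&kvm->lock);

        /*
         * kvm_trylock_all_vcpus() returns 0 on success. Every vcpu->mutex
         * is acquired with kvm->lock as the lockdep "nest_lock", so locking
         * more than MAX_LOCK_DEPTH vCPUs no longer triggers a lockdep warning.
         */
        if (!kvm_trylock_all_vcpus(kvm)) {
                /* ... update VM-wide state that no vCPU may observe in flight ... */
                kvm_unlock_all_vcpus(kvm);
                ret = 0;
        }

        mutex_unlock(&kvm->lock);
        return ret;
}

The sleeping variant, kvm_lock_all_vcpus(), follows the same shape but uses mutex_lock_killable_nest_lock() underneath, so it can fail with -EINTR if the caller is killed while waiting; that is the form the x86 SEV migration path switches to.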
This commit is contained in:
commit 8e86e73626
13 changed files with 131 additions and 170 deletions
@@ -1320,9 +1320,6 @@ int __init populate_sysreg_config(const struct sys_reg_desc *sr,
                                  unsigned int idx);
 int __init populate_nv_trap_config(void);

-bool lock_all_vcpus(struct kvm *kvm);
-void unlock_all_vcpus(struct kvm *kvm);
-
 void kvm_calculate_traps(struct kvm_vcpu *vcpu);

 /* MMIO helpers */
@@ -1766,7 +1766,7 @@ int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
        mutex_lock(&kvm->lock);

-       if (lock_all_vcpus(kvm)) {
+       if (!kvm_trylock_all_vcpus(kvm)) {
                set_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &kvm->arch.flags);

                /*
@@ -1778,7 +1778,7 @@ int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
                kvm->arch.timer_data.voffset = offset->counter_offset;
                kvm->arch.timer_data.poffset = offset->counter_offset;

-               unlock_all_vcpus(kvm);
+               kvm_unlock_all_vcpus(kvm);
        } else {
                ret = -EBUSY;
        }
@@ -1924,49 +1924,6 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
        }
 }

-/* unlocks vcpus from @vcpu_lock_idx and smaller */
-static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
-{
-       struct kvm_vcpu *tmp_vcpu;
-
-       for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
-               tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
-               mutex_unlock(&tmp_vcpu->mutex);
-       }
-}
-
-void unlock_all_vcpus(struct kvm *kvm)
-{
-       lockdep_assert_held(&kvm->lock);
-
-       unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
-}
-
-/* Returns true if all vcpus were locked, false otherwise */
-bool lock_all_vcpus(struct kvm *kvm)
-{
-       struct kvm_vcpu *tmp_vcpu;
-       unsigned long c;
-
-       lockdep_assert_held(&kvm->lock);
-
-       /*
-        * Any time a vcpu is in an ioctl (including running), the
-        * core KVM code tries to grab the vcpu->mutex.
-        *
-        * By grabbing the vcpu->mutex of all VCPUs we ensure that no
-        * other VCPUs can fiddle with the state while we access it.
-        */
-       kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
-               if (!mutex_trylock(&tmp_vcpu->mutex)) {
-                       unlock_vcpus(kvm, c - 1);
-                       return false;
-               }
-       }
-
-       return true;
-}
-
 static unsigned long nvhe_percpu_size(void)
 {
        return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
@@ -88,7 +88,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
        lockdep_assert_held(&kvm->lock);

        ret = -EBUSY;
-       if (!lock_all_vcpus(kvm))
+       if (kvm_trylock_all_vcpus(kvm))
                return ret;

        mutex_lock(&kvm->arch.config_lock);
@@ -142,7 +142,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)

 out_unlock:
        mutex_unlock(&kvm->arch.config_lock);
-       unlock_all_vcpus(kvm);
+       kvm_unlock_all_vcpus(kvm);
        return ret;
 }

@@ -1971,7 +1971,7 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,

        mutex_lock(&dev->kvm->lock);

-       if (!lock_all_vcpus(dev->kvm)) {
+       if (kvm_trylock_all_vcpus(dev->kvm)) {
                mutex_unlock(&dev->kvm->lock);
                return -EBUSY;
        }
@@ -2006,7 +2006,7 @@ static int vgic_its_attr_regs_access(struct kvm_device *dev,
        }
 out:
        mutex_unlock(&dev->kvm->arch.config_lock);
-       unlock_all_vcpus(dev->kvm);
+       kvm_unlock_all_vcpus(dev->kvm);
        mutex_unlock(&dev->kvm->lock);
        return ret;
 }
@@ -2676,7 +2676,7 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)

        mutex_lock(&kvm->lock);

-       if (!lock_all_vcpus(kvm)) {
+       if (kvm_trylock_all_vcpus(kvm)) {
                mutex_unlock(&kvm->lock);
                return -EBUSY;
        }
@@ -2698,7 +2698,7 @@ static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)

        mutex_unlock(&its->its_lock);
        mutex_unlock(&kvm->arch.config_lock);
-       unlock_all_vcpus(kvm);
+       kvm_unlock_all_vcpus(kvm);
        mutex_unlock(&kvm->lock);
        return ret;
 }
@@ -268,7 +268,7 @@ static int vgic_set_common_attr(struct kvm_device *dev,
                        return -ENXIO;
                mutex_lock(&dev->kvm->lock);

-               if (!lock_all_vcpus(dev->kvm)) {
+               if (kvm_trylock_all_vcpus(dev->kvm)) {
                        mutex_unlock(&dev->kvm->lock);
                        return -EBUSY;
                }
@@ -276,7 +276,7 @@ static int vgic_set_common_attr(struct kvm_device *dev,
                mutex_lock(&dev->kvm->arch.config_lock);
                r = vgic_v3_save_pending_tables(dev->kvm);
                mutex_unlock(&dev->kvm->arch.config_lock);
-               unlock_all_vcpus(dev->kvm);
+               kvm_unlock_all_vcpus(dev->kvm);
                mutex_unlock(&dev->kvm->lock);
                return r;
        }
@@ -390,7 +390,7 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,

        mutex_lock(&dev->kvm->lock);

-       if (!lock_all_vcpus(dev->kvm)) {
+       if (kvm_trylock_all_vcpus(dev->kvm)) {
                mutex_unlock(&dev->kvm->lock);
                return -EBUSY;
        }
@@ -415,7 +415,7 @@ static int vgic_v2_attr_regs_access(struct kvm_device *dev,

 out:
        mutex_unlock(&dev->kvm->arch.config_lock);
-       unlock_all_vcpus(dev->kvm);
+       kvm_unlock_all_vcpus(dev->kvm);
        mutex_unlock(&dev->kvm->lock);

        if (!ret && !is_write)
@@ -554,7 +554,7 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,

        mutex_lock(&dev->kvm->lock);

-       if (!lock_all_vcpus(dev->kvm)) {
+       if (kvm_trylock_all_vcpus(dev->kvm)) {
                mutex_unlock(&dev->kvm->lock);
                return -EBUSY;
        }
@@ -611,7 +611,7 @@ static int vgic_v3_attr_regs_access(struct kvm_device *dev,

 out:
        mutex_unlock(&dev->kvm->arch.config_lock);
-       unlock_all_vcpus(dev->kvm);
+       kvm_unlock_all_vcpus(dev->kvm);
        mutex_unlock(&dev->kvm->lock);

        if (!ret && uaccess && !is_write) {
@@ -12,36 +12,6 @@
 #include <linux/kvm_host.h>
 #include <linux/uaccess.h>

-static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
-{
-       struct kvm_vcpu *tmp_vcpu;
-
-       for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
-               tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
-               mutex_unlock(&tmp_vcpu->mutex);
-       }
-}
-
-static void unlock_all_vcpus(struct kvm *kvm)
-{
-       unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
-}
-
-static bool lock_all_vcpus(struct kvm *kvm)
-{
-       struct kvm_vcpu *tmp_vcpu;
-       unsigned long c;
-
-       kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
-               if (!mutex_trylock(&tmp_vcpu->mutex)) {
-                       unlock_vcpus(kvm, c - 1);
-                       return false;
-               }
-       }
-
-       return true;
-}
-
 static int aia_create(struct kvm_device *dev, u32 type)
 {
        int ret;
@@ -53,7 +23,7 @@ static int aia_create(struct kvm_device *dev, u32 type)
                return -EEXIST;

        ret = -EBUSY;
-       if (!lock_all_vcpus(kvm))
+       if (kvm_trylock_all_vcpus(kvm))
                return ret;

        kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -65,7 +35,7 @@ static int aia_create(struct kvm_device *dev, u32 type)
        kvm->arch.aia.in_kernel = true;

 out_unlock:
-       unlock_all_vcpus(kvm);
+       kvm_unlock_all_vcpus(kvm);
        return ret;
 }

@@ -1884,70 +1884,6 @@ static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
        atomic_set_release(&src_sev->migration_in_progress, 0);
 }

-/* vCPU mutex subclasses. */
-enum sev_migration_role {
-       SEV_MIGRATION_SOURCE = 0,
-       SEV_MIGRATION_TARGET,
-       SEV_NR_MIGRATION_ROLES,
-};
-
-static int sev_lock_vcpus_for_migration(struct kvm *kvm,
-                                       enum sev_migration_role role)
-{
-       struct kvm_vcpu *vcpu;
-       unsigned long i, j;
-
-       kvm_for_each_vcpu(i, vcpu, kvm) {
-               if (mutex_lock_killable_nested(&vcpu->mutex, role))
-                       goto out_unlock;
-
-#ifdef CONFIG_PROVE_LOCKING
-               if (!i)
-                       /*
-                        * Reset the role to one that avoids colliding with
-                        * the role used for the first vcpu mutex.
-                        */
-                       role = SEV_NR_MIGRATION_ROLES;
-               else
-                       mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
-#endif
-       }
-
-       return 0;
-
-out_unlock:
-
-       kvm_for_each_vcpu(j, vcpu, kvm) {
-               if (i == j)
-                       break;
-
-#ifdef CONFIG_PROVE_LOCKING
-               if (j)
-                       mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
-#endif
-
-               mutex_unlock(&vcpu->mutex);
-       }
-       return -EINTR;
-}
-
-static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
-{
-       struct kvm_vcpu *vcpu;
-       unsigned long i;
-       bool first = true;
-
-       kvm_for_each_vcpu(i, vcpu, kvm) {
-               if (first)
-                       first = false;
-               else
-                       mutex_acquire(&vcpu->mutex.dep_map,
-                                     SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);
-
-               mutex_unlock(&vcpu->mutex);
-       }
-}
-
 static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
 {
        struct kvm_sev_info *dst = to_kvm_sev_info(dst_kvm);
@@ -2085,10 +2021,10 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
                charged = true;
        }

-       ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
+       ret = kvm_lock_all_vcpus(kvm);
        if (ret)
                goto out_dst_cgroup;
-       ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
+       ret = kvm_lock_all_vcpus(source_kvm);
        if (ret)
                goto out_dst_vcpu;

@@ -2102,9 +2038,9 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
        ret = 0;

 out_source_vcpu:
-       sev_unlock_vcpus_for_migration(source_kvm);
+       kvm_unlock_all_vcpus(source_kvm);
 out_dst_vcpu:
-       sev_unlock_vcpus_for_migration(kvm);
+       kvm_unlock_all_vcpus(kvm);
 out_dst_cgroup:
        /* Operates on the source on success, on the destination on failure. */
        if (charged)
@@ -1015,6 +1015,10 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)

 void kvm_destroy_vcpus(struct kvm *kvm);

+int kvm_trylock_all_vcpus(struct kvm *kvm);
+int kvm_lock_all_vcpus(struct kvm *kvm);
+void kvm_unlock_all_vcpus(struct kvm *kvm);
+
 void vcpu_load(struct kvm_vcpu *vcpu);
 void vcpu_put(struct kvm_vcpu *vcpu);

@@ -156,16 +156,15 @@ static inline int __devm_mutex_init(struct device *dev, struct mutex *lock)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
 extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);

 extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
                                        unsigned int subclass);
-extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
-                                       unsigned int subclass);
+extern int __must_check _mutex_lock_killable(struct mutex *lock,
+               unsigned int subclass, struct lockdep_map *nest_lock);
 extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);

 #define mutex_lock(lock) mutex_lock_nested(lock, 0)
 #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
-#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
+#define mutex_lock_killable(lock) _mutex_lock_killable(lock, 0, NULL)
 #define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0)

 #define mutex_lock_nest_lock(lock, nest_lock) \
@@ -174,6 +173,15 @@ do { \
        _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
 } while (0)

+#define mutex_lock_killable_nest_lock(lock, nest_lock) \
+( \
+       typecheck(struct lockdep_map *, &(nest_lock)->dep_map), \
+       _mutex_lock_killable(lock, 0, &(nest_lock)->dep_map) \
+)
+
+#define mutex_lock_killable_nested(lock, subclass) \
+       _mutex_lock_killable(lock, subclass, NULL)
+
 #else
 extern void mutex_lock(struct mutex *lock);
 extern int __must_check mutex_lock_interruptible(struct mutex *lock);
@@ -183,6 +191,7 @@ extern void mutex_lock_io(struct mutex *lock);
 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
 # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
+# define mutex_lock_killable_nest_lock(lock, nest_lock) mutex_lock_killable(lock)
 # define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
 # define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock)
 #endif
@@ -193,7 +202,22 @@ extern void mutex_lock_io(struct mutex *lock);
  *
  * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
  */
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern int _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
+
+#define mutex_trylock_nest_lock(lock, nest_lock) \
+( \
+       typecheck(struct lockdep_map *, &(nest_lock)->dep_map), \
+       _mutex_trylock_nest_lock(lock, &(nest_lock)->dep_map) \
+)
+
+#define mutex_trylock(lock) _mutex_trylock_nest_lock(lock, NULL)
+#else
 extern int mutex_trylock(struct mutex *lock);
+#define mutex_trylock_nest_lock(lock, nest_lock) mutex_trylock(lock)
+#endif
+
 extern void mutex_unlock(struct mutex *lock);

 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
@@ -808,11 +808,12 @@ _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

 int __sched
-mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
+_mutex_lock_killable(struct mutex *lock, unsigned int subclass,
+                    struct lockdep_map *nest)
 {
-       return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
+       return __mutex_lock(lock, TASK_KILLABLE, subclass, nest, _RET_IP_);
 }
-EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
+EXPORT_SYMBOL_GPL(_mutex_lock_killable);

 int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
@@ -1062,6 +1063,7 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,

 #endif

+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /**
  * mutex_trylock - try to acquire the mutex, without waiting
  * @lock: the mutex to be acquired
@@ -1077,18 +1079,25 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
  * mutex must be released by the same task that acquired it.
  */
 int __sched mutex_trylock(struct mutex *lock)
+{
+       MUTEX_WARN_ON(lock->magic != lock);
+       return __mutex_trylock(lock);
+}
+EXPORT_SYMBOL(mutex_trylock);
+#else
+int __sched _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock)
 {
        bool locked;

        MUTEX_WARN_ON(lock->magic != lock);

        locked = __mutex_trylock(lock);
        if (locked)
-               mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+               mutex_acquire_nest(&lock->dep_map, 0, 1, nest_lock, _RET_IP_);

        return locked;
 }
-EXPORT_SYMBOL(mutex_trylock);
+EXPORT_SYMBOL(_mutex_trylock_nest_lock);
+#endif

 #ifndef CONFIG_DEBUG_LOCK_ALLOC
 int __sched
@@ -7,6 +7,11 @@ void rust_helper_mutex_lock(struct mutex *lock)
        mutex_lock(lock);
 }

+int rust_helper_mutex_trylock(struct mutex *lock)
+{
+       return mutex_trylock(lock);
+}
+
 void rust_helper___mutex_init(struct mutex *mutex, const char *name,
                              struct lock_class_key *key)
 {
@@ -1368,6 +1368,65 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
        return 0;
 }

+int kvm_trylock_all_vcpus(struct kvm *kvm)
+{
+       struct kvm_vcpu *vcpu;
+       unsigned long i, j;
+
+       lockdep_assert_held(&kvm->lock);
+
+       kvm_for_each_vcpu(i, vcpu, kvm)
+               if (!mutex_trylock_nest_lock(&vcpu->mutex, &kvm->lock))
+                       goto out_unlock;
+       return 0;
+
+out_unlock:
+       kvm_for_each_vcpu(j, vcpu, kvm) {
+               if (i == j)
+                       break;
+               mutex_unlock(&vcpu->mutex);
+       }
+       return -EINTR;
+}
+EXPORT_SYMBOL_GPL(kvm_trylock_all_vcpus);
+
+int kvm_lock_all_vcpus(struct kvm *kvm)
+{
+       struct kvm_vcpu *vcpu;
+       unsigned long i, j;
+       int r;
+
+       lockdep_assert_held(&kvm->lock);
+
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               r = mutex_lock_killable_nest_lock(&vcpu->mutex, &kvm->lock);
+               if (r)
+                       goto out_unlock;
+       }
+       return 0;
+
+out_unlock:
+       kvm_for_each_vcpu(j, vcpu, kvm) {
+               if (i == j)
+                       break;
+               mutex_unlock(&vcpu->mutex);
+       }
+       return r;
+}
+EXPORT_SYMBOL_GPL(kvm_lock_all_vcpus);
+
+void kvm_unlock_all_vcpus(struct kvm *kvm)
+{
+       struct kvm_vcpu *vcpu;
+       unsigned long i;
+
+       lockdep_assert_held(&kvm->lock);
+
+       kvm_for_each_vcpu(i, vcpu, kvm)
+               mutex_unlock(&vcpu->mutex);
+}
+EXPORT_SYMBOL_GPL(kvm_unlock_all_vcpus);
+
 /*
  * Allocation size is twice as large as the actual dirty bitmap size.
  * See kvm_vm_ioctl_get_dirty_log() why this is needed.