mirror of
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-08-05 16:54:27 +00:00
KVM: arm/arm64: vgic: Make vgic_dist->lpi_list_lock a raw_spinlock
vgic_dist->lpi_list_lock must always be taken with interrupts disabled as it is used in interrupt context. For configurations such as PREEMPT_RT_FULL, this means that it should be a raw_spinlock since RT spinlocks are interruptible. Signed-off-by: Julien Thierry <julien.thierry@arm.com> Acked-by: Christoffer Dall <christoffer.dall@arm.com> Acked-by: Marc Zyngier <marc.zyngier@arm.com> Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
This commit is contained in:
parent
8fa3adb8c6
commit
fc3bc47523
4 changed files with 11 additions and 11 deletions
|
@@ -256,7 +256,7 @@ struct vgic_dist {
|
||||||
u64 propbaser;
|
u64 propbaser;
|
||||||
|
|
||||||
/* Protects the lpi_list and the count value below. */
|
/* Protects the lpi_list and the count value below. */
|
||||||
spinlock_t lpi_list_lock;
|
raw_spinlock_t lpi_list_lock;
|
||||||
struct list_head lpi_list_head;
|
struct list_head lpi_list_head;
|
||||||
int lpi_list_count;
|
int lpi_list_count;
|
||||||
|
|
||||||
|
|
|
@@ -64,7 +64,7 @@ void kvm_vgic_early_init(struct kvm *kvm)
|
||||||
struct vgic_dist *dist = &kvm->arch.vgic;
|
struct vgic_dist *dist = &kvm->arch.vgic;
|
||||||
|
|
||||||
INIT_LIST_HEAD(&dist->lpi_list_head);
|
INIT_LIST_HEAD(&dist->lpi_list_head);
|
||||||
spin_lock_init(&dist->lpi_list_lock);
|
raw_spin_lock_init(&dist->lpi_list_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* CREATION */
|
/* CREATION */
|
||||||
|
|
|
@@ -73,7 +73,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
|
||||||
irq->target_vcpu = vcpu;
|
irq->target_vcpu = vcpu;
|
||||||
irq->group = 1;
|
irq->group = 1;
|
||||||
|
|
||||||
spin_lock_irqsave(&dist->lpi_list_lock, flags);
|
raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* There could be a race with another vgic_add_lpi(), so we need to
|
* There could be a race with another vgic_add_lpi(), so we need to
|
||||||
|
@@ -101,7 +101,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
|
||||||
dist->lpi_list_count++;
|
dist->lpi_list_count++;
|
||||||
|
|
||||||
out_unlock:
|
out_unlock:
|
||||||
spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
|
raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We "cache" the configuration table entries in our struct vgic_irq's.
|
* We "cache" the configuration table entries in our struct vgic_irq's.
|
||||||
|
@@ -332,7 +332,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
|
||||||
if (!intids)
|
if (!intids)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
spin_lock_irqsave(&dist->lpi_list_lock, flags);
|
raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
|
||||||
list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
|
list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
|
||||||
if (i == irq_count)
|
if (i == irq_count)
|
||||||
break;
|
break;
|
||||||
|
@@ -341,7 +341,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
|
||||||
continue;
|
continue;
|
||||||
intids[i++] = irq->intid;
|
intids[i++] = irq->intid;
|
||||||
}
|
}
|
||||||
spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
|
raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
|
||||||
|
|
||||||
*intid_ptr = intids;
|
*intid_ptr = intids;
|
||||||
return i;
|
return i;
|
||||||
|
|
|
@@ -72,7 +72,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
|
||||||
struct vgic_irq *irq = NULL;
|
struct vgic_irq *irq = NULL;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
spin_lock_irqsave(&dist->lpi_list_lock, flags);
|
raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
|
||||||
|
|
||||||
list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
|
list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
|
||||||
if (irq->intid != intid)
|
if (irq->intid != intid)
|
||||||
|
@@ -88,7 +88,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
|
||||||
irq = NULL;
|
irq = NULL;
|
||||||
|
|
||||||
out_unlock:
|
out_unlock:
|
||||||
spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
|
raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
|
||||||
|
|
||||||
return irq;
|
return irq;
|
||||||
}
|
}
|
||||||
|
@@ -138,15 +138,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
|
||||||
if (irq->intid < VGIC_MIN_LPI)
|
if (irq->intid < VGIC_MIN_LPI)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
spin_lock_irqsave(&dist->lpi_list_lock, flags);
|
raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
|
||||||
if (!kref_put(&irq->refcount, vgic_irq_release)) {
|
if (!kref_put(&irq->refcount, vgic_irq_release)) {
|
||||||
spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
|
raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
|
||||||
return;
|
return;
|
||||||
};
|
};
|
||||||
|
|
||||||
list_del(&irq->lpi_list);
|
list_del(&irq->lpi_list);
|
||||||
dist->lpi_list_count--;
|
dist->lpi_list_count--;
|
||||||
spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
|
raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
|
||||||
|
|
||||||
kfree(irq);
|
kfree(irq);
|
||||||
}
|
}
|
||||||
|
|
Loading…
Add table
Reference in a new issue