// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016,2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
|
|
|
|
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/irq.h>
|
|
|
|
#include <linux/irqdomain.h>
|
|
|
|
#include <linux/msi.h>
|
2023-12-15 17:49:24 -05:00
|
|
|
#include <linux/pid.h>
|
2016-12-20 15:27:52 +00:00
|
|
|
#include <linux/sched.h>
|
|
|
|
|
|
|
|
#include <linux/irqchip/arm-gic-v4.h>
|
|
|
|
|
2016-12-21 17:40:16 +00:00
|
|
|
/*
|
|
|
|
* WARNING: The blurb below assumes that you understand the
|
|
|
|
* intricacies of GICv3, GICv4, and how a guest's view of a GICv3 gets
|
|
|
|
* translated into GICv4 commands. So it effectively targets at most
|
|
|
|
* two individuals. You know who you are.
|
|
|
|
*
|
|
|
|
* The core GICv4 code is designed to *avoid* exposing too much of the
|
|
|
|
* core GIC code (that would in turn leak into the hypervisor code),
|
|
|
|
* and instead provide a hypervisor agnostic interface to the HW (of
|
|
|
|
* course, the astute reader will quickly realize that hypervisor
|
|
|
|
* agnostic actually means KVM-specific - what were you thinking?).
|
|
|
|
*
|
|
|
|
* In order to achieve a modicum of isolation, we try to hide most of
|
|
|
|
* the GICv4 "stuff" behind normal irqchip operations:
|
|
|
|
*
|
|
|
|
* - Any guest-visible VLPI is backed by a Linux interrupt (and a
|
|
|
|
* physical LPI which gets unmapped when the guest maps the
|
|
|
|
* VLPI). This allows the same DevID/EventID pair to be either
|
|
|
|
* mapped to the LPI (host) or the VLPI (guest). Note that this is
|
|
|
|
* exclusive, and you cannot have both.
|
|
|
|
*
|
|
|
|
* - Enabling/disabling a VLPI is done by issuing mask/unmask calls.
|
|
|
|
*
|
|
|
|
* - Guest INT/CLEAR commands are implemented through
|
|
|
|
* irq_set_irqchip_state().
|
|
|
|
*
|
|
|
|
* - The *bizarre* stuff (mapping/unmapping an interrupt to a VLPI, or
|
|
|
|
* issuing an INV after changing a priority) gets shoved into the
|
|
|
|
* irq_set_vcpu_affinity() method. While this is quite horrible
|
|
|
|
* (let's face it, this is the irqchip version of an ioctl), it
|
|
|
|
* confines the crap to a single location. And map/unmap really is
|
|
|
|
* about setting the affinity of a VLPI to a vcpu, so only INV is
|
|
|
|
* majorly out of place. So there.
|
|
|
|
*
|
|
|
|
* A number of commands are simply not provided by this interface, as
|
|
|
|
* they do not make direct sense. For example, MAPD is purely local to
|
|
|
|
* the virtual ITS (because it references a virtual device, and the
|
|
|
|
* physical ITS is still very much in charge of the physical
|
|
|
|
* device). Same goes for things like MAPC (the physical ITS deals
|
|
|
|
* with the actual vPE affinity, and not the braindead concept of
|
|
|
|
* collection). SYNC is not provided either, as each and every command
|
|
|
|
* is followed by a VSYNC. This could be relaxed in the future, should
|
|
|
|
* this be seen as a bottleneck (yes, this means *never*).
|
|
|
|
*
|
|
|
|
* But handling VLPIs is only one side of the job of the GICv4
|
|
|
|
* code. The other (darker) side is to take care of the doorbell
|
|
|
|
* interrupts which are delivered when a VLPI targeting a non-running
|
|
|
|
* vcpu is being made pending.
|
|
|
|
*
|
|
|
|
* The choice made here is that each vcpu (VPE in old northern GICv4
|
|
|
|
* dialect) gets a single doorbell LPI, no matter how many interrupts
|
|
|
|
* are targeting it. This has a nice property, which is that the
|
|
|
|
* interrupt becomes a handle for the VPE, and that the hypervisor
|
|
|
|
* code can manipulate it through the normal interrupt API:
|
|
|
|
*
|
|
|
|
* - VMs (or rather the VM abstraction that matters to the GIC)
|
|
|
|
* contain an irq domain where each interrupt maps to a VPE. In
|
|
|
|
* turn, this domain sits on top of the normal LPI allocator, and a
|
|
|
|
* specially crafted irq_chip implementation.
|
|
|
|
*
|
|
|
|
* - mask/unmask do what is expected on the doorbell interrupt.
|
|
|
|
*
|
|
|
|
* - irq_set_affinity is used to move a VPE from one redistributor to
|
|
|
|
* another.
|
|
|
|
*
|
|
|
|
* - irq_set_vcpu_affinity once again gets hijacked for the purpose of
|
|
|
|
* creating a new sub-API, namely scheduling/descheduling a VPE
|
|
|
|
* (which involves programming GICR_V{PROP,PEND}BASER) and
|
|
|
|
* performing INVALL operations.
|
|
|
|
*/
|
|
|
|
|
2016-12-20 15:27:52 +00:00
|
|
|
static struct irq_domain *gic_domain;
|
|
|
|
static const struct irq_domain_ops *vpe_domain_ops;
|
2020-03-04 20:33:15 +00:00
|
|
|
static const struct irq_domain_ops *sgi_domain_ops;
|
2016-12-20 15:27:52 +00:00
|
|
|
|
2021-03-17 10:07:19 +00:00
|
|
|
#ifdef CONFIG_ARM64
#include <asm/cpufeature.h>

/*
 * Tell whether the CPU interface supports directly-injected vSGIs,
 * i.e. whether ID_AA64PFR0_EL1.GIC advertises at least GICv4.1.
 * Uses the system-wide sanitised copy of the register, so the answer
 * is valid for all CPUs.
 */
bool gic_cpuif_has_vsgi(void)
{
	unsigned long fld, reg = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	/* Extract the (unsigned) GIC field from ID_AA64PFR0_EL1 */
	fld = cpuid_feature_extract_unsigned_field(reg, ID_AA64PFR0_EL1_GIC_SHIFT);

	return fld >= ID_AA64PFR0_EL1_GIC_V4P1;
}
#else
/* No arm64 CPU feature probing available: report no vSGI support. */
bool gic_cpuif_has_vsgi(void)
{
	return false;
}
#endif
|
|
|
|
|
2020-03-04 20:33:20 +00:00
|
|
|
static bool has_v4_1(void)
|
|
|
|
{
|
|
|
|
return !!sgi_domain_ops;
|
|
|
|
}
|
|
|
|
|
2021-03-17 10:07:19 +00:00
|
|
|
static bool has_v4_1_sgi(void)
|
|
|
|
{
|
|
|
|
return has_v4_1() && gic_cpuif_has_vsgi();
|
|
|
|
}
|
|
|
|
|
2020-03-04 20:33:21 +00:00
|
|
|
/*
 * Allocate the per-vPE vSGI infrastructure (GICv4.1 only): a dedicated
 * fwnode, a 16-interrupt linear domain and the 16 backing interrupts.
 *
 * @vpe: the vPE to equip with vSGIs
 * @idx: vPE index, used to disambiguate the fwnode name
 *
 * Returns 0 on success (or when vSGIs are not supported, in which case
 * this is a no-op), -ENOMEM on any allocation failure.
 */
static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx)
{
	char *name;
	int sgi_base;

	/* Nothing to do unless both the ITS and the CPUs do v4.1 vSGIs */
	if (!has_v4_1_sgi())
		return 0;

	name = kasprintf(GFP_KERNEL, "GICv4-sgi-%d", task_pid_nr(current));
	if (!name)
		goto err;

	vpe->fwnode = irq_domain_alloc_named_id_fwnode(name, idx);
	if (!vpe->fwnode)
		goto err;

	/* The fwnode keeps its own copy of the name; drop ours */
	kfree(name);
	name = NULL;

	vpe->sgi_domain = irq_domain_create_linear(vpe->fwnode, 16,
						   sgi_domain_ops, vpe);
	if (!vpe->sgi_domain)
		goto err;

	/* Back all 16 vSGIs with Linux interrupts in one go */
	sgi_base = irq_domain_alloc_irqs(vpe->sgi_domain, 16, NUMA_NO_NODE, vpe);
	if (sgi_base <= 0)
		goto err;

	return 0;

err:
	/* Unwind whatever was set up; kfree(NULL) is a no-op */
	if (vpe->sgi_domain)
		irq_domain_remove(vpe->sgi_domain);
	if (vpe->fwnode)
		irq_domain_free_fwnode(vpe->fwnode);
	kfree(name);
	return -ENOMEM;
}
|
|
|
|
|
2016-12-20 15:27:52 +00:00
|
|
|
/*
 * Allocate the doorbell interrupts for all the vPEs of a VM: a fwnode
 * and an irq domain (stacked on the host LPI domain) for the VM, one
 * Linux interrupt per vPE, plus the per-vPE vSGI setup on GICv4.1.
 *
 * @vm: the VM abstraction; vm->nr_vpes and vm->vpes[] must be populated
 *
 * Returns 0 on success, -ENOMEM otherwise.
 */
int its_alloc_vcpu_irqs(struct its_vm *vm)
{
	int vpe_base_irq, i;

	vm->fwnode = irq_domain_alloc_named_id_fwnode("GICv4-vpe",
						      task_pid_nr(current));
	if (!vm->fwnode)
		goto err;

	vm->domain = irq_domain_create_hierarchy(gic_domain, 0, vm->nr_vpes,
						 vm->fwnode, vpe_domain_ops,
						 vm);
	if (!vm->domain)
		goto err;

	/* Link each vPE back to its VM before allocating the interrupts */
	for (i = 0; i < vm->nr_vpes; i++) {
		vm->vpes[i]->its_vm = vm;
		vm->vpes[i]->idai = true;
	}

	/* One doorbell per vPE, allocated as a contiguous range */
	vpe_base_irq = irq_domain_alloc_irqs(vm->domain, vm->nr_vpes,
					     NUMA_NO_NODE, vm);
	if (vpe_base_irq <= 0)
		goto err;

	for (i = 0; i < vm->nr_vpes; i++) {
		int ret;
		vm->vpes[i]->irq = vpe_base_irq + i;
		/* No-op on anything older than GICv4.1 */
		ret = its_alloc_vcpu_sgis(vm->vpes[i], i);
		if (ret)
			goto err;
	}

	return 0;

err:
	if (vm->domain)
		irq_domain_remove(vm->domain);
	if (vm->fwnode)
		irq_domain_free_fwnode(vm->fwnode);

	return -ENOMEM;
}
|
|
|
|
|
2020-03-04 20:33:21 +00:00
|
|
|
/*
 * Tear down the per-vPE vSGI infrastructure set up by
 * its_alloc_vcpu_sgis(): the 16 interrupts, the SGI domain and its
 * fwnode, for each vPE of the VM. No-op when vSGIs are unsupported.
 */
static void its_free_sgi_irqs(struct its_vm *vm)
{
	int i;

	if (!has_v4_1_sgi())
		return;

	for (i = 0; i < vm->nr_vpes; i++) {
		/*
		 * The 16 vSGIs were allocated contiguously; recover the
		 * base Linux interrupt from hwirq 0's mapping.
		 */
		unsigned int irq = irq_find_mapping(vm->vpes[i]->sgi_domain, 0);

		/* A missing mapping means the alloc/free got out of sync */
		if (WARN_ON(!irq))
			continue;

		irq_domain_free_irqs(irq, 16);
		irq_domain_remove(vm->vpes[i]->sgi_domain);
		irq_domain_free_fwnode(vm->vpes[i]->fwnode);
	}
}
|
|
|
|
|
2016-12-20 15:27:52 +00:00
|
|
|
/*
 * Release everything its_alloc_vcpu_irqs() set up for a VM: first the
 * per-vPE vSGIs (GICv4.1), then the contiguous doorbell interrupt
 * range, the VM's irq domain and its fwnode — in that order.
 */
void its_free_vcpu_irqs(struct its_vm *vm)
{
	its_free_sgi_irqs(vm);
	irq_domain_free_irqs(vm->vpes[0]->irq, vm->nr_vpes);
	irq_domain_remove(vm->domain);
	irq_domain_free_fwnode(vm->fwnode);
}
|
2016-12-20 15:31:02 +00:00
|
|
|
|
|
|
|
static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
|
|
|
|
{
|
|
|
|
return irq_set_vcpu_affinity(vpe->irq, info);
|
|
|
|
}
|
|
|
|
|
2020-03-04 20:33:20 +00:00
|
|
|
/*
 * Deschedule a vPE from its redistributor (DESCHEDULE_VPE).
 *
 * @vpe: the vPE to make non-resident
 * @db:  whether a doorbell should be requested so the hypervisor is
 *       notified if a VLPI becomes pending while the vPE is away
 *
 * Must be called with preemption disabled (the vPE is tied to the
 * current CPU's redistributor). Returns the result of the command;
 * on success, vpe->resident is cleared. vpe->ready is cleared
 * unconditionally so a subsequent its_commit_vpe() is required.
 */
int its_make_vpe_non_resident(struct its_vpe *vpe, bool db)
{
	struct irq_desc *desc = irq_to_desc(vpe->irq);
	struct its_cmd_info info = { };
	int ret;

	WARN_ON(preemptible());

	info.cmd_type = DESCHEDULE_VPE;
	if (has_v4_1()) {
		/* GICv4.1 can directly deal with doorbells */
		info.req_db = db;
	} else {
		/*
		 * Undo the nested disable_irq() calls...
		 * (pre-4.1, the doorbell is emulated by masking/unmasking
		 * the vPE's LPI, and residency transitions can nest)
		 */
		while (db && irqd_irq_disabled(&desc->irq_data))
			enable_irq(vpe->irq);
	}

	ret = its_send_vpe_cmd(vpe, &info);
	if (!ret)
		vpe->resident = false;

	/* A descheduled vPE always needs a fresh commit */
	vpe->ready = false;

	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Schedule a vPE on the current CPU's redistributor (SCHEDULE_VPE).
 *
 * @vpe:  the vPE to make resident
 * @g0en: on GICv4.1, whether Group-0 vSGIs are enabled for this vPE
 * @g1en: on GICv4.1, whether Group-1 vSGIs are enabled for this vPE
 *
 * Must be called with preemption disabled. Returns the result of the
 * command; on success, vpe->resident is set.
 */
int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en)
{
	struct its_cmd_info info = { };
	int ret;

	WARN_ON(preemptible());

	info.cmd_type = SCHEDULE_VPE;
	if (has_v4_1()) {
		info.g0en = g0en;
		info.g1en = g1en;
	} else {
		/* Disabled the doorbell, as we're about to enter the guest */
		disable_irq_nosync(vpe->irq);
	}

	ret = its_send_vpe_cmd(vpe, &info);
	if (!ret)
		vpe->resident = true;

	return ret;
}
|
|
|
|
|
2020-11-28 22:18:57 +08:00
|
|
|
int its_commit_vpe(struct its_vpe *vpe)
|
|
|
|
{
|
|
|
|
struct its_cmd_info info = {
|
|
|
|
.cmd_type = COMMIT_VPE,
|
|
|
|
};
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
WARN_ON(preemptible());
|
|
|
|
|
|
|
|
ret = its_send_vpe_cmd(vpe, &info);
|
|
|
|
if (!ret)
|
|
|
|
vpe->ready = true;
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-12-20 15:31:02 +00:00
|
|
|
int its_invall_vpe(struct its_vpe *vpe)
|
|
|
|
{
|
|
|
|
struct its_cmd_info info = {
|
|
|
|
.cmd_type = INVALL_VPE,
|
|
|
|
};
|
|
|
|
|
|
|
|
return its_send_vpe_cmd(vpe, &info);
|
|
|
|
}
|
2016-12-21 21:50:32 +00:00
|
|
|
|
|
|
|
/*
 * Turn the host LPI backing @irq into a guest-visible VLPI, as
 * described by @map (MAP_VLPI). On success the host will no longer
 * see this interrupt; on failure the interrupt's state is restored.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
int its_map_vlpi(int irq, struct its_vlpi_map *map)
{
	struct its_cmd_info info = {
		.cmd_type = MAP_VLPI,
		{
			.map = map,
		},
	};
	int ret;

	/*
	 * The host will never see that interrupt firing again, so it
	 * is vital that we don't do any lazy masking.
	 */
	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);

	ret = irq_set_vcpu_affinity(irq, &info);
	if (ret)
		/* Mapping failed: undo the eager-masking requirement */
		irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);

	return ret;
}
|
|
|
|
|
|
|
|
int its_get_vlpi(int irq, struct its_vlpi_map *map)
|
|
|
|
{
|
|
|
|
struct its_cmd_info info = {
|
|
|
|
.cmd_type = GET_VLPI,
|
2017-09-12 22:08:23 +02:00
|
|
|
{
|
|
|
|
.map = map,
|
|
|
|
},
|
2016-12-21 21:50:32 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
return irq_set_vcpu_affinity(irq, &info);
|
|
|
|
}
|
|
|
|
|
KVM: arm64: WARN if unmapping a vLPI fails in any path
When unmapping a vLPI, WARN if nullifying vCPU affinity fails, not just if
failure occurs when freeing an ITE. If undoing vCPU affinity fails, then
odds are very good that vLPI state tracking has has gotten out of whack,
i.e. that KVM and the GIC disagree on the state of an IRQ/vLPI. At best,
inconsistent state means there is a lurking bug/flaw somewhere. At worst,
the inconsistency could eventually be fatal to the host, e.g. if an ITS
command fails because KVM's view of things doesn't match reality/hardware.
Note, only the call from kvm_arch_irq_bypass_del_producer() by way of
kvm_vgic_v4_unset_forwarding() doesn't already WARN. Common KVM's
kvm_irq_routing_update() WARNs if kvm_arch_update_irqfd_routing() fails.
For that path, if its_unmap_vlpi() fails in kvm_vgic_v4_unset_forwarding(),
the only possible causes are that the GIC doesn't have a v4 ITS (from
its_irq_set_vcpu_affinity()):
/* Need a v4 ITS */
if (!is_v4(its_dev->its))
return -EINVAL;
guard(raw_spinlock)(&its_dev->event_map.vlpi_lock);
/* Unmap request? */
if (!info)
return its_vlpi_unmap(d);
or that KVM has gotten out of sync with the GIC/ITS (from its_vlpi_unmap()):
if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
return -EINVAL;
All of the above failure scenarios are warnable offences, as they should
never occur absent a kernel/KVM bug.
Acked-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/all/aFWY2LTVIxz5rfhh@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
2025-06-12 16:51:47 -07:00
|
|
|
/*
 * Undo its_map_vlpi(): drop the eager-masking requirement and hand the
 * interrupt back to the host. A failure here means KVM and the ITS
 * disagree on the interrupt's state, which should never happen —
 * hence the WARN.
 */
void its_unmap_vlpi(int irq)
{
	irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
	WARN_ON_ONCE(irq_set_vcpu_affinity(irq, NULL));
}
|
|
|
|
|
|
|
|
/*
 * Update a VLPI's property byte (priority/enable), optionally
 * following up with an INV when @inv is set.
 */
int its_prop_update_vlpi(int irq, u8 config, bool inv)
{
	struct its_cmd_info info = { };

	info.cmd_type = inv ? PROP_UPDATE_AND_INV_VLPI : PROP_UPDATE_VLPI;
	info.config = config;

	return irq_set_vcpu_affinity(irq, &info);
}
|
2016-12-20 15:31:54 +00:00
|
|
|
|
2020-03-04 20:33:22 +00:00
|
|
|
/* Update a vSGI's priority and group (PROP_UPDATE_VSGI). */
int its_prop_update_vsgi(int irq, u8 priority, bool group)
{
	struct its_cmd_info info = { };

	info.cmd_type = PROP_UPDATE_VSGI;
	info.priority = priority;
	info.group = group;

	return irq_set_vcpu_affinity(irq, &info);
}
|
|
|
|
|
2020-03-04 20:33:15 +00:00
|
|
|
int its_init_v4(struct irq_domain *domain,
|
|
|
|
const struct irq_domain_ops *vpe_ops,
|
|
|
|
const struct irq_domain_ops *sgi_ops)
|
2016-12-20 15:31:54 +00:00
|
|
|
{
|
|
|
|
if (domain) {
|
|
|
|
pr_info("ITS: Enabling GICv4 support\n");
|
|
|
|
gic_domain = domain;
|
2020-03-04 20:33:15 +00:00
|
|
|
vpe_domain_ops = vpe_ops;
|
|
|
|
sgi_domain_ops = sgi_ops;
|
2016-12-20 15:31:54 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
pr_err("ITS: No GICv4 VPE domain allocated\n");
|
|
|
|
return -ENODEV;
|
|
|
|
}
|