xen: branch for v6.15-rc1

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQRTLbB6QfY48x44uB6AXGG7T9hjvgUCZ9/gEwAKCRCAXGG7T9hj
 vlxhAQCRzSCNI8wwvENnuc2OnRyWKy8gq7C5WAOIOJdJ3U+scQEAwKGhPJLwE4IS
 /JDh5PRJgZ4rdMYatuDfldEcSAfRRgw=
 =dF6Z
 -----END PGP SIGNATURE-----

Merge tag 'for-linus-6.15-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:

 - cleanup: remove an unused function

 - add support for a XenServer-specific virtual PCI device

 - fix the handling of a sparse Xen hypervisor symbol table

 - avoid warnings when building the kernel with gcc 15

 - fix use of devices behind a VMD bridge when running as a Xen PV dom0

* tag 'for-linus-6.15-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  PCI/MSI: Convert pci_msi_ignore_mask to per MSI domain flag
  PCI: vmd: Disable MSI remapping bypass under Xen
  xen/pci: Do not register devices with segments >= 0x10000
  xen/pciback: Remove unused pcistub_get_pci_dev
  xenfs/xensyms: respect hypervisor's "next" indication
  xen/mcelog: Add __nonstring annotations for unterminated strings
  xen: Add support for XenServer 6.1 platform device
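
The common thread of the PCI/MSI changes below is that the global pci_msi_ignore_mask switch is removed and the "don't touch the mask bits" behaviour is now expressed per MSI irq domain through the new MSI_FLAG_NO_MASK flag. As a rough sketch of the resulting pattern (not part of this series; the helper name is invented for illustration), code that used to test the global now queries the device's MSI domain, just as the converted hunks do:

    /*
     * Illustrative helper only, not from this series. It mirrors the pattern
     * used by the converted PCI/MSI code: look up the device's MSI irq domain
     * and test MSI_FLAG_NO_MASK in its msi_domain_info instead of reading the
     * removed pci_msi_ignore_mask global.
     */
    #include <linux/device.h>
    #include <linux/irqdomain.h>
    #include <linux/msi.h>

    static bool msi_masking_suppressed(struct device *dev)
    {
            const struct irq_domain *d = dev_get_msi_domain(dev);
            const struct msi_domain_info *info = d ? d->host_data : NULL;

            /* MSI_FLAG_NO_MASK means masking is handled elsewhere (e.g. by Xen) */
            return info && (info->flags & MSI_FLAG_NO_MASK);
    }
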
Linus Torvalds 2025-03-25 14:33:32 -07:00
commit dce3ab4c57
11 changed files with 85 additions and 49 deletions


@@ -436,7 +436,8 @@ static struct msi_domain_ops xen_pci_msi_domain_ops = {
 };
 static struct msi_domain_info xen_pci_msi_domain_info = {
-        .flags = MSI_FLAG_PCI_MSIX | MSI_FLAG_FREE_MSI_DESCS | MSI_FLAG_DEV_SYSFS,
+        .flags = MSI_FLAG_PCI_MSIX | MSI_FLAG_FREE_MSI_DESCS |
+                 MSI_FLAG_DEV_SYSFS | MSI_FLAG_NO_MASK,
         .ops = &xen_pci_msi_domain_ops,
 };
@@ -484,11 +485,6 @@ static __init void xen_setup_pci_msi(void)
          * in allocating the native domain and never use it.
          */
         x86_init.irqs.create_pci_msi_domain = xen_create_pci_msi_domain;
-        /*
-         * With XEN PIRQ/Eventchannels in use PCI/MSI[-X] masking is solely
-         * controlled by the hypervisor.
-         */
-        pci_msi_ignore_mask = 1;
 }
 #else /* CONFIG_PCI_MSI */


@@ -17,6 +17,8 @@
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
+#include <xen/xen.h>
 #include <asm/irqdomain.h>
 #define VMD_CFGBAR 0
@@ -970,6 +972,24 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
         struct vmd_dev *vmd;
         int err;
+        if (xen_domain()) {
+                /*
+                 * Xen doesn't have knowledge about devices in the VMD bus
+                 * because the config space of devices behind the VMD bridge is
+                 * not known to Xen, and hence Xen cannot discover or configure
+                 * them in any way.
+                 *
+                 * Bypass of MSI remapping won't work in that case as direct
+                 * write by Linux to the MSI entries won't result in functional
+                 * interrupts, as Xen is the entity that manages the host
+                 * interrupt controller and must configure interrupts. However
+                 * multiplexing of interrupts by the VMD bridge will work under
+                 * Xen, so force the usage of that mode which must always be
+                 * supported by VMD bridges.
+                 */
+                features &= ~VMD_FEAT_CAN_BYPASS_MSI_REMAP;
+        }
         if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
                 return -ENOMEM;


@@ -10,12 +10,12 @@
 #include <linux/err.h>
 #include <linux/export.h>
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include "../pci.h"
 #include "msi.h"
 int pci_msi_enable = 1;
-int pci_msi_ignore_mask;
@@ -285,6 +285,8 @@ static void pci_msi_set_enable(struct pci_dev *dev, int enable)
 static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
                               struct irq_affinity_desc *masks)
 {
+        const struct irq_domain *d = dev_get_msi_domain(&dev->dev);
+        const struct msi_domain_info *info = d->host_data;
         struct msi_desc desc;
         u16 control;
@@ -295,8 +297,7 @@ static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
         /* Lies, damned lies, and MSIs */
         if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
                 control |= PCI_MSI_FLAGS_MASKBIT;
-        /* Respect XEN's mask disabling */
-        if (pci_msi_ignore_mask)
+        if (info->flags & MSI_FLAG_NO_MASK)
                 control &= ~PCI_MSI_FLAGS_MASKBIT;
         desc.nvec_used = nvec;
@@ -603,12 +604,15 @@ static void __iomem *msix_map_region(struct pci_dev *dev,
  */
 void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc)
 {
+        const struct irq_domain *d = dev_get_msi_domain(&dev->dev);
+        const struct msi_domain_info *info = d->host_data;
         desc->nvec_used = 1;
         desc->pci.msi_attrib.is_msix = 1;
         desc->pci.msi_attrib.is_64 = 1;
         desc->pci.msi_attrib.default_irq = dev->irq;
         desc->pci.mask_base = dev->msix_base;
-        desc->pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
+        desc->pci.msi_attrib.can_mask = !(info->flags & MSI_FLAG_NO_MASK) &&
                                         !desc->pci.msi_attrib.is_virtual;
         if (desc->pci.msi_attrib.can_mask) {
@@ -658,9 +662,6 @@ static void msix_mask_all(void __iomem *base, int tsize)
         u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
         int i;
-        if (pci_msi_ignore_mask)
-                return;
         for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
                 writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
 }
@@ -714,6 +715,8 @@ static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries
 static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
                                 int nvec, struct irq_affinity *affd)
 {
+        const struct irq_domain *d = dev_get_msi_domain(&dev->dev);
+        const struct msi_domain_info *info = d->host_data;
         int ret, tsize;
         u16 control;
@@ -744,15 +747,17 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
         /* Disable INTX */
         pci_intx_for_msi(dev, 0);
-        /*
-         * Ensure that all table entries are masked to prevent
-         * stale entries from firing in a crash kernel.
-         *
-         * Done late to deal with a broken Marvell NVME device
-         * which takes the MSI-X mask bits into account even
-         * when MSI-X is disabled, which prevents MSI delivery.
-         */
-        msix_mask_all(dev->msix_base, tsize);
+        if (!(info->flags & MSI_FLAG_NO_MASK)) {
+                /*
+                 * Ensure that all table entries are masked to prevent
+                 * stale entries from firing in a crash kernel.
+                 *
+                 * Done late to deal with a broken Marvell NVME device
+                 * which takes the MSI-X mask bits into account even
+                 * when MSI-X is disabled, which prevents MSI delivery.
+                 */
+                msix_mask_all(dev->msix_base, tsize);
+        }
         pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
         pcibios_free_irq(dev);


@@ -43,6 +43,18 @@ static int xen_add_device(struct device *dev)
                 pci_mcfg_reserved = true;
         }
 #endif
+        if (pci_domain_nr(pci_dev->bus) >> 16) {
+                /*
+                 * The hypercall interface is limited to 16bit PCI segment
+                 * values, do not attempt to register devices with Xen in
+                 * segments greater or equal than 0x10000.
+                 */
+                dev_info(dev,
+                         "not registering with Xen: invalid PCI segment\n");
+                return 0;
+        }
         if (pci_seg_supported) {
                 DEFINE_RAW_FLEX(struct physdev_pci_device_add, add, optarr, 1);
@@ -149,6 +161,16 @@ static int xen_remove_device(struct device *dev)
         int r;
         struct pci_dev *pci_dev = to_pci_dev(dev);
+        if (pci_domain_nr(pci_dev->bus) >> 16) {
+                /*
+                 * The hypercall interface is limited to 16bit PCI segment
+                 * values.
+                 */
+                dev_info(dev,
+                         "not unregistering with Xen: invalid PCI segment\n");
+                return 0;
+        }
         if (pci_seg_supported) {
                 struct physdev_pci_device device = {
                         .seg = pci_domain_nr(pci_dev->bus),
@@ -182,6 +204,16 @@ int xen_reset_device(const struct pci_dev *dev)
                 .flags = PCI_DEVICE_RESET_FLR,
         };
+        if (pci_domain_nr(dev->bus) >> 16) {
+                /*
+                 * The hypercall interface is limited to 16bit PCI segment
+                 * values.
+                 */
+                dev_info(&dev->dev,
+                         "unable to notify Xen of device reset: invalid PCI segment\n");
+                return 0;
+        }
         return HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_reset, &device);
 }
 EXPORT_SYMBOL_GPL(xen_reset_device);


@@ -26,6 +26,8 @@
 #define DRV_NAME "xen-platform-pci"
+#define PCI_DEVICE_ID_XEN_PLATFORM_XS61 0x0002
 static unsigned long platform_mmio;
 static unsigned long platform_mmio_alloc;
 static unsigned long platform_mmiolen;
@@ -174,6 +176,8 @@ pci_out:
 static const struct pci_device_id platform_pci_tbl[] = {
         {PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+        {PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM_XS61,
+         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
         {0,}
 };


@@ -262,26 +262,6 @@ struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
         return found_dev;
 }
-struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
-                                    struct pci_dev *dev)
-{
-        struct pcistub_device *psdev;
-        struct pci_dev *found_dev = NULL;
-        unsigned long flags;
-        spin_lock_irqsave(&pcistub_devices_lock, flags);
-        list_for_each_entry(psdev, &pcistub_devices, dev_list) {
-                if (psdev->dev == dev) {
-                        found_dev = pcistub_device_get_pci_dev(pdev, psdev);
-                        break;
-                }
-        }
-        spin_unlock_irqrestore(&pcistub_devices_lock, flags);
-        return found_dev;
-}
 /*
  * Called when:
  * - XenBus state has been reconfigure (pci unplug). See xen_pcibk_remove_device


@@ -67,8 +67,6 @@ extern struct list_head xen_pcibk_quirks;
 struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
                                             int domain, int bus,
                                             int slot, int func);
-struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
-                                    struct pci_dev *dev);
 void pcistub_put_pci_dev(struct pci_dev *dev);
 static inline bool xen_pcibk_pv_support(void)


@@ -48,7 +48,7 @@ static int xensyms_next_sym(struct xensyms *xs)
                         return -ENOMEM;
                 set_xen_guest_handle(symdata->name, xs->name);
-                symdata->symnum--; /* Rewind */
+                symdata->symnum = symnum; /* Rewind */
                 ret = HYPERVISOR_platform_op(&xs->op);
                 if (ret < 0)
@@ -78,7 +78,7 @@ static void *xensyms_next(struct seq_file *m, void *p, loff_t *pos)
 {
         struct xensyms *xs = m->private;
-        xs->op.u.symdata.symnum = ++(*pos);
+        *pos = xs->op.u.symdata.symnum;
         if (xensyms_next_sym(xs))
                 return NULL;


@@ -73,7 +73,6 @@ struct msi_msg {
         };
 };
-extern int pci_msi_ignore_mask;
 /* Helper functions */
 struct msi_desc;
 struct pci_dev;
@@ -558,6 +557,8 @@ enum {
         MSI_FLAG_PCI_MSIX_ALLOC_DYN = (1 << 20),
         /* PCI MSIs cannot be steered separately to CPU cores */
         MSI_FLAG_NO_AFFINITY = (1 << 21),
+        /* Inhibit usage of entry masking */
+        MSI_FLAG_NO_MASK = (1 << 22),
 };
 /*


@@ -372,7 +372,7 @@ struct xen_mce {
 #define XEN_MCE_LOG_LEN 32
 struct xen_mce_log {
-        char signature[12]; /* "MACHINECHECK" */
+        char signature[12] __nonstring; /* "MACHINECHECK" */
         unsigned len; /* = XEN_MCE_LOG_LEN */
         unsigned next;
         unsigned flags;
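
A note on the __nonstring hunk above (an explanatory aside, not text from the commit): gcc 15 introduces the -Wunterminated-string-initialization warning, which fires when a string initializer exactly fills a char array so that the terminating NUL is dropped. "MACHINECHECK" is exactly 12 bytes, so initializing the 12-byte signature field does precisely that. Marking the member __nonstring (defined in <linux/compiler_attributes.h>) documents that the field is intentionally not a NUL-terminated string and silences the warning. A minimal sketch with a made-up struct name:

    #include <linux/compiler_attributes.h>

    /* Hypothetical struct, for illustration only. */
    struct example_log_header {
            char signature[12] __nonstring;  /* deliberately not NUL-terminated */
    };

    /*
     * "MACHINECHECK" fills all 12 bytes, leaving no room for a trailing NUL;
     * without __nonstring on the field, gcc 15 would warn about this
     * initialization.
     */
    static const struct example_log_header example_hdr = {
            .signature = "MACHINECHECK",
    };
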


@@ -1144,7 +1144,7 @@ static bool msi_check_reservation_mode(struct irq_domain *domain,
         if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
                 return false;
-        if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
+        if (info->flags & MSI_FLAG_NO_MASK)
                 return false;
         /*