linux/drivers/iommu/intel/iommu.c

// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2006-2014 Intel Corporation.
*
* Authors: David Woodhouse <dwmw2@infradead.org>,
* Ashok Raj <ashok.raj@intel.com>,
* Shaohua Li <shaohua.li@intel.com>,
* Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
* Fenghua Yu <fenghua.yu@intel.com>
* Joerg Roedel <jroedel@suse.de>
*/
#define pr_fmt(fmt) "DMAR: " fmt
#define dev_fmt(fmt) pr_fmt(fmt)
#include <linux/crash_dump.h>
#include <linux/dma-direct.h>
#include <linux/dmi.h>
#include <linux/memory.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <uapi/linux/iommufd.h>
#include "iommu.h"
#include "../dma-iommu.h"
#include "../irq_remapping.h"
#include "../iommu-pages.h"
#include "pasid.h"
#include "perfmon.h"
#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE
#define IS_GFX_DEVICE(pdev) pci_is_display(pdev)
#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
#define IOAPIC_RANGE_START (0xfee00000)
#define IOAPIC_RANGE_END (0xfeefffff)
#define IOVA_START_ADDR (0x1000)
#define DEFAULT_DOMAIN_ADDRESS_WIDTH 57
#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)
/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
to match. That way, we can use 'unsigned long' for PFNs with impunity. */
#define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
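/*
 * Worked example (VTD_PAGE_SHIFT is 12 for 4KiB pages): with gaw = 48,
 * __DOMAIN_MAX_PFN(48) = (1ULL << 36) - 1 = 0xfffffffff, so
 * DOMAIN_MAX_ADDR(48) = 0xfffffffff000, the start of the last 4KiB
 * page below the 48-bit boundary.
 */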
static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;
#define rwbf_required(iommu) (rwbf_quirk || cap_rwbf((iommu)->cap))
/*
* set to 1 to panic the kernel if VT-d cannot be enabled successfully
* (used when the kernel is launched with TXT)
*/
static int force_on = 0;
static int intel_iommu_tboot_noforce;
static int no_platform_optin;
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
/*
* Take a root_entry and return the Lower Context Table Pointer (LCTP)
* if marked present.
*/
static phys_addr_t root_entry_lctp(struct root_entry *re)
{
if (!(re->lo & 1))
return 0;
return re->lo & VTD_PAGE_MASK;
}
/*
* Take a root_entry and return the Upper Context Table Pointer (UCTP)
* if marked present.
*/
static phys_addr_t root_entry_uctp(struct root_entry *re)
{
if (!(re->hi & 1))
return 0;
return re->hi & VTD_PAGE_MASK;
}
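/*
 * Each IOMMU keeps its probed devices in an rb-tree keyed by PCI source
 * ID (bus:devfn), so a source ID reported by the hardware (e.g. in a
 * fault record) can be mapped back to a struct device quickly. The two
 * helpers below provide the key/node comparisons for that tree.
 */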
static int device_rid_cmp_key(const void *key, const struct rb_node *node)
{
struct device_domain_info *info =
rb_entry(node, struct device_domain_info, node);
const u16 *rid_lhs = key;
if (*rid_lhs < PCI_DEVID(info->bus, info->devfn))
return -1;
if (*rid_lhs > PCI_DEVID(info->bus, info->devfn))
return 1;
return 0;
}
static int device_rid_cmp(struct rb_node *lhs, const struct rb_node *rhs)
{
struct device_domain_info *info =
rb_entry(lhs, struct device_domain_info, node);
u16 key = PCI_DEVID(info->bus, info->devfn);
return device_rid_cmp_key(&key, rhs);
}
/*
* Looks up an IOMMU-probed device using its source ID.
*
* Returns the pointer to the device if there is a match. Otherwise,
* returns NULL.
*
* Note that this helper doesn't guarantee that the device won't be
* released by the iommu subsystem after being returned. The caller
* should use its own synchronization mechanism to avoid the device
* being released while it is still in use.
*/
struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid)
{
struct device_domain_info *info = NULL;
struct rb_node *node;
unsigned long flags;
spin_lock_irqsave(&iommu->device_rbtree_lock, flags);
node = rb_find(&rid, &iommu->device_rbtree, device_rid_cmp_key);
if (node)
info = rb_entry(node, struct device_domain_info, node);
spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);
return info ? info->dev : NULL;
}
static int device_rbtree_insert(struct intel_iommu *iommu,
struct device_domain_info *info)
{
struct rb_node *curr;
unsigned long flags;
spin_lock_irqsave(&iommu->device_rbtree_lock, flags);
curr = rb_find_add(&info->node, &iommu->device_rbtree, device_rid_cmp);
spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);
if (WARN_ON(curr))
return -EEXIST;
return 0;
}
static void device_rbtree_remove(struct device_domain_info *info)
{
struct intel_iommu *iommu = info->iommu;
unsigned long flags;
spin_lock_irqsave(&iommu->device_rbtree_lock, flags);
rb_erase(&info->node, &iommu->device_rbtree);
spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags);
}
struct dmar_rmrr_unit {
struct list_head list; /* list of rmrr units */
struct acpi_dmar_header *hdr; /* ACPI header */
u64 base_address; /* reserved base address*/
u64 end_address; /* reserved end address */
struct dmar_dev_scope *devices; /* target devices */
int devices_cnt; /* target device count */
};
struct dmar_atsr_unit {
struct list_head list; /* list of ATSR units */
struct acpi_dmar_header *hdr; /* ACPI header */
struct dmar_dev_scope *devices; /* target devices */
int devices_cnt; /* target device count */
u8 include_all:1; /* include all ports */
};
struct dmar_satc_unit {
struct list_head list; /* list of SATC units */
struct acpi_dmar_header *hdr; /* ACPI header */
struct dmar_dev_scope *devices; /* target devices */
struct intel_iommu *iommu; /* the corresponding iommu */
int devices_cnt; /* target device count */
u8 atc_required:1; /* ATS is required */
};
static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);
static LIST_HEAD(dmar_satc_units);
#define for_each_rmrr_units(rmrr) \
list_for_each_entry(rmrr, &dmar_rmrr_units, list)
static void intel_iommu_domain_free(struct iommu_domain *domain);
int dmar_disabled = !IS_ENABLED(CONFIG_INTEL_IOMMU_DEFAULT_ON);
int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON);
int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);
static int intel_iommu_superpage = 1;
static int iommu_identity_mapping;
static int iommu_skip_te_disable;
static int disable_igfx_iommu;
#define IDENTMAP_AZALIA 4
const struct iommu_ops intel_iommu_ops;
static const struct iommu_dirty_ops intel_dirty_ops;
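/*
 * Translation may already be enabled when this kernel boots, e.g. in a
 * kdump kernel taking over from a crashed one. The pre-enabled status
 * is captured below so the old translation tables can be dealt with
 * before translation is re-enabled, instead of being clobbered.
 */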
static bool translation_pre_enabled(struct intel_iommu *iommu)
{
return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
}
static void clear_translation_pre_enabled(struct intel_iommu *iommu)
{
iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
}
static void init_translation_status(struct intel_iommu *iommu)
{
u32 gsts;
gsts = readl(iommu->reg + DMAR_GSTS_REG);
if (gsts & DMA_GSTS_TES)
iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
}
static int __init intel_iommu_setup(char *str)
{
if (!str)
return -EINVAL;
while (*str) {
if (!strncmp(str, "on", 2)) {
dmar_disabled = 0;
pr_info("IOMMU enabled\n");
} else if (!strncmp(str, "off", 3)) {
dmar_disabled = 1;
no_platform_optin = 1;
pr_info("IOMMU disabled\n");
} else if (!strncmp(str, "igfx_off", 8)) {
disable_igfx_iommu = 1;
pr_info("Disable GFX device mapping\n");
} else if (!strncmp(str, "forcedac", 8)) {
pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
iommu_dma_forcedac = true;
} else if (!strncmp(str, "strict", 6)) {
pr_warn("intel_iommu=strict deprecated; use iommu.strict=1 instead\n");
iommu_set_dma_strict();
} else if (!strncmp(str, "sp_off", 6)) {
pr_info("Disable supported super page\n");
intel_iommu_superpage = 0;
} else if (!strncmp(str, "sm_on", 5)) {
pr_info("Enable scalable mode if hardware supports\n");
intel_iommu_sm = 1;
} else if (!strncmp(str, "sm_off", 6)) {
pr_info("Scalable mode is disallowed\n");
intel_iommu_sm = 0;
} else if (!strncmp(str, "tboot_noforce", 13)) {
pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
intel_iommu_tboot_noforce = 1;
} else {
pr_notice("Unknown option - '%s'\n", str);
}
str += strcspn(str, ",");
while (*str == ',')
str++;
}
return 1;
}
__setup("intel_iommu=", intel_iommu_setup);
static int domain_pfn_supported(struct dmar_domain *domain, unsigned long pfn)
{
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}
/*
* Calculate the Supported Adjusted Guest Address Widths of an IOMMU.
* Refer to 11.4.2 of the VT-d spec for the encoding of each bit of
* the returned SAGAW.
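*
* For example, an IOMMU whose SAGAW capability reports only BIT(2)
* supports just 4-level (48-bit) second-level tables; with scalable
* mode and first-level translation available, that is intersected with
* the first-level widths (4-level always, 5-level/57-bit when FL5LP
* is set).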
*/
static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
{
unsigned long fl_sagaw, sl_sagaw;
fl_sagaw = BIT(2) | (cap_fl5lp_support(iommu->cap) ? BIT(3) : 0);
sl_sagaw = cap_sagaw(iommu->cap);
/* Second level only. */
if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
return sl_sagaw;
/* First level only. */
if (!ecap_slts(iommu->ecap))
return fl_sagaw;
return fl_sagaw & sl_sagaw;
}
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
unsigned long sagaw;
int agaw;
sagaw = __iommu_calculate_sagaw(iommu);
for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) {
if (test_bit(agaw, &sagaw))
break;
}
return agaw;
}
/*
* Calculate max SAGAW for each iommu.
*/
int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
}
/*
* Calculate the AGAW for each IOMMU.
* "SAGAW" may differ across IOMMUs, so use a default AGAW and fall back
* to a smaller supported AGAW for IOMMUs that don't support the default.
*/
int iommu_calculate_agaw(struct intel_iommu *iommu)
{
return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
static bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
{
return sm_supported(iommu) ?
ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
}
/* Return the super pagesize bitmap if supported. */
static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain)
{
unsigned long bitmap = 0;
/*
* 1-level super page supports page size of 2MiB, 2-level super page
* supports page size of both 2MiB and 1GiB.
*/
if (domain->iommu_superpage == 1)
bitmap |= SZ_2M;
else if (domain->iommu_superpage == 2)
bitmap |= SZ_2M | SZ_1G;
return bitmap;
}
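/*
 * Return the context entry for bus/devfn, optionally allocating the
 * context table on demand. In legacy mode a single 4KiB table holds 256
 * 16-byte entries and is addressed by root_entry.lo. In scalable mode
 * each context entry is twice as large, so the table is split in two:
 * root_entry.lo covers devfn 0x00-0x7f and root_entry.hi covers devfn
 * 0x80-0xff, which is why devfn is rebased and doubled below.
 */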
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
u8 devfn, int alloc)
{
struct root_entry *root = &iommu->root_entry[bus];
struct context_entry *context;
u64 *entry;
/*
* Unless the caller requested to allocate a new entry,
* returning a copied context entry makes no sense.
*/
if (!alloc && context_copied(iommu, bus, devfn))
return NULL;
entry = &root->lo;
if (sm_supported(iommu)) {
if (devfn >= 0x80) {
devfn -= 0x80;
entry = &root->hi;
}
devfn *= 2;
}
if (*entry & 1)
context = phys_to_virt(*entry & VTD_PAGE_MASK);
else {
unsigned long phy_addr;
if (!alloc)
return NULL;
context = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC,
SZ_4K);
if (!context)
return NULL;
__iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
phy_addr = virt_to_phys((void *)context);
*entry = phy_addr | 1;
__iommu_flush_cache(iommu, entry, sizeof(*entry));
}
return &context[devfn];
}
/**
* is_downstream_to_pci_bridge - test if a device belongs to the PCI
* sub-hierarchy of a candidate PCI-PCI bridge
* @dev: candidate PCI device belonging to @bridge PCI sub-hierarchy
* @bridge: the candidate PCI-PCI bridge
*
* Return: true if @dev belongs to @bridge PCI sub-hierarchy, else false.
*/
static bool
is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
{
struct pci_dev *pdev, *pbridge;
if (!dev_is_pci(dev) || !dev_is_pci(bridge))
return false;
pdev = to_pci_dev(dev);
pbridge = to_pci_dev(bridge);
if (pbridge->subordinate &&
pbridge->subordinate->number <= pdev->bus->number &&
pbridge->subordinate->busn_res.end >= pdev->bus->number)
return true;
return false;
}
static bool quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
{
struct dmar_drhd_unit *drhd;
u32 vtbar;
int rc;
/* We know that this device on this chipset has its own IOMMU.
* If we find it under a different IOMMU, then the BIOS is lying
* to us. Hope that the IOMMU for this device is actually
* disabled, and it needs no translation...
*/
rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
if (rc) {
/* "can't" happen */
dev_info(&pdev->dev, "failed to run vt-d quirk\n");
return false;
}
vtbar &= 0xffff0000;
/* we know that this iommu should be at offset 0xa000 from vtbar */
drhd = dmar_find_matched_drhd_unit(pdev);
if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
return true;
}
return false;
}
static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
{
if (!iommu || iommu->drhd->ignored)
return true;
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SNB &&
quirk_ioat_snb_local_iommu(pdev))
return true;
}
return false;
}
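/*
 * Find the DMAR unit that covers @dev by scanning the ACPI device
 * scopes. On a match, @bus and @devfn (if non-NULL) return the source
 * ID the IOMMU will see for the device; for VFs that is the VF's own
 * BDF even though the scope lookup is done via the PF.
 */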
static struct intel_iommu *device_lookup_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
struct dmar_drhd_unit *drhd = NULL;
struct pci_dev *pdev = NULL;
struct intel_iommu *iommu;
struct device *tmp;
u16 segment = 0;
int i;
if (!dev)
return NULL;
if (dev_is_pci(dev)) {
struct pci_dev *pf_pdev;
pdev = pci_real_dma_dev(to_pci_dev(dev));
/* VFs aren't listed in scope tables; we need to look up
* the PF instead to find the IOMMU. */
pf_pdev = pci_physfn(pdev);
dev = &pf_pdev->dev;
segment = pci_domain_nr(pdev->bus);
} else if (has_acpi_companion(dev))
dev = &ACPI_COMPANION(dev)->dev;
rcu_read_lock();
for_each_iommu(iommu, drhd) {
if (pdev && segment != drhd->segment)
continue;
for_each_active_dev_scope(drhd->devices,
drhd->devices_cnt, i, tmp) {
if (tmp == dev) {
/* For a VF use its original BDF# not that of the PF
* which we used for the IOMMU lookup. Strictly speaking
* we could do this for all PCI devices; we only need to
* get the BDF# from the scope table for ACPI matches. */
if (pdev && pdev->is_virtfn)
goto got_pdev;
if (bus && devfn) {
*bus = drhd->devices[i].bus;
*devfn = drhd->devices[i].devfn;
}
goto out;
}
if (is_downstream_to_pci_bridge(dev, tmp))
goto got_pdev;
}
if (pdev && drhd->include_all) {
got_pdev:
if (bus && devfn) {
*bus = pdev->bus->number;
*devfn = pdev->devfn;
}
goto out;
}
}
iommu = NULL;
out:
if (iommu_is_dummy(iommu, dev))
iommu = NULL;
rcu_read_unlock();
return iommu;
}
static void domain_flush_cache(struct dmar_domain *domain,
void *addr, int size)
{
if (!domain->iommu_coherency)
clflush_cache_range(addr, size);
}
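/*
 * Free every context table reachable from the root table, then the root
 * table itself. In scalable mode each bus has two half-sized context
 * tables (devfn 0x00-0x7f and 0x80-0xff), hence the second lookup at
 * index 0x80.
 */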
static void free_context_table(struct intel_iommu *iommu)
{
struct context_entry *context;
int i;
if (!iommu->root_entry)
return;
for (i = 0; i < ROOT_ENTRY_NR; i++) {
context = iommu_context_addr(iommu, i, 0, 0);
if (context)
iommu_free_pages(context);
if (!sm_supported(iommu))
continue;
context = iommu_context_addr(iommu, i, 0x80, 0);
if (context)
iommu_free_pages(context);
}
iommu_free_pages(iommu->root_entry);
iommu->root_entry = NULL;
}
#ifdef CONFIG_DMAR_DEBUG
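/*
 * Diagnostic helper: walk the page table for @pfn from @parent at
 * @level, printing each PTE, until a leaf, a superpage, or a
 * non-present entry terminates the walk.
 */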
static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn,
u8 bus, u8 devfn, struct dma_pte *parent, int level)
{
struct dma_pte *pte;
int offset;
while (1) {
offset = pfn_level_offset(pfn, level);
pte = &parent[offset];
pr_info("pte level: %d, pte value: 0x%016llx\n", level, pte->val);
if (!dma_pte_present(pte)) {
pr_info("page table not present at level %d\n", level - 1);
break;
}
if (level == 1 || dma_pte_superpage(pte))
break;
parent = phys_to_virt(dma_pte_addr(pte));
level--;
}
}
void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
unsigned long long addr, u32 pasid)
{
struct pasid_dir_entry *dir, *pde;
struct pasid_entry *entries, *pte;
struct context_entry *ctx_entry;
struct root_entry *rt_entry;
int i, dir_index, index, level;
u8 devfn = source_id & 0xff;
u8 bus = source_id >> 8;
struct dma_pte *pgtable;
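/*
 * Walk the translation structures top down: root entry, context entry,
 * then (in scalable mode) the PASID directory and PASID table, and
 * finally the I/O page table itself, dumping each entry as we go.
 */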
pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr);
/* root entry dump */
if (!iommu->root_entry) {
pr_info("root table is not present\n");
return;
}
rt_entry = &iommu->root_entry[bus];
if (sm_supported(iommu))
pr_info("scalable mode root entry: hi 0x%016llx, low 0x%016llx\n",
rt_entry->hi, rt_entry->lo);
else
pr_info("root entry: 0x%016llx", rt_entry->lo);
/* context entry dump */
ctx_entry = iommu_context_addr(iommu, bus, devfn, 0);
if (!ctx_entry) {
pr_info("context table is not present\n");
return;
}
pr_info("context entry: hi 0x%016llx, low 0x%016llx\n",
ctx_entry->hi, ctx_entry->lo);
/* legacy mode does not require PASID entries */
if (!sm_supported(iommu)) {
if (!context_present(ctx_entry)) {
pr_info("legacy mode page table is not present\n");
return;
}
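/*
 * Legacy mode: the page table hangs directly off the context
 * entry; its depth comes from the address-width (AGAW) field.
 */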
level = agaw_to_level(ctx_entry->hi & 7);
pgtable = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK);
goto pgtable_walk;
}
if (!context_present(ctx_entry)) {
pr_info("pasid directory table is not present\n");
return;
}
/* get the pointer to pasid directory entry */
dir = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK);
/* For request-without-pasid, get the pasid from context entry */
if (intel_iommu_sm && pasid == IOMMU_PASID_INVALID)
pasid = IOMMU_NO_PASID;
dir_index = pasid >> PASID_PDE_SHIFT;
pde = &dir[dir_index];
pr_info("pasid dir entry: 0x%016llx\n", pde->val);
/* get the pointer to the pasid table entry */
entries = get_pasid_table_from_pde(pde);
if (!entries) {
pr_info("pasid table is not present\n");
return;
}
index = pasid & PASID_PTE_MASK;
pte = &entries[index];
for (i = 0; i < ARRAY_SIZE(pte->val); i++)
pr_info("pasid table entry[%d]: 0x%016llx\n", i, pte->val[i]);
if (!pasid_pte_is_present(pte)) {
pr_info("scalable mode page table is not present\n");
return;
}
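/*
 * The PASID granular translation type (PGTT) selects the walk:
 * first-level tables are CPU-format 4- or 5-level structures (bit 2
 * of val[2] picks 5-level paging), while second-level tables take
 * their depth from the AGAW field in val[0].
 */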
if (pasid_pte_get_pgtt(pte) == PASID_ENTRY_PGTT_FL_ONLY) {
level = pte->val[2] & BIT_ULL(2) ? 5 : 4;
pgtable = phys_to_virt(pte->val[2] & VTD_PAGE_MASK);
} else {
level = agaw_to_level((pte->val[0] >> 2) & 0x7);
pgtable = phys_to_virt(pte->val[0] & VTD_PAGE_MASK);
}
pgtable_walk:
pgtable_walk(iommu, addr >> VTD_PAGE_SHIFT, bus, devfn, pgtable, level);
}
#endif
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
unsigned long pfn, int *target_level,
gfp_t gfp)
{
struct dma_pte *parent, *pte;
int level = agaw_to_level(domain->agaw);
int offset;
if (!domain_pfn_supported(domain, pfn))
/* Address beyond IOMMU's addressing capabilities. */
return NULL;
parent = domain->pgd;
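/*
 * Descend from the domain's top level towards the leaves, allocating
 * missing intermediate tables on the way. With *target_level == 0 the
 * walk is a pure lookup and stops at the first superpage or hole;
 * otherwise it stops once the requested level is reached.
 */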
while (1) {
void *tmp_page;
offset = pfn_level_offset(pfn, level);
pte = &parent[offset];
if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
break;
if (level == *target_level)
break;
if (!dma_pte_present(pte)) {
uint64_t pteval, tmp;
tmp_page = iommu_alloc_pages_node_sz(domain->nid, gfp,
SZ_4K);
if (!tmp_page)
return NULL;
domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
pteval = virt_to_phys(tmp_page) | DMA_PTE_READ |
DMA_PTE_WRITE;
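/*
 * First-level tables use the CPU paging format, so non-leaf
 * entries additionally need the user and accessed bits set.
 */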
if (domain->use_first_level)
pteval |= DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
tmp = 0ULL;
if (!try_cmpxchg64(&pte->val, &tmp, pteval))
/* Someone else set it while we were thinking; use theirs. */
iommu_free_pages(tmp_page);
else
domain_flush_cache(domain, pte, sizeof(*pte));
}
if (level == 1)
break;
parent = phys_to_virt(dma_pte_addr(pte));
level--;
}
if (!*target_level)
*target_level = level;
return pte;
}
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
unsigned long pfn,
int level, int *large_page)
{
struct dma_pte *parent, *pte;
int total = agaw_to_level(domain->agaw);
int offset;
parent = domain->pgd;
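/*
 * Stop early on a hole or a superpage; *large_page then reports the
 * level where the walk ended so the caller can step over the whole
 * naturally aligned region at once.
 */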
while (level <= total) {
offset = pfn_level_offset(pfn, total);
pte = &parent[offset];
if (level == total)
return pte;
if (!dma_pte_present(pte)) {
*large_page = total;
break;
}
if (dma_pte_superpage(pte)) {
*large_page = total;
return pte;
}
parent = phys_to_virt(dma_pte_addr(pte));
total--;
}
return NULL;
}
/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain,
unsigned long start_pfn,
unsigned long last_pfn)
{
unsigned int large_page;
struct dma_pte *first_pte, *pte;
if (WARN_ON(!domain_pfn_supported(domain, last_pfn)) ||
WARN_ON(start_pfn > last_pfn))
return;
/* We don't need a lock here; nobody else touches the iova range */
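/*
 * The outer loop re-resolves the leaf table for each stretch of the
 * range; the inner loop clears contiguous PTEs until it runs off the
 * end of the current page-table page, then flushes the modified
 * cachelines in one go.
 */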
do {
large_page = 1;
first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
if (!pte) {
start_pfn = align_to_level(start_pfn + 1, large_page + 1);
continue;
}
do {
dma_clear_pte(pte);
start_pfn += lvl_to_nr_pages(large_page);
pte++;
} while (start_pfn <= last_pfn && !first_pte_in_page(pte));
domain_flush_cache(domain, first_pte,
(void *)pte - (void *)first_pte);
} while (start_pfn && start_pfn <= last_pfn);
}
static void dma_pte_free_level(struct dmar_domain *domain, int level,
int retain_level, struct dma_pte *pte,
unsigned long pfn, unsigned long start_pfn,
unsigned long last_pfn)
{
pfn = max(start_pfn, pfn);
pte = &pte[pfn_level_offset(pfn, level)];
do {
unsigned long level_pfn;
struct dma_pte *level_pte;
if (!dma_pte_present(pte) || dma_pte_superpage(pte))
goto next;
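/*
 * level_pfn is the first pfn covered by this PTE; comparing against
 * it (not the walk cursor) prevents freeing a table that the range
 * only partially covers.
 */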
level_pfn = pfn & level_mask(level);
level_pte = phys_to_virt(dma_pte_addr(pte));
if (level > 2) {
dma_pte_free_level(domain, level - 1, retain_level,
level_pte, level_pfn, start_pfn,
last_pfn);
}
/*
* Free the page table if we're below the level we want to
* retain and the range covers the entire table.
*/
if (level < retain_level && !(start_pfn > level_pfn ||
last_pfn < level_pfn + level_size(level) - 1)) {
dma_clear_pte(pte);
domain_flush_cache(domain, pte, sizeof(*pte));
iommu_free_pages(level_pte);
}
next:
pfn += level_size(level);
} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
}
/*
* clear last level (leaf) ptes and free page table pages below the
* level we wish to keep intact.
*/
static void dma_pte_free_pagetable(struct dmar_domain *domain,
unsigned long start_pfn,
unsigned long last_pfn,
int retain_level)
{
dma_pte_clear_range(domain, start_pfn, last_pfn);
/* We don't need a lock here; nobody else touches the iova range */
dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
domain->pgd, 0, start_pfn, last_pfn);
/* free pgd */
if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
iommu_free_pages(domain->pgd);
domain->pgd = NULL;
}
}
/* When a page at a given level is being unlinked from its parent, we don't
need to *modify* it at all. All we need to do is make a list of all the
pages which can be freed just as soon as we've flushed the IOTLB and we
know the hardware page-walk will no longer touch them.
The 'pte' argument is the *parent* PTE, pointing to the page that is to
be freed. */
static void dma_pte_list_pagetables(struct dmar_domain *domain,
int level, struct dma_pte *parent_pte,
struct iommu_pages_list *freelist)
{
struct dma_pte *pte = phys_to_virt(dma_pte_addr(parent_pte));
iommu_pages_list_add(freelist, pte);
if (level == 1)
return;
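/* Queue any child tables this page still points at, depth first. */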
do {
if (dma_pte_present(pte) && !dma_pte_superpage(pte))
dma_pte_list_pagetables(domain, level - 1, pte, freelist);
pte++;
} while (!first_pte_in_page(pte));
}
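/*
 * Clear one level of the page table over [start_pfn, last_pfn]. PTEs whose
 * coverage lies entirely inside the range are cleared and their subordinate
 * tables queued on @freelist; partially covered entries are handled by
 * recursing one level down.
 */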
static void dma_pte_clear_level(struct dmar_domain *domain, int level,
struct dma_pte *pte, unsigned long pfn,
unsigned long start_pfn, unsigned long last_pfn,
struct iommu_pages_list *freelist)
{
struct dma_pte *first_pte = NULL, *last_pte = NULL;
pfn = max(start_pfn, pfn);
pte = &pte[pfn_level_offset(pfn, level)];
do {
unsigned long level_pfn = pfn & level_mask(level);
if (!dma_pte_present(pte))
goto next;
/* If range covers entire pagetable, free it */
if (start_pfn <= level_pfn &&
last_pfn >= level_pfn + level_size(level) - 1) {
/* These subordinate page tables are going away entirely. Don't
bother to clear them; we're just going to *free* them. */
if (level > 1 && !dma_pte_superpage(pte))
dma_pte_list_pagetables(domain, level - 1, pte, freelist);
dma_clear_pte(pte);
if (!first_pte)
first_pte = pte;
last_pte = pte;
} else if (level > 1) {
/* Recurse down into a level that isn't *entirely* obsolete */
dma_pte_clear_level(domain, level - 1,
phys_to_virt(dma_pte_addr(pte)),
level_pfn, start_pfn, last_pfn,
freelist);
}
next:
pfn = level_pfn + level_size(level);
} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
if (first_pte)
domain_flush_cache(domain, first_pte,
(void *)++last_pte - (void *)first_pte);
}
/* We can't just free the pages because the IOMMU may still be walking
the page tables, and may have cached the intermediate levels. The
pages can only be freed after the IOTLB flush has been done. */
static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
unsigned long last_pfn,
struct iommu_pages_list *freelist)
{
if (WARN_ON(!domain_pfn_supported(domain, last_pfn)) ||
WARN_ON(start_pfn > last_pfn))
return;
/* We don't need a lock here; nobody else touches this IOVA range. */
dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
domain->pgd, 0, start_pfn, last_pfn, freelist);
/* free pgd */
if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
iommu_pages_list_add(freelist, domain->pgd);
domain->pgd = NULL;
}
}
/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
struct root_entry *root;
root = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC, SZ_4K);
if (!root) {
pr_err("Allocating root entry for %s failed\n",
iommu->name);
return -ENOMEM;
}
__iommu_flush_cache(iommu, root, ROOT_SIZE);
iommu->root_entry = root;
return 0;
}
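/*
 * Program the root table address into DMAR_RTADDR_REG and latch it with the
 * SRTP command, then invalidate the context, PASID and IOTLB caches unless
 * the hardware already does that as part of the SRTP flow (ESRTPS).
 */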
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
u64 addr;
u32 sts;
unsigned long flag;
addr = virt_to_phys(iommu->root_entry);
if (sm_supported(iommu))
addr |= DMA_RTADDR_SMT;
raw_spin_lock_irqsave(&iommu->register_lock, flag);
dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
/* Make sure the hardware completes it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_RTPS), sts);
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
/*
* Hardware invalidates all DMA remapping hardware translation
* caches as part of SRTP flow.
*/
if (cap_esrtps(iommu->cap))
return;
iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
if (sm_supported(iommu))
qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
void iommu_flush_write_buffer(struct intel_iommu *iommu)
{
u32 val;
unsigned long flag;
if (!rwbf_quirk && !cap_rwbf(iommu->cap))
return;
raw_spin_lock_irqsave(&iommu->register_lock, flag);
writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
/* Make sure the hardware completes it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (!(val & DMA_GSTS_WBFS)), val);
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/*
 * Invalidate the context cache; any write-buffer flush that may be needed
 * is handled separately via iommu_flush_write_buffer().
 */
static void __iommu_flush_context(struct intel_iommu *iommu,
u16 did, u16 source_id, u8 function_mask,
u64 type)
{
u64 val = 0;
unsigned long flag;
switch (type) {
case DMA_CCMD_GLOBAL_INVL:
val = DMA_CCMD_GLOBAL_INVL;
break;
case DMA_CCMD_DOMAIN_INVL:
val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
break;
case DMA_CCMD_DEVICE_INVL:
val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
| DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
break;
default:
pr_warn("%s: Unexpected context-cache invalidation type 0x%llx\n",
iommu->name, type);
return;
}
val |= DMA_CCMD_ICC;
raw_spin_lock_irqsave(&iommu->register_lock, flag);
dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
/* Make sure the hardware completes it */
IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
dmar_readq, (!(val & DMA_CCMD_ICC)), val);
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
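/*
 * Register-based IOTLB invalidation: global, domain-selective or
 * page-selective, issued through the IOTLB registers located at
 * ecap_iotlb_offset().
 */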
void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type)
{
int tlb_offset = ecap_iotlb_offset(iommu->ecap);
u64 val = 0, val_iva = 0;
unsigned long flag;
switch (type) {
case DMA_TLB_GLOBAL_FLUSH:
/* a global flush doesn't need to set IVA_REG */
val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
break;
case DMA_TLB_DSI_FLUSH:
val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
break;
case DMA_TLB_PSI_FLUSH:
val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
/* IH bit is passed in as part of address */
val_iva = size_order | addr;
break;
default:
pr_warn("%s: Unexpected iotlb invalidation type 0x%llx\n",
iommu->name, type);
return;
}
if (cap_write_drain(iommu->cap))
val |= DMA_TLB_WRITE_DRAIN;
raw_spin_lock_irqsave(&iommu->register_lock, flag);
/* Note: Only uses first TLB reg currently */
if (val_iva)
dmar_writeq(iommu->reg + tlb_offset, val_iva);
dmar_writeq(iommu->reg + tlb_offset + 8, val);
/* Make sure the hardware completes it */
IOMMU_WAIT_OP(iommu, tlb_offset + 8,
dmar_readq, (!(val & DMA_TLB_IVT)), val);
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
/* check IOTLB invalidation granularity */
if (DMA_TLB_IAIG(val) == 0)
pr_err("Flush IOTLB failed\n");
if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
pr_debug("TLB flush request %Lx, actual %Lx\n",
(unsigned long long)DMA_TLB_IIRG(type),
(unsigned long long)DMA_TLB_IAIG(val));
}
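/*
 * Look up the device_domain_info attached to @domain that matches the given
 * iommu/bus/devfn triple; returns NULL if no such device is in the domain.
 */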
static struct device_domain_info *
domain_lookup_dev_info(struct dmar_domain *domain,
struct intel_iommu *iommu, u8 bus, u8 devfn)
{
struct device_domain_info *info;
unsigned long flags;
spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link) {
if (info->iommu == iommu && info->bus == bus &&
info->devfn == devfn) {
spin_unlock_irqrestore(&domain->lock, flags);
return info;
}
}
spin_unlock_irqrestore(&domain->lock, flags);
return NULL;
}
/*
* The extra devTLB flush quirk affects QAT devices with PCI device
* IDs ranging from 0x4940 to 0x4943. It is exempted from the risky_device()
* check because it applies only to built-in QAT devices and doesn't
* grant additional privileges.
*/
#define BUGGY_QAT_DEVID_MASK 0x4940
static bool dev_needs_extra_dtlb_flush(struct pci_dev *pdev)
{
if (pdev->vendor != PCI_VENDOR_ID_INTEL)
return false;
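/* Masking with 0xfffc clears the two low bits, matching 0x4940-0x4943 */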
if ((pdev->device & 0xfffc) != BUGGY_QAT_DEVID_MASK)
return false;
return true;
}
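/*
 * Enable PCI ATS on the device if it supports ATS and advertises
 * page-aligned translation requests.
 */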
static void iommu_enable_pci_ats(struct device_domain_info *info)
{
struct pci_dev *pdev;
if (!info->ats_supported)
return;
pdev = to_pci_dev(info->dev);
if (!pci_ats_page_aligned(pdev))
return;
if (!pci_enable_ats(pdev, VTD_PAGE_SHIFT))
info->ats_enabled = 1;
}
static void iommu_disable_pci_ats(struct device_domain_info *info)
{
if (!info->ats_enabled)
return;
pci_disable_ats(to_pci_dev(info->dev));
info->ats_enabled = 0;
}
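/*
 * Enable PCI PRI on the device. ATS must already be enabled, and when
 * PASID is in use the device must require a PASID in PRG Response
 * Messages.
 */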
static void iommu_enable_pci_pri(struct device_domain_info *info)
{
struct pci_dev *pdev;
if (!info->ats_enabled || !info->pri_supported)
return;
pdev = to_pci_dev(info->dev);
/* PASID is required in PRG Response Message. */
if (info->pasid_enabled && !pci_prg_resp_pasid_required(pdev))
return;
if (pci_reset_pri(pdev))
return;
if (!pci_enable_pri(pdev, PRQ_DEPTH))
info->pri_enabled = 1;
}
static void iommu_disable_pci_pri(struct device_domain_info *info)
{
if (!info->pri_enabled)
return;
if (WARN_ON(info->iopf_refcount))
iopf_queue_remove_device(info->iommu->iopf_queue, info->dev);
pci_disable_pri(to_pci_dev(info->dev));
info->pri_enabled = 0;
}
static void intel_flush_iotlb_all(struct iommu_domain *domain)
{
cache_tag_flush_all(to_dmar_domain(domain));
}
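/*
 * Disable the protected low/high memory regions by clearing the Enable
 * Protected Memory bit in DMAR_PMEN_REG, then wait for the protected
 * region status bit to clear.
 */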
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
{
u32 pmen;
unsigned long flags;
if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
return;
raw_spin_lock_irqsave(&iommu->register_lock, flags);
pmen = readl(iommu->reg + DMAR_PMEN_REG);
pmen &= ~DMA_PMEN_EPM;
writel(pmen, iommu->reg + DMAR_PMEN_REG);
/* wait for the protected region status bit to clear */
IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
readl, !(pmen & DMA_PMEN_PRS), pmen);
raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static void iommu_enable_translation(struct intel_iommu *iommu)
{
u32 sts;
unsigned long flags;
raw_spin_lock_irqsave(&iommu->register_lock, flags);
iommu->gcmd |= DMA_GCMD_TE;
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
/* Make sure the hardware completes it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_TES), sts);
raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}
static void iommu_disable_translation(struct intel_iommu *iommu)
{
u32 sts;
unsigned long flag;
if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated &&
(cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap)))
return;
raw_spin_lock_irqsave(&iommu->register_lock, flag);
iommu->gcmd &= ~DMA_GCMD_TE;
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
/* Make sure the hardware completes it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (!(sts & DMA_GSTS_TES)), sts);
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
static void disable_dmar_iommu(struct intel_iommu *iommu)
{
/*
* All iommu domains must have been detached from the devices,
* hence there should be no domain IDs in use.
*/
if (WARN_ON(!ida_is_empty(&iommu->domain_ida)))
return;
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
}
static void free_dmar_iommu(struct intel_iommu *iommu)
{
if (iommu->copied_tables) {
bitmap_free(iommu->copied_tables);
iommu->copied_tables = NULL;
}
/* free context mapping */
free_context_table(iommu);
if (ecap_prs(iommu->ecap))
intel_iommu_finish_prq(iommu);
}
/*
* Check and return whether first level is used by default for
* DMA translation.
*/
static bool first_level_by_default(struct intel_iommu *iommu)
{
/* Only SL is available in legacy mode */
if (!sm_supported(iommu))
return false;
/* Only one level (either FL or SL) is available; just use it */
if (ecap_flts(iommu->ecap) ^ ecap_slts(iommu->ecap))
return ecap_flts(iommu->ecap);
return true;
}
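/*
 * Attach @domain to @iommu: allocate a domain ID for this IOMMU on first
 * use and refcount further attachments via domain->iommu_array.
 */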
int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
{
struct iommu_domain_info *info, *curr;
int num, ret = -ENOSPC;
if (domain->domain.type == IOMMU_DOMAIN_SVA)
return 0;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
guard(mutex)(&iommu->did_lock);
curr = xa_load(&domain->iommu_array, iommu->seq_id);
if (curr) {
curr->refcnt++;
kfree(info);
return 0;
}
num = ida_alloc_range(&iommu->domain_ida, IDA_START_DID,
cap_ndoms(iommu->cap) - 1, GFP_KERNEL);
if (num < 0) {
pr_err("%s: No free domain ids\n", iommu->name);
goto err_unlock;
}
info->refcnt = 1;
info->did = num;
info->iommu = iommu;
curr = xa_cmpxchg(&domain->iommu_array, iommu->seq_id,
NULL, info, GFP_KERNEL);
if (curr) {
ret = xa_err(curr) ? : -EBUSY;
goto err_clear;
}
return 0;
err_clear:
ida_free(&iommu->domain_ida, info->did);
err_unlock:
kfree(info);
return ret;
}
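/*
 * Drop @domain's reference on @iommu and release the domain ID once the
 * last attachment on this IOMMU goes away.
 */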
void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
{
struct iommu_domain_info *info;
if (domain->domain.type == IOMMU_DOMAIN_SVA)
return;
guard(mutex)(&iommu->did_lock);
info = xa_load(&domain->iommu_array, iommu->seq_id);
if (--info->refcnt == 0) {
ida_free(&iommu->domain_ida, info->did);
xa_erase(&domain->iommu_array, iommu->seq_id);
kfree(info);
}
}
/*
 * In kdump cases, old valid entries may be cached due to in-flight DMA
 * and the copied page table, and nothing ever unmaps them, so an
 * explicit cache flush is needed for the newly-mapped device. At this
 * point the device is supposed to have finished its reset during driver
 * probe, so no in-flight DMA remains and no further care is needed.
*/
static void copied_context_tear_down(struct intel_iommu *iommu,
struct context_entry *context,
u8 bus, u8 devfn)
{
u16 did_old;
if (!context_copied(iommu, bus, devfn))
return;
assert_spin_locked(&iommu->lock);
did_old = context_domain_id(context);
context_clear_entry(context);
if (did_old < cap_ndoms(iommu->cap)) {
iommu->flush.flush_context(iommu, did_old,
PCI_DEVID(bus, devfn),
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
DMA_TLB_DSI_FLUSH);
}
clear_context_copied(iommu, bus, devfn);
}
/*
 * It's a non-present to present mapping. If the hardware doesn't cache
 * non-present entries, we only need to flush the write-buffer. If it
 * _does_ cache non-present entries, then it does so in the special
 * domain #0, which we have to flush:
*/
static void context_present_cache_flush(struct intel_iommu *iommu, u16 did,
u8 bus, u8 devfn)
{
if (cap_caching_mode(iommu->cap)) {
iommu->flush.flush_context(iommu, 0,
PCI_DEVID(bus, devfn),
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
} else {
iommu_flush_write_buffer(iommu);
}
}
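/*
 * Install a present context entry for (@bus, @devfn) pointing at the
 * domain's second-stage page table, then flush the caches needed for a
 * non-present to present transition.
 */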
static int domain_context_mapping_one(struct dmar_domain *domain,
struct intel_iommu *iommu,
u8 bus, u8 devfn)
{
struct device_domain_info *info =
domain_lookup_dev_info(domain, iommu, bus, devfn);
u16 did = domain_id_iommu(domain, iommu);
int translation = CONTEXT_TT_MULTI_LEVEL;
struct dma_pte *pgd = domain->pgd;
struct context_entry *context;
int ret;
if (WARN_ON(!intel_domain_is_ss_paging(domain)))
return -EINVAL;
pr_debug("Set context mapping for %02x:%02x.%d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
spin_lock(&iommu->lock);
ret = -ENOMEM;
context = iommu_context_addr(iommu, bus, devfn, 1);
if (!context)
goto out_unlock;
ret = 0;
if (context_present(context) && !context_copied(iommu, bus, devfn))
goto out_unlock;
copied_context_tear_down(iommu, context, bus, devfn);
context_clear_entry(context);
context_set_domain_id(context, did);
if (info && info->ats_supported)
translation = CONTEXT_TT_DEV_IOTLB;
else
translation = CONTEXT_TT_MULTI_LEVEL;
context_set_address_root(context, virt_to_phys(pgd));
context_set_address_width(context, domain->agaw);
context_set_translation_type(context, translation);
context_set_fault_enable(context);
context_set_present(context);
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(context, sizeof(*context));
context_present_cache_flush(iommu, did, bus, devfn);
ret = 0;
out_unlock:
spin_unlock(&iommu->lock);
return ret;
}
static int domain_context_mapping_cb(struct pci_dev *pdev,
u16 alias, void *opaque)
{
struct device_domain_info *info = dev_iommu_priv_get(&pdev->dev);
struct intel_iommu *iommu = info->iommu;
struct dmar_domain *domain = opaque;
return domain_context_mapping_one(domain, iommu,
PCI_BUS_NUM(alias), alias & 0xff);
}
static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
u8 bus = info->bus, devfn = info->devfn;
int ret;
if (!dev_is_pci(dev))
return domain_context_mapping_one(domain, iommu, bus, devfn);
ret = pci_for_each_dma_alias(to_pci_dev(dev),
domain_context_mapping_cb, domain);
if (ret)
return ret;
iommu_enable_pci_ats(info);
return 0;
}
/* Return largest possible superpage level for a given mapping */
static int hardware_largepage_caps(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long phy_pfn, unsigned long pages)
{
int support, level = 1;
unsigned long pfnmerge;
support = domain->iommu_superpage;
/* To use a large page, the virtual *and* physical addresses
must be aligned to 2MiB/1GiB/etc. Lower bits set in either
of them will mean we have to use smaller pages. So just
merge them and check both at once. */
pfnmerge = iov_pfn | phy_pfn;
while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
pages >>= VTD_STRIDE_SHIFT;
if (!pages)
break;
pfnmerge >>= VTD_STRIDE_SHIFT;
level++;
support--;
}
return level;
}
/*
* Ensure that old small page tables are removed to make room for superpage(s).
* We're going to add new large pages, so make sure we don't remove their parent
* tables. The IOTLB/devTLBs should be flushed if any PDE/PTEs are cleared.
*/
static void switch_to_super_page(struct dmar_domain *domain,
unsigned long start_pfn,
unsigned long end_pfn, int level)
{
unsigned long lvl_pages = lvl_to_nr_pages(level);
struct dma_pte *pte = NULL;
while (start_pfn <= end_pfn) {
if (!pte)
pte = pfn_to_dma_pte(domain, start_pfn, &level,
GFP_ATOMIC);
if (dma_pte_present(pte)) {
dma_pte_free_pagetable(domain, start_pfn,
start_pfn + lvl_pages - 1,
level + 1);
cache_tag_flush_range(domain, start_pfn << VTD_PAGE_SHIFT,
end_pfn << VTD_PAGE_SHIFT, 0);
}
pte++;
start_pfn += lvl_pages;
if (first_pte_in_page(pte))
pte = NULL;
}
}
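/*
 * Map @nr_pages pages starting at @iov_pfn to the physical range starting
 * at @phys_pfn, using superpages whenever size and alignment allow.
 */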
static int
__domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long phys_pfn, unsigned long nr_pages, int prot,
gfp_t gfp)
{
struct dma_pte *first_pte = NULL, *pte = NULL;
unsigned int largepage_lvl = 0;
unsigned long lvl_pages = 0;
phys_addr_t pteval;
u64 attr;
if (unlikely(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1)))
return -EINVAL;
if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
return -EINVAL;
if (!(prot & DMA_PTE_WRITE) && domain->nested_parent) {
pr_err_ratelimited("Read-only mapping is disallowed on the domain which serves as the parent in a nested configuration, due to HW errata (ERRATA_772415_SPR17)\n");
return -EINVAL;
}
attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
if (domain->use_first_level) {
attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
if (prot & DMA_PTE_WRITE)
attr |= DMA_FL_PTE_DIRTY;
}
domain->has_mappings = true;
pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
while (nr_pages > 0) {
uint64_t tmp;
if (!pte) {
largepage_lvl = hardware_largepage_caps(domain, iov_pfn,
phys_pfn, nr_pages);
pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl,
gfp);
if (!pte)
return -ENOMEM;
first_pte = pte;
lvl_pages = lvl_to_nr_pages(largepage_lvl);
/* It is a large page */
if (largepage_lvl > 1) {
unsigned long end_pfn;
unsigned long pages_to_remove;
pteval |= DMA_PTE_LARGE_PAGE;
pages_to_remove = min_t(unsigned long, nr_pages,
nr_pte_to_next_page(pte) * lvl_pages);
end_pfn = iov_pfn + pages_to_remove - 1;
switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
} else {
pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
}
}
	/* We don't need a lock here; nobody else
	 * touches this iova range.
	 */
tmp = 0ULL;
if (!try_cmpxchg64_local(&pte->val, &tmp, pteval)) {
static int dumps = 5;
pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
iov_pfn, tmp, (unsigned long long)pteval);
if (dumps) {
dumps--;
debug_dma_dump_mappings(NULL);
}
WARN_ON(1);
}
nr_pages -= lvl_pages;
iov_pfn += lvl_pages;
phys_pfn += lvl_pages;
pteval += lvl_pages * VTD_PAGE_SIZE;
/* If the next PTE would be the first in a new page, then we
* need to flush the cache on the entries we've just written.
* And then we'll need to recalculate 'pte', so clear it and
* let it get set again in the if (!pte) block above.
*
* If we're done (!nr_pages) we need to flush the cache too.
*
* Also if we've been setting superpages, we may need to
* recalculate 'pte' and switch back to smaller pages for the
* end of the mapping, if the trailing size is not enough to
* use another superpage (i.e. nr_pages < lvl_pages).
*/
pte++;
if (!nr_pages || first_pte_in_page(pte) ||
(largepage_lvl > 1 && nr_pages < lvl_pages)) {
domain_flush_cache(domain, first_pte,
(void *)pte - (void *)first_pte);
pte = NULL;
}
}
return 0;
}
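/*
 * Tear down the context entry for @bus/@devfn on @info's IOMMU: clear
 * the entry, make the change visible to the hardware, and flush the
 * caches for the domain ID it was using.
 */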
static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn)
{
struct intel_iommu *iommu = info->iommu;
struct context_entry *context;
u16 did;
spin_lock(&iommu->lock);
context = iommu_context_addr(iommu, bus, devfn, 0);
if (!context) {
spin_unlock(&iommu->lock);
return;
}
did = context_domain_id(context);
context_clear_entry(context);
__iommu_flush_cache(iommu, context, sizeof(*context));
spin_unlock(&iommu->lock);
intel_context_flush_no_pasid(info, context, did);
}
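/*
 * Install a first-stage PASID table entry for @pasid that points at the
 * page table @fsptptr with domain ID @did. If @old is non-NULL, the
 * existing entry is replaced rather than set up from scratch.
 */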
int __domain_setup_first_level(struct intel_iommu *iommu, struct device *dev,
ioasid_t pasid, u16 did, phys_addr_t fsptptr,
int flags, struct iommu_domain *old)
{
if (!old)
return intel_pasid_setup_first_level(iommu, dev, fsptptr, pasid,
did, flags);
return intel_pasid_replace_first_level(iommu, dev, fsptptr, pasid, did,
iommu_domain_did(old, iommu),
flags);
}
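/*
 * Second-stage counterpart: set up or replace the PASID table entry
 * that points at @domain's second-stage (IOVA) page table.
 */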
static int domain_setup_second_level(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev, ioasid_t pasid,
struct iommu_domain *old)
{
if (!old)
return intel_pasid_setup_second_level(iommu, domain,
dev, pasid);
return intel_pasid_replace_second_level(iommu, domain, dev,
iommu_domain_did(old, iommu),
pasid);
}
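/*
 * Set up or replace a pass-through PASID table entry, i.e. one that
 * performs no address translation for @pasid.
 */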
static int domain_setup_passthrough(struct intel_iommu *iommu,
struct device *dev, ioasid_t pasid,
struct iommu_domain *old)
{
if (!old)
return intel_pasid_setup_pass_through(iommu, dev, pasid);
return intel_pasid_replace_pass_through(iommu, dev,
iommu_domain_did(old, iommu),
pasid);
}
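/*
 * First-stage attach helper: derive the domain ID and the first-stage
 * flags (5-level paging, forced snooping) from @domain before
 * installing the PASID table entry.
 */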
static int domain_setup_first_level(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev,
u32 pasid, struct iommu_domain *old)
{
struct dma_pte *pgd = domain->pgd;
int level, flags = 0;
level = agaw_to_level(domain->agaw);
if (level != 4 && level != 5)
return -EINVAL;
if (level == 5)
flags |= PASID_FLAG_FL5LP;
if (domain->force_snooping)
flags |= PASID_FLAG_PAGE_SNOOP;
return __domain_setup_first_level(iommu, dev, pasid,
domain_id_iommu(domain, iommu),
__pa(pgd), flags, old);
}
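/*
 * Attach @domain to @dev: allocate a domain ID on the device's IOMMU,
 * add the device to the domain's device list, and program either the
 * context entry (legacy mode) or the IOMMU_NO_PASID entry in the PASID
 * table (scalable mode). On failure the device is moved to blocking
 * translation.
 */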
static int dmar_domain_attach_device(struct dmar_domain *domain,
struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
unsigned long flags;
int ret;
ret = domain_attach_iommu(domain, iommu);
if (ret)
return ret;
info->domain = domain;
info->domain_attached = true;
spin_lock_irqsave(&domain->lock, flags);
list_add(&info->link, &domain->devices);
spin_unlock_irqrestore(&domain->lock, flags);
if (dev_is_real_dma_subdevice(dev))
return 0;
if (!sm_supported(iommu))
ret = domain_context_mapping(domain, dev);
else if (intel_domain_is_fs_paging(domain))
ret = domain_setup_first_level(iommu, domain, dev,
IOMMU_NO_PASID, NULL);
else if (intel_domain_is_ss_paging(domain))
ret = domain_setup_second_level(iommu, domain, dev,
IOMMU_NO_PASID, NULL);
else if (WARN_ON(true))
ret = -EINVAL;
if (ret)
goto out_block_translation;
ret = cache_tag_assign_domain(domain, dev, IOMMU_NO_PASID);
if (ret)
goto out_block_translation;
return 0;
out_block_translation:
device_block_translation(dev);
return ret;
}
/**
* device_rmrr_is_relaxable - Test whether the RMRR of this device
 * is relaxable (i.e. allowed to be left unenforced under some conditions)
* @dev: device handle
*
* We assume that PCI USB devices with RMRRs have them largely
* for historical reasons and that the RMRR space is not actively used post
* boot. This exclusion may change if vendors begin to abuse it.
*
* The same exception is made for graphics devices, with the requirement that
* any use of the RMRR regions will be torn down before assigning the device
* to a guest.
*
* Return: true if the RMRR is relaxable, false otherwise
*/
static bool device_rmrr_is_relaxable(struct device *dev)
{
struct pci_dev *pdev;
if (!dev_is_pci(dev))
return false;
pdev = to_pci_dev(dev);
	return IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev);
}
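/*
 * Return the domain type that must be used as the default domain for
 * @dev, or 0 if the global default is acceptable.
 */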
static int device_def_domain_type(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
/*
* Hardware does not support the passthrough translation mode.
	 * Always use a dynamic mapping domain.
*/
if (!ecap_pass_through(iommu->ecap))
return IOMMU_DOMAIN_DMA;
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);
if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
return IOMMU_DOMAIN_IDENTITY;
}
return 0;
}
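/*
 * Select the invalidation interface for @iommu: prefer queued
 * invalidation and fall back to register-based invalidation if QI
 * cannot be enabled.
 */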
static void intel_iommu_init_qi(struct intel_iommu *iommu)
{
/*
	 * Start from a sane IOMMU hardware state.
	 * If queued invalidation was already initialized by us
	 * (for example, while enabling interrupt remapping), then
	 * things are already rolling from a sane state.
*/
if (!iommu->qi) {
/*
* Clear any previous faults.
*/
dmar_fault(-1, iommu);
/*
* Disable queued invalidation if supported and already enabled
* before OS handover.
*/
dmar_disable_qi(iommu);
}
if (dmar_enable_qi(iommu)) {
/*
		 * Queued invalidation could not be enabled; fall back
		 * to register-based invalidation.
*/
iommu->flush.flush_context = __iommu_flush_context;
iommu->flush.flush_iotlb = __iommu_flush_iotlb;
pr_info("%s: Using Register based invalidation\n",
iommu->name);
} else {
iommu->flush.flush_context = qi_flush_context;
iommu->flush.flush_iotlb = qi_flush_iotlb;
pr_info("%s: Using Queued invalidation\n", iommu->name);
}
}
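/*
 * Copy the context table(s) of a single bus from the previous kernel.
 * In extended (scalable) mode each bus has a lower and an upper context
 * table, hence the *2 indexing. Domain IDs found in present entries are
 * reserved in the IOMMU's domain ID allocator so that they cannot be
 * handed out again.
 */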
static int copy_context_table(struct intel_iommu *iommu,
struct root_entry *old_re,
struct context_entry **tbl,
int bus, bool ext)
{
int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
struct context_entry *new_ce = NULL, ce;
struct context_entry *old_ce = NULL;
struct root_entry re;
phys_addr_t old_ce_phys;
tbl_idx = ext ? bus * 2 : bus;
memcpy(&re, old_re, sizeof(re));
for (devfn = 0; devfn < 256; devfn++) {
/* First calculate the correct index */
idx = (ext ? devfn * 2 : devfn) % 256;
if (idx == 0) {
/* First save what we may have and clean up */
if (new_ce) {
tbl[tbl_idx] = new_ce;
__iommu_flush_cache(iommu, new_ce,
VTD_PAGE_SIZE);
pos = 1;
}
if (old_ce)
memunmap(old_ce);
ret = 0;
if (devfn < 0x80)
old_ce_phys = root_entry_lctp(&re);
else
old_ce_phys = root_entry_uctp(&re);
if (!old_ce_phys) {
if (ext && devfn == 0) {
/* No LCTP, try UCTP */
devfn = 0x7f;
continue;
} else {
goto out;
}
}
ret = -ENOMEM;
old_ce = memremap(old_ce_phys, PAGE_SIZE,
MEMREMAP_WB);
if (!old_ce)
goto out;
new_ce = iommu_alloc_pages_node_sz(iommu->node,
GFP_KERNEL, SZ_4K);
if (!new_ce)
goto out_unmap;
ret = 0;
}
/* Now copy the context entry */
memcpy(&ce, old_ce + idx, sizeof(ce));
if (!context_present(&ce))
continue;
did = context_domain_id(&ce);
if (did >= 0 && did < cap_ndoms(iommu->cap))
ida_alloc_range(&iommu->domain_ida, did, did, GFP_KERNEL);
set_context_copied(iommu, bus, devfn);
new_ce[idx] = ce;
}
tbl[tbl_idx + pos] = new_ce;
__iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
out_unmap:
memunmap(old_ce);
out:
return ret;
}
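/*
 * Inherit the translation structures left behind by the previous
 * kernel: read the old root table address from DMAR_RTADDR_REG, copy
 * each bus's context table, and install the copies into this kernel's
 * root entry table. Bail out if the old and new table formats (legacy
 * vs. scalable mode) differ, since changing the format would require
 * disabling translation.
 */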
static int copy_translation_tables(struct intel_iommu *iommu)
{
struct context_entry **ctxt_tbls;
struct root_entry *old_rt;
phys_addr_t old_rt_phys;
int ctxt_table_entries;
u64 rtaddr_reg;
int bus, ret;
bool new_ext, ext;
rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
ext = !!(rtaddr_reg & DMA_RTADDR_SMT);
new_ext = !!sm_supported(iommu);
/*
* The RTT bit can only be changed when translation is disabled,
	 * but disabling translation would open a window for data
* corruption. So bail out and don't copy anything if we would
* have to change the bit.
*/
if (new_ext != ext)
return -EINVAL;
iommu->copied_tables = bitmap_zalloc(BIT_ULL(16), GFP_KERNEL);
if (!iommu->copied_tables)
return -ENOMEM;
old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
if (!old_rt_phys)
return -EINVAL;
old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
if (!old_rt)
return -ENOMEM;
/* This is too big for the stack - allocate it from slab */
ctxt_table_entries = ext ? 512 : 256;
ret = -ENOMEM;
ctxt_tbls = kcalloc(ctxt_table_entries, sizeof(void *), GFP_KERNEL);
if (!ctxt_tbls)
goto out_unmap;
for (bus = 0; bus < 256; bus++) {
ret = copy_context_table(iommu, &old_rt[bus],
ctxt_tbls, bus, ext);
if (ret) {
pr_err("%s: Failed to copy context table for bus %d\n",
iommu->name, bus);
continue;
}
}
spin_lock(&iommu->lock);
/* Context tables are copied, now write them to the root_entry table */
for (bus = 0; bus < 256; bus++) {
int idx = ext ? bus * 2 : bus;
u64 val;
if (ctxt_tbls[idx]) {
val = virt_to_phys(ctxt_tbls[idx]) | 1;
iommu->root_entry[bus].lo = val;
}
if (!ext || !ctxt_tbls[idx + 1])
continue;
val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
iommu->root_entry[bus].hi = val;
}
spin_unlock(&iommu->lock);
kfree(ctxt_tbls);
__iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
ret = 0;
out_unmap:
memunmap(old_rt);
return ret;
}
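/*
 * One-time initialization of all DMAR units: allocate root entries,
 * copy over pre-enabled translation tables where possible (kdump),
 * select the invalidation interface, and set up fault interrupts and,
 * where supported, page request queues.
 */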
static int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
int ret;
for_each_iommu(iommu, drhd) {
if (drhd->ignored) {
iommu_disable_translation(iommu);
continue;
}
/*
		 * Find the max PASID size of all IOMMUs in the system.
* We need to ensure the system pasid table is no bigger
* than the smallest supported.
*/
if (pasid_supported(iommu)) {
u32 temp = 2 << ecap_pss(iommu->ecap);
intel_pasid_max_id = min_t(u32, temp,
intel_pasid_max_id);
}
intel_iommu_init_qi(iommu);
init_translation_status(iommu);
if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
iommu_disable_translation(iommu);
clear_translation_pre_enabled(iommu);
pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
iommu->name);
}
/*
		 * TBD: we could share the same root & context tables
		 * among all IOMMUs. This needs to be split out later.
*/
ret = iommu_alloc_root_entry(iommu);
if (ret)
goto free_iommu;
if (translation_pre_enabled(iommu)) {
pr_info("Translation already enabled - trying to copy translation structures\n");
ret = copy_translation_tables(iommu);
if (ret) {
/*
* We found the IOMMU with translation
* enabled - but failed to copy over the
* old root-entry table. Try to proceed
* by disabling translation now and
* allocating a clean root-entry table.
* This might cause DMAR faults, but
* probably the dump will still succeed.
*/
pr_err("Failed to copy translation tables from previous kernel for %s\n",
iommu->name);
iommu_disable_translation(iommu);
clear_translation_pre_enabled(iommu);
} else {
pr_info("Copied translation tables from previous kernel for %s\n",
iommu->name);
}
}
intel_svm_check(iommu);
}
/*
	 * Now that QI is enabled on all IOMMUs, set the root entry and flush
	 * caches. This is required on some Intel X58 chipsets; otherwise the
* flush_context function will loop forever and the boot hangs.
*/
for_each_active_iommu(iommu, drhd) {
iommu_flush_write_buffer(iommu);
iommu_set_root_entry(iommu);
}
check_tylersburg_isoch();
/*
* for each drhd
* enable fault log
* global invalidate context cache
* global invalidate iotlb
* enable translation
*/
for_each_iommu(iommu, drhd) {
if (drhd->ignored) {
/*
			 * We always have to disable PMRs or DMA may fail on
			 * this device.
*/
if (force_on)
iommu_disable_protect_mem_regions(iommu);
continue;
}
iommu_flush_write_buffer(iommu);
if (ecap_prs(iommu->ecap)) {
/*
			 * Calling dmar_alloc_hwirq() with dmar_global_lock
			 * held could cause a lock ordering problem, so drop
			 * the lock across intel_iommu_enable_prq().
*/
up_write(&dmar_global_lock);
ret = intel_iommu_enable_prq(iommu);
down_write(&dmar_global_lock);
if (ret)
goto free_iommu;
}
ret = dmar_set_interrupt(iommu);
if (ret)
goto free_iommu;
}
return 0;
free_iommu:
for_each_active_iommu(iommu, drhd) {
disable_dmar_iommu(iommu);
free_dmar_iommu(iommu);
}
return ret;
}
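/*
 * Identify DMAR units that do not need DMA remapping: units whose
 * device scope contains no devices are ignored outright, and units
 * that cover only graphics devices are marked gfx-dedicated (and
 * ignored as well if the user disabled the graphics IOMMU).
 */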
static void __init init_no_remapping_devices(void)
{
struct dmar_drhd_unit *drhd;
struct device *dev;
int i;
for_each_drhd_unit(drhd) {
if (!drhd->include_all) {
for_each_active_dev_scope(drhd->devices,
drhd->devices_cnt, i, dev)
break;
/* ignore DMAR unit if no devices exist */
if (i == drhd->devices_cnt)
drhd->ignored = 1;
}
}
for_each_active_drhd_unit(drhd) {
if (drhd->include_all)
continue;
for_each_active_dev_scope(drhd->devices,
drhd->devices_cnt, i, dev)
if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
break;
if (i < drhd->devices_cnt)
continue;
/*
 * This IOMMU has *only* gfx devices. Either bypass it or
 * set the gfx_dedicated flag, as appropriate.
 */
drhd->gfx_dedicated = 1;
if (disable_igfx_iommu)
drhd->ignored = 1;
}
}
#ifdef CONFIG_SUSPEND
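/*
 * Re-initialize the hardware when coming back from suspend: re-enable
 * queued invalidation where it was in use, then reprogram the root
 * entry and re-enable translation on each IOMMU. Ignored units only
 * have their protected memory regions disabled (if force_on is set).
 */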
static int init_iommu_hw(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu = NULL;
int ret;
for_each_active_iommu(iommu, drhd) {
if (iommu->qi) {
ret = dmar_reenable_qi(iommu);
if (ret)
return ret;
}
}
for_each_iommu(iommu, drhd) {
if (drhd->ignored) {
/*
* we always have to disable PMRs or DMA may fail on
* this device
*/
if (force_on)
iommu_disable_protect_mem_regions(iommu);
continue;
}
iommu_flush_write_buffer(iommu);
iommu_set_root_entry(iommu);
iommu_enable_translation(iommu);
iommu_disable_protect_mem_regions(iommu);
}
return 0;
}
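/* Globally invalidate the context-cache and IOTLB on every active IOMMU. */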
static void iommu_flush_all(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
for_each_active_iommu(iommu, drhd) {
iommu->flush.flush_context(iommu, 0, 0, 0,
DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH);
}
}
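/*
 * On suspend: flush all caches, disable translation and save the
 * fault-event registers so that iommu_resume() can restore them.
 */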
static int iommu_suspend(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu = NULL;
unsigned long flag;
iommu_flush_all();
for_each_active_iommu(iommu, drhd) {
iommu_disable_translation(iommu);
raw_spin_lock_irqsave(&iommu->register_lock, flag);
iommu->iommu_state[SR_DMAR_FECTL_REG] =
readl(iommu->reg + DMAR_FECTL_REG);
iommu->iommu_state[SR_DMAR_FEDATA_REG] =
readl(iommu->reg + DMAR_FEDATA_REG);
iommu->iommu_state[SR_DMAR_FEADDR_REG] =
readl(iommu->reg + DMAR_FEADDR_REG);
iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
readl(iommu->reg + DMAR_FEUADDR_REG);
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
return 0;
}
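/*
 * On resume: bring the hardware back up and restore the fault-event
 * registers saved by iommu_suspend().
 */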
static void iommu_resume(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu = NULL;
unsigned long flag;
if (init_iommu_hw()) {
if (force_on)
panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
else
WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
return;
}
for_each_active_iommu(iommu, drhd) {
raw_spin_lock_irqsave(&iommu->register_lock, flag);
writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
iommu->reg + DMAR_FECTL_REG);
writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
iommu->reg + DMAR_FEDATA_REG);
writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
iommu->reg + DMAR_FEADDR_REG);
writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
iommu->reg + DMAR_FEUADDR_REG);
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
}
static struct syscore_ops iommu_syscore_ops = {
.resume = iommu_resume,
.suspend = iommu_suspend,
};
static void __init init_iommu_pm_ops(void)
{
register_syscore_ops(&iommu_syscore_ops);
}
#else
static inline void init_iommu_pm_ops(void) {}
#endif /* CONFIG_SUSPEND */
static int __init rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
{
if (!IS_ALIGNED(rmrr->base_address, PAGE_SIZE) ||
!IS_ALIGNED(rmrr->end_address + 1, PAGE_SIZE) ||
rmrr->end_address <= rmrr->base_address ||
arch_rmrr_sanity_check(rmrr))
return -EINVAL;
return 0;
}
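/*
 * Parse one RMRR entry from the DMAR table. An entry that fails the
 * sanity check only taints the kernel; the unit is still added to the
 * dmar_rmrr_units list.
 */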
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_reserved_memory *rmrr;
struct dmar_rmrr_unit *rmrru;
rmrr = (struct acpi_dmar_reserved_memory *)header;
if (rmrr_sanity_check(rmrr)) {
pr_warn(FW_BUG
"Your BIOS is broken; bad RMRR [%#018Lx-%#018Lx]\n"
"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
rmrr->base_address, rmrr->end_address,
dmi_get_system_info(DMI_BIOS_VENDOR),
dmi_get_system_info(DMI_BIOS_VERSION),
dmi_get_system_info(DMI_PRODUCT_VERSION));
add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
}
rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
if (!rmrru)
goto out;
rmrru->hdr = header;
rmrru->base_address = rmrr->base_address;
rmrru->end_address = rmrr->end_address;
rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
((void *)rmrr) + rmrr->header.length,
&rmrru->devices_cnt);
if (rmrru->devices_cnt && rmrru->devices == NULL)
goto free_rmrru;
list_add(&rmrru->list, &dmar_rmrr_units);
return 0;
free_rmrru:
kfree(rmrru);
out:
return -ENOMEM;
}
static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
{
struct dmar_atsr_unit *atsru;
struct acpi_dmar_atsr *tmp;
list_for_each_entry_rcu(atsru, &dmar_atsr_units, list,
dmar_rcu_check()) {
tmp = (struct acpi_dmar_atsr *)atsru->hdr;
if (atsr->segment != tmp->segment)
continue;
if (atsr->header.length != tmp->header.length)
continue;
if (memcmp(atsr, tmp, atsr->header.length) == 0)
return atsru;
}
return NULL;
}
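/*
 * Parse one ATSR entry. Entries that are already known are accepted
 * silently; new entries are copied because a buffer allocated by an
 * ACPI _DSM method is freed on return.
 */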
int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;
if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
return 0;
atsr = container_of(hdr, struct acpi_dmar_atsr, header);
atsru = dmar_find_atsr(atsr);
if (atsru)
return 0;
atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
if (!atsru)
return -ENOMEM;
/*
* If memory is allocated from slab by ACPI _DSM method, we need to
* copy the memory content because the memory buffer will be freed
* on return.
*/
atsru->hdr = (void *)(atsru + 1);
memcpy(atsru->hdr, hdr, hdr->length);
atsru->include_all = atsr->flags & 0x1;
if (!atsru->include_all) {
atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
(void *)atsr + atsr->header.length,
&atsru->devices_cnt);
if (atsru->devices_cnt && atsru->devices == NULL) {
kfree(atsru);
return -ENOMEM;
}
}
list_add_rcu(&atsru->list, &dmar_atsr_units);
return 0;
}
static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
{
dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
kfree(atsru);
}
int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;
atsr = container_of(hdr, struct acpi_dmar_atsr, header);
atsru = dmar_find_atsr(atsr);
if (atsru) {
list_del_rcu(&atsru->list);
synchronize_rcu();
intel_iommu_free_atsr(atsru);
}
return 0;
}
int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
{
int i;
struct device *dev;
struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;
atsr = container_of(hdr, struct acpi_dmar_atsr, header);
atsru = dmar_find_atsr(atsr);
if (!atsru)
return 0;
if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
i, dev)
return -EBUSY;
}
return 0;
}
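/*
 * SATC (SoC Integrated Address Translation Cache) units are handled
 * much like the ATSR units above: look up a previously recorded entry,
 * or copy and record a new one from the DMAR table.
 */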
static struct dmar_satc_unit *dmar_find_satc(struct acpi_dmar_satc *satc)
{
struct dmar_satc_unit *satcu;
struct acpi_dmar_satc *tmp;
list_for_each_entry_rcu(satcu, &dmar_satc_units, list,
dmar_rcu_check()) {
tmp = (struct acpi_dmar_satc *)satcu->hdr;
if (satc->segment != tmp->segment)
continue;
if (satc->header.length != tmp->header.length)
continue;
if (memcmp(satc, tmp, satc->header.length) == 0)
return satcu;
}
return NULL;
}
int dmar_parse_one_satc(struct acpi_dmar_header *hdr, void *arg)
{
struct acpi_dmar_satc *satc;
struct dmar_satc_unit *satcu;
if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
return 0;
satc = container_of(hdr, struct acpi_dmar_satc, header);
satcu = dmar_find_satc(satc);
if (satcu)
return 0;
satcu = kzalloc(sizeof(*satcu) + hdr->length, GFP_KERNEL);
if (!satcu)
return -ENOMEM;
satcu->hdr = (void *)(satcu + 1);
memcpy(satcu->hdr, hdr, hdr->length);
satcu->atc_required = satc->flags & 0x1;
satcu->devices = dmar_alloc_dev_scope((void *)(satc + 1),
(void *)satc + satc->header.length,
&satcu->devices_cnt);
if (satcu->devices_cnt && !satcu->devices) {
kfree(satcu);
return -ENOMEM;
}
list_add_rcu(&satcu->list, &dmar_satc_units);
return 0;
}
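/*
 * Bring a hot-added DMAR unit on line: allocate its root entry, set up
 * queued invalidation, the page request queue (if PRS is supported)
 * and the fault interrupt, then enable translation and disable the
 * protected memory regions.
 */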
static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
{
struct intel_iommu *iommu = dmaru->iommu;
int ret;
/*
* Disable translation if already enabled prior to OS handover.
*/
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
ret = iommu_alloc_root_entry(iommu);
if (ret)
goto out;
intel_svm_check(iommu);
if (dmaru->ignored) {
/*
* we always have to disable PMRs or DMA may fail on this device
*/
if (force_on)
iommu_disable_protect_mem_regions(iommu);
return 0;
}
intel_iommu_init_qi(iommu);
iommu_flush_write_buffer(iommu);
if (ecap_prs(iommu->ecap)) {
ret = intel_iommu_enable_prq(iommu);
if (ret)
goto disable_iommu;
}
ret = dmar_set_interrupt(iommu);
if (ret)
goto disable_iommu;
iommu_set_root_entry(iommu);
iommu_enable_translation(iommu);
iommu_disable_protect_mem_regions(iommu);
return 0;
disable_iommu:
disable_dmar_iommu(iommu);
out:
free_dmar_iommu(iommu);
return ret;
}
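/* Insert or remove a DMAR unit at runtime. */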
int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
int ret = 0;
struct intel_iommu *iommu = dmaru->iommu;
if (!intel_iommu_enabled)
return 0;
if (!iommu)
return -EINVAL;
if (insert) {
ret = intel_iommu_add(dmaru);
} else {
disable_dmar_iommu(iommu);
free_dmar_iommu(iommu);
}
return ret;
}
static void intel_iommu_free_dmars(void)
{
struct dmar_rmrr_unit *rmrru, *rmrr_n;
struct dmar_atsr_unit *atsru, *atsr_n;
struct dmar_satc_unit *satcu, *satc_n;
list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
list_del(&rmrru->list);
dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
kfree(rmrru);
}
list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
list_del(&atsru->list);
intel_iommu_free_atsr(atsru);
}
list_for_each_entry_safe(satcu, satc_n, &dmar_satc_units, list) {
list_del(&satcu->list);
dmar_free_dev_scope(&satcu->devices, &satcu->devices_cnt);
kfree(satcu);
}
}
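/* Return the SATC unit whose device scope contains @dev, or NULL. */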
static struct dmar_satc_unit *dmar_find_matched_satc_unit(struct pci_dev *dev)
{
struct dmar_satc_unit *satcu;
struct acpi_dmar_satc *satc;
struct device *tmp;
int i;
rcu_read_lock();
list_for_each_entry_rcu(satcu, &dmar_satc_units, list) {
satc = container_of(satcu->hdr, struct acpi_dmar_satc, header);
if (satc->segment != pci_domain_nr(dev->bus))
continue;
for_each_dev_scope(satcu->devices, satcu->devices_cnt, i, tmp)
if (to_pci_dev(tmp) == dev)
goto out;
}
satcu = NULL;
out:
rcu_read_unlock();
return satcu;
}
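/*
 * Decide whether the OS may enable ATS for @dev. A matching SATC entry
 * is authoritative; otherwise walk up to the root port and look it up
 * in the ATSR units of the device's PCI segment.
 */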
static bool dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu)
{
struct pci_dev *bridge = NULL;
struct dmar_atsr_unit *atsru;
struct dmar_satc_unit *satcu;
struct acpi_dmar_atsr *atsr;
bool supported = true;
struct pci_bus *bus;
struct device *tmp;
int i;
dev = pci_physfn(dev);
satcu = dmar_find_matched_satc_unit(dev);
if (satcu)
/*
 * This device supports ATS because it is listed in a SATC table.
 * When the IOMMU is in legacy mode, the hardware enables ATS for
 * such a device automatically, so the OS must not enable it as
 * well or TLB invalidations would be duplicated.
 */
return !(satcu->atc_required && !sm_supported(iommu));
for (bus = dev->bus; bus; bus = bus->parent) {
bridge = bus->self;
/* If it's an integrated device, allow ATS */
if (!bridge)
return true;
/* Connected via non-PCIe: no ATS */
if (!pci_is_pcie(bridge) ||
pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
return false;
/* If we found the root port, look it up in the ATSR */
if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
break;
}
rcu_read_lock();
list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
if (atsr->segment != pci_domain_nr(dev->bus))
continue;
for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
if (tmp == &bridge->dev)
goto out;
if (atsru->include_all)
goto out;
}
supported = false;
out:
rcu_read_unlock();
return supported;
}
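/*
 * Keep the cached RMRR and ATSR device scopes in sync when PCI devices
 * are hot-added or removed.
 */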
int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
int ret;
struct dmar_rmrr_unit *rmrru;
struct dmar_atsr_unit *atsru;
struct dmar_satc_unit *satcu;
struct acpi_dmar_atsr *atsr;
struct acpi_dmar_reserved_memory *rmrr;
struct acpi_dmar_satc *satc;
if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
return 0;
list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
rmrr = container_of(rmrru->hdr,
struct acpi_dmar_reserved_memory, header);
if (info->event == BUS_NOTIFY_ADD_DEVICE) {
ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
((void *)rmrr) + rmrr->header.length,
rmrr->segment, rmrru->devices,
rmrru->devices_cnt);
if (ret < 0)
return ret;
} else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
dmar_remove_dev_scope(info, rmrr->segment,
rmrru->devices, rmrru->devices_cnt);
}
}
list_for_each_entry(atsru, &dmar_atsr_units, list) {
if (atsru->include_all)
continue;
atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
if (info->event == BUS_NOTIFY_ADD_DEVICE) {
ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
(void *)atsr + atsr->header.length,
atsr->segment, atsru->devices,
atsru->devices_cnt);
if (ret > 0)
break;
else if (ret < 0)
return ret;
} else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
if (dmar_remove_dev_scope(info, atsr->segment,
atsru->devices, atsru->devices_cnt))
break;
}
}
list_for_each_entry(satcu, &dmar_satc_units, list) {
satc = container_of(satcu->hdr, struct acpi_dmar_satc, header);
if (info->event == BUS_NOTIFY_ADD_DEVICE) {
ret = dmar_insert_dev_scope(info, (void *)(satc + 1),
(void *)satc + satc->header.length,
satc->segment, satcu->devices,
satcu->devices_cnt);
if (ret > 0)
break;
else if (ret < 0)
return ret;
} else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
if (dmar_remove_dev_scope(info, satc->segment,
satcu->devices, satcu->devices_cnt))
break;
}
}
return 0;
}
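/* Turn off DMA translation on every DMAR unit in the system. */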
static void intel_disable_iommus(void)
{
struct intel_iommu *iommu = NULL;
struct dmar_drhd_unit *drhd;
for_each_iommu(iommu, drhd)
iommu_disable_translation(iommu);
}
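/*
 * Called late in the reboot/shutdown path, when only the boot CPU is
 * still running, so the DMAR list can be walked without taking
 * dmar_global_lock (see the locking comment below).
 */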
void intel_iommu_shutdown(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu = NULL;
if (no_iommu || dmar_disabled)
return;
/*
 * All other CPUs were brought down and hotplug interrupts were
 * disabled; no locking or RCU protection is needed anymore.
 */
list_for_each_entry(drhd, &dmar_drhd_units, list) {
iommu = drhd->iommu;
/* Disable PMRs explicitly here. */
iommu_disable_protect_mem_regions(iommu);
/* Make sure the IOMMUs are switched off */
iommu_disable_translation(iommu);
}
}
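/* Map a sysfs device back to its owning struct intel_iommu. */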
static struct intel_iommu *dev_to_intel_iommu(struct device *dev)
{
struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
return container_of(iommu_dev, struct intel_iommu, iommu);
}
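/*
 * Per-unit sysfs attributes, exposed in the "intel-iommu" group under
 * /sys/class/iommu/dmarN/. They mirror the register state reported at
 * boot: version, register base address, capabilities and extended
 * capabilities.
 */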
static ssize_t version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_iommu *iommu = dev_to_intel_iommu(dev);
u32 ver = readl(iommu->reg + DMAR_VER_REG);
return sysfs_emit(buf, "%d:%d\n",
DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
}
static DEVICE_ATTR_RO(version);
static ssize_t address_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_iommu *iommu = dev_to_intel_iommu(dev);
return sysfs_emit(buf, "%llx\n", iommu->reg_phys);
}
static DEVICE_ATTR_RO(address);
static ssize_t cap_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_iommu *iommu = dev_to_intel_iommu(dev);
return sysfs_emit(buf, "%llx\n", iommu->cap);
}
static DEVICE_ATTR_RO(cap);
static ssize_t ecap_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_iommu *iommu = dev_to_intel_iommu(dev);
return sysfs_emit(buf, "%llx\n", iommu->ecap);
}
static DEVICE_ATTR_RO(ecap);
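/* Number of domains this unit supports, per the capability register. */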
static ssize_t domains_supported_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_iommu *iommu = dev_to_intel_iommu(dev);
return sysfs_emit(buf, "%ld\n", cap_ndoms(iommu->cap));
}
static DEVICE_ATTR_RO(domains_supported);
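/* Count the domain IDs currently allocated from this unit's IDA. */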
static ssize_t domains_used_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct intel_iommu *iommu = dev_to_intel_iommu(dev);
unsigned int count = 0;
int id;
for (id = 0; id < cap_ndoms(iommu->cap); id++)
if (ida_exists(&iommu->domain_ida, id))
count++;
return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(domains_used);
static struct attribute *intel_iommu_attrs[] = {
&dev_attr_version.attr,
&dev_attr_address.attr,
&dev_attr_cap.attr,
&dev_attr_ecap.attr,
&dev_attr_domains_supported.attr,
&dev_attr_domains_used.attr,
NULL,
};
static struct attribute_group intel_iommu_group = {
.name = "intel-iommu",
.attrs = intel_iommu_attrs,
};
const struct attribute_group *intel_iommu_groups[] = {
&intel_iommu_group,
NULL,
};
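/*
 * Check whether any PCI device is marked external-facing (e.g. sits
 * behind a Thunderbolt port) and is therefore untrusted for DMA.
 */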
static bool has_external_pci(void)
{
struct pci_dev *pdev = NULL;
for_each_pci_dev(pdev)
if (pdev->external_facing) {
pci_dev_put(pdev);
return true;
}
return false;
}
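/*
 * Enforce the DMAR platform opt-in: if the firmware set
 * DMA_CTRL_PLATFORM_OPT_IN_FLAG and external-facing PCI devices are
 * present, force the IOMMU on unless the user opted out on the kernel
 * command line.
 */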
static int __init platform_optin_force_iommu(void)
{
if (!dmar_platform_optin() || no_platform_optin || !has_external_pci())
return 0;
if (no_iommu || dmar_disabled)
pr_info("Intel-IOMMU force enabled due to platform opt in\n");
/*
 * If Intel-IOMMU is disabled by default, we will apply the identity
 * map to all devices except those marked as untrusted.
*/
if (dmar_disabled)
iommu_set_default_passthrough(false);
dmar_disabled = 0;
no_iommu = 0;
return 1;
}
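/*
 * Probe the physical devices attached to ACPI namespace devices listed
 * in each DRHD's device scope. The dmar_global_lock is dropped around
 * iommu_probe_device() so that iommu_probe_device_lock is never taken
 * while the global lock is held.
 */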
static int __init probe_acpi_namespace_devices(void)
{
struct dmar_drhd_unit *drhd;
/* To avoid a -Wunused-but-set-variable warning. */
struct intel_iommu *iommu __maybe_unused;
struct device *dev;
int i, ret = 0;
for_each_active_iommu(iommu, drhd) {
for_each_active_dev_scope(drhd->devices,
drhd->devices_cnt, i, dev) {
struct acpi_device_physical_node *pn;
struct acpi_device *adev;
if (dev->bus != &acpi_bus_type)
continue;
up_read(&dmar_global_lock);
adev = to_acpi_device(dev);
mutex_lock(&adev->physical_node_lock);
list_for_each_entry(pn,
&adev->physical_node_list, node) {
ret = iommu_probe_device(pn->dev);
if (ret)
break;
}
mutex_unlock(&adev->physical_node_lock);
down_read(&dmar_global_lock);
if (ret)
return ret;
}
}
return 0;
}
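/* Force the IOMMU on when the kernel was launched through TXT/tboot. */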
static __init int tboot_force_iommu(void)
{
if (!tboot_enabled())
return 0;
if (no_iommu || dmar_disabled)
pr_warn("Forcing Intel-IOMMU to enabled\n");
dmar_disabled = 0;
no_iommu = 0;
return 1;
}
int __init intel_iommu_init(void)
{
int ret = -ENODEV;
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
/*
* Intel IOMMU is required for a TXT/tboot launch or platform
 * opt-in, so enforce that.
*/
force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) ||
platform_optin_force_iommu();
down_write(&dmar_global_lock);
if (dmar_table_init()) {
if (force_on)
panic("tboot: Failed to initialize DMAR table\n");
goto out_free_dmar;
}
if (dmar_dev_scope_init() < 0) {
if (force_on)
panic("tboot: Failed to initialize DMAR device scope\n");
goto out_free_dmar;
}
up_write(&dmar_global_lock);
/*
* The bus notifier takes the dmar_global_lock, so lockdep will
* complain later when we register it under the lock.
*/
dmar_register_bus_notifier();
down_write(&dmar_global_lock);
if (!no_iommu)
intel_iommu_debugfs_init();
if (no_iommu || dmar_disabled) {
/*
 * We exit the function here to ensure the IOMMU's remapping and
 * mempool aren't set up, which means that the IOMMU's PMRs
 * won't be disabled via the call to init_dmars(). So disable
 * them explicitly here. The PMRs were set up by tboot prior to
 * calling SENTER, but the kernel is expected to reset/tear
 * down the PMRs.
*/
if (intel_iommu_tboot_noforce) {
for_each_iommu(iommu, drhd)
iommu_disable_protect_mem_regions(iommu);
}
/*
* Make sure the IOMMUs are switched off, even when we
* boot into a kexec kernel and the previous kernel left
* them enabled
*/
intel_disable_iommus();
goto out_free_dmar;
}
if (list_empty(&dmar_rmrr_units))
pr_info("No RMRR found\n");
if (list_empty(&dmar_atsr_units))
pr_info("No ATSR found\n");
if (list_empty(&dmar_satc_units))
pr_info("No SATC found\n");
init_no_remapping_devices();
ret = init_dmars();
if (ret) {
if (force_on)
panic("tboot: Failed to initialize DMARs\n");
pr_err("Initialization failed\n");
goto out_free_dmar;
}
up_write(&dmar_global_lock);
init_iommu_pm_ops();
down_read(&dmar_global_lock);
for_each_active_iommu(iommu, drhd) {
/*
* The flush queue implementation does not perform
* page-selective invalidations that are required for efficient
* TLB flushes in virtual environments. The benefit of batching
* is likely to be much lower than the overhead of synchronizing
* the virtual and physical IOMMU page-tables.
*/
if (cap_caching_mode(iommu->cap) &&
!first_level_by_default(iommu)) {
pr_info_once("IOMMU batching disallowed due to virtualization\n");
iommu_set_dma_strict();
}
iommu_device_sysfs_add(&iommu->iommu, NULL,
intel_iommu_groups,
"%s", iommu->name);
/*
* The iommu device probe is protected by the iommu_probe_device_lock.
 * Release the dmar_global_lock before entering the device probe
 * path to avoid an unnecessary lock-order splat.
*/
up_read(&dmar_global_lock);
iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
down_read(&dmar_global_lock);
iommu_pmu_register(iommu);
}
if (probe_acpi_namespace_devices())
pr_warn("ACPI name space devices didn't probe correctly\n");
/* Finally, we enable the DMA remapping hardware. */
for_each_iommu(iommu, drhd) {
if (!drhd->ignored && !translation_pre_enabled(iommu))
iommu_enable_translation(iommu);
iommu_disable_protect_mem_regions(iommu);
}
up_read(&dmar_global_lock);
pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
intel_iommu_enabled = 1;
return 0;
out_free_dmar:
intel_iommu_free_dmars();
up_write(&dmar_global_lock);
return ret;
}
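/* pci_for_each_dma_alias() callback: clear the context entry for one alias. */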
static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
struct device_domain_info *info = opaque;
domain_context_clear_one(info, PCI_BUS_NUM(alias), alias & 0xff);
return 0;
}
/*
* NB - intel-iommu lacks any sort of reference counting for the users of
* dependent devices. If multiple endpoints have intersecting dependent
 * devices, unbinding the driver from any one of them may leave the
 * others unable to operate.
*/
static void domain_context_clear(struct device_domain_info *info)
{
if (!dev_is_pci(info->dev)) {
domain_context_clear_one(info, info->bus, info->devfn);
return;
}
pci_for_each_dma_alias(to_pci_dev(info->dev),
&domain_context_clear_one_cb, info);
iommu_disable_pci_ats(info);
}
/*
* Clear the page table pointer in context or pasid table entries so that
* all DMA requests without PASID from the device are blocked. If the page
* table has been set, clean up the data structures.
*/
void device_block_translation(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
unsigned long flags;
/* Device already in DMA blocking state. Nothing to do. */
if (!info->domain_attached)
return;
if (info->domain)
cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID);
if (!dev_is_real_dma_subdevice(dev)) {
if (sm_supported(iommu))
intel_pasid_tear_down_entry(iommu, dev,
IOMMU_NO_PASID, false);
else
domain_context_clear(info);
}
/* Device now in DMA blocking state. */
info->domain_attached = false;
if (!info->domain)
return;
spin_lock_irqsave(&info->domain->lock, flags);
list_del(&info->link);
spin_unlock_irqrestore(&info->domain->lock, flags);
domain_detach_iommu(info->domain, iommu);
info->domain = NULL;
}
static int blocking_domain_attach_dev(struct iommu_domain *domain,
struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
iopf_for_domain_remove(info->domain ? &info->domain->domain : NULL, dev);
device_block_translation(dev);
return 0;
}
static int blocking_domain_set_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid,
struct iommu_domain *old);
static struct iommu_domain blocking_domain = {
.type = IOMMU_DOMAIN_BLOCKED,
.ops = &(const struct iommu_domain_ops) {
.attach_dev = blocking_domain_attach_dev,
.set_dev_pasid = blocking_domain_set_dev_pasid,
}
};
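/*
 * Report the highest super-page level supported for the given
 * translation stage: 0 means 4KiB pages only, 1 adds 2MiB and 2 adds
 * 1GiB mappings.
 */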
static int iommu_superpage_capability(struct intel_iommu *iommu, bool first_stage)
{
if (!intel_iommu_superpage)
return 0;
if (first_stage)
return cap_fl1gp_support(iommu->cap) ? 2 : 1;
return fls(cap_super_page_val(iommu->cap));
}
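/*
 * Allocate and initialize a paging domain sized to this IOMMU's address
 * width, including the top-level page directory.
 */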
static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_stage)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
struct dmar_domain *domain;
int addr_width;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&domain->devices);
INIT_LIST_HEAD(&domain->dev_pasids);
INIT_LIST_HEAD(&domain->cache_tags);
spin_lock_init(&domain->lock);
spin_lock_init(&domain->cache_lock);
xa_init(&domain->iommu_array);
INIT_LIST_HEAD(&domain->s1_domains);
spin_lock_init(&domain->s1_lock);
domain->nid = dev_to_node(dev);
domain->use_first_level = first_stage;
domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
/* calculate the address width */
addr_width = agaw_to_width(iommu->agaw);
if (addr_width > cap_mgaw(iommu->cap))
addr_width = cap_mgaw(iommu->cap);
domain->gaw = addr_width;
domain->agaw = iommu->agaw;
domain->max_addr = __DOMAIN_MAX_ADDR(addr_width);
/* iommu memory access coherency */
domain->iommu_coherency = iommu_paging_structure_coherency(iommu);
/* pagesize bitmap */
domain->domain.pgsize_bitmap = SZ_4K;
domain->iommu_superpage = iommu_superpage_capability(iommu, first_stage);
domain->domain.pgsize_bitmap |= domain_super_pgsize_bitmap(domain);
/*
* IOVA aperture: First-level translation restricts the input-address
* to a canonical address (i.e., address bits 63:N have the same value
* as address bit [N-1], where N is 48-bits with 4-level paging and
* 57-bits with 5-level paging). Hence, skip bit [N-1].
*/
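/*
 * For example, with gaw == 48 the aperture ends at (1ULL << 47) - 1 for
 * a first-stage domain and at (1ULL << 48) - 1 for a second-stage one.
 */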
domain->domain.geometry.force_aperture = true;
domain->domain.geometry.aperture_start = 0;
if (first_stage)
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
else
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
/* always allocate the top pgd */
domain->pgd = iommu_alloc_pages_node_sz(domain->nid, GFP_KERNEL, SZ_4K);
if (!domain->pgd) {
kfree(domain);
return ERR_PTR(-ENOMEM);
}
domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
return domain;
}
static struct iommu_domain *
intel_iommu_domain_alloc_first_stage(struct device *dev,
struct intel_iommu *iommu, u32 flags)
{
struct dmar_domain *dmar_domain;
if (flags & ~IOMMU_HWPT_ALLOC_PASID)
return ERR_PTR(-EOPNOTSUPP);
/* First stage requires scalable mode and FLTS; only SL is available in legacy mode */
if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
return ERR_PTR(-EOPNOTSUPP);
dmar_domain = paging_domain_alloc(dev, true);
if (IS_ERR(dmar_domain))
return ERR_CAST(dmar_domain);
dmar_domain->domain.ops = &intel_fs_paging_domain_ops;
/*
* iotlb sync for map is only needed for legacy implementations that
* explicitly require flushing internal write buffers to ensure memory
* coherence.
*/
if (rwbf_required(iommu))
dmar_domain->iotlb_sync_map = true;
return &dmar_domain->domain;
}
static struct iommu_domain *
intel_iommu_domain_alloc_second_stage(struct device *dev,
struct intel_iommu *iommu, u32 flags)
{
struct dmar_domain *dmar_domain;
if (flags &
(~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
IOMMU_HWPT_ALLOC_PASID)))
return ERR_PTR(-EOPNOTSUPP);
if (((flags & IOMMU_HWPT_ALLOC_NEST_PARENT) &&
!nested_supported(iommu)) ||
((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) &&
!ssads_supported(iommu)))
return ERR_PTR(-EOPNOTSUPP);
/* Legacy mode always supports second stage */
if (sm_supported(iommu) && !ecap_slts(iommu->ecap))
return ERR_PTR(-EOPNOTSUPP);
dmar_domain = paging_domain_alloc(dev, false);
if (IS_ERR(dmar_domain))
return ERR_CAST(dmar_domain);
dmar_domain->domain.ops = &intel_ss_paging_domain_ops;
dmar_domain->nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
dmar_domain->domain.dirty_ops = &intel_dirty_ops;
/*
 * Besides the internal write buffer flush, the caching mode used for
 * legacy nested translation (which utilizes shadow page tables) also
 * requires an iotlb sync on map.
*/
if (rwbf_required(iommu) || cap_caching_mode(iommu->cap))
dmar_domain->iotlb_sync_map = true;
return &dmar_domain->domain;
}
static struct iommu_domain *
intel_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
const struct iommu_user_data *user_data)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
struct iommu_domain *domain;
if (user_data)
return ERR_PTR(-EOPNOTSUPP);
/* Prefer first-stage translation by default when the hardware supports it. */
domain = intel_iommu_domain_alloc_first_stage(dev, iommu, flags);
if (domain != ERR_PTR(-EOPNOTSUPP))
return domain;
return intel_iommu_domain_alloc_second_stage(dev, iommu, flags);
}
static void intel_iommu_domain_free(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
if (WARN_ON(dmar_domain->nested_parent &&
!list_empty(&dmar_domain->s1_domains)))
return;
if (WARN_ON(!list_empty(&dmar_domain->devices)))
return;
if (dmar_domain->pgd) {
struct iommu_pages_list freelist =
IOMMU_PAGES_LIST_INIT(freelist);
domain_unmap(dmar_domain, 0, DOMAIN_MAX_PFN(dmar_domain->gaw),
&freelist);
iommu_put_pages_list(&freelist);
}
kfree(dmar_domain->qi_batch);
kfree(dmar_domain);
}
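/*
 * Check whether this IOMMU can host an already-allocated first-stage
 * paging domain without violating any of its capabilities.
 */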
static int paging_domain_compatible_first_stage(struct dmar_domain *dmar_domain,
struct intel_iommu *iommu)
{
if (WARN_ON(dmar_domain->domain.dirty_ops ||
dmar_domain->nested_parent))
return -EINVAL;
/* First stage requires scalable mode and FLTS; only SL is available in legacy mode */
if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
return -EINVAL;
/* Same page size support */
if (!cap_fl1gp_support(iommu->cap) &&
(dmar_domain->domain.pgsize_bitmap & SZ_1G))
return -EINVAL;
/* iotlb sync on map requirement */
if ((rwbf_required(iommu)) && !dmar_domain->iotlb_sync_map)
return -EINVAL;
return 0;
}
static int
paging_domain_compatible_second_stage(struct dmar_domain *dmar_domain,
struct intel_iommu *iommu)
{
unsigned int sslps = cap_super_page_val(iommu->cap);
if (dmar_domain->domain.dirty_ops && !ssads_supported(iommu))
return -EINVAL;
if (dmar_domain->nested_parent && !nested_supported(iommu))
return -EINVAL;
/* Legacy mode always supports second stage */
if (sm_supported(iommu) && !ecap_slts(iommu->ecap))
return -EINVAL;
/* Same page size support */
if (!(sslps & BIT(0)) && (dmar_domain->domain.pgsize_bitmap & SZ_2M))
return -EINVAL;
if (!(sslps & BIT(1)) && (dmar_domain->domain.pgsize_bitmap & SZ_1G))
return -EINVAL;
/* iotlb sync on map requirement */
if ((rwbf_required(iommu) || cap_caching_mode(iommu->cap)) &&
!dmar_domain->iotlb_sync_map)
return -EINVAL;
return 0;
}
int paging_domain_compatible(struct iommu_domain *domain, struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct intel_iommu *iommu = info->iommu;
int ret = -EINVAL;
int addr_width;
if (intel_domain_is_fs_paging(dmar_domain))
ret = paging_domain_compatible_first_stage(dmar_domain, iommu);
else if (intel_domain_is_ss_paging(dmar_domain))
ret = paging_domain_compatible_second_stage(dmar_domain, iommu);
else if (WARN_ON(true))
ret = -EINVAL;
if (ret)
return ret;
/*
* FIXME this is locked wrong, it needs to be under the
* dmar_domain->lock
*/
if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap))
return -EINVAL;
if (dmar_domain->iommu_coherency !=
iommu_paging_structure_coherency(iommu))
return -EINVAL;
/* check if this iommu agaw is sufficient for max mapped address */
addr_width = agaw_to_width(iommu->agaw);
if (addr_width > cap_mgaw(iommu->cap))
addr_width = cap_mgaw(iommu->cap);
if (dmar_domain->gaw > addr_width || dmar_domain->agaw > iommu->agaw)
return -EINVAL;
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
context_copied(iommu, info->bus, info->devfn))
return intel_pasid_setup_sm_context(dev);
return 0;
}
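/*
 * Attach path: block DMA from the device first, then verify that the
 * domain is compatible with this IOMMU before wiring it up.
 */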
static int intel_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
int ret;
device_block_translation(dev);
ret = paging_domain_compatible(domain, dev);
if (ret)
return ret;
ret = iopf_for_domain_set(domain, dev);
if (ret)
return ret;
ret = dmar_domain_attach_device(to_dmar_domain(domain), dev);
if (ret)
iopf_for_domain_remove(domain, dev);
return ret;
}
static int intel_iommu_map(struct iommu_domain *domain,
unsigned long iova, phys_addr_t hpa,
size_t size, int iommu_prot, gfp_t gfp)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
u64 max_addr;
int prot = 0;
if (iommu_prot & IOMMU_READ)
prot |= DMA_PTE_READ;
if (iommu_prot & IOMMU_WRITE)
prot |= DMA_PTE_WRITE;
if (dmar_domain->set_pte_snp)
prot |= DMA_PTE_SNP;
max_addr = iova + size;
if (dmar_domain->max_addr < max_addr) {
u64 end;
/* check if minimum agaw is sufficient for mapped address */
end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
if (end < max_addr) {
pr_err("%s: iommu width (%d) is not "
"sufficient for the mapped address (%llx)\n",
__func__, dmar_domain->gaw, max_addr);
return -EFAULT;
}
dmar_domain->max_addr = max_addr;
}
/*
 * Round up the size to the next multiple of PAGE_SIZE if it and the
 * low bits of hpa would take us onto the next page.
 */
size = aligned_nrpages(hpa, size);
return __domain_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
hpa >> VTD_PAGE_SHIFT, size, prot, gfp);
}
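/*
 * Map pgcount pages of size pgsize (4KiB, 2MiB or 1GiB only); iova and
 * paddr must both be aligned to pgsize.
 */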
static int intel_iommu_map_pages(struct iommu_domain *domain,
unsigned long iova, phys_addr_t paddr,
size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
{
unsigned long pgshift = __ffs(pgsize);
size_t size = pgcount << pgshift;
int ret;
if (pgsize != SZ_4K && pgsize != SZ_2M && pgsize != SZ_1G)
return -EINVAL;
if (!IS_ALIGNED(iova | paddr, pgsize))
return -EINVAL;
ret = intel_iommu_map(domain, iova, paddr, size, prot, gfp);
if (!ret && mapped)
*mapped = size;
return ret;
}
static size_t intel_iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *gather)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
unsigned long start_pfn, last_pfn;
int level = 0;
	/*
	 * Cope with horrid API which requires us to unmap more than the
	 * size argument if it happens to be a large-page mapping.
	 */
if (unlikely(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT,
&level, GFP_ATOMIC)))
return 0;
if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
size = VTD_PAGE_SIZE << level_to_offset_bits(level);
start_pfn = iova >> VTD_PAGE_SHIFT;
last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
domain_unmap(dmar_domain, start_pfn, last_pfn, &gather->freelist);
if (dmar_domain->max_addr == iova + size)
dmar_domain->max_addr = iova;
	/*
	 * We do not use page-selective IOTLB invalidation in the flush
	 * queue path, so there is no need to track pages and sync the
	 * IOTLB here.
	 */
if (!iommu_iotlb_gather_queued(gather))
iommu_iotlb_gather_add_page(domain, gather, iova, size);
return size;
}
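
/* iommu_ops unmap_pages entry point; converts the page count to bytes. */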
static size_t intel_iommu_unmap_pages(struct iommu_domain *domain,
unsigned long iova,
size_t pgsize, size_t pgcount,
struct iommu_iotlb_gather *gather)
{
unsigned long pgshift = __ffs(pgsize);
size_t size = pgcount << pgshift;
return intel_iommu_unmap(domain, iova, size, gather);
}
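
/*
 * Flush the IOTLB for the range accumulated in @gather, then release
 * the page-table pages queued on its freelist.
 */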
static void intel_iommu_tlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
cache_tag_flush_range(to_dmar_domain(domain), gather->start,
gather->end,
iommu_pages_list_empty(&gather->freelist));
iommu_put_pages_list(&gather->freelist);
}
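
/*
 * Walk the domain's page table and translate @iova into a physical
 * address. Returns 0 if no present PTE covers the address.
 */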
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct dma_pte *pte;
int level = 0;
u64 phys = 0;
pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level,
GFP_ATOMIC);
if (pte && dma_pte_present(pte))
phys = dma_pte_addr(pte) +
(iova & (BIT_MASK(level_to_offset_bits(level) +
VTD_PAGE_SHIFT) - 1));
return phys;
}
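
/*
 * Snooping can only be enforced if every IOMMU with a device attached
 * to this domain supports snoop control (ECAP.SC).
 */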
static bool domain_support_force_snooping(struct dmar_domain *domain)
{
struct device_domain_info *info;
bool support = true;
assert_spin_locked(&domain->lock);
list_for_each_entry(info, &domain->devices, link) {
if (!ecap_sc_support(info->iommu->ecap)) {
support = false;
break;
}
}
return support;
}
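
/*
 * Enforce cache coherency on a first-stage domain. Snooping is
 * controlled per PASID-table entry here, so the entry of every
 * attached device is updated.
 */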
static bool intel_iommu_enforce_cache_coherency_fs(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct device_domain_info *info;
guard(spinlock_irqsave)(&dmar_domain->lock);
if (dmar_domain->force_snooping)
return true;
if (!domain_support_force_snooping(dmar_domain))
return false;
dmar_domain->force_snooping = true;
list_for_each_entry(info, &dmar_domain->devices, link)
intel_pasid_setup_page_snoop_control(info->iommu, info->dev,
IOMMU_NO_PASID);
return true;
}
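
/*
 * Enforce cache coherency on a second-stage domain. Snooping is
 * controlled per PTE, so it must be decided before any mapping exists;
 * all subsequent mappings will carry the SNP bit.
 */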
static bool intel_iommu_enforce_cache_coherency_ss(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
guard(spinlock_irqsave)(&dmar_domain->lock);
if (!domain_support_force_snooping(dmar_domain) ||
dmar_domain->has_mappings)
return false;
	/*
	 * Second-level page tables support per-PTE snoop control. The
	 * iommu_map() interface will handle this by setting the SNP bit.
	 */
dmar_domain->set_pte_snp = true;
dmar_domain->force_snooping = true;
return true;
}
static bool intel_iommu_capable(struct device *dev, enum iommu_cap cap)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
case IOMMU_CAP_DEFERRED_FLUSH:
return true;
case IOMMU_CAP_PRE_BOOT_PROTECTION:
return dmar_platform_optin();
case IOMMU_CAP_ENFORCE_CACHE_COHERENCY:
return ecap_sc_support(info->iommu->ecap);
case IOMMU_CAP_DIRTY_TRACKING:
return ssads_supported(info->iommu);
default:
return false;
}
}
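
/*
 * Per-device probe: look up the IOMMU unit serving @dev, allocate its
 * device_domain_info, and discover the ATS, PASID and PRI capabilities
 * that later attach operations rely on.
 */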
static struct iommu_device *intel_iommu_probe_device(struct device *dev)
{
struct pci_dev *pdev = dev_is_pci(dev) ? to_pci_dev(dev) : NULL;
struct device_domain_info *info;
struct intel_iommu *iommu;
u8 bus, devfn;
int ret;
iommu = device_lookup_iommu(dev, &bus, &devfn);
if (!iommu || !iommu->iommu.ops)
return ERR_PTR(-ENODEV);
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return ERR_PTR(-ENOMEM);
if (dev_is_real_dma_subdevice(dev)) {
info->bus = pdev->bus->number;
info->devfn = pdev->devfn;
info->segment = pci_domain_nr(pdev->bus);
} else {
info->bus = bus;
info->devfn = devfn;
info->segment = iommu->segment;
}
info->dev = dev;
info->iommu = iommu;
if (dev_is_pci(dev)) {
if (ecap_dev_iotlb_support(iommu->ecap) &&
pci_ats_supported(pdev) &&
dmar_ats_supported(pdev, iommu)) {
info->ats_supported = 1;
info->dtlb_extra_inval = dev_needs_extra_dtlb_flush(pdev);
/*
* For IOMMU that supports device IOTLB throttling
* (DIT), we assign PFSID to the invalidation desc
* of a VF such that IOMMU HW can gauge queue depth
* at PF level. If DIT is not set, PFSID will be
* treated as reserved, which should be set to 0.
*/
if (ecap_dit(iommu->ecap))
info->pfsid = pci_dev_id(pci_physfn(pdev));
info->ats_qdep = pci_ats_queue_depth(pdev);
}
if (sm_supported(iommu)) {
if (pasid_supported(iommu)) {
int features = pci_pasid_features(pdev);
if (features >= 0)
info->pasid_supported = features | 1;
}
if (info->ats_supported && ecap_prs(iommu->ecap) &&
pci_pri_supported(pdev))
info->pri_supported = 1;
}
}
dev_iommu_priv_set(dev, info);
if (pdev && pci_ats_supported(pdev)) {
pci_prepare_ats(pdev, VTD_PAGE_SHIFT);
ret = device_rbtree_insert(iommu, info);
if (ret)
goto free;
}
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
ret = intel_pasid_alloc_table(dev);
if (ret) {
dev_err(dev, "PASID table allocation failed\n");
goto clear_rbtree;
}
if (!context_copied(iommu, info->bus, info->devfn)) {
ret = intel_pasid_setup_sm_context(dev);
if (ret)
goto free_table;
}
}
intel_iommu_debugfs_create_dev(info);
return &iommu->iommu;
free_table:
intel_pasid_free_table(dev);
clear_rbtree:
device_rbtree_remove(info);
free:
kfree(info);
return ERR_PTR(ret);
}
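
/*
 * Late per-device setup, run after the default domain has been
 * attached: enable PASID, ATS and PRI on the PCI device where
 * supported.
 */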
static void intel_iommu_probe_finalize(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
/*
* The PCIe spec, in its wisdom, declares that the behaviour of the
* device is undefined if you enable PASID support after ATS support.
* So always enable PASID support on devices which have it, even if
* we can't yet know if we're ever going to use it.
*/
if (info->pasid_supported &&
!pci_enable_pasid(to_pci_dev(dev), info->pasid_supported & ~1))
info->pasid_enabled = 1;
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
iommu_enable_pci_ats(info);
/* Assign a DEVTLB cache tag to the default domain. */
if (info->ats_enabled && info->domain) {
u16 did = domain_id_iommu(info->domain, iommu);
if (cache_tag_assign(info->domain, did, dev,
IOMMU_NO_PASID, CACHE_TAG_DEVTLB))
iommu_disable_pci_ats(info);
}
}
iommu_enable_pci_pri(info);
}
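
/*
 * Undo everything probe_device/probe_finalize set up when the device
 * leaves the IOMMU core.
 */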
static void intel_iommu_release_device(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
iommu_disable_pci_pri(info);
iommu_disable_pci_ats(info);
if (info->pasid_enabled) {
pci_disable_pasid(to_pci_dev(dev));
info->pasid_enabled = 0;
}
mutex_lock(&iommu->iopf_lock);
if (dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev)))
device_rbtree_remove(info);
mutex_unlock(&iommu->iopf_lock);
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
!context_copied(iommu, info->bus, info->devfn))
intel_pasid_teardown_sm_context(dev);
intel_pasid_free_table(dev);
intel_iommu_debugfs_remove_dev(info);
kfree(info);
}
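
/*
 * Report reserved regions for @device: RMRR ranges that must remain
 * identity-mapped, the legacy ISA window used by the floppy
 * workaround, and the IOAPIC MSI range.
 */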
static void intel_iommu_get_resv_regions(struct device *device,
struct list_head *head)
{
int prot = DMA_PTE_READ | DMA_PTE_WRITE;
struct iommu_resv_region *reg;
struct dmar_rmrr_unit *rmrr;
struct device *i_dev;
int i;
rcu_read_lock();
for_each_rmrr_units(rmrr) {
for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
i, i_dev) {
struct iommu_resv_region *resv;
enum iommu_resv_type type;
size_t length;
if (i_dev != device &&
!is_downstream_to_pci_bridge(device, i_dev))
continue;
length = rmrr->end_address - rmrr->base_address + 1;
type = device_rmrr_is_relaxable(device) ?
IOMMU_RESV_DIRECT_RELAXABLE : IOMMU_RESV_DIRECT;
resv = iommu_alloc_resv_region(rmrr->base_address,
length, prot, type,
GFP_ATOMIC);
if (!resv)
break;
list_add_tail(&resv->list, head);
}
}
rcu_read_unlock();
#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
if (dev_is_pci(device)) {
struct pci_dev *pdev = to_pci_dev(device);
if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) {
reg = iommu_alloc_resv_region(0, 1UL << 24, prot,
IOMMU_RESV_DIRECT_RELAXABLE,
GFP_KERNEL);
if (reg)
list_add_tail(&reg->list, head);
}
}
#endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
0, IOMMU_RESV_MSI, GFP_KERNEL);
if (!reg)
return;
list_add_tail(&reg->list, head);
}
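
/*
 * PCI devices use PCI topology-aware grouping; all other devices
 * (e.g. ACPI-enumerated ones) get a generic group.
 */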
static struct iommu_group *intel_iommu_device_group(struct device *dev)
{
if (dev_is_pci(dev))
return pci_device_group(dev);
return generic_device_group(dev);
}
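
/*
 * Reference-counted enabling of I/O page faults: the device is added
 * to its IOMMU's IOPF queue on the first request and removed again by
 * the matching last intel_iommu_disable_iopf() call.
 */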
int intel_iommu_enable_iopf(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
int ret;
if (!info->pri_enabled)
return -ENODEV;
/* pri_enabled is protected by the group mutex. */
iommu_group_mutex_assert(dev);
if (info->iopf_refcount) {
info->iopf_refcount++;
return 0;
}
ret = iopf_queue_add_device(iommu->iopf_queue, dev);
if (ret)
return ret;
info->iopf_refcount = 1;
return 0;
}
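
/* Drop one IOPF reference; remove the device from the queue on the last. */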
void intel_iommu_disable_iopf(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
if (WARN_ON(!info->pri_enabled || !info->iopf_refcount))
return;
iommu_group_mutex_assert(dev);
if (--info->iopf_refcount)
return;
iopf_queue_remove_device(iommu->iopf_queue, dev);
}
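
/*
 * With translation left enabled by the firmware (e.g. kdump), default
 * domain attachment is deferred until the device is first used.
 */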
static bool intel_iommu_is_attach_deferred(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
return translation_pre_enabled(info->iommu) && !info->domain;
}
/*
 * Check that the device does not live on an external-facing PCI port that
 * is marked as untrusted. Such devices should not be able to apply quirks
 * and thus not be able to bypass the IOMMU restrictions.
 */
static bool risky_device(struct pci_dev *pdev)
{
if (pdev->untrusted) {
pci_info(pdev,
"Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n",
pdev->vendor, pdev->device);
pci_info(pdev, "Please check with your BIOS/Platform vendor about this\n");
return true;
}
return false;
}
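
/*
 * iotlb_sync_map callback. The flush is only needed for domains whose
 * IOMMUs operate in caching mode or require explicit write-buffer
 * flushing, as recorded in the domain's iotlb_sync_map flag.
 */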
static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
if (dmar_domain->iotlb_sync_map)
cache_tag_flush_range_np(dmar_domain, iova, iova + size - 1);
return 0;
}
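
/*
 * Drop the {device, pasid} tracking state of @domain: unlink the
 * dev_pasid entry, release the cache tag and detach the domain from
 * the IOMMU.
 */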
void domain_remove_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct dev_pasid_info *curr, *dev_pasid = NULL;
struct intel_iommu *iommu = info->iommu;
struct dmar_domain *dmar_domain;
unsigned long flags;
if (!domain)
return;
	/* Identity domain has no metadata for the PASID. */
if (domain->type == IOMMU_DOMAIN_IDENTITY)
return;
dmar_domain = to_dmar_domain(domain);
spin_lock_irqsave(&dmar_domain->lock, flags);
list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) {
if (curr->dev == dev && curr->pasid == pasid) {
list_del(&curr->link_domain);
dev_pasid = curr;
break;
}
}
spin_unlock_irqrestore(&dmar_domain->lock, flags);
cache_tag_unassign_domain(dmar_domain, dev, pasid);
domain_detach_iommu(dmar_domain, iommu);
if (!WARN_ON_ONCE(!dev_pasid)) {
intel_iommu_debugfs_remove_dev_pasid(dev_pasid);
kfree(dev_pasid);
}
}
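
/*
 * Switch @pasid of @dev to the blocking domain: tear down the PASID
 * entry first so that no new faults can be generated, then drop the
 * IOPF configuration and the old domain's tracking state.
 */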
static int blocking_domain_set_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid,
struct iommu_domain *old)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
intel_pasid_tear_down_entry(info->iommu, dev, pasid, false);
iopf_for_domain_remove(old, dev);
domain_remove_dev_pasid(old, dev, pasid);
return 0;
}
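
/*
 * Record that @domain is attached to @dev through @pasid: attach the
 * domain to the IOMMU, assign a cache tag and queue a tracking entry
 * on the domain's dev_pasids list.
 */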
struct dev_pasid_info *
domain_add_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct intel_iommu *iommu = info->iommu;
struct dev_pasid_info *dev_pasid;
unsigned long flags;
int ret;
dev_pasid = kzalloc(sizeof(*dev_pasid), GFP_KERNEL);
if (!dev_pasid)
return ERR_PTR(-ENOMEM);
ret = domain_attach_iommu(dmar_domain, iommu);
if (ret)
goto out_free;
ret = cache_tag_assign_domain(dmar_domain, dev, pasid);
if (ret)
goto out_detach_iommu;
dev_pasid->dev = dev;
dev_pasid->pasid = pasid;
spin_lock_irqsave(&dmar_domain->lock, flags);
list_add(&dev_pasid->link_domain, &dmar_domain->dev_pasids);
spin_unlock_irqrestore(&dmar_domain->lock, flags);
return dev_pasid;
out_detach_iommu:
domain_detach_iommu(dmar_domain, iommu);
out_free:
kfree(dev_pasid);
return ERR_PTR(ret);
}
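
/*
 * Attach a paging domain to @dev through @pasid, replacing @old. The
 * PASID-table entry is programmed for first- or second-stage
 * translation depending on the domain type; every step is unwound on
 * failure.
 */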
static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid,
struct iommu_domain *old)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct intel_iommu *iommu = info->iommu;
struct dev_pasid_info *dev_pasid;
int ret;
if (WARN_ON_ONCE(!(domain->type & __IOMMU_DOMAIN_PAGING)))
return -EINVAL;
if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev))
return -EOPNOTSUPP;
if (domain->dirty_ops)
return -EINVAL;
if (context_copied(iommu, info->bus, info->devfn))
return -EBUSY;
ret = paging_domain_compatible(domain, dev);
if (ret)
return ret;
dev_pasid = domain_add_dev_pasid(domain, dev, pasid);
if (IS_ERR(dev_pasid))
return PTR_ERR(dev_pasid);
ret = iopf_for_domain_replace(domain, old, dev);
if (ret)
goto out_remove_dev_pasid;
if (intel_domain_is_fs_paging(dmar_domain))
ret = domain_setup_first_level(iommu, dmar_domain,
dev, pasid, old);
else if (intel_domain_is_ss_paging(dmar_domain))
ret = domain_setup_second_level(iommu, dmar_domain,
dev, pasid, old);
else if (WARN_ON(true))
ret = -EINVAL;
if (ret)
goto out_unwind_iopf;
domain_remove_dev_pasid(old, dev, pasid);
intel_iommu_debugfs_create_dev_pasid(dev_pasid);
return 0;
out_unwind_iopf:
iopf_for_domain_replace(old, domain, dev);
out_remove_dev_pasid:
domain_remove_dev_pasid(domain, dev, pasid);
return ret;
}
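/*
 * Report VT-d hardware details to user space via iommufd: the raw
 * capability and extended capability registers plus errata flags.
 */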
static void *intel_iommu_hw_info(struct device *dev, u32 *length,
enum iommu_hw_info_type *type)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
struct iommu_hw_info_vtd *vtd;
if (*type != IOMMU_HW_INFO_TYPE_DEFAULT &&
*type != IOMMU_HW_INFO_TYPE_INTEL_VTD)
return ERR_PTR(-EOPNOTSUPP);
vtd = kzalloc(sizeof(*vtd), GFP_KERNEL);
if (!vtd)
return ERR_PTR(-ENOMEM);
vtd->flags = IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17;
vtd->cap_reg = iommu->cap;
vtd->ecap_reg = iommu->ecap;
*length = sizeof(*vtd);
*type = IOMMU_HW_INFO_TYPE_INTEL_VTD;
return vtd;
}
/*
* Set dirty tracking for the device list of a domain. The caller must
* hold the domain->lock when calling it.
*/
static int device_set_dirty_tracking(struct list_head *devices, bool enable)
{
struct device_domain_info *info;
int ret = 0;
list_for_each_entry(info, devices, link) {
ret = intel_pasid_setup_dirty_tracking(info->iommu, info->dev,
IOMMU_NO_PASID, enable);
if (ret)
break;
}
return ret;
}
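/*
 * Propagate a dirty tracking change on a nested parent domain to the
 * devices attached through each first-stage domain nested on it,
 * restoring the previous setting on failure.
 */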
static int parent_domain_set_dirty_tracking(struct dmar_domain *domain,
bool enable)
{
struct dmar_domain *s1_domain;
unsigned long flags;
int ret;
spin_lock(&domain->s1_lock);
list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
spin_lock_irqsave(&s1_domain->lock, flags);
ret = device_set_dirty_tracking(&s1_domain->devices, enable);
spin_unlock_irqrestore(&s1_domain->lock, flags);
if (ret)
goto err_unwind;
}
spin_unlock(&domain->s1_lock);
return 0;
err_unwind:
list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
spin_lock_irqsave(&s1_domain->lock, flags);
device_set_dirty_tracking(&s1_domain->devices,
domain->dirty_tracking);
spin_unlock_irqrestore(&s1_domain->lock, flags);
}
spin_unlock(&domain->s1_lock);
return ret;
}
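/*
 * Enable or disable dirty tracking for a domain: update every directly
 * attached device and, for a nested parent, every nested first-stage
 * domain. Unwinds to the previous setting if any device fails.
 */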
static int intel_iommu_set_dirty_tracking(struct iommu_domain *domain,
bool enable)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
int ret;
spin_lock(&dmar_domain->lock);
if (dmar_domain->dirty_tracking == enable)
goto out_unlock;
ret = device_set_dirty_tracking(&dmar_domain->devices, enable);
if (ret)
goto err_unwind;
if (dmar_domain->nested_parent) {
ret = parent_domain_set_dirty_tracking(dmar_domain, enable);
if (ret)
goto err_unwind;
}
dmar_domain->dirty_tracking = enable;
out_unlock:
spin_unlock(&dmar_domain->lock);
return 0;
err_unwind:
device_set_dirty_tracking(&dmar_domain->devices,
dmar_domain->dirty_tracking);
spin_unlock(&dmar_domain->lock);
return ret;
}
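/*
 * Walk the second-stage page table over [iova, iova + size), test and
 * clear the dirty bit of each present PTE, and record dirtied ranges
 * in the IOVA bitmap. Ranges are reported at the page size of the
 * level at which the PTE was found.
 */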
static int intel_iommu_read_and_clear_dirty(struct iommu_domain *domain,
unsigned long iova, size_t size,
unsigned long flags,
struct iommu_dirty_bitmap *dirty)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
unsigned long end = iova + size - 1;
unsigned long pgsize;
/*
 * The IOMMUFD core calls into a domain with dirty tracking disabled,
 * and without an IOVA bitmap set, in order to clear dirty bits in any
 * PTEs that may have been set while tracking was stopped. This ensures
 * that we never inherit dirtied bits from a previous cycle.
 */
if (!dmar_domain->dirty_tracking && dirty->bitmap)
return -EINVAL;
do {
struct dma_pte *pte;
int lvl = 0;
pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &lvl,
GFP_ATOMIC);
pgsize = level_size(lvl) << VTD_PAGE_SHIFT;
if (!pte || !dma_pte_present(pte)) {
iova += pgsize;
continue;
}
if (dma_sl_pte_test_and_clear_dirty(pte, flags))
iommu_dirty_bitmap_record(dirty, iova, pgsize);
iova += pgsize;
} while (iova < end);
return 0;
}
static const struct iommu_dirty_ops intel_dirty_ops = {
.set_dirty_tracking = intel_iommu_set_dirty_tracking,
.read_and_clear_dirty = intel_iommu_read_and_clear_dirty,
};
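/*
 * Install a pass-through context entry for (bus, devfn). An entry that
 * is already present and was not copied from a previous kernel is left
 * untouched so that devices aliased to the same entry keep working;
 * otherwise the entry is (re)programmed with CONTEXT_TT_PASS_THROUGH
 * and the caches are flushed.
 */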
static int context_setup_pass_through(struct device *dev, u8 bus, u8 devfn)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
struct context_entry *context;
spin_lock(&iommu->lock);
context = iommu_context_addr(iommu, bus, devfn, 1);
if (!context) {
spin_unlock(&iommu->lock);
return -ENOMEM;
}
if (context_present(context) && !context_copied(iommu, bus, devfn)) {
spin_unlock(&iommu->lock);
return 0;
}
copied_context_tear_down(iommu, context, bus, devfn);
context_clear_entry(context);
context_set_domain_id(context, FLPT_DEFAULT_DID);
/*
* In pass-through mode, AW must be programmed to indicate the largest
* AGAW value supported by hardware, and ASR is ignored by hardware.
*/
context_set_address_width(context, iommu->msagaw);
context_set_translation_type(context, CONTEXT_TT_PASS_THROUGH);
context_set_fault_enable(context);
context_set_present(context);
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(context, sizeof(*context));
context_present_cache_flush(iommu, FLPT_DEFAULT_DID, bus, devfn);
spin_unlock(&iommu->lock);
return 0;
}
static int context_setup_pass_through_cb(struct pci_dev *pdev, u16 alias, void *data)
{
struct device *dev = data;
return context_setup_pass_through(dev, PCI_BUS_NUM(alias), alias & 0xff);
}
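/*
 * Set up pass-through context entries for a device. For PCI devices
 * the entry is programmed for every DMA alias; other devices get a
 * single entry at their (bus, devfn).
 */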
static int device_setup_pass_through(struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
if (!dev_is_pci(dev))
return context_setup_pass_through(dev, info->bus, info->devfn);
return pci_for_each_dma_alias(to_pci_dev(dev),
context_setup_pass_through_cb, dev);
}
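/*
 * Attach the global identity domain: block translation for the device
 * first, then program pass-through translation, either through the
 * PASID table in scalable mode or through a legacy context entry.
 */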
static int identity_domain_attach_dev(struct iommu_domain *domain, struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
int ret;
device_block_translation(dev);
if (dev_is_real_dma_subdevice(dev))
return 0;
/*
* No PRI support with the global identity domain. There is no need to
* enable or disable PRI in this path, as the device has been put in
* the blocking state.
*/
if (sm_supported(iommu))
ret = intel_pasid_setup_pass_through(iommu, dev, IOMMU_NO_PASID);
else
ret = device_setup_pass_through(dev);
if (!ret)
info->domain_attached = true;
return ret;
}
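/*
 * Attach the identity domain to a {device, PASID} pair by programming
 * a pass-through PASID entry, then detach the old domain.
 */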
static int identity_domain_set_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid,
struct iommu_domain *old)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
int ret;
if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev))
return -EOPNOTSUPP;
ret = iopf_for_domain_replace(domain, old, dev);
if (ret)
return ret;
ret = domain_setup_passthrough(iommu, dev, pasid, old);
if (ret) {
iopf_for_domain_replace(old, domain, dev);
return ret;
}
domain_remove_dev_pasid(old, dev, pasid);
return 0;
}
static struct iommu_domain identity_domain = {
.type = IOMMU_DOMAIN_IDENTITY,
.ops = &(const struct iommu_domain_ops) {
.attach_dev = identity_domain_attach_dev,
.set_dev_pasid = identity_domain_set_dev_pasid,
},
};
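/*
 * First-stage and second-stage paging domains share the same callbacks
 * except for enforce_cache_coherency, which is stage specific.
 */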
const struct iommu_domain_ops intel_fs_paging_domain_ops = {
.attach_dev = intel_iommu_attach_device,
.set_dev_pasid = intel_iommu_set_dev_pasid,
.map_pages = intel_iommu_map_pages,
.unmap_pages = intel_iommu_unmap_pages,
.iotlb_sync_map = intel_iommu_iotlb_sync_map,
.flush_iotlb_all = intel_flush_iotlb_all,
.iotlb_sync = intel_iommu_tlb_sync,
.iova_to_phys = intel_iommu_iova_to_phys,
.free = intel_iommu_domain_free,
.enforce_cache_coherency = intel_iommu_enforce_cache_coherency_fs,
};
const struct iommu_domain_ops intel_ss_paging_domain_ops = {
.attach_dev = intel_iommu_attach_device,
.set_dev_pasid = intel_iommu_set_dev_pasid,
.map_pages = intel_iommu_map_pages,
.unmap_pages = intel_iommu_unmap_pages,
.iotlb_sync_map = intel_iommu_iotlb_sync_map,
.flush_iotlb_all = intel_flush_iotlb_all,
.iotlb_sync = intel_iommu_tlb_sync,
.iova_to_phys = intel_iommu_iova_to_phys,
.free = intel_iommu_domain_free,
.enforce_cache_coherency = intel_iommu_enforce_cache_coherency_ss,
};
const struct iommu_ops intel_iommu_ops = {
.blocked_domain = &blocking_domain,
.release_domain = &blocking_domain,
.identity_domain = &identity_domain,
.capable = intel_iommu_capable,
.hw_info = intel_iommu_hw_info,
.domain_alloc_paging_flags = intel_iommu_domain_alloc_paging_flags,
.domain_alloc_sva = intel_svm_domain_alloc,
.domain_alloc_nested = intel_iommu_domain_alloc_nested,
.probe_device = intel_iommu_probe_device,
.probe_finalize = intel_iommu_probe_finalize,
.release_device = intel_iommu_release_device,
.get_resv_regions = intel_iommu_get_resv_regions,
.device_group = intel_iommu_device_group,
.is_attach_deferred = intel_iommu_is_attach_deferred,
.def_domain_type = device_def_domain_type,
.page_response = intel_iommu_page_response,
};
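/*
 * DMA remapping is broken for the integrated graphics device on the
 * chipsets below; disable the IOMMU for graphics entirely on them.
 */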
static void quirk_iommu_igfx(struct pci_dev *dev)
{
if (risky_device(dev))
return;
pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
disable_igfx_iommu = 1;
}
/* G4x/GM45 integrated gfx dmar support is totally busted. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx);
/* QM57/QS57 integrated gfx malfunctions with dmar */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_iommu_igfx);
/* Broadwell igfx malfunctions with dmar */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160E, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1602, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160A, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160D, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1616, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161B, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161E, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1612, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161A, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x161D, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1626, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162B, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162E, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1622, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162A, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x162D, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1636, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163B, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163E, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1632, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163A, quirk_iommu_igfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
static void quirk_iommu_rwbf(struct pci_dev *dev)
{
if (risky_device(dev))
return;
/*
* Mobile 4 Series Chipset neglects to set RWBF capability,
* but needs it. Same seems to hold for the desktop versions.
*/
pci_info(dev, "Forcing write-buffer flush capability\n");
rwbf_quirk = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
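/*
 * GGC is the graphics control register in the host bridge's PCI config
 * space. Bits 11:8 encode how much memory the BIOS set aside for the
 * GTT; GGC_MEMORY_VT_ENABLED indicates that shadow GTT space for VT-d
 * was allocated.
 */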
#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK (0xf << 8)
#define GGC_MEMORY_SIZE_NONE (0x0 << 8)
#define GGC_MEMORY_SIZE_1M (0x1 << 8)
#define GGC_MEMORY_SIZE_2M (0x3 << 8)
#define GGC_MEMORY_VT_ENABLED (0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT (0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
unsigned short ggc;
if (risky_device(dev))
return;
if (pci_read_config_word(dev, GGC, &ggc))
return;
if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
disable_igfx_iommu = 1;
} else if (!disable_igfx_iommu) {
/* we have to ensure the gfx device is idle before we flush */
pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
iommu_set_dma_strict();
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
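/*
 * On some integrated graphics platforms, clearing the translation
 * enable (TE) bit of the graphics-dedicated IOMMU while the device is
 * active is known to cause malfunction. Match the affected generations
 * by the high byte of the PCI device ID and keep TE set when disabling
 * translation.
 */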
static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
{
unsigned short ver;
if (!IS_GFX_DEVICE(dev))
return;
ver = (dev->device >> 8) & 0xff;
if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
ver != 0x4e && ver != 0x8a && ver != 0x98 &&
ver != 0x9a && ver != 0xa7 && ver != 0x7d)
return;
if (risky_device(dev))
return;
pci_info(dev, "Skip IOMMU disabling for graphics\n");
iommu_skip_te_disable = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_igfx_skip_te_disable);
/* On Tylersburg chipsets, some BIOSes have been known to enable the
ISOCH DMAR unit for the Azalia sound device, but not give it any
TLB entries, which causes it to deadlock. Check for that. We do
this in a function called from init_dmars(), instead of in a PCI
quirk, because we don't want to print the obnoxious "BIOS broken"
message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
struct pci_dev *pdev;
uint32_t vtisochctrl;
/* If there's no Azalia in the system anyway, forget it. */
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
if (!pdev)
return;
if (risky_device(pdev)) {
pci_dev_put(pdev);
return;
}
pci_dev_put(pdev);
/* System Management Registers. Might be hidden, in which case
we can't do the sanity check. But that's OK, because the
known-broken BIOSes _don't_ actually hide it, so far. */
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
if (!pdev)
return;
if (risky_device(pdev)) {
pci_dev_put(pdev);
return;
}
if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
pci_dev_put(pdev);
return;
}
pci_dev_put(pdev);
/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
if (vtisochctrl & 1)
return;
/* Drop all bits other than the number of TLB entries */
vtisochctrl &= 0x1c;
/* If we have the recommended number of TLB entries (16), fine. */
if (vtisochctrl == 0x10)
return;
/* Zero TLB entries? You get to ride the short bus to school. */
if (!vtisochctrl) {
WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
dmi_get_system_info(DMI_BIOS_VENDOR),
dmi_get_system_info(DMI_BIOS_VERSION),
dmi_get_system_info(DMI_PRODUCT_VERSION));
iommu_identity_mapping |= IDENTMAP_AZALIA;
return;
}
pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
vtisochctrl);
}
/*
* Here we deal with a device TLB defect where a device may inadvertently
* issue an ATS invalidation completion before posted writes that were
* initiated with a translated address matching the invalidation address
* range, violating the invalidation completion ordering.
* Therefore, any use case that cannot guarantee DMA is stopped before
* unmap is vulnerable to this defect. In other words, any dTLB
* invalidation initiated not under the control of the trusted/privileged
* host device driver must use this quirk.
* Device TLBs are invalidated under the following six conditions:
* 1. Device driver does DMA API unmap IOVA
* 2. Device driver unbind a PASID from a process, sva_unbind_device()
* 3. PASID is torn down, after PASID cache is flushed. e.g. process
* exit_mmap() due to crash
* 4. Under SVA usage, called by mmu_notifier.invalidate_range() where
* VM has to free pages that were unmapped
* 5. Userspace driver unmaps a DMA buffer
* 6. Cache invalidation in vSVA usage (upcoming)
*
* For #1 and #2, device drivers are responsible for stopping DMA traffic
* before unmap/unbind. For #3, the iommu driver is invoked through the
* mmu_notifier and invalidates the TLB the same way as a normal user
* unmap, which will use this quirk. The dTLB invalidation after a
* PASID cache flush does not need this quirk.
*
* As a reminder, #6 will *NEED* this quirk as we enable nested translation.
*/
void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
unsigned long address, unsigned long mask,
u32 pasid, u16 qdep)
{
u16 sid;
if (likely(!info->dtlb_extra_inval))
return;
sid = PCI_DEVID(info->bus, info->devfn);
if (pasid == IOMMU_NO_PASID) {
qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
qdep, address, mask);
} else {
qi_flush_dev_iotlb_pasid(info->iommu, sid, info->pfsid,
pasid, qdep, address, mask);
}
}
#define ecmd_get_status_code(res) (((res) & 0xff) >> 1)
/*
* Function to submit a command to the enhanced command interface. The
* valid enhanced command descriptions are defined in Table 47 of the
* VT-d spec. The VT-d hardware implementation may support some but not
* all commands, which can be determined by checking the Enhanced
* Command Capability Register.
*
* Return values:
* - 0: Command successful without any error;
* - Negative: software error value;
* - Nonzero positive: failure status code defined in Table 48.
*/
int ecmd_submit_sync(struct intel_iommu *iommu, u8 ecmd, u64 oa, u64 ob)
{
unsigned long flags;
u64 res;
int ret;
if (!cap_ecmds(iommu->cap))
return -ENODEV;
raw_spin_lock_irqsave(&iommu->register_lock, flags);
res = dmar_readq(iommu->reg + DMAR_ECRSP_REG);
if (res & DMA_ECMD_ECRSP_IP) {
ret = -EBUSY;
goto err;
}
/*
* Unconditionally write the operand B, because
* - There is no side effect if an ecmd doesn't require an
* operand B, but we set the register to some value.
* - It's not invoked in any critical path. The extra MMIO
* write doesn't bring any performance concerns.
*/
dmar_writeq(iommu->reg + DMAR_ECEO_REG, ob);
dmar_writeq(iommu->reg + DMAR_ECMD_REG, ecmd | (oa << DMA_ECMD_OA_SHIFT));
IOMMU_WAIT_OP(iommu, DMAR_ECRSP_REG, dmar_readq,
!(res & DMA_ECMD_ECRSP_IP), res);
if (res & DMA_ECMD_ECRSP_IP) {
ret = -ETIMEDOUT;
goto err;
}
ret = ecmd_get_status_code(res);
err:
raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
return ret;
}