2019-05-27 08:55:05 +02:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
|
2005-04-16 15:20:36 -07:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
|
|
|
|
*
|
|
|
|
* Rewrite, cleanup, new allocation schemes, virtual merging:
|
|
|
|
* Copyright (C) 2004 Olof Johansson, IBM Corporation
|
|
|
|
* and Ben. Herrenschmidt, IBM Corporation
|
|
|
|
*
|
|
|
|
* Dynamic DMA mapping support, bus-independent parts.
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/spinlock.h>
|
|
|
|
#include <linux/string.h>
|
2025-02-10 23:42:44 +01:00
|
|
|
#include <linux/string_choices.h>
|
2005-04-16 15:20:36 -07:00
|
|
|
#include <linux/dma-mapping.h>
|
2009-12-15 16:48:28 -08:00
|
|
|
#include <linux/bitmap.h>
|
2008-02-04 22:28:08 -08:00
|
|
|
#include <linux/iommu-helper.h>
|
2008-10-22 15:39:04 -05:00
|
|
|
#include <linux/crash_dump.h>
|
2012-06-07 18:14:48 +00:00
|
|
|
#include <linux/hash.h>
|
2012-06-24 18:26:17 +00:00
|
|
|
#include <linux/fault-inject.h>
|
|
|
|
#include <linux/pci.h>
|
2013-05-21 13:33:09 +10:00
|
|
|
#include <linux/iommu.h>
|
|
|
|
#include <linux/sched.h>
|
2021-01-13 21:20:14 +11:00
|
|
|
#include <linux/debugfs.h>
|
2024-03-21 09:36:23 -07:00
|
|
|
#include <linux/vmalloc.h>
|
2005-04-16 15:20:36 -07:00
|
|
|
#include <asm/io.h>
|
|
|
|
#include <asm/iommu.h>
|
|
|
|
#include <asm/pci-bridge.h>
|
|
|
|
#include <asm/machdep.h>
|
2006-06-22 23:35:10 -07:00
|
|
|
#include <asm/kdump.h>
|
2012-02-20 02:15:03 +00:00
|
|
|
#include <asm/fadump.h>
|
2012-06-24 18:26:17 +00:00
|
|
|
#include <asm/vio.h>
|
2013-05-21 13:33:09 +10:00
|
|
|
#include <asm/tce.h>
|
2018-12-19 19:52:15 +11:00
|
|
|
#include <asm/mmu_context.h>
|
2023-03-06 11:31:00 -06:00
|
|
|
#include <asm/ppc-pci.h>
|
2005-04-16 15:20:36 -07:00
|
|
|
|
|
|
|
#define DBG(...)
|
|
|
|
|
2021-01-13 21:20:14 +11:00
|
|
|
#ifdef CONFIG_IOMMU_DEBUGFS
|
|
|
|
static int iommu_debugfs_weight_get(void *data, u64 *val)
|
|
|
|
{
|
|
|
|
struct iommu_table *tbl = data;
|
|
|
|
*val = bitmap_weight(tbl->it_map, tbl->it_size);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
DEFINE_DEBUGFS_ATTRIBUTE(iommu_debugfs_fops_weight, iommu_debugfs_weight_get, NULL, "%llu\n");
|
|
|
|
|
|
|
|
static void iommu_debugfs_add(struct iommu_table *tbl)
|
|
|
|
{
|
|
|
|
char name[10];
|
|
|
|
struct dentry *liobn_entry;
|
|
|
|
|
|
|
|
sprintf(name, "%08lx", tbl->it_index);
|
|
|
|
liobn_entry = debugfs_create_dir(name, iommu_debugfs_dir);
|
|
|
|
|
|
|
|
debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight);
|
|
|
|
debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size);
|
|
|
|
debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift);
|
|
|
|
debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start);
|
|
|
|
debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl->it_reserved_end);
|
|
|
|
debugfs_create_ulong("it_indirect_levels", 0400, liobn_entry, &tbl->it_indirect_levels);
|
|
|
|
debugfs_create_ulong("it_level_size", 0400, liobn_entry, &tbl->it_level_size);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void iommu_debugfs_del(struct iommu_table *tbl)
|
|
|
|
{
|
|
|
|
char name[10];
|
|
|
|
|
|
|
|
sprintf(name, "%08lx", tbl->it_index);
|
2023-02-02 15:19:19 +01:00
|
|
|
debugfs_lookup_and_remove(name, iommu_debugfs_dir);
|
2021-01-13 21:20:14 +11:00
|
|
|
}
|
|
|
|
#else
|
|
|
|
/* No-op stubs when CONFIG_IOMMU_DEBUGFS is disabled. */
static void iommu_debugfs_add(struct iommu_table *tbl){}
static void iommu_debugfs_del(struct iommu_table *tbl){}
|
|
|
|
#endif
|
|
|
|
|
2010-03-02 14:25:38 +00:00
|
|
|
/*
 * Boot-time switch: non-zero disables virtual merging of adjacent
 * scatterlist entries (set via the "iommu=" argument, see setup_iommu()).
 */
static int novmerge;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
|
|
|
|
|
2005-04-16 15:20:36 -07:00
|
|
|
static int __init setup_iommu(char *str)
|
|
|
|
{
|
|
|
|
if (!strcmp(str, "novmerge"))
|
|
|
|
novmerge = 1;
|
|
|
|
else if (!strcmp(str, "vmerge"))
|
|
|
|
novmerge = 0;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
__setup("iommu=", setup_iommu);
|
|
|
|
|
2012-06-07 18:14:48 +00:00
|
|
|
/* Per-CPU hash used by iommu_range_alloc() to pick a starting pool. */
static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);

/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. For example,
 * on a POWER7 with 4 way SMT we want interrupts on the primary threads and
 * with 4 pools all primary threads would map to the same pool.
 */
static int __init setup_iommu_pool_hash(void)
{
	unsigned int i;

	/* Seed every possible CPU's pool-selection hash once at boot. */
	for_each_possible_cpu(i)
		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

	return 0;
}
subsys_initcall(setup_iommu_pool_hash);
|
|
|
|
|
2012-06-24 18:26:17 +00:00
|
|
|
#ifdef CONFIG_FAIL_IOMMU
|
|
|
|
|
|
|
|
static DECLARE_FAULT_ATTR(fail_iommu);

/* Parse the "fail_iommu=" boot argument (standard fault-attr syntax). */
static int __init setup_fail_iommu(char *str)
{
	return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);
|
|
|
|
|
|
|
|
/*
 * True when fault injection is enabled for @dev (via its sysfs attribute)
 * and the global fault-attr decides this allocation should fail.
 */
static bool should_fail_iommu(struct device *dev)
{
	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}
|
|
|
|
|
|
|
|
/* Expose the fail_iommu fault-attr knobs under debugfs. */
static int __init fail_iommu_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
						       NULL, &fail_iommu);

	return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);
|
|
|
|
|
|
|
|
/* sysfs "fail_iommu" read: 1 if injection is enabled for this device. */
static ssize_t fail_iommu_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}
|
|
|
|
|
|
|
|
/*
 * sysfs "fail_iommu" write: any parseable integer enables (non-zero) or
 * disables (zero) fault injection for this device. Note: unparseable
 * input is silently ignored and the write still reports full success.
 */
static ssize_t fail_iommu_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	int i;

	if (count > 0 && sscanf(buf, "%d", &i) > 0)
		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;

	return count;
}

static DEVICE_ATTR_RW(fail_iommu);
|
2012-06-24 18:26:17 +00:00
|
|
|
|
|
|
|
static int fail_iommu_bus_notify(struct notifier_block *nb,
|
|
|
|
unsigned long action, void *data)
|
|
|
|
{
|
|
|
|
struct device *dev = data;
|
|
|
|
|
|
|
|
if (action == BUS_NOTIFY_ADD_DEVICE) {
|
|
|
|
if (device_create_file(dev, &dev_attr_fail_iommu))
|
|
|
|
pr_warn("Unable to create IOMMU fault injection sysfs "
|
|
|
|
"entries\n");
|
|
|
|
} else if (action == BUS_NOTIFY_DEL_DEVICE) {
|
|
|
|
device_remove_file(dev, &dev_attr_fail_iommu);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2023-03-22 14:53:22 +11:00
|
|
|
/*
|
|
|
|
* PCI and VIO buses need separate notifier_block structs, since they're linked
|
|
|
|
* list nodes. Sharing a notifier_block would mean that any notifiers later
|
|
|
|
* registered for PCI buses would also get called by VIO buses and vice versa.
|
|
|
|
*/
|
|
|
|
/* Notifier registered on the PCI bus (see fail_iommu_setup()). */
static struct notifier_block fail_iommu_pci_bus_notifier = {
	.notifier_call = fail_iommu_bus_notify
};

#ifdef CONFIG_IBMVIO
/* Separate instance for the VIO bus; notifier_blocks are list nodes. */
static struct notifier_block fail_iommu_vio_bus_notifier = {
	.notifier_call = fail_iommu_bus_notify
};
#endif
|
|
|
|
|
2012-06-24 18:26:17 +00:00
|
|
|
/* Register the fault-injection sysfs notifier on the PCI and VIO buses. */
static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
	bus_register_notifier(&pci_bus_type, &fail_iommu_pci_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
	bus_register_notifier(&vio_bus_type, &fail_iommu_vio_bus_notifier);
#endif

	return 0;
}
/*
 * Must execute after PCI and VIO subsystem have initialised but before
 * devices are probed.
 */
arch_initcall(fail_iommu_setup);
|
|
|
|
#else
|
|
|
|
/* Fault injection compiled out: never fail an IOMMU allocation. */
static inline bool should_fail_iommu(struct device *dev)
{
	return false;
}
|
|
|
|
#endif
|
|
|
|
|
2008-02-04 22:28:08 -08:00
|
|
|
/*
 * Allocate a run of @npages contiguous entries in @tbl's bitmap and
 * return the starting entry number (relative to the table), or
 * DMA_MAPPING_ERROR on exhaustion/failure.
 *
 * @handle: optional SG-allocation hint; on entry, where the previous
 *          allocation ended; updated on success.
 * @mask:   upper bound (in IOMMU pages) the device can address.
 * @align_order: required alignment of the allocation, as a power of two.
 *
 * The table is split into pools, each with its own lock, to reduce
 * contention; large (>15 page) allocations go to a dedicated pool.
 */
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long flags;
	unsigned int pool_nr;
	struct iommu_pool *pool;

	align_mask = (1ull << align_order) - 1;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_MAPPING_ERROR;
	}

	/* Optional fault injection (CONFIG_FAIL_IOMMU) */
	if (should_fail_iommu(dev))
		return DMA_MAPPING_ERROR;

	/*
	 * We don't need to disable preemption here because any CPU can
	 * safely use any IOMMU pool.
	 */
	pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);

	if (largealloc)
		pool = &(tbl->large_pool);
	else
		pool = &(tbl->pools[pool_nr]);

	spin_lock_irqsave(&(pool->lock), flags);

again:
	/* Only honour the caller's hint on the first pass and only if it
	 * lands inside the pool we currently hold. */
	if ((pass == 0) && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = pool->start;

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			/* Switch pools: drop this pool's lock, take pool 0's
			 * (the outer irqsave from the first lock persists). */
			spin_unlock(&(pool->lock));
			pool = &(tbl->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
			     dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift),
			     align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First try the pool from the start */
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass <= tbl->nr_pools) {
			/* Now try scanning all the other pools */
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
			pool = &tbl->pools[pool_nr];
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass == tbl->nr_pools + 1) {
			/* Last resort: try largepool */
			spin_unlock(&pool->lock);
			pool = &tbl->large_pool;
			spin_lock(&pool->lock);
			pool->hint = pool->start;
			pass++;
			goto again;

		} else {
			/* Give up */
			spin_unlock_irqrestore(&(pool->lock), flags);
			return DMA_MAPPING_ERROR;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		pool->hint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		pool->hint = (end + tbl->it_blocksize - 1) &
			     ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}
|
|
|
|
|
2008-02-04 22:28:08 -08:00
|
|
|
/*
 * Allocate @npages table entries for @page, program the HW TCE table via
 * tbl->it_ops->set(), and return the resulting DMA address (or
 * DMA_MAPPING_ERROR). On a transient ->set() failure the bitmap
 * reservation is rolled back.
 */
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      unsigned long attrs)
{
	unsigned long entry;
	dma_addr_t ret = DMA_MAPPING_ERROR;
	int build_fail;

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_MAPPING_ERROR))
		return DMA_MAPPING_ERROR;

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << tbl->it_page_shift;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = tbl->it_ops->set(tbl, entry, npages,
				      (unsigned long)page &
				      IOMMU_PAGE_MASK(tbl), direction, attrs);

	/* tbl->it_ops->set() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_MAPPING_ERROR. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);
		return DMA_MAPPING_ERROR;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}
|
|
|
|
|
2012-06-03 19:43:44 +00:00
|
|
|
/*
 * Validate that [dma_addr, dma_addr + npages pages) lies entirely inside
 * @tbl. Returns true when the range is valid; otherwise emits a
 * rate-limited diagnostic dump and returns false.
 */
static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
			     unsigned int npages)
{
	unsigned long entry = dma_addr >> tbl->it_page_shift;
	unsigned long free_entry = entry - tbl->it_offset;

	/* In-range: starts at or after the table offset and ends within it */
	if (entry >= tbl->it_offset && (free_entry + npages) <= tbl->it_size)
		return true;

	if (printk_ratelimit()) {
		printk(KERN_INFO "iommu_free: invalid entry\n");
		printk(KERN_INFO "\tentry     = 0x%lx\n", entry);
		printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
		printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
		printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
		printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
		printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
		printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
		WARN_ON(1);
	}

	return false;
}
|
|
|
|
|
2012-06-07 18:14:48 +00:00
|
|
|
static struct iommu_pool *get_pool(struct iommu_table *tbl,
|
|
|
|
unsigned long entry)
|
|
|
|
{
|
|
|
|
struct iommu_pool *p;
|
|
|
|
unsigned long largepool_start = tbl->large_pool.start;
|
|
|
|
|
|
|
|
/* The large pool is the last pool at the top of the table */
|
|
|
|
if (entry >= largepool_start) {
|
|
|
|
p = &tbl->large_pool;
|
|
|
|
} else {
|
|
|
|
unsigned int pool_nr = entry / tbl->poolsize;
|
|
|
|
|
|
|
|
BUG_ON(pool_nr > tbl->nr_pools);
|
|
|
|
p = &tbl->pools[pool_nr];
|
|
|
|
}
|
|
|
|
|
|
|
|
return p;
|
|
|
|
}
|
|
|
|
|
2012-06-03 19:43:44 +00:00
|
|
|
/*
 * Release @npages entries at @dma_addr: clear the HW TCEs and then the
 * bitmap bits under the owning pool's lock. Invalid ranges are rejected
 * (with a rate-limited warning) by iommu_free_check().
 */
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long flags;
	struct iommu_pool *pool;

	entry = dma_addr >> tbl->it_page_shift;
	free_entry = entry - tbl->it_offset;

	/*
	 * Validate the range BEFORE looking up the pool: an out-of-range
	 * dma_addr makes free_entry bogus (it may have underflowed), and
	 * feeding that to get_pool() could trip its BUG_ON() or index past
	 * pools[] instead of taking the graceful warning path.
	 */
	if (!iommu_free_check(tbl, dma_addr, npages))
		return;

	pool = get_pool(tbl, free_entry);

	tbl->it_ops->clear(tbl, entry, npages);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(tbl->it_map, free_entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}
|
|
|
|
|
|
|
|
/*
 * Free @npages entries at @dma_addr and flush the HW TLB if the
 * platform requires it.
 */
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		       unsigned int npages)
{
	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}
|
|
|
|
|
2014-11-05 15:28:30 +01:00
|
|
|
/*
 * Map a scatterlist of @nelems entries through @tbl, merging virtually
 * contiguous DMA ranges into fewer output segments unless "iommu=novmerge"
 * was given. Returns the number of mapped output segments, or a negative
 * errno; on failure all partially-built entries are torn down.
 */
int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		     struct scatterlist *sglist, int nelems,
		     unsigned long mask, enum dma_data_direction direction,
		     unsigned long attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return -EINVAL;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		/* Natural page-aligned segments of at least a kernel page
		 * get kernel-page alignment in IOMMU space too. */
		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> tbl->it_page_shift, align);

		DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_MAPPING_ERROR)) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << tbl->it_page_shift;
		/* Keep the sub-page offset of the original buffer */
		dma_addr |= (vaddr & ~IOMMU_PAGE_MASK(tbl));

		DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = tbl->it_ops->set(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK(tbl),
					      direction, attrs);
		if(unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG(" - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG(" can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG(" merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG(" - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG(" - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	/* Back out every entry mapped so far (up to and including outs) */
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE(tbl));
			__iommu_free(tbl, vaddr, npages);
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	return -EIO;
}
|
|
|
|
|
|
|
|
|
2014-11-05 15:28:30 +01:00
|
|
|
/*
 * Unmap a scatterlist previously mapped with ppc_iommu_map_sg().
 * Walks at most @nelems entries, stopping early at the first entry with
 * dma_length == 0 (the terminator ppc_iommu_map_sg() writes).
 */
void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
			int nelems, enum dma_data_direction direction,
			unsigned long attrs)
{
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE(tbl));
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);
}
|
|
|
|
|
2024-06-24 12:38:21 +00:00
|
|
|
/*
 * Prepare @tbl's TCE entries at init time. A normal boot (or a firmware
 * assisted dump) wipes whatever firmware left in the table; a kdump
 * kernel instead preserves the first kernel's live mappings by marking
 * them used in the bitmap, freeing a minimum number of entries for its
 * own use if the table is nearly full.
 */
void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * In case of firmware assisted dump system goes through clean
	 * reboot process at the time of system crash. Hence it's safe to
	 * clear the TCE entries if firmware assisted dump is active.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (tbl->it_ops->get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
			       KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
			     index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}
|
|
|
|
|
2024-06-24 12:38:21 +00:00
|
|
|
/*
 * Mark [res_start, res_end) — given in TCE index space — as reserved in
 * @tbl's allocation bitmap so it is never handed out for DMA mappings.
 * The range is clamped to the table bounds first; if nothing remains
 * after clamping, an empty reserved range is recorded. Page 0 is always
 * reserved to protect against drivers that treat DMA address 0 as
 * invalid.
 */
void iommu_table_reserve_pages(struct iommu_table *tbl,
		unsigned long res_start, unsigned long res_end)
{
	unsigned long i;

	WARN_ON_ONCE(res_end < res_start);
	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This avoids buggy drivers that consider page 0 to be invalid
	 * to crash the machine or even lose data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	/* Clamp the requested range to the table window */
	if (res_start < tbl->it_offset)
		res_start = tbl->it_offset;

	if (res_end > (tbl->it_offset + tbl->it_size))
		res_end = tbl->it_offset + tbl->it_size;

	/* Check if res_start..res_end is a valid range in the table */
	if (res_start >= res_end) {
		tbl->it_reserved_start = tbl->it_offset;
		tbl->it_reserved_end = tbl->it_offset;
		return;
	}

	tbl->it_reserved_start = res_start;
	tbl->it_reserved_end = res_end;

	/* Bitmap indices are relative to it_offset, hence the subtraction */
	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
		set_bit(i - tbl->it_offset, tbl->it_map);
}
|
|
|
|
|
2005-04-16 15:20:36 -07:00
|
|
|
/*
 * Build a iommu_table structure.  This contains a bit map which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
		unsigned long res_start, unsigned long res_end)
{
	unsigned long sz;
	/* Print the banner only once, on first table init. */
	static int welcomed = 0;
	unsigned int i;
	struct iommu_pool *p;

	/* Every table must come with backend callbacks. */
	BUG_ON(!tbl->it_ops);

	/* number of bytes needed for the bitmap */
	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	/* Zeroed allocation: all entries start out free. */
	tbl->it_map = vzalloc_node(sz, nid);
	if (!tbl->it_map) {
		pr_err("%s: Can't allocate %ld bytes\n", __func__, sz);
		return NULL;
	}

	/* Mark page 0 and the caller-supplied reserved range as in use. */
	iommu_table_reserve_pages(tbl, res_start, res_end);

	/* We only split the IOMMU table if we have 1GB or more of space */
	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
		tbl->nr_pools = IOMMU_NR_POOLS;
	else
		tbl->nr_pools = 1;

	/* We reserve the top 1/4 of the table for large allocations */
	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;

	/* Carve the lower 3/4 into equal, individually-locked pools. */
	for (i = 0; i < tbl->nr_pools; i++) {
		p = &tbl->pools[i];
		spin_lock_init(&(p->lock));
		p->start = tbl->poolsize * i;
		p->hint = p->start;
		p->end = p->start + tbl->poolsize;
	}

	/* The large pool covers everything above the regular pools,
	 * up to the end of the table (relies on i == nr_pools here). */
	p = &tbl->large_pool;
	spin_lock_init(&(p->lock));
	p->start = tbl->poolsize * i;
	p->hint = p->start;
	p->end = tbl->it_size;

	iommu_table_clear(tbl);

	if (!welcomed) {
		pr_info("IOMMU table initialized, virtual merging %s\n",
			str_disabled_enabled(novmerge));
		welcomed = 1;
	}

	iommu_debugfs_add(tbl);

	return tbl;
}
|
|
|
|
|
2021-08-17 03:39:20 -03:00
|
|
|
/*
 * Return true if any TCE entry in @tbl is currently allocated,
 * skipping the always-reserved bit 0 and the reserved MMIO32 window
 * (it_reserved_start..it_reserved_end), which do not count as "in use".
 */
bool iommu_table_in_use(struct iommu_table *tbl)
{
	unsigned long start = 0, end;

	/* ignore reserved bit0 */
	if (tbl->it_offset == 0)
		start = 1;

	/* Simple case with no reserved MMIO32 region */
	if (!tbl->it_reserved_start && !tbl->it_reserved_end)
		return find_next_bit(tbl->it_map, tbl->it_size, start) != tbl->it_size;

	/* Scan the window below the reserved range (bitmap indices are
	 * relative to it_offset). */
	end = tbl->it_reserved_start - tbl->it_offset;
	if (find_next_bit(tbl->it_map, end, start) != end)
		return true;

	/* Then scan from the end of the reserved range to the table end. */
	start = tbl->it_reserved_end - tbl->it_offset;
	end = tbl->it_size;
	return find_next_bit(tbl->it_map, end, start) != end;
}
|
|
|
|
|
2017-03-22 15:21:50 +11:00
|
|
|
/*
 * Final release of an iommu_table, invoked by kref_put() when the last
 * reference (see iommu_tce_table_put()) is dropped.
 */
static void iommu_table_free(struct kref *kref)
{
	struct iommu_table *tbl;

	tbl = container_of(kref, struct iommu_table, it_kref);

	/* Let the backend release its own resources first. */
	if (tbl->it_ops->free)
		tbl->it_ops->free(tbl);

	/* Tables without a bitmap have nothing else to tear down. */
	if (!tbl->it_map) {
		kfree(tbl);
		return;
	}

	iommu_debugfs_del(tbl);

	/* verify that table contains no entries */
	if (iommu_table_in_use(tbl))
		pr_warn("%s: Unexpected TCEs\n", __func__);

	/* free bitmap */
	vfree(tbl->it_map);

	/* free table */
	kfree(tbl);
}
|
2017-03-22 15:21:50 +11:00
|
|
|
|
|
|
|
struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
|
|
|
|
{
|
|
|
|
if (kref_get_unless_zero(&tbl->it_kref))
|
|
|
|
return tbl;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iommu_tce_table_get);
|
|
|
|
|
|
|
|
/*
 * Drop a reference on @tbl; frees the table via iommu_table_free() when
 * the last reference goes away.  Returns kref_put()'s result (non-zero
 * when the release ran), or 0 for a NULL table (with a warning).
 */
int iommu_tce_table_put(struct iommu_table *tbl)
{
	if (WARN_ON(!tbl))
		return 0;

	return kref_put(&tbl->it_kref, iommu_table_free);
}
|
|
|
|
EXPORT_SYMBOL_GPL(iommu_tce_table_put);
|
2005-04-16 15:20:36 -07:00
|
|
|
|
|
|
|
/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  unsigned long attrs)
{
	dma_addr_t dma_handle = DMA_MAPPING_ERROR;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;

	if (tbl) {
		npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
		/* If the IOMMU page is smaller than a system page and the
		 * buffer is page-aligned and at least a page long, ask the
		 * allocator for system-page alignment of the DMA window. */
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> tbl->it_page_shift, align,
					 attrs);
		if (dma_handle == DMA_MAPPING_ERROR) {
			if (!(attrs & DMA_ATTR_NO_WARN) &&
			    printk_ratelimit())  {
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %p npages %d\n", tbl, vaddr,
					 npages);
			}
		} else
			/* Re-apply the sub-page offset so the returned handle
			 * points at the same byte as the caller's address. */
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
	}

	return dma_handle;
}
|
|
|
|
|
2008-10-27 20:38:08 +00:00
|
|
|
void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
|
|
|
|
size_t size, enum dma_data_direction direction,
|
2016-08-03 13:46:00 -07:00
|
|
|
unsigned long attrs)
|
2005-04-16 15:20:36 -07:00
|
|
|
{
|
2006-10-30 16:15:59 +11:00
|
|
|
unsigned int npages;
|
|
|
|
|
2005-04-16 15:20:36 -07:00
|
|
|
BUG_ON(direction == DMA_NONE);
|
|
|
|
|
2006-10-30 16:15:59 +11:00
|
|
|
if (tbl) {
|
2013-12-09 18:17:03 +11:00
|
|
|
npages = iommu_num_pages(dma_handle, size,
|
|
|
|
IOMMU_PAGE_SIZE(tbl));
|
2006-10-30 16:15:59 +11:00
|
|
|
iommu_free(tbl, dma_handle, npages);
|
|
|
|
}
|
2005-04-16 15:20:36 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Allocates a contiguous real buffer and creates mappings over it.
|
|
|
|
* Returns the virtual address of the buffer and sets dma_handle
|
|
|
|
* to the dma address (mapping) of the first page.
|
|
|
|
*/
|
2008-02-04 22:28:08 -08:00
|
|
|
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
|
|
|
|
size_t size, dma_addr_t *dma_handle,
|
|
|
|
unsigned long mask, gfp_t flag, int node)
|
2005-04-16 15:20:36 -07:00
|
|
|
{
|
|
|
|
void *ret = NULL;
|
|
|
|
dma_addr_t mapping;
|
2006-10-30 16:15:59 +11:00
|
|
|
unsigned int order;
|
|
|
|
unsigned int nio_pages, io_order;
|
2006-06-06 16:11:35 +02:00
|
|
|
struct page *page;
|
2023-05-04 12:59:13 -05:00
|
|
|
int tcesize = (1 << tbl->it_page_shift);
|
2005-04-16 15:20:36 -07:00
|
|
|
|
|
|
|
size = PAGE_ALIGN(size);
|
|
|
|
order = get_order(size);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Client asked for way too much space. This is checked later
|
|
|
|
* anyway. It is easier to debug here for the drivers than in
|
|
|
|
* the tce tables.
|
|
|
|
*/
|
|
|
|
if (order >= IOMAP_MAX_ORDER) {
|
2010-12-07 14:36:05 +00:00
|
|
|
dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
|
|
|
|
size);
|
2005-04-16 15:20:36 -07:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!tbl)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/* Alloc enough pages (and possibly more) */
|
2006-06-10 18:17:35 +10:00
|
|
|
page = alloc_pages_node(node, flag, order);
|
2006-06-06 16:11:35 +02:00
|
|
|
if (!page)
|
2005-04-16 15:20:36 -07:00
|
|
|
return NULL;
|
2006-06-06 16:11:35 +02:00
|
|
|
ret = page_address(page);
|
2005-04-16 15:20:36 -07:00
|
|
|
memset(ret, 0, size);
|
|
|
|
|
|
|
|
/* Set up tces to cover the allocated range */
|
2023-05-04 12:59:13 -05:00
|
|
|
nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
|
|
|
|
|
2021-05-26 16:45:40 +02:00
|
|
|
io_order = get_iommu_order(size, tbl);
|
2008-02-04 22:28:08 -08:00
|
|
|
mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
|
2016-08-03 13:46:00 -07:00
|
|
|
mask >> tbl->it_page_shift, io_order, 0);
|
2018-11-21 18:56:25 +01:00
|
|
|
if (mapping == DMA_MAPPING_ERROR) {
|
2005-04-16 15:20:36 -07:00
|
|
|
free_pages((unsigned long)ret, order);
|
2006-06-06 16:11:35 +02:00
|
|
|
return NULL;
|
|
|
|
}
|
2023-05-04 12:59:13 -05:00
|
|
|
|
|
|
|
*dma_handle = mapping | ((u64)ret & (tcesize - 1));
|
2005-04-16 15:20:36 -07:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Free a buffer obtained from iommu_alloc_coherent(): release its TCE
 * entries and return the pages to the page allocator.  Does nothing for
 * a NULL table.
 */
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		/* Mirror the PAGE_ALIGN done at allocation time so the
		 * TCE count and page order match what was allocated.
		 * (The original re-applied PAGE_ALIGN a second time before
		 * free_pages(); PAGE_ALIGN is idempotent, so that was
		 * redundant and is dropped here.) */
		size = PAGE_ALIGN(size);
		nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}
|
2013-05-21 13:33:09 +10:00
|
|
|
|
2015-06-05 16:35:05 +10:00
|
|
|
unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
|
|
|
|
{
|
|
|
|
switch (dir) {
|
|
|
|
case DMA_BIDIRECTIONAL:
|
|
|
|
return TCE_PCI_READ | TCE_PCI_WRITE;
|
|
|
|
case DMA_FROM_DEVICE:
|
|
|
|
return TCE_PCI_WRITE;
|
|
|
|
case DMA_TO_DEVICE:
|
|
|
|
return TCE_PCI_READ;
|
|
|
|
default:
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);
|
|
|
|
|
2013-05-21 13:33:09 +10:00
|
|
|
#ifdef CONFIG_IOMMU_API
|
2024-06-24 12:39:10 +00:00
|
|
|
|
|
|
|
/*
 * Bus-iteration callback: report (via *@data, a struct pci_dev **)
 * the first device that is IOMMU-mapped.  Returns 1 to stop iteration
 * once a match is stored, 0 to continue.
 */
int dev_has_iommu_table(struct device *dev, void *data)
{
	struct pci_dev **ppdev = data;

	if (!dev)
		return 0;

	if (device_iommu_mapped(dev)) {
		/* Derive the pci_dev only after the NULL check (the
		 * original called to_pci_dev(dev) before checking dev). */
		*ppdev = to_pci_dev(dev);
		return 1;
	}

	return 0;
}
|
|
|
|
|
2013-05-21 13:33:09 +10:00
|
|
|
/*
|
|
|
|
* SPAPR TCE API
|
|
|
|
*/
|
|
|
|
static void group_release(void *iommu_data)
|
|
|
|
{
|
2015-06-05 16:35:08 +10:00
|
|
|
struct iommu_table_group *table_group = iommu_data;
|
|
|
|
|
|
|
|
table_group->group = NULL;
|
2013-05-21 13:33:09 +10:00
|
|
|
}
|
|
|
|
|
2015-06-05 16:35:08 +10:00
|
|
|
/*
 * Allocate an iommu_group for @table_group, link the two together and
 * name the group "domain<N>-pe<M>".  Failures are logged or silently
 * tolerated (the group just keeps its default name if kasprintf fails).
 */
void iommu_register_group(struct iommu_table_group *table_group,
		int pci_domain_number, unsigned long pe_num)
{
	struct iommu_group *grp;
	char *name;

	grp = iommu_group_alloc();
	if (IS_ERR(grp)) {
		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
				PTR_ERR(grp));
		return;
	}
	/* group_release() clears this back-pointer when the group dies. */
	table_group->group = grp;
	iommu_group_set_iommudata(grp, table_group, group_release);
	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
			pci_domain_number, pe_num);
	if (!name)
		return;
	/* iommu_group_set_name() copies the string, so free ours. */
	iommu_group_set_name(grp, name);
	kfree(name);
}
|
|
|
|
|
|
|
|
enum dma_data_direction iommu_tce_direction(unsigned long tce)
|
|
|
|
{
|
|
|
|
if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
|
|
|
|
return DMA_BIDIRECTIONAL;
|
|
|
|
else if (tce & TCE_PCI_READ)
|
|
|
|
return DMA_TO_DEVICE;
|
|
|
|
else if (tce & TCE_PCI_WRITE)
|
|
|
|
return DMA_FROM_DEVICE;
|
|
|
|
else
|
|
|
|
return DMA_NONE;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(iommu_tce_direction);
|
|
|
|
|
|
|
|
/*
 * Flush TCE updates: invalidate backend TLB caches (if the backend has
 * a flush hook) and order the preceding table writes before any later
 * hardware access.
 */
void iommu_flush_tce(struct iommu_table *tbl)
{
	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();
}
|
|
|
|
EXPORT_SYMBOL_GPL(iommu_flush_tce);
|
|
|
|
|
2017-03-22 15:21:55 +11:00
|
|
|
/*
 * Validate an I/O bus address range against a DMA window.
 *
 * @page_shift: IOMMU page shift of the window
 * @offset, @size: window start and length, in IOMMU pages
 * @ioba: I/O bus address (bytes) of the first page of the range
 * @npages: number of IOMMU pages in the range
 *
 * Returns 0 if @ioba is page-aligned and [ioba, ioba + npages) lies
 * inside the window, -EINVAL otherwise.
 *
 * The original checked only (ioba + 1) against the window end, leaving
 * @npages unused, so a multi-page range could extend past the window;
 * the full range is validated here.
 */
int iommu_tce_check_ioba(unsigned long page_shift,
		unsigned long offset, unsigned long size,
		unsigned long ioba, unsigned long npages)
{
	unsigned long mask = (1UL << page_shift) - 1;

	if (ioba & mask)
		return -EINVAL;

	/* Work in IOMMU-page units from here on. */
	ioba >>= page_shift;
	if (ioba < offset)
		return -EINVAL;

	if ((ioba + npages) > (offset + size))
		return -EINVAL;

	return 0;
}
|
2017-03-22 15:21:55 +11:00
|
|
|
EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);
|
2013-05-21 13:33:09 +10:00
|
|
|
|
2017-03-22 15:21:55 +11:00
|
|
|
/*
 * Validate that a guest physical address is aligned to the IOMMU page
 * size given by @page_shift.  Returns 0 if aligned, -EINVAL otherwise.
 */
int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
{
	return (gpa & ((1UL << page_shift) - 1)) ? -EINVAL : 0;
}
|
2017-03-22 15:21:55 +11:00
|
|
|
EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
|
2013-05-21 13:33:09 +10:00
|
|
|
|
2023-10-11 16:37:04 +11:00
|
|
|
/*
 * Exchange the TCE at @entry with *@hpa / *@direction via the backend's
 * xchg_no_kill hook (no TCE-kill/invalidate is issued here; the caller
 * must follow up with iommu_tce_kill()).  On return, *hpa and *direction
 * describe the previous mapping.  If the device could have written to
 * the old page (FROM_DEVICE or BIDIRECTIONAL) and it is regular memory
 * (not devmem per mm_iommu_is_devmem()), mark that page dirty.
 */
long iommu_tce_xchg_no_kill(struct mm_struct *mm,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction)
{
	long ret;
	unsigned long size = 0;

	ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction);
	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
			(*direction == DMA_BIDIRECTIONAL)) &&
			!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
					&size))
		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));

	return ret;
}
|
2019-08-29 18:52:48 +10:00
|
|
|
EXPORT_SYMBOL_GPL(iommu_tce_xchg_no_kill);
|
|
|
|
|
|
|
|
/*
 * Invalidate @pages TCE entries starting at @entry through the backend's
 * tce_kill hook; a no-op for backends without one.  Pairs with
 * iommu_tce_xchg_no_kill().
 */
void iommu_tce_kill(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	if (tbl->it_ops->tce_kill)
		tbl->it_ops->tce_kill(tbl, entry, pages);
}
|
|
|
|
EXPORT_SYMBOL_GPL(iommu_tce_kill);
|
2013-05-21 13:33:09 +10:00
|
|
|
|
2018-12-19 19:52:21 +11:00
|
|
|
/*
 * Attach @dev to the IOMMU subsystem for @table_group.
 * Returns -ENOENT if the device is not yet registered with sysfs,
 * -EBUSY if it is already IOMMU-mapped, otherwise the result of
 * iommu_probe_device().
 */
int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
{
	/*
	 * The sysfs entries should be populated before
	 * binding IOMMU group. If sysfs entries isn't
	 * ready, we simply bail.
	 */
	if (!device_is_registered(dev))
		return -ENOENT;

	if (device_iommu_mapped(dev)) {
		pr_debug("%s: Skipping device %s with iommu group %d\n",
			 __func__, dev_name(dev),
			 iommu_group_id(dev->iommu_group));
		return -EBUSY;
	}

	pr_debug("%s: Adding %s to iommu group %d\n",
		 __func__, dev_name(dev), iommu_group_id(table_group->group));
	/*
	 * This is still not adding devices via the IOMMU bus notifier because
	 * of pcibios_init() from arch/powerpc/kernel/pci_64.c which calls
	 * pcibios_scan_phb() first (and this guy adds devices and triggers
	 * the notifier) and only then it calls pci_bus_add_devices() which
	 * configures DMA for buses which also creates PEs and IOMMU groups.
	 */
	return iommu_probe_device(dev);
}
|
2013-11-21 17:43:14 +11:00
|
|
|
EXPORT_SYMBOL_GPL(iommu_add_device);
|
2013-05-21 13:33:09 +10:00
|
|
|
|
2023-06-05 13:48:56 -05:00
|
|
|
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
|
2023-03-06 11:31:00 -06:00
|
|
|
/*
|
|
|
|
* A simple iommu_ops to allow less cruft in generic VFIO code.
|
|
|
|
*/
|
powerpc/iommu: Do not do platform domain attach atctions after probe
POWER throws a splat at boot, it looks like the DMA ops were probably
changed while a driver was attached. Something is still weird about how
power sequences its bootup. Previously this was hidden since the core
iommu code did nothing during probe, now it calls
spapr_tce_platform_iommu_attach_dev().
Make spapr_tce_platform_iommu_attach_dev() do nothing on the probe time
call like it did before.
WARNING: CPU: 0 PID: 8 at arch/powerpc/kernel/iommu.c:407 __iommu_free+0x1e4/0x1f0
Modules linked in: sd_mod t10_pi crc64_rocksoft crc64 sg ibmvfc mlx5_core(+) scsi_transport_fc ibmveth mlxfw psample dm_multipath dm_mirror dm_region_hash dm_log dm_mod fuse
CPU: 0 PID: 8 Comm: kworker/0:0 Not tainted 6.6.0-rc3-next-20230929-auto #1
Hardware name: IBM,9080-HEX POWER10 (raw) 0x800200 0xf000006 of:IBM,FW1030.30 (NH1030_062) hv:phyp pSeries
Workqueue: events work_for_cpu_fn
NIP: c00000000005f6d4 LR: c00000000005f6d0 CTR: 00000000005ca81c
REGS: c000000003a27890 TRAP: 0700 Not tainted (6.6.0-rc3-next-20230929-auto)
MSR: 800000000282b033 <SF,VEC,VSX,EE,FP,ME,IR,DR,RI,LE> CR: 48000824 XER: 00000008
CFAR: c00000000020f738 IRQMASK: 0
GPR00: c00000000005f6d0 c000000003a27b30 c000000001481800 000000000000017
GPR04: 00000000ffff7fff c000000003a27950 c000000003a27948 0000000000000027
GPR08: c000000c18c07c10 0000000000000001 0000000000000027 c000000002ac8a08
GPR12: 0000000000000000 c000000002ff0000 c00000000019cc88 c000000003042300
GPR16: 0000000000000000 0000000000000000 0000000000000000 c000000003071ab0
GPR20: c00000000349f80d c000000003215440 c000000003215480 61c8864680b583eb
GPR24: 0000000000000000 000000007fffffff 0800000020000000 0000000000000010
GPR28: 0000000000020000 0000800000020000 c00000000c5dc800 c00000000c5dc880
NIP [c00000000005f6d4] __iommu_free+0x1e4/0x1f0
LR [c00000000005f6d0] __iommu_free+0x1e0/0x1f0
Call Trace:
[c000000003a27b30] [c00000000005f6d0] __iommu_free+0x1e0/0x1f0 (unreliable)
[c000000003a27bc0] [c00000000005f848] iommu_free+0x28/0x70
[c000000003a27bf0] [c000000000061518] iommu_free_coherent+0x68/0xa0
[c000000003a27c20] [c00000000005e8d4] dma_iommu_free_coherent+0x24/0x40
[c000000003a27c40] [c00000000024698c] dma_free_attrs+0x10c/0x140
[c000000003a27c90] [c008000000dcb8d4] mlx5_cmd_cleanup+0x5c/0x90 [mlx5_core]
[c000000003a27cc0] [c008000000dc45a0] mlx5_mdev_uninit+0xc8/0x100 [mlx5_core]
[c000000003a27d00] [c008000000dc4ac4] probe_one+0x3ec/0x530 [mlx5_core]
[c000000003a27d90] [c0000000008c5edc] local_pci_probe+0x6c/0x110
[c000000003a27e10] [c000000000189c98] work_for_cpu_fn+0x38/0x60
[c000000003a27e40] [c00000000018d1d0] process_scheduled_works+0x230/0x4f0
[c000000003a27f10] [c00000000018ff14] worker_thread+0x1e4/0x500
[c000000003a27f90] [c00000000019cdb8] kthread+0x138/0x140
[c000000003a27fe0] [c00000000000df98] start_kernel_thread+0x14/0x18
Code: 481b004d 60000000 e89e0028 3c62ffe0 3863dd20 481b0039 60000000 e89e0038 3c62ffe0 3863dd38 481b0025 60000000 <0fe00000> 4bffff20 60000000 3c4c0142
---[ end trace 0000000000000000 ]---
iommu_free: invalid entry
entry = 0x8000000203d0
dma_addr = 0x8000000203d0000
Table = 0xc00000000c5dc800
bus# = 0x1
size = 0x20000
startOff = 0x800000000000
index = 0x70200016
Fixes: 2ad56efa80db ("powerpc/iommu: Setup a default domain and remove set_platform_dma_ops")
Reported-by: Tasmiya Nalatwad <tasmiya@linux.vnet.ibm.com>
Link: https://lore.kernel.org/r/d06cee81-c47f-9d62-dfc6-4c77b60058db@linux.vnet.ibm.com
Tested-by: Tasmiya Nalatwad <tasmiya@linux.vnet.ibm.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/0-v1-2b52423411b9+164fc-iommu_ppc_defdomain_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
2023-10-05 10:35:11 -03:00
|
|
|
static int
|
|
|
|
spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain,
|
|
|
|
struct device *dev)
|
2023-03-06 11:31:00 -06:00
|
|
|
{
|
powerpc/iommu: Do not do platform domain attach atctions after probe
POWER throws a splat at boot, it looks like the DMA ops were probably
changed while a driver was attached. Something is still weird about how
power sequences its bootup. Previously this was hidden since the core
iommu code did nothing during probe, now it calls
spapr_tce_platform_iommu_attach_dev().
Make spapr_tce_platform_iommu_attach_dev() do nothing on the probe time
call like it did before.
WARNING: CPU: 0 PID: 8 at arch/powerpc/kernel/iommu.c:407 __iommu_free+0x1e4/0x1f0
Modules linked in: sd_mod t10_pi crc64_rocksoft crc64 sg ibmvfc mlx5_core(+) scsi_transport_fc ibmveth mlxfw psample dm_multipath dm_mirror dm_region_hash dm_log dm_mod fuse
CPU: 0 PID: 8 Comm: kworker/0:0 Not tainted 6.6.0-rc3-next-20230929-auto #1
Hardware name: IBM,9080-HEX POWER10 (raw) 0x800200 0xf000006 of:IBM,FW1030.30 (NH1030_062) hv:phyp pSeries
Workqueue: events work_for_cpu_fn
NIP: c00000000005f6d4 LR: c00000000005f6d0 CTR: 00000000005ca81c
REGS: c000000003a27890 TRAP: 0700 Not tainted (6.6.0-rc3-next-20230929-auto)
MSR: 800000000282b033 <SF,VEC,VSX,EE,FP,ME,IR,DR,RI,LE> CR: 48000824 XER: 00000008
CFAR: c00000000020f738 IRQMASK: 0
GPR00: c00000000005f6d0 c000000003a27b30 c000000001481800 000000000000017
GPR04: 00000000ffff7fff c000000003a27950 c000000003a27948 0000000000000027
GPR08: c000000c18c07c10 0000000000000001 0000000000000027 c000000002ac8a08
GPR12: 0000000000000000 c000000002ff0000 c00000000019cc88 c000000003042300
GPR16: 0000000000000000 0000000000000000 0000000000000000 c000000003071ab0
GPR20: c00000000349f80d c000000003215440 c000000003215480 61c8864680b583eb
GPR24: 0000000000000000 000000007fffffff 0800000020000000 0000000000000010
GPR28: 0000000000020000 0000800000020000 c00000000c5dc800 c00000000c5dc880
NIP [c00000000005f6d4] __iommu_free+0x1e4/0x1f0
LR [c00000000005f6d0] __iommu_free+0x1e0/0x1f0
Call Trace:
[c000000003a27b30] [c00000000005f6d0] __iommu_free+0x1e0/0x1f0 (unreliable)
[c000000003a27bc0] [c00000000005f848] iommu_free+0x28/0x70
[c000000003a27bf0] [c000000000061518] iommu_free_coherent+0x68/0xa0
[c000000003a27c20] [c00000000005e8d4] dma_iommu_free_coherent+0x24/0x40
[c000000003a27c40] [c00000000024698c] dma_free_attrs+0x10c/0x140
[c000000003a27c90] [c008000000dcb8d4] mlx5_cmd_cleanup+0x5c/0x90 [mlx5_core]
[c000000003a27cc0] [c008000000dc45a0] mlx5_mdev_uninit+0xc8/0x100 [mlx5_core]
[c000000003a27d00] [c008000000dc4ac4] probe_one+0x3ec/0x530 [mlx5_core]
[c000000003a27d90] [c0000000008c5edc] local_pci_probe+0x6c/0x110
[c000000003a27e10] [c000000000189c98] work_for_cpu_fn+0x38/0x60
[c000000003a27e40] [c00000000018d1d0] process_scheduled_works+0x230/0x4f0
[c000000003a27f10] [c00000000018ff14] worker_thread+0x1e4/0x500
[c000000003a27f90] [c00000000019cdb8] kthread+0x138/0x140
[c000000003a27fe0] [c00000000000df98] start_kernel_thread+0x14/0x18
Code: 481b004d 60000000 e89e0028 3c62ffe0 3863dd20 481b0039 60000000 e89e0038 3c62ffe0 3863dd38 481b0025 60000000 <0fe00000> 4bffff20 60000000 3c4c0142
---[ end trace 0000000000000000 ]---
iommu_free: invalid entry
entry = 0x8000000203d0
dma_addr = 0x8000000203d0000
Table = 0xc00000000c5dc800
bus# = 0x1
size = 0x20000
startOff = 0x800000000000
index = 0x70200016
Fixes: 2ad56efa80db ("powerpc/iommu: Setup a default domain and remove set_platform_dma_ops")
Reported-by: Tasmiya Nalatwad <tasmiya@linux.vnet.ibm.com>
Link: https://lore.kernel.org/r/d06cee81-c47f-9d62-dfc6-4c77b60058db@linux.vnet.ibm.com
Tested-by: Tasmiya Nalatwad <tasmiya@linux.vnet.ibm.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/0-v1-2b52423411b9+164fc-iommu_ppc_defdomain_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
2023-10-05 10:35:11 -03:00
|
|
|
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
|
2023-03-06 11:31:00 -06:00
|
|
|
struct iommu_table_group *table_group;
|
2024-02-15 07:52:32 -06:00
|
|
|
struct iommu_group *grp;
|
2023-03-06 11:31:00 -06:00
|
|
|
|
powerpc/iommu: Do not do platform domain attach atctions after probe
POWER throws a splat at boot, it looks like the DMA ops were probably
changed while a driver was attached. Something is still weird about how
power sequences its bootup. Previously this was hidden since the core
iommu code did nothing during probe, now it calls
spapr_tce_platform_iommu_attach_dev().
Make spapr_tce_platform_iommu_attach_dev() do nothing on the probe time
call like it did before.
WARNING: CPU: 0 PID: 8 at arch/powerpc/kernel/iommu.c:407 __iommu_free+0x1e4/0x1f0
Modules linked in: sd_mod t10_pi crc64_rocksoft crc64 sg ibmvfc mlx5_core(+) scsi_transport_fc ibmveth mlxfw psample dm_multipath dm_mirror dm_region_hash dm_log dm_mod fuse
CPU: 0 PID: 8 Comm: kworker/0:0 Not tainted 6.6.0-rc3-next-20230929-auto #1
Hardware name: IBM,9080-HEX POWER10 (raw) 0x800200 0xf000006 of:IBM,FW1030.30 (NH1030_062) hv:phyp pSeries
Workqueue: events work_for_cpu_fn
NIP: c00000000005f6d4 LR: c00000000005f6d0 CTR: 00000000005ca81c
REGS: c000000003a27890 TRAP: 0700 Not tainted (6.6.0-rc3-next-20230929-auto)
MSR: 800000000282b033 <SF,VEC,VSX,EE,FP,ME,IR,DR,RI,LE> CR: 48000824 XER: 00000008
CFAR: c00000000020f738 IRQMASK: 0
GPR00: c00000000005f6d0 c000000003a27b30 c000000001481800 000000000000017
GPR04: 00000000ffff7fff c000000003a27950 c000000003a27948 0000000000000027
GPR08: c000000c18c07c10 0000000000000001 0000000000000027 c000000002ac8a08
GPR12: 0000000000000000 c000000002ff0000 c00000000019cc88 c000000003042300
GPR16: 0000000000000000 0000000000000000 0000000000000000 c000000003071ab0
GPR20: c00000000349f80d c000000003215440 c000000003215480 61c8864680b583eb
GPR24: 0000000000000000 000000007fffffff 0800000020000000 0000000000000010
GPR28: 0000000000020000 0000800000020000 c00000000c5dc800 c00000000c5dc880
NIP [c00000000005f6d4] __iommu_free+0x1e4/0x1f0
LR [c00000000005f6d0] __iommu_free+0x1e0/0x1f0
Call Trace:
[c000000003a27b30] [c00000000005f6d0] __iommu_free+0x1e0/0x1f0 (unreliable)
[c000000003a27bc0] [c00000000005f848] iommu_free+0x28/0x70
[c000000003a27bf0] [c000000000061518] iommu_free_coherent+0x68/0xa0
[c000000003a27c20] [c00000000005e8d4] dma_iommu_free_coherent+0x24/0x40
[c000000003a27c40] [c00000000024698c] dma_free_attrs+0x10c/0x140
[c000000003a27c90] [c008000000dcb8d4] mlx5_cmd_cleanup+0x5c/0x90 [mlx5_core]
[c000000003a27cc0] [c008000000dc45a0] mlx5_mdev_uninit+0xc8/0x100 [mlx5_core]
[c000000003a27d00] [c008000000dc4ac4] probe_one+0x3ec/0x530 [mlx5_core]
[c000000003a27d90] [c0000000008c5edc] local_pci_probe+0x6c/0x110
[c000000003a27e10] [c000000000189c98] work_for_cpu_fn+0x38/0x60
[c000000003a27e40] [c00000000018d1d0] process_scheduled_works+0x230/0x4f0
[c000000003a27f10] [c00000000018ff14] worker_thread+0x1e4/0x500
[c000000003a27f90] [c00000000019cdb8] kthread+0x138/0x140
[c000000003a27fe0] [c00000000000df98] start_kernel_thread+0x14/0x18
Code: 481b004d 60000000 e89e0028 3c62ffe0 3863dd20 481b0039 60000000 e89e0038 3c62ffe0 3863dd38 481b0025 60000000 <0fe00000> 4bffff20 60000000 3c4c0142
---[ end trace 0000000000000000 ]---
iommu_free: invalid entry
entry = 0x8000000203d0
dma_addr = 0x8000000203d0000
Table = 0xc00000000c5dc800
bus# = 0x1
size = 0x20000
startOff = 0x800000000000
index = 0x70200016
Fixes: 2ad56efa80db ("powerpc/iommu: Setup a default domain and remove set_platform_dma_ops")
Reported-by: Tasmiya Nalatwad <tasmiya@linux.vnet.ibm.com>
Link: https://lore.kernel.org/r/d06cee81-c47f-9d62-dfc6-4c77b60058db@linux.vnet.ibm.com
Tested-by: Tasmiya Nalatwad <tasmiya@linux.vnet.ibm.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/0-v1-2b52423411b9+164fc-iommu_ppc_defdomain_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
2023-10-05 10:35:11 -03:00
|
|
|
/* At first attach the ownership is already set */
|
2024-02-15 07:52:32 -06:00
|
|
|
if (!domain)
|
powerpc/iommu: Do not do platform domain attach atctions after probe
POWER throws a splat at boot, it looks like the DMA ops were probably
changed while a driver was attached. Something is still weird about how
power sequences its bootup. Previously this was hidden since the core
iommu code did nothing during probe, now it calls
spapr_tce_platform_iommu_attach_dev().
Make spapr_tce_platform_iommu_attach_dev() do nothing on the probe time
call like it did before.
WARNING: CPU: 0 PID: 8 at arch/powerpc/kernel/iommu.c:407 __iommu_free+0x1e4/0x1f0
Modules linked in: sd_mod t10_pi crc64_rocksoft crc64 sg ibmvfc mlx5_core(+) scsi_transport_fc ibmveth mlxfw psample dm_multipath dm_mirror dm_region_hash dm_log dm_mod fuse
CPU: 0 PID: 8 Comm: kworker/0:0 Not tainted 6.6.0-rc3-next-20230929-auto #1
Hardware name: IBM,9080-HEX POWER10 (raw) 0x800200 0xf000006 of:IBM,FW1030.30 (NH1030_062) hv:phyp pSeries
Workqueue: events work_for_cpu_fn
NIP: c00000000005f6d4 LR: c00000000005f6d0 CTR: 00000000005ca81c
REGS: c000000003a27890 TRAP: 0700 Not tainted (6.6.0-rc3-next-20230929-auto)
MSR: 800000000282b033 <SF,VEC,VSX,EE,FP,ME,IR,DR,RI,LE> CR: 48000824 XER: 00000008
CFAR: c00000000020f738 IRQMASK: 0
GPR00: c00000000005f6d0 c000000003a27b30 c000000001481800 000000000000017
GPR04: 00000000ffff7fff c000000003a27950 c000000003a27948 0000000000000027
GPR08: c000000c18c07c10 0000000000000001 0000000000000027 c000000002ac8a08
GPR12: 0000000000000000 c000000002ff0000 c00000000019cc88 c000000003042300
GPR16: 0000000000000000 0000000000000000 0000000000000000 c000000003071ab0
GPR20: c00000000349f80d c000000003215440 c000000003215480 61c8864680b583eb
GPR24: 0000000000000000 000000007fffffff 0800000020000000 0000000000000010
GPR28: 0000000000020000 0000800000020000 c00000000c5dc800 c00000000c5dc880
NIP [c00000000005f6d4] __iommu_free+0x1e4/0x1f0
LR [c00000000005f6d0] __iommu_free+0x1e0/0x1f0
Call Trace:
[c000000003a27b30] [c00000000005f6d0] __iommu_free+0x1e0/0x1f0 (unreliable)
[c000000003a27bc0] [c00000000005f848] iommu_free+0x28/0x70
[c000000003a27bf0] [c000000000061518] iommu_free_coherent+0x68/0xa0
[c000000003a27c20] [c00000000005e8d4] dma_iommu_free_coherent+0x24/0x40
[c000000003a27c40] [c00000000024698c] dma_free_attrs+0x10c/0x140
[c000000003a27c90] [c008000000dcb8d4] mlx5_cmd_cleanup+0x5c/0x90 [mlx5_core]
[c000000003a27cc0] [c008000000dc45a0] mlx5_mdev_uninit+0xc8/0x100 [mlx5_core]
[c000000003a27d00] [c008000000dc4ac4] probe_one+0x3ec/0x530 [mlx5_core]
[c000000003a27d90] [c0000000008c5edc] local_pci_probe+0x6c/0x110
[c000000003a27e10] [c000000000189c98] work_for_cpu_fn+0x38/0x60
[c000000003a27e40] [c00000000018d1d0] process_scheduled_works+0x230/0x4f0
[c000000003a27f10] [c00000000018ff14] worker_thread+0x1e4/0x500
[c000000003a27f90] [c00000000019cdb8] kthread+0x138/0x140
[c000000003a27fe0] [c00000000000df98] start_kernel_thread+0x14/0x18
Code: 481b004d 60000000 e89e0028 3c62ffe0 3863dd20 481b0039 60000000 e89e0038 3c62ffe0 3863dd38 481b0025 60000000 <0fe00000> 4bffff20 60000000 3c4c0142
---[ end trace 0000000000000000 ]---
iommu_free: invalid entry
entry = 0x8000000203d0
dma_addr = 0x8000000203d0000
Table = 0xc00000000c5dc800
bus# = 0x1
size = 0x20000
startOff = 0x800000000000
index = 0x70200016
Fixes: 2ad56efa80db ("powerpc/iommu: Setup a default domain and remove set_platform_dma_ops")
Reported-by: Tasmiya Nalatwad <tasmiya@linux.vnet.ibm.com>
Link: https://lore.kernel.org/r/d06cee81-c47f-9d62-dfc6-4c77b60058db@linux.vnet.ibm.com
Tested-by: Tasmiya Nalatwad <tasmiya@linux.vnet.ibm.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/0-v1-2b52423411b9+164fc-iommu_ppc_defdomain_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
2023-10-05 10:35:11 -03:00
|
|
|
return 0;
|
|
|
|
|
2024-02-15 07:52:32 -06:00
|
|
|
grp = iommu_group_get(dev);
|
2023-03-06 11:31:00 -06:00
|
|
|
table_group = iommu_group_get_iommudata(grp);
|
2024-01-26 09:09:18 -06:00
|
|
|
/*
|
|
|
|
* The domain being set to PLATFORM from earlier
|
|
|
|
* BLOCKED. The table_group ownership has to be released.
|
|
|
|
*/
|
powerpc/iommu: Reimplement the iommu_table_group_ops for pSeries
PPC64 IOMMU API defines iommu_table_group_ops which handles DMA
windows for PEs, their ownership transfer, create/set/unset the TCE
tables for the Dynamic DMA wundows(DDW). VFIOS uses these APIs for
support on POWER.
The commit 9d67c9433509 ("powerpc/iommu: Add "borrowing"
iommu_table_group_ops") implemented partial support for this API with
"borrow" mechanism wherein the DMA windows if created already by the
host driver, they would be available for VFIO to use. Also, it didn't
have the support to control/modify the window size or the IO page
size.
The current patch implements all the necessary iommu_table_group_ops
APIs there by avoiding the "borrrowing". So, just the way it is on the
PowerNV platform, with this patch the iommu table group ownership is
transferred to the VFIO PPC subdriver, the iommu table, DMA windows
creation/deletion all driven through the APIs.
The pSeries uses the query-pe-dma-window, create-pe-dma-window and
reset-pe-dma-window RTAS calls for DMA window creation, deletion and
reset to defaul. The RTAs calls do show some minor differences to the
way things are to be handled on the pSeries which are listed below.
* On pSeries, the default DMA window size is "fixed" cannot be custom
sized as requested by the user. For non-SRIOV VFs, It is fixed at 2GB
and for SRIOV VFs, its variable sized based on the capacity assigned
to it during the VF assignment to the LPAR. So, for the default DMA
window alone the size if requested less than tce32_size, the smaller
size is enforced using the iommu table->it_size.
* The DMA start address for 32-bit window is 0, and for the 64-bit
window in case of PowerNV is hardcoded to TVE select (bit 59) at 512PiB
offset. This address is returned at the time of create_table() API call
(even before the window is created), the subsequent set_window() call
actually opens the DMA window. On pSeries, the DMA start address for
32-bit window is known from the 'ibm,dma-window' DT property. However,
the 64-bit window start address is not known until the create-pe-dma
RTAS call is made. So, the create_table() which returns the DMA window
start address actually opens the DMA window and returns the DMA start
address as returned by the Hypervisor for the create-pe-dma RTAS call.
* The reset-pe-dma RTAS call resets the DMA windows and restores the
default DMA window, however it does not clear the TCE table entries
if there are any. In case of ownership transfer from platform domain
which used direct mapping, the patch chooses remove-pe-dma instead of
reset-pe for the 64-bit window intentionally so that the
clear_dma_window() is called.
Other than the DMA window management changes mentioned above, the
patch also brings back the userspace view for the single level TCE
as it existed before commit 090bad39b237a ("powerpc/powernv: Add
indirect levels to it_userspace") along with the relavent
refactoring.
Signed-off-by: Shivaprasad G Bhat <sbhat@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/171923275958.1397.907964437142542242.stgit@linux.ibm.com
2024-06-24 12:39:23 +00:00
|
|
|
table_group->ops->release_ownership(table_group, dev);
|
2023-03-06 11:31:00 -06:00
|
|
|
iommu_group_put(grp);
|
|
|
|
|
2024-01-26 09:09:18 -06:00
|
|
|
return 0;
|
2023-03-06 11:31:00 -06:00
|
|
|
}
|
|
|
|
|
2023-09-13 10:43:36 -03:00
|
|
|
static const struct iommu_domain_ops spapr_tce_platform_domain_ops = {
|
|
|
|
.attach_dev = spapr_tce_platform_iommu_attach_dev,
|
|
|
|
};
|
2023-03-06 11:31:00 -06:00
|
|
|
|
2023-09-13 10:43:36 -03:00
|
|
|
static struct iommu_domain spapr_tce_platform_domain = {
|
|
|
|
.type = IOMMU_DOMAIN_PLATFORM,
|
|
|
|
.ops = &spapr_tce_platform_domain_ops,
|
|
|
|
};
|
2023-03-06 11:31:00 -06:00
|
|
|
|
2024-01-26 09:09:18 -06:00
|
|
|
static int
|
|
|
|
spapr_tce_blocked_iommu_attach_dev(struct iommu_domain *platform_domain,
|
|
|
|
struct device *dev)
|
|
|
|
{
|
|
|
|
struct iommu_group *grp = iommu_group_get(dev);
|
|
|
|
struct iommu_table_group *table_group;
|
|
|
|
int ret = -EINVAL;
|
|
|
|
|
2023-09-13 10:43:36 -03:00
|
|
|
/*
|
|
|
|
* FIXME: SPAPR mixes blocked and platform behaviors, the blocked domain
|
|
|
|
* also sets the dma_api ops
|
|
|
|
*/
|
2024-01-26 09:09:18 -06:00
|
|
|
table_group = iommu_group_get_iommudata(grp);
|
powerpc/iommu: Reimplement the iommu_table_group_ops for pSeries
PPC64 IOMMU API defines iommu_table_group_ops which handles DMA
windows for PEs, their ownership transfer, create/set/unset the TCE
tables for the Dynamic DMA wundows(DDW). VFIOS uses these APIs for
support on POWER.
The commit 9d67c9433509 ("powerpc/iommu: Add "borrowing"
iommu_table_group_ops") implemented partial support for this API with
"borrow" mechanism wherein the DMA windows if created already by the
host driver, they would be available for VFIO to use. Also, it didn't
have the support to control/modify the window size or the IO page
size.
The current patch implements all the necessary iommu_table_group_ops
APIs there by avoiding the "borrrowing". So, just the way it is on the
PowerNV platform, with this patch the iommu table group ownership is
transferred to the VFIO PPC subdriver, the iommu table, DMA windows
creation/deletion all driven through the APIs.
The pSeries uses the query-pe-dma-window, create-pe-dma-window and
reset-pe-dma-window RTAS calls for DMA window creation, deletion and
reset to defaul. The RTAs calls do show some minor differences to the
way things are to be handled on the pSeries which are listed below.
* On pSeries, the default DMA window size is "fixed" cannot be custom
sized as requested by the user. For non-SRIOV VFs, It is fixed at 2GB
and for SRIOV VFs, its variable sized based on the capacity assigned
to it during the VF assignment to the LPAR. So, for the default DMA
window alone the size if requested less than tce32_size, the smaller
size is enforced using the iommu table->it_size.
* The DMA start address for 32-bit window is 0, and for the 64-bit
window in case of PowerNV is hardcoded to TVE select (bit 59) at 512PiB
offset. This address is returned at the time of create_table() API call
(even before the window is created), the subsequent set_window() call
actually opens the DMA window. On pSeries, the DMA start address for
32-bit window is known from the 'ibm,dma-window' DT property. However,
the 64-bit window start address is not known until the create-pe-dma
RTAS call is made. So, the create_table() which returns the DMA window
start address actually opens the DMA window and returns the DMA start
address as returned by the Hypervisor for the create-pe-dma RTAS call.
* The reset-pe-dma RTAS call resets the DMA windows and restores the
default DMA window, however it does not clear the TCE table entries
if there are any. In case of ownership transfer from platform domain
which used direct mapping, the patch chooses remove-pe-dma instead of
reset-pe for the 64-bit window intentionally so that the
clear_dma_window() is called.
Other than the DMA window management changes mentioned above, the
patch also brings back the userspace view for the single level TCE
as it existed before commit 090bad39b237a ("powerpc/powernv: Add
indirect levels to it_userspace") along with the relavent
refactoring.
Signed-off-by: Shivaprasad G Bhat <sbhat@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/171923275958.1397.907964437142542242.stgit@linux.ibm.com
2024-06-24 12:39:23 +00:00
|
|
|
ret = table_group->ops->take_ownership(table_group, dev);
|
2024-01-26 09:09:18 -06:00
|
|
|
iommu_group_put(grp);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct iommu_domain_ops spapr_tce_blocked_domain_ops = {
|
|
|
|
.attach_dev = spapr_tce_blocked_iommu_attach_dev,
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct iommu_domain spapr_tce_blocked_domain = {
|
|
|
|
.type = IOMMU_DOMAIN_BLOCKED,
|
|
|
|
.ops = &spapr_tce_blocked_domain_ops,
|
2023-03-06 11:31:00 -06:00
|
|
|
};
|
|
|
|
|
|
|
|
static bool spapr_tce_iommu_capable(struct device *dev, enum iommu_cap cap)
|
|
|
|
{
|
|
|
|
switch (cap) {
|
|
|
|
case IOMMU_CAP_CACHE_COHERENCY:
|
|
|
|
return true;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct iommu_device *spapr_tce_iommu_probe_device(struct device *dev)
|
|
|
|
{
|
|
|
|
struct pci_dev *pdev;
|
|
|
|
struct pci_controller *hose;
|
|
|
|
|
|
|
|
if (!dev_is_pci(dev))
|
powerpc/pseries/iommu: DLPAR add doesn't completely initialize pci_controller
When a PCI device is dynamically added, the kernel oopses with a NULL
pointer dereference:
BUG: Kernel NULL pointer dereference on read at 0x00000030
Faulting instruction address: 0xc0000000006bbe5c
Oops: Kernel access of bad area, sig: 11 [#1]
LE PAGE_SIZE=64K MMU=Radix SMP NR_CPUS=2048 NUMA pSeries
Modules linked in: rpadlpar_io rpaphp rpcsec_gss_krb5 auth_rpcgss nfsv4 dns_resolver nfs lockd grace fscache netfs xsk_diag bonding nft_compat nf_tables nfnetlink rfkill binfmt_misc dm_multipath rpcrdma sunrpc rdma_ucm ib_srpt ib_isert iscsi_target_mod target_core_mod ib_umad ib_iser libiscsi scsi_transport_iscsi ib_ipoib rdma_cm iw_cm ib_cm mlx5_ib ib_uverbs ib_core pseries_rng drm drm_panel_orientation_quirks xfs libcrc32c mlx5_core mlxfw sd_mod t10_pi sg tls ibmvscsi ibmveth scsi_transport_srp vmx_crypto pseries_wdt psample dm_mirror dm_region_hash dm_log dm_mod fuse
CPU: 17 PID: 2685 Comm: drmgr Not tainted 6.7.0-203405+ #66
Hardware name: IBM,9080-HEX POWER10 (raw) 0x800200 0xf000006 of:IBM,FW1060.00 (NH1060_008) hv:phyp pSeries
NIP: c0000000006bbe5c LR: c000000000a13e68 CTR: c0000000000579f8
REGS: c00000009924f240 TRAP: 0300 Not tainted (6.7.0-203405+)
MSR: 8000000000009033 <SF,EE,ME,IR,DR,RI,LE> CR: 24002220 XER: 20040006
CFAR: c000000000a13e64 DAR: 0000000000000030 DSISR: 40000000 IRQMASK: 0
...
NIP sysfs_add_link_to_group+0x34/0x94
LR iommu_device_link+0x5c/0x118
Call Trace:
iommu_init_device+0x26c/0x318 (unreliable)
iommu_device_link+0x5c/0x118
iommu_init_device+0xa8/0x318
iommu_probe_device+0xc0/0x134
iommu_bus_notifier+0x44/0x104
notifier_call_chain+0xb8/0x19c
blocking_notifier_call_chain+0x64/0x98
bus_notify+0x50/0x7c
device_add+0x640/0x918
pci_device_add+0x23c/0x298
of_create_pci_dev+0x400/0x884
of_scan_pci_dev+0x124/0x1b0
__of_scan_bus+0x78/0x18c
pcibios_scan_phb+0x2a4/0x3b0
init_phb_dynamic+0xb8/0x110
dlpar_add_slot+0x170/0x3b8 [rpadlpar_io]
add_slot_store.part.0+0xb4/0x130 [rpadlpar_io]
kobj_attr_store+0x2c/0x48
sysfs_kf_write+0x64/0x78
kernfs_fop_write_iter+0x1b0/0x290
vfs_write+0x350/0x4a0
ksys_write+0x84/0x140
system_call_exception+0x124/0x330
system_call_vectored_common+0x15c/0x2ec
Commit a940904443e4 ("powerpc/iommu: Add iommu_ops to report capabilities
and allow blocking domains") broke DLPAR add of PCI devices.
The above added iommu_device structure to pci_controller. During
system boot, PCI devices are discovered and this newly added iommu_device
structure is initialized by a call to iommu_device_register().
During DLPAR add of a PCI device, a new pci_controller structure is
allocated but there are no calls made to iommu_device_register()
interface.
Fix is to register the iommu device during DLPAR add as well.
Fixes: a940904443e4 ("powerpc/iommu: Add iommu_ops to report capabilities and allow blocking domains")
Signed-off-by: Gaurav Batra <gbatra@linux.ibm.com>
Reviewed-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20240215221833.4817-1-gbatra@linux.ibm.com
2024-02-15 16:18:33 -06:00
|
|
|
return ERR_PTR(-ENODEV);
|
2023-03-06 11:31:00 -06:00
|
|
|
|
|
|
|
pdev = to_pci_dev(dev);
|
|
|
|
hose = pdev->bus->sysdata;
|
|
|
|
|
|
|
|
return &hose->iommu;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void spapr_tce_iommu_release_device(struct device *dev)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct iommu_group *spapr_tce_iommu_device_group(struct device *dev)
|
|
|
|
{
|
|
|
|
struct pci_controller *hose;
|
|
|
|
struct pci_dev *pdev;
|
|
|
|
|
|
|
|
pdev = to_pci_dev(dev);
|
|
|
|
hose = pdev->bus->sysdata;
|
|
|
|
|
|
|
|
if (!hose->controller_ops.device_group)
|
|
|
|
return ERR_PTR(-ENOENT);
|
|
|
|
|
|
|
|
return hose->controller_ops.device_group(hose, pdev);
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct iommu_ops spapr_tce_iommu_ops = {
|
2023-09-13 10:43:36 -03:00
|
|
|
.default_domain = &spapr_tce_platform_domain,
|
2023-09-27 20:47:31 -03:00
|
|
|
.blocked_domain = &spapr_tce_blocked_domain,
|
2023-03-06 11:31:00 -06:00
|
|
|
.capable = spapr_tce_iommu_capable,
|
|
|
|
.probe_device = spapr_tce_iommu_probe_device,
|
|
|
|
.release_device = spapr_tce_iommu_release_device,
|
|
|
|
.device_group = spapr_tce_iommu_device_group,
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct attribute *spapr_tce_iommu_attrs[] = {
|
|
|
|
NULL,
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct attribute_group spapr_tce_iommu_group = {
|
|
|
|
.name = "spapr-tce-iommu",
|
|
|
|
.attrs = spapr_tce_iommu_attrs,
|
|
|
|
};
|
|
|
|
|
|
|
|
static const struct attribute_group *spapr_tce_iommu_groups[] = {
|
|
|
|
&spapr_tce_iommu_group,
|
|
|
|
NULL,
|
|
|
|
};
|
|
|
|
|
powerpc/pseries/iommu: DLPAR add doesn't completely initialize pci_controller
When a PCI device is dynamically added, the kernel oopses with a NULL
pointer dereference:
BUG: Kernel NULL pointer dereference on read at 0x00000030
Faulting instruction address: 0xc0000000006bbe5c
Oops: Kernel access of bad area, sig: 11 [#1]
LE PAGE_SIZE=64K MMU=Radix SMP NR_CPUS=2048 NUMA pSeries
Modules linked in: rpadlpar_io rpaphp rpcsec_gss_krb5 auth_rpcgss nfsv4 dns_resolver nfs lockd grace fscache netfs xsk_diag bonding nft_compat nf_tables nfnetlink rfkill binfmt_misc dm_multipath rpcrdma sunrpc rdma_ucm ib_srpt ib_isert iscsi_target_mod target_core_mod ib_umad ib_iser libiscsi scsi_transport_iscsi ib_ipoib rdma_cm iw_cm ib_cm mlx5_ib ib_uverbs ib_core pseries_rng drm drm_panel_orientation_quirks xfs libcrc32c mlx5_core mlxfw sd_mod t10_pi sg tls ibmvscsi ibmveth scsi_transport_srp vmx_crypto pseries_wdt psample dm_mirror dm_region_hash dm_log dm_mod fuse
CPU: 17 PID: 2685 Comm: drmgr Not tainted 6.7.0-203405+ #66
Hardware name: IBM,9080-HEX POWER10 (raw) 0x800200 0xf000006 of:IBM,FW1060.00 (NH1060_008) hv:phyp pSeries
NIP: c0000000006bbe5c LR: c000000000a13e68 CTR: c0000000000579f8
REGS: c00000009924f240 TRAP: 0300 Not tainted (6.7.0-203405+)
MSR: 8000000000009033 <SF,EE,ME,IR,DR,RI,LE> CR: 24002220 XER: 20040006
CFAR: c000000000a13e64 DAR: 0000000000000030 DSISR: 40000000 IRQMASK: 0
...
NIP sysfs_add_link_to_group+0x34/0x94
LR iommu_device_link+0x5c/0x118
Call Trace:
iommu_init_device+0x26c/0x318 (unreliable)
iommu_device_link+0x5c/0x118
iommu_init_device+0xa8/0x318
iommu_probe_device+0xc0/0x134
iommu_bus_notifier+0x44/0x104
notifier_call_chain+0xb8/0x19c
blocking_notifier_call_chain+0x64/0x98
bus_notify+0x50/0x7c
device_add+0x640/0x918
pci_device_add+0x23c/0x298
of_create_pci_dev+0x400/0x884
of_scan_pci_dev+0x124/0x1b0
__of_scan_bus+0x78/0x18c
pcibios_scan_phb+0x2a4/0x3b0
init_phb_dynamic+0xb8/0x110
dlpar_add_slot+0x170/0x3b8 [rpadlpar_io]
add_slot_store.part.0+0xb4/0x130 [rpadlpar_io]
kobj_attr_store+0x2c/0x48
sysfs_kf_write+0x64/0x78
kernfs_fop_write_iter+0x1b0/0x290
vfs_write+0x350/0x4a0
ksys_write+0x84/0x140
system_call_exception+0x124/0x330
system_call_vectored_common+0x15c/0x2ec
Commit a940904443e4 ("powerpc/iommu: Add iommu_ops to report capabilities
and allow blocking domains") broke DLPAR add of PCI devices.
The above added iommu_device structure to pci_controller. During
system boot, PCI devices are discovered and this newly added iommu_device
structure is initialized by a call to iommu_device_register().
During DLPAR add of a PCI device, a new pci_controller structure is
allocated but there are no calls made to iommu_device_register()
interface.
Fix is to register the iommu device during DLPAR add as well.
Fixes: a940904443e4 ("powerpc/iommu: Add iommu_ops to report capabilities and allow blocking domains")
Signed-off-by: Gaurav Batra <gbatra@linux.ibm.com>
Reviewed-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20240215221833.4817-1-gbatra@linux.ibm.com
2024-02-15 16:18:33 -06:00
|
|
|
void ppc_iommu_register_device(struct pci_controller *phb)
|
|
|
|
{
|
|
|
|
iommu_device_sysfs_add(&phb->iommu, phb->parent,
|
|
|
|
spapr_tce_iommu_groups, "iommu-phb%04x",
|
|
|
|
phb->global_number);
|
|
|
|
iommu_device_register(&phb->iommu, &spapr_tce_iommu_ops,
|
|
|
|
phb->parent);
|
|
|
|
}
|
|
|
|
|
|
|
|
void ppc_iommu_unregister_device(struct pci_controller *phb)
|
|
|
|
{
|
|
|
|
iommu_device_unregister(&phb->iommu);
|
|
|
|
iommu_device_sysfs_remove(&phb->iommu);
|
|
|
|
}
|
|
|
|
|
2023-03-06 11:31:00 -06:00
|
|
|
/*
|
|
|
|
* This registers IOMMU devices of PHBs. This needs to happen
|
|
|
|
* after core_initcall(iommu_init) + postcore_initcall(pci_driver_init) and
|
|
|
|
* before subsys_initcall(iommu_subsys_init).
|
|
|
|
*/
|
|
|
|
static int __init spapr_tce_setup_phb_iommus_initcall(void)
|
|
|
|
{
|
|
|
|
struct pci_controller *hose;
|
|
|
|
|
|
|
|
list_for_each_entry(hose, &hose_list, list_node) {
|
powerpc/pseries/iommu: DLPAR add doesn't completely initialize pci_controller
When a PCI device is dynamically added, the kernel oopses with a NULL
pointer dereference:
BUG: Kernel NULL pointer dereference on read at 0x00000030
Faulting instruction address: 0xc0000000006bbe5c
Oops: Kernel access of bad area, sig: 11 [#1]
LE PAGE_SIZE=64K MMU=Radix SMP NR_CPUS=2048 NUMA pSeries
Modules linked in: rpadlpar_io rpaphp rpcsec_gss_krb5 auth_rpcgss nfsv4 dns_resolver nfs lockd grace fscache netfs xsk_diag bonding nft_compat nf_tables nfnetlink rfkill binfmt_misc dm_multipath rpcrdma sunrpc rdma_ucm ib_srpt ib_isert iscsi_target_mod target_core_mod ib_umad ib_iser libiscsi scsi_transport_iscsi ib_ipoib rdma_cm iw_cm ib_cm mlx5_ib ib_uverbs ib_core pseries_rng drm drm_panel_orientation_quirks xfs libcrc32c mlx5_core mlxfw sd_mod t10_pi sg tls ibmvscsi ibmveth scsi_transport_srp vmx_crypto pseries_wdt psample dm_mirror dm_region_hash dm_log dm_mod fuse
CPU: 17 PID: 2685 Comm: drmgr Not tainted 6.7.0-203405+ #66
Hardware name: IBM,9080-HEX POWER10 (raw) 0x800200 0xf000006 of:IBM,FW1060.00 (NH1060_008) hv:phyp pSeries
NIP: c0000000006bbe5c LR: c000000000a13e68 CTR: c0000000000579f8
REGS: c00000009924f240 TRAP: 0300 Not tainted (6.7.0-203405+)
MSR: 8000000000009033 <SF,EE,ME,IR,DR,RI,LE> CR: 24002220 XER: 20040006
CFAR: c000000000a13e64 DAR: 0000000000000030 DSISR: 40000000 IRQMASK: 0
...
NIP sysfs_add_link_to_group+0x34/0x94
LR iommu_device_link+0x5c/0x118
Call Trace:
iommu_init_device+0x26c/0x318 (unreliable)
iommu_device_link+0x5c/0x118
iommu_init_device+0xa8/0x318
iommu_probe_device+0xc0/0x134
iommu_bus_notifier+0x44/0x104
notifier_call_chain+0xb8/0x19c
blocking_notifier_call_chain+0x64/0x98
bus_notify+0x50/0x7c
device_add+0x640/0x918
pci_device_add+0x23c/0x298
of_create_pci_dev+0x400/0x884
of_scan_pci_dev+0x124/0x1b0
__of_scan_bus+0x78/0x18c
pcibios_scan_phb+0x2a4/0x3b0
init_phb_dynamic+0xb8/0x110
dlpar_add_slot+0x170/0x3b8 [rpadlpar_io]
add_slot_store.part.0+0xb4/0x130 [rpadlpar_io]
kobj_attr_store+0x2c/0x48
sysfs_kf_write+0x64/0x78
kernfs_fop_write_iter+0x1b0/0x290
vfs_write+0x350/0x4a0
ksys_write+0x84/0x140
system_call_exception+0x124/0x330
system_call_vectored_common+0x15c/0x2ec
Commit a940904443e4 ("powerpc/iommu: Add iommu_ops to report capabilities
and allow blocking domains") broke DLPAR add of PCI devices.
The above added iommu_device structure to pci_controller. During
system boot, PCI devices are discovered and this newly added iommu_device
structure is initialized by a call to iommu_device_register().
During DLPAR add of a PCI device, a new pci_controller structure is
allocated but there are no calls made to iommu_device_register()
interface.
Fix is to register the iommu device during DLPAR add as well.
Fixes: a940904443e4 ("powerpc/iommu: Add iommu_ops to report capabilities and allow blocking domains")
Signed-off-by: Gaurav Batra <gbatra@linux.ibm.com>
Reviewed-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20240215221833.4817-1-gbatra@linux.ibm.com
2024-02-15 16:18:33 -06:00
|
|
|
ppc_iommu_register_device(hose);
|
2023-03-06 11:31:00 -06:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
postcore_initcall_sync(spapr_tce_setup_phb_iommus_initcall);
|
2023-06-05 13:48:56 -05:00
|
|
|
#endif
|
2023-03-06 11:31:00 -06:00
|
|
|
|
2013-05-21 13:33:09 +10:00
|
|
|
#endif /* CONFIG_IOMMU_API */
|