powerpc/iommu: Update the generic code to use dynamic iommu page sizes

This patch updates the generic iommu backend code to use the it_page_shift
field to determine the iommu page size instead of using hardcoded values.

Signed-off-by: Alistair Popple <alistair@popple.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 3a553170d3
commit d084775738

6 changed files with 88 additions and 65 deletions
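
To make the change easier to follow, here is a minimal, self-contained C sketch of the idea (illustration only, not code from the patch: the one-field iommu_table, the simplified get_iommu_order() loop and main() are stand-ins, while the macro names mirror the ones added in asm/iommu.h). The 4K helpers keep their _4K suffix, and a parallel set of helpers derives the page size from the table's it_page_shift field, so code that holds an iommu_table pointer no longer assumes 4K pages.

#include <stdio.h>

/* Illustration only: a stripped-down iommu_table with just the field the
 * new macros rely on; the kernel structure has many more members. */
struct iommu_table {
	unsigned long it_page_shift;	/* log2 of the IOMMU page size */
};

/* Old style: page size fixed at compile time (4K). */
#define IOMMU_PAGE_SHIFT_4K	12
#define IOMMU_PAGE_SIZE_4K	(1UL << IOMMU_PAGE_SHIFT_4K)
#define IOMMU_PAGE_MASK_4K	(~((1UL << IOMMU_PAGE_SHIFT_4K) - 1))

/* New style: page size read from the table at run time. */
#define IOMMU_PAGE_SIZE(tblptr)	(1UL << (tblptr)->it_page_shift)
#define IOMMU_PAGE_MASK(tblptr)	(~((1UL << (tblptr)->it_page_shift) - 1))

/* Simplified stand-in for the reworked get_iommu_order(): smallest order
 * such that 2^order IOMMU pages cover 'size' bytes. */
static int get_iommu_order(unsigned long size, struct iommu_table *tbl)
{
	int order = 0;

	while ((1UL << (order + tbl->it_page_shift)) < size)
		order++;
	return order;
}

int main(void)
{
	struct iommu_table tbl_4k  = { .it_page_shift = 12 };
	struct iommu_table tbl_64k = { .it_page_shift = 16 };

	/* A 1 MB mapping needs order 8 (256 pages) with 4K IOMMU pages,
	 * but only order 4 (16 pages) with 64K IOMMU pages. */
	printf("4K : page size %lu, order %d\n",
	       IOMMU_PAGE_SIZE(&tbl_4k), get_iommu_order(1UL << 20, &tbl_4k));
	printf("64K: page size %lu, order %d\n",
	       IOMMU_PAGE_SIZE(&tbl_64k), get_iommu_order(1UL << 20, &tbl_64k));
	return 0;
}

The arithmetic is the point of the patch: with a 64K-capable table the same DMA range needs 16 times fewer TCE entries than with hardcoded 4K pages, and the generic code can now pick that up from the table instead of a constant.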
arch/powerpc/include/asm/iommu.h

@@ -35,17 +35,14 @@
 #define IOMMU_PAGE_MASK_4K       (~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
 #define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K)
 
+#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift)
+#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1))
+#define IOMMU_PAGE_ALIGN(addr, tblptr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE(tblptr))
+
 /* Boot time flags */
 extern int iommu_is_off;
 extern int iommu_force_on;
 
-/* Pure 2^n version of get_order */
-static __inline__ __attribute_const__ int get_iommu_order(unsigned long size)
-{
-	return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT_4K) + 1;
-}
-
-
 /*
  * IOMAP_MAX_ORDER defines the largest contiguous block
  * of dma space we can get.  IOMAP_MAX_ORDER = 13
@@ -82,6 +79,14 @@ struct iommu_table {
 #endif
 };
 
+/* Pure 2^n version of get_order */
+static inline __attribute_const__
+int get_iommu_order(unsigned long size, struct iommu_table *tbl)
+{
+	return __ilog2((size - 1) >> tbl->it_page_shift) + 1;
+}
+
+
 struct scatterlist;
 
 static inline void set_iommu_table_base(struct device *dev, void *base)
arch/powerpc/kernel/dma-iommu.c

@@ -83,10 +83,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
 		return 0;
 	}
 
-	if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT_4K)) {
+	if (tbl->it_offset > (mask >> tbl->it_page_shift)) {
 		dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
 		dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
-				mask, tbl->it_offset << IOMMU_PAGE_SHIFT_4K);
+				mask, tbl->it_offset << tbl->it_page_shift);
 		return 0;
 	} else
 		return 1;
arch/powerpc/kernel/iommu.c

@@ -251,14 +251,13 @@ again:
 
 	if (dev)
 		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-				      1 << IOMMU_PAGE_SHIFT_4K);
+				      1 << tbl->it_page_shift);
 	else
-		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT_4K);
+		boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
 	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
 
-	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
-			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT_4K,
-			     align_mask);
+	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
+			     boundary_size >> tbl->it_page_shift, align_mask);
 	if (n == -1) {
 		if (likely(pass == 0)) {
 			/* First try the pool from the start */
@@ -320,12 +319,12 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 		return DMA_ERROR_CODE;
 
 	entry += tbl->it_offset;	/* Offset into real TCE table */
-	ret = entry << IOMMU_PAGE_SHIFT_4K;	/* Set the return dma address */
+	ret = entry << tbl->it_page_shift;	/* Set the return dma address */
 
 	/* Put the TCEs in the HW table */
 	build_fail = ppc_md.tce_build(tbl, entry, npages,
-			      (unsigned long)page & IOMMU_PAGE_MASK_4K,
-			      direction, attrs);
+				      (unsigned long)page &
+				      IOMMU_PAGE_MASK(tbl), direction, attrs);
 
 	/* ppc_md.tce_build() only returns non-zero for transient errors.
 	 * Clean up the table bitmap in this case and return
@@ -352,7 +351,7 @@ static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
 {
 	unsigned long entry, free_entry;
 
-	entry = dma_addr >> IOMMU_PAGE_SHIFT_4K;
+	entry = dma_addr >> tbl->it_page_shift;
 	free_entry = entry - tbl->it_offset;
 
 	if (((free_entry + npages) > tbl->it_size) ||
@@ -401,7 +400,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 	unsigned long flags;
 	struct iommu_pool *pool;
 
-	entry = dma_addr >> IOMMU_PAGE_SHIFT_4K;
+	entry = dma_addr >> tbl->it_page_shift;
 	free_entry = entry - tbl->it_offset;
 
 	pool = get_pool(tbl, free_entry);
@@ -468,13 +467,13 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		}
 		/* Allocate iommu entries for that segment */
 		vaddr = (unsigned long) sg_virt(s);
-		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE_4K);
+		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
 		align = 0;
-		if (IOMMU_PAGE_SHIFT_4K < PAGE_SHIFT && slen >= PAGE_SIZE &&
+		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
 		    (vaddr & ~PAGE_MASK) == 0)
-			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT_4K;
+			align = PAGE_SHIFT - tbl->it_page_shift;
 		entry = iommu_range_alloc(dev, tbl, npages, &handle,
-					  mask >> IOMMU_PAGE_SHIFT_4K, align);
+					  mask >> tbl->it_page_shift, align);
 
 		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
 
@@ -489,16 +488,16 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 
 		/* Convert entry to a dma_addr_t */
 		entry += tbl->it_offset;
-		dma_addr = entry << IOMMU_PAGE_SHIFT_4K;
-		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK_4K);
+		dma_addr = entry << tbl->it_page_shift;
+		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));
 
 		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
 			    npages, entry, dma_addr);
 
 		/* Insert into HW table */
 		build_fail = ppc_md.tce_build(tbl, entry, npages,
-					      vaddr & IOMMU_PAGE_MASK_4K,
+					      vaddr & IOMMU_PAGE_MASK(tbl),
 					      direction, attrs);
 		if(unlikely(build_fail))
 			goto failure;
@@ -559,9 +558,9 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		if (s->dma_length != 0) {
 			unsigned long vaddr, npages;
 
-			vaddr = s->dma_address & IOMMU_PAGE_MASK_4K;
+			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
 			npages = iommu_num_pages(s->dma_address, s->dma_length,
-						 IOMMU_PAGE_SIZE_4K);
+						 IOMMU_PAGE_SIZE(tbl));
 			__iommu_free(tbl, vaddr, npages);
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
@@ -592,7 +591,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 		if (sg->dma_length == 0)
 			break;
 		npages = iommu_num_pages(dma_handle, sg->dma_length,
-					 IOMMU_PAGE_SIZE_4K);
+					 IOMMU_PAGE_SIZE(tbl));
 		__iommu_free(tbl, dma_handle, npages);
 		sg = sg_next(sg);
 	}
@@ -676,7 +675,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 		set_bit(0, tbl->it_map);
 
 	/* We only split the IOMMU table if we have 1GB or more of space */
-	if ((tbl->it_size << IOMMU_PAGE_SHIFT_4K) >= (1UL * 1024 * 1024 * 1024))
+	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
 		tbl->nr_pools = IOMMU_NR_POOLS;
 	else
 		tbl->nr_pools = 1;
@@ -768,16 +767,16 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
 
 	vaddr = page_address(page) + offset;
 	uaddr = (unsigned long)vaddr;
-	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE_4K);
+	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
 
 	if (tbl) {
 		align = 0;
-		if (IOMMU_PAGE_SHIFT_4K < PAGE_SHIFT && size >= PAGE_SIZE &&
+		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
 		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
-			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT_4K;
+			align = PAGE_SHIFT - tbl->it_page_shift;
 
 		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
-					 mask >> IOMMU_PAGE_SHIFT_4K, align,
+					 mask >> tbl->it_page_shift, align,
 					 attrs);
 		if (dma_handle == DMA_ERROR_CODE) {
 			if (printk_ratelimit()) {
@@ -786,7 +785,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
 					 npages);
 			}
 		} else
-			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK_4K);
+			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
 	}
 
 	return dma_handle;
@@ -801,7 +800,8 @@ void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
 	BUG_ON(direction == DMA_NONE);
 
 	if (tbl) {
-		npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE_4K);
+		npages = iommu_num_pages(dma_handle, size,
+					 IOMMU_PAGE_SIZE(tbl));
 		iommu_free(tbl, dma_handle, npages);
 	}
 }
@@ -845,10 +845,10 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 	memset(ret, 0, size);
 
 	/* Set up tces to cover the allocated range */
-	nio_pages = size >> IOMMU_PAGE_SHIFT_4K;
-	io_order = get_iommu_order(size);
+	nio_pages = size >> tbl->it_page_shift;
+	io_order = get_iommu_order(size, tbl);
 	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
-			      mask >> IOMMU_PAGE_SHIFT_4K, io_order, NULL);
+			      mask >> tbl->it_page_shift, io_order, NULL);
 	if (mapping == DMA_ERROR_CODE) {
 		free_pages((unsigned long)ret, order);
 		return NULL;
@@ -864,7 +864,7 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
 		unsigned int nio_pages;
 
 		size = PAGE_ALIGN(size);
-		nio_pages = size >> IOMMU_PAGE_SHIFT_4K;
+		nio_pages = size >> tbl->it_page_shift;
 		iommu_free(tbl, dma_handle, nio_pages);
 		size = PAGE_ALIGN(size);
 		free_pages((unsigned long)vaddr, get_order(size));
@@ -935,10 +935,10 @@ int iommu_tce_clear_param_check(struct iommu_table *tbl,
 	if (tce_value)
 		return -EINVAL;
 
-	if (ioba & ~IOMMU_PAGE_MASK_4K)
+	if (ioba & ~IOMMU_PAGE_MASK(tbl))
 		return -EINVAL;
 
-	ioba >>= IOMMU_PAGE_SHIFT_4K;
+	ioba >>= tbl->it_page_shift;
 	if (ioba < tbl->it_offset)
 		return -EINVAL;
 
@@ -955,13 +955,13 @@ int iommu_tce_put_param_check(struct iommu_table *tbl,
 	if (!(tce & (TCE_PCI_WRITE | TCE_PCI_READ)))
 		return -EINVAL;
 
-	if (tce & ~(IOMMU_PAGE_MASK_4K | TCE_PCI_WRITE | TCE_PCI_READ))
+	if (tce & ~(IOMMU_PAGE_MASK(tbl) | TCE_PCI_WRITE | TCE_PCI_READ))
 		return -EINVAL;
 
-	if (ioba & ~IOMMU_PAGE_MASK_4K)
+	if (ioba & ~IOMMU_PAGE_MASK(tbl))
 		return -EINVAL;
 
-	ioba >>= IOMMU_PAGE_SHIFT_4K;
+	ioba >>= tbl->it_page_shift;
 	if (ioba < tbl->it_offset)
 		return -EINVAL;
 
@@ -1037,7 +1037,7 @@ int iommu_tce_build(struct iommu_table *tbl, unsigned long entry,
 
 	/* if (unlikely(ret))
 		pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
-				__func__, hwaddr, entry << IOMMU_PAGE_SHIFT_4K,
+				__func__, hwaddr, entry << IOMMU_PAGE_SHIFT(tbl),
 				hwaddr, ret); */
 
 	return ret;
@@ -1049,14 +1049,14 @@ int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
 {
 	int ret;
 	struct page *page = NULL;
-	unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK_4K & ~PAGE_MASK;
+	unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
 	enum dma_data_direction direction = iommu_tce_direction(tce);
 
 	ret = get_user_pages_fast(tce & PAGE_MASK, 1,
 			direction != DMA_TO_DEVICE, &page);
 	if (unlikely(ret != 1)) {
 		/* pr_err("iommu_tce: get_user_pages_fast failed tce=%lx ioba=%lx ret=%d\n",
-				tce, entry << IOMMU_PAGE_SHIFT_4K, ret); */
+				tce, entry << IOMMU_PAGE_SHIFT(tbl), ret); */
 		return -EFAULT;
 	}
 	hwaddr = (unsigned long) page_address(page) + offset;
@@ -1067,7 +1067,7 @@ int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
 
 	if (ret < 0)
 		pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%d\n",
-			__func__, entry << IOMMU_PAGE_SHIFT_4K, tce, ret);
+			__func__, entry << tbl->it_page_shift, tce, ret);
 
 	return ret;
 }
@@ -1127,6 +1127,12 @@ int iommu_add_device(struct device *dev)
 	pr_debug("iommu_tce: adding %s to iommu group %d\n",
 			dev_name(dev), iommu_group_id(tbl->it_group));
 
+	if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) {
+		pr_err("iommu_tce: unsupported iommu page size.");
+		pr_err("%s has not been added\n", dev_name(dev));
+		return -EINVAL;
+	}
+
 	ret = iommu_group_add_device(tbl->it_group, dev);
 	if (ret < 0)
 		pr_err("iommu_tce: %s has not been added, ret=%d\n",
arch/powerpc/kernel/vio.c

@@ -518,16 +518,18 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
                                          struct dma_attrs *attrs)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
+	struct iommu_table *tbl;
 	dma_addr_t ret = DMA_ERROR_CODE;
 
-	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K))) {
+	tbl = get_iommu_table_base(dev);
+	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) {
 		atomic_inc(&viodev->cmo.allocs_failed);
 		return ret;
 	}
 
 	ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
 	if (unlikely(dma_mapping_error(dev, ret))) {
-		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K));
+		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
 		atomic_inc(&viodev->cmo.allocs_failed);
 	}
 
@@ -540,10 +542,12 @@ static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
 				     struct dma_attrs *attrs)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
+	struct iommu_table *tbl;
+
+	tbl = get_iommu_table_base(dev);
 	dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);
 
-	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K));
+	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
 }
 
 static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -551,12 +555,14 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 				struct dma_attrs *attrs)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
+	struct iommu_table *tbl;
 	struct scatterlist *sgl;
 	int ret, count = 0;
 	size_t alloc_size = 0;
 
+	tbl = get_iommu_table_base(dev);
 	for (sgl = sglist; count < nelems; count++, sgl++)
-		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE_4K);
+		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));
 
 	if (vio_cmo_alloc(viodev, alloc_size)) {
 		atomic_inc(&viodev->cmo.allocs_failed);
@@ -572,7 +578,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 	}
 
 	for (sgl = sglist, count = 0; count < ret; count++, sgl++)
-		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE_4K);
+		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
 	if (alloc_size)
 		vio_cmo_dealloc(viodev, alloc_size);
 
@@ -585,12 +591,14 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
 		struct dma_attrs *attrs)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
+	struct iommu_table *tbl;
 	struct scatterlist *sgl;
 	size_t alloc_size = 0;
 	int count = 0;
 
+	tbl = get_iommu_table_base(dev);
 	for (sgl = sglist; count < nelems; count++, sgl++)
-		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE_4K);
+		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
 
 	dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
 
@@ -706,11 +714,14 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev)
 {
 	struct vio_cmo_dev_entry *dev_ent;
 	struct device *dev = &viodev->dev;
+	struct iommu_table *tbl;
 	struct vio_driver *viodrv = to_vio_driver(dev->driver);
 	unsigned long flags;
 	size_t size;
 	bool dma_capable = false;
 
+	tbl = get_iommu_table_base(dev);
+
 	/* A device requires entitlement if it has a DMA window property */
 	switch (viodev->family) {
 	case VDEVICE:
@@ -737,7 +748,7 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev)
 		}
 
 		viodev->cmo.desired =
-			IOMMU_PAGE_ALIGN_4K(viodrv->get_desired_dma(viodev));
+			IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
 		if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
 			viodev->cmo.desired = VIO_CMO_MIN_ENT;
 		size = VIO_CMO_MIN_ENT;
arch/powerpc/platforms/powernv/pci.c

@@ -762,8 +762,6 @@ static struct notifier_block tce_iommu_bus_nb = {
 
 static int __init tce_iommu_bus_notifier_init(void)
 {
-	BUILD_BUG_ON(PAGE_SIZE < IOMMU_PAGE_SIZE_4K);
-
 	bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
 	return 0;
 }
drivers/net/ethernet/ibm/ibmveth.c

@@ -1276,31 +1276,34 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
 {
 	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
 	struct ibmveth_adapter *adapter;
+	struct iommu_table *tbl;
 	unsigned long ret;
 	int i;
 	int rxqentries = 1;
 
+	tbl = get_iommu_table_base(&vdev->dev);
+
 	/* netdev inits at probe time along with the structures we need below*/
 	if (netdev == NULL)
-		return IOMMU_PAGE_ALIGN_4K(IBMVETH_IO_ENTITLEMENT_DEFAULT);
+		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);
 
 	adapter = netdev_priv(netdev);
 
 	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
-	ret += IOMMU_PAGE_ALIGN_4K(netdev->mtu);
+	ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);
 
 	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
 		/* add the size of the active receive buffers */
 		if (adapter->rx_buff_pool[i].active)
 			ret +=
 			    adapter->rx_buff_pool[i].size *
-			    IOMMU_PAGE_ALIGN_4K(adapter->rx_buff_pool[i].
-			            buff_size);
+			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
+					     buff_size, tbl);
 		rxqentries += adapter->rx_buff_pool[i].size;
 	}
 	/* add the size of the receive queue entries */
-	ret += IOMMU_PAGE_ALIGN_4K(
-	        rxqentries * sizeof(struct ibmveth_rx_q_entry));
+	ret += IOMMU_PAGE_ALIGN(
+		rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);
 
 	return ret;
 }