
ioremap_prot() currently accepts the pgprot_val parameter as an unsigned long, implicitly assuming that pgprot_val and pgprot_t can never be wider than unsigned long. That assumption will soon no longer hold on arm64 when using D128 pgtables: in the 128-bit page table configuration, unsigned long is 64 bits wide but pgprot_t is 128 bits. Passing the platform-abstracted pgprot_t is preferable to a fixed-size integer type, so let's change the parameter to pass pgprot_t directly, as the similar helper generic_ioremap_prot() already does. Without this change, the D128 configuration does not work on arm64 because the top 64 bits are silently stripped when the protection value is passed to this function.

Link: https://lkml.kernel.org/r/20250218101954.415331-1-anshuman.khandual@arm.com
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Co-developed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com> [arm64]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
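The caller-visible difference can be sketched as follows. This fragment is illustrative only and not part of the patch: paddr, size and regs are placeholder names standing in for whatever a driver would actually map.

/*
 * Sketch of the interface change. Before the change, the protection value
 * had to be collapsed with pgprot_val(), which truncates it once pgprot_t
 * is wider than unsigned long (as with 128-bit page table entries):
 *
 *	regs = ioremap_prot(paddr, size,
 *			    pgprot_val(pgprot_noncached(PAGE_KERNEL)));
 *
 * After the change, the pgprot_t is handed through unchanged, matching the
 * type that generic_ioremap_prot() already takes, so no bits are lost:
 */
void __iomem *regs = ioremap_prot(paddr, size,
				  pgprot_noncached(PAGE_KERNEL));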
62 lines
1.6 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/cache.h>

static inline bool arc_uncached_addr_space(phys_addr_t paddr)
{
	if (is_isa_arcompact()) {
		if (paddr >= ARC_UNCACHED_ADDR_SPACE)
			return true;
	} else if (paddr >= perip_base && paddr <= perip_end) {
		return true;
	}

	return false;
}

void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
{
	/*
	 * If the region is h/w uncached, MMU mapping can be elided as optim
	 * The cast to u32 is fine as this region can only be inside 4GB
	 */
	if (arc_uncached_addr_space(paddr))
		return (void __iomem *)(u32)paddr;

	return ioremap_prot(paddr, size,
			    pgprot_noncached(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap);

/*
 * ioremap with access flags
 * Cache semantics wise it is same as ioremap - "forced" uncached.
 * However unlike vanilla ioremap which bypasses ARC MMU for addresses in
 * ARC hardware uncached region, this one still goes thru the MMU as caller
 * might need finer access control (R/W/X)
 */
void __iomem *ioremap_prot(phys_addr_t paddr, size_t size,
			   pgprot_t prot)
{
	/* force uncached */
	return generic_ioremap_prot(paddr, size, pgprot_noncached(prot));
}
EXPORT_SYMBOL(ioremap_prot);

void iounmap(volatile void __iomem *addr)
{
	/* weird double cast to handle phys_addr_t > 32 bits */
	if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
		return;

	generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);