linux/arch/loongarch/include/asm/io.h
Ryan Roberts 86758b5048 mm/ioremap: pass pgprot_t to ioremap_prot() instead of unsigned long
ioremap_prot() currently accepts its pgprot_val parameter as an unsigned
long, implicitly assuming that pgprot_val and pgprot_t can never be wider
than unsigned long.  This assumption will soon no longer hold on arm64
when using D128 pgtables: in the 128-bit page table configuration,
unsigned long is 64 bits wide, but pgprot_t is 128 bits.

Passing the platform-abstracted pgprot_t type is better than a fixed-size
integer type.  Change the parameter to pass pgprot_t directly, as the
similar helper generic_ioremap_prot() already does.

Without this change, the D128 configuration does not work on arm64, as the
top 64 bits get silently stripped when the protection value is passed to
this function.
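
A sketch of the interface change (based on the generic prototype;
per-architecture prototypes differ slightly in their size parameter):

	/* before: protection bits passed as a raw integer */
	void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
				   unsigned long prot);
	/* after: the abstracted type is passed through intact */
	void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
				   pgprot_t prot);

Callers that previously passed pgprot_val(prot) now pass prot itself;
implementations extract the raw bits internally via pgprot_val().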

Link: https://lkml.kernel.org/r/20250218101954.415331-1-anshuman.khandual@arm.com
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Co-developed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com> [arm64]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2025-03-16 22:06:23 -07:00

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#include <linux/kernel.h>
#include <linux/types.h>

#include <asm/addrspace.h>
#include <asm/cpu.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/string.h>

extern void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size);
extern void __init early_iounmap(void __iomem *addr, unsigned long size);

#define early_memremap early_ioremap
#define early_memunmap early_iounmap

#ifdef CONFIG_ARCH_IOREMAP
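
/*
 * With CONFIG_ARCH_IOREMAP, LoongArch serves ioremap() from its fixed
 * direct-mapped windows (DMW): the cache attribute encoded in @prot selects
 * the window base, so creating a mapping is pure address arithmetic with no
 * page table updates, and iounmap() below is correspondingly a no-op.
 */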
static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
					 pgprot_t prot)
{
	switch (pgprot_val(prot) & _CACHE_MASK) {
	case _CACHE_CC:
		return (void __iomem *)(unsigned long)(CACHE_BASE + offset);
	case _CACHE_SUC:
		return (void __iomem *)(unsigned long)(UNCACHE_BASE + offset);
	case _CACHE_WUC:
		return (void __iomem *)(unsigned long)(WRITECOMBINE_BASE + offset);
	default:
		return NULL;
	}
}
#define ioremap(offset, size)	\
	ioremap_prot((offset), (size), PAGE_KERNEL_SUC)

#define iounmap(addr)	((void)(addr))
#endif
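
/*
 * Hypothetical usage sketch (the device address below is made up for
 * illustration): since the mapping is pure arithmetic, the returned cookie
 * is usable immediately and there is nothing to tear down:
 *
 *	void __iomem *regs = ioremap(0x1fe00000, 0x1000);
 *	u32 val = readl(regs);
 */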
/*
 * On LoongArch, ioremap() has two variants: ioremap_wc() and ioremap_cache().
 * They map bus memory into CPU space; the mapped memory is marked uncachable
 * (_CACHE_SUC), uncachable but write-combine accelerated (_CACHE_WUC), or
 * cachable (_CACHE_CC), respectively, for CPU access.
 *
 * @offset: bus address of the memory
 * @size: size of the resource to map
 */
#define ioremap_wc(offset, size)	\
	ioremap_prot((offset), (size),	\
		     wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC)

#define ioremap_cache(offset, size)	\
	ioremap_prot((offset), (size), PAGE_KERNEL)
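
/*
 * Note on ioremap_wc(): wc_enabled is a runtime flag (assumption: driven by
 * a boot-time "writecombine" setting) that makes ioremap_wc() fall back to
 * strong uncached (_CACHE_SUC) on hardware where write-combine ordering is
 * unreliable.
 */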
#define mmiowb() wmb()
#define __io_aw() mmiowb()
#ifdef CONFIG_KFENCE
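/*
 * KFENCE pool objects are not guaranteed to sit in the DMW linear range
 * (they can be remapped through page tables), so the linear __pa()/__va()
 * shortcuts are overridden to fall back on a page-table based lookup for
 * such addresses.
 */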
#define virt_to_phys(kaddr)								\
({											\
	(likely((unsigned long)kaddr < vm_map_base)) ? __pa((unsigned long)kaddr) :	\
	page_to_phys(tlb_virt_to_page((unsigned long)kaddr)) + offset_in_page((unsigned long)kaddr);\
})

#define phys_to_virt(paddr)								\
({											\
	extern char *__kfence_pool;							\
	(unlikely(__kfence_pool == NULL)) ? __va((unsigned long)paddr) :		\
	page_address(phys_to_page((unsigned long)paddr)) + offset_in_page((unsigned long)paddr);\
})
#endif
#include <asm-generic/io.h>
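
/*
 * Let the platform validate physical ranges for /dev/mem-style accesses
 * rather than accepting any address unconditionally.
 */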
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
#endif /* _ASM_IO_H */