2022-05-31 18:04:11 +08:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0 */
|
|
|
|
/*
|
|
|
|
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
|
|
|
|
*/
|
|
|
|
#ifndef _ASM_IO_H
|
|
|
|
#define _ASM_IO_H
|
|
|
|
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/types.h>
|
|
|
|
|
|
|
|
#include <asm/addrspace.h>
|
|
|
|
#include <asm/cpu.h>
|
|
|
|
#include <asm/page.h>
|
|
|
|
#include <asm/pgtable-bits.h>
|
|
|
|
#include <asm/string.h>
|
|
|
|
|
|
|
|
extern void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size);
|
|
|
|
extern void __init early_iounmap(void __iomem *addr, unsigned long size);
|
|
|
|
|
|
|
|
#define early_memremap early_ioremap
|
|
|
|
#define early_memunmap early_iounmap
|
|
|
|
|
2022-10-12 16:36:14 +08:00
|
|
|
#ifdef CONFIG_ARCH_IOREMAP
|
|
|
|
|
2022-05-31 18:04:11 +08:00
|
|
|
static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
|
2025-02-18 15:49:54 +05:30
|
|
|
pgprot_t prot)
|
2022-05-31 18:04:11 +08:00
|
|
|
{
|
2025-02-18 15:49:54 +05:30
|
|
|
switch (pgprot_val(prot) & _CACHE_MASK) {
|
2024-07-20 22:40:59 +08:00
|
|
|
case _CACHE_CC:
|
2022-05-31 18:04:11 +08:00
|
|
|
return (void __iomem *)(unsigned long)(CACHE_BASE + offset);
|
2024-07-20 22:40:59 +08:00
|
|
|
case _CACHE_SUC:
|
2022-05-31 18:04:11 +08:00
|
|
|
return (void __iomem *)(unsigned long)(UNCACHE_BASE + offset);
|
2024-07-20 22:40:59 +08:00
|
|
|
case _CACHE_WUC:
|
|
|
|
return (void __iomem *)(unsigned long)(WRITECOMBINE_BASE + offset);
|
|
|
|
default:
|
|
|
|
return NULL;
|
|
|
|
}
|
2022-05-31 18:04:11 +08:00
|
|
|
}
|
|
|
|
|
2022-10-12 16:36:14 +08:00
|
|
|
#define ioremap(offset, size) \
|
2025-02-18 15:49:54 +05:30
|
|
|
ioremap_prot((offset), (size), PAGE_KERNEL_SUC)
|
2022-05-31 18:04:11 +08:00
|
|
|
|
2022-10-12 16:36:14 +08:00
|
|
|
#define iounmap(addr) ((void)(addr))
|
|
|
|
|
|
|
|
#endif
|
2022-05-31 18:04:11 +08:00
|
|
|
|
|
|
|
/*
|
2022-10-12 16:36:14 +08:00
|
|
|
* On LoongArch, ioremap() has two variants, ioremap_wc() and ioremap_cache().
|
|
|
|
* They map bus memory into CPU space, the mapped memory is marked uncachable
|
|
|
|
* (_CACHE_SUC), uncachable but accelerated by write-combine (_CACHE_WUC) and
|
|
|
|
* cachable (_CACHE_CC) respectively for CPU access.
|
2022-05-31 18:04:11 +08:00
|
|
|
*
|
2022-10-12 16:36:14 +08:00
|
|
|
* @offset: bus address of the memory
|
|
|
|
* @size: size of the resource to map
|
2022-05-31 18:04:11 +08:00
|
|
|
*/
|
2022-10-12 16:36:14 +08:00
|
|
|
#define ioremap_wc(offset, size) \
|
2023-10-18 08:42:52 +08:00
|
|
|
ioremap_prot((offset), (size), \
|
2025-02-18 15:49:54 +05:30
|
|
|
wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC)
|
2022-05-31 18:04:11 +08:00
|
|
|
|
2022-10-12 16:36:14 +08:00
|
|
|
#define ioremap_cache(offset, size) \
|
2025-02-18 15:49:54 +05:30
|
|
|
ioremap_prot((offset), (size), PAGE_KERNEL)
|
2022-05-31 18:04:11 +08:00
|
|
|
|
LoongArch: Support dbar with different hints
Traditionally, LoongArch uses "dbar 0" (full completion barrier) for
everything. But the full completion barrier is a performance killer, so
Loongson-3A6000 and newer processors have made finer granularity hints
available:
Bit4: ordering or completion (0: completion, 1: ordering)
Bit3: barrier for previous read (0: true, 1: false)
Bit2: barrier for previous write (0: true, 1: false)
Bit1: barrier for succeeding read (0: true, 1: false)
Bit0: barrier for succeeding write (0: true, 1: false)
Hint 0x700: barrier for "read after read" from the same address, which
is needed by LL-SC loops on old models (dbar 0x700 behaves the same as
nop if such reordering is disabled on new models).
This patch makes use of the various new hints for different kinds of
memory barriers. It brings performance improvements on Loongson-3A6000
series, while not affecting the existing models because all variants are
treated as 'dbar 0' there.
Why override queued_spin_unlock()?
After commit 01e3b958efe85a26d9b ("drivers: Remove explicit invocations
of mmiowb()") we need a completion barrier in queued_spin_unlock(), but
the generic implementation use smp_store_release() which only provide an
ordering barrier.
Signed-off-by: Jun Yi <yijun@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2023-06-29 20:58:44 +08:00
|
|
|
#define mmiowb() wmb()
|
2022-05-31 18:04:11 +08:00
|
|
|
|
2024-03-19 15:50:34 +08:00
|
|
|
#define __io_aw() mmiowb()
|
|
|
|
|
LoongArch: Make {virt, phys, page, pfn} translation work with KFENCE
KFENCE changes virt_to_page() to be able to translate tlb mapped virtual
addresses, but forget to change virt_to_phys()/phys_to_virt() and other
translation functions as well. This patch fix it, otherwise some drivers
(such as nvme and virtio-blk) cannot work with KFENCE.
All {virt, phys, page, pfn} translation functions are updated:
1, virt_to_pfn()/pfn_to_virt();
2, virt_to_page()/page_to_virt();
3, virt_to_phys()/phys_to_virt().
DMW/TLB mapped addresses are distinguished by comparing the vaddress
with vm_map_base in virt_to_xyz(), and we define WANT_PAGE_VIRTUAL in
the KFENCE case for the reverse translations, xyz_to_virt().
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2024-04-10 21:08:51 +08:00
|
|
|
#ifdef CONFIG_KFENCE
|
|
|
|
#define virt_to_phys(kaddr) \
|
|
|
|
({ \
|
|
|
|
(likely((unsigned long)kaddr < vm_map_base)) ? __pa((unsigned long)kaddr) : \
|
|
|
|
page_to_phys(tlb_virt_to_page((unsigned long)kaddr)) + offset_in_page((unsigned long)kaddr);\
|
|
|
|
})
|
|
|
|
|
|
|
|
#define phys_to_virt(paddr) \
|
|
|
|
({ \
|
|
|
|
extern char *__kfence_pool; \
|
|
|
|
(unlikely(__kfence_pool == NULL)) ? __va((unsigned long)paddr) : \
|
|
|
|
page_address(phys_to_page((unsigned long)paddr)) + offset_in_page((unsigned long)paddr);\
|
|
|
|
})
|
|
|
|
#endif
|
|
|
|
|
2022-05-31 18:04:11 +08:00
|
|
|
#include <asm-generic/io.h>
|
|
|
|
|
2022-10-12 16:36:14 +08:00
|
|
|
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
|
|
|
|
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
|
|
|
|
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
|
|
|
|
|
2022-05-31 18:04:11 +08:00
|
|
|
#endif /* _ASM_IO_H */
|