linux/arch/mips/include/asm/io.h


/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994, 1995 Waldorf GmbH
* Copyright (C) 1994 - 2000, 06 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
* Author: Maciej W. Rozycki <macro@mips.com>
*/
#ifndef _ASM_IO_H
#define _ASM_IO_H
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/string.h>
#include <mangle-port.h>
/*
* Raw operations are never swapped in software. OTOH values that raw
* operations are working on may or may not have been swapped by the bus
* hardware. An example use would be for flash memory that's used for
* execute in place.
*/
# define __raw_ioswabb(a, x) (x)
# define __raw_ioswabw(a, x) (x)
# define __raw_ioswabl(a, x) (x)
# define __raw_ioswabq(a, x) (x)
# define ____raw_ioswabq(a, x) (x)
# define _ioswabb ioswabb
# define _ioswabw ioswabw
# define _ioswabl ioswabl
# define _ioswabq ioswabq
# define __relaxed_ioswabb ioswabb
# define __relaxed_ioswabw ioswabw
# define __relaxed_ioswabl ioswabl
# define __relaxed_ioswabq ioswabq
/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */
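/*
 * Illustrative example (names hypothetical): the __raw_ accessors built
 * below never byte-swap in software, so they return data exactly as the
 * bus presents it -- what execute-in-place code would see -- while the
 * plain accessors run the value through the ioswab helpers from
 * <mangle-port.h>:
 *
 *	u32 raw    = __raw_readl(flash_base);	bus byte order, unswapped
 *	u32 cooked = readl(flash_base);		after ioswabl() mangling
 *
 * On an unmangled port the two agree; on a byte-lane-swapped bus they
 * differ.
 */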
/*
* On MIPS I/O ports are memory mapped, so we access them using normal
* load/store instructions. mips_io_port_base is the virtual address to
* which all ports are being mapped. For the sake of efficiency some code
* assumes that this is an address that can be loaded with a single lui
* instruction, so the lower 16 bits must be zero. Should be true on
* any sane architecture; generic code does not use this assumption.
*/
extern unsigned long mips_io_port_base;
static inline void set_io_port_base(unsigned long base)
{
mips_io_port_base = base;
}
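/*
 * Illustrative sketch: platform setup code is expected to point
 * mips_io_port_base at the uncached (KSEG1) mapping of its I/O port
 * window before the first inX()/outX() call.  The physical address used
 * here is purely hypothetical; note that its low 16 bits are zero, as the
 * comment above requires.
 */
static inline void example_setup_io_ports(void)
{
	set_io_port_base(CKSEG1ADDR(0x1fd00000));
}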
/*
* Provide the necessary definitions for generic iomap. We make use of
* mips_io_port_base for iomap(), but we don't reserve any low addresses for
* use with I/O ports.
*/
#define HAVE_ARCH_PIO_SIZE
#define PIO_OFFSET mips_io_port_base
#define PIO_MASK IO_SPACE_LIMIT
#define PIO_RESERVED 0x0UL
/*
* Enforce in-order execution of data I/O. In the MIPS architecture
* these are equivalent to corresponding platform-specific memory
* barriers defined in <asm/barrier.h>. API pinched from PowerPC,
* with sync additionally defined.
*/
#define iobarrier_rw() mb()
#define iobarrier_r() rmb()
#define iobarrier_w() wmb()
#define iobarrier_sync() iob()
/*
* virt_to_phys - map virtual addresses to physical
* @address: address to translate
*
* The returned physical address is the physical (CPU) mapping for
* the memory address given. It is only valid to use this function on
* addresses directly mapped or allocated via kmalloc.
*
* This function does not give bus mappings for DMA transfers. In
* almost all conceivable cases a device driver should not be using
* this function.
*/
static inline unsigned long __virt_to_phys_nodebug(volatile const void *address)
{
return __pa(address);
}
#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(volatile const void *x);
#else
#define __virt_to_phys(x) __virt_to_phys_nodebug(x)
#endif
#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
return __virt_to_phys(x);
}
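/*
 * Illustrative sketch: valid only for directly mapped kernel addresses
 * (static data, the linear map, kmalloc() buffers); the result is a CPU
 * physical address, not a bus/DMA address.
 */
static inline phys_addr_t example_buffer_phys(const void *kbuf)
{
	return virt_to_phys(kbuf);
}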
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
static inline unsigned long isa_virt_to_bus(volatile void *address)
{
return virt_to_phys(address);
}
/*
* Change "struct page" to physical address.
*/
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
unsigned long prot_val);
void iounmap(const volatile void __iomem *addr);
/*
* ioremap - map bus memory into CPU space
* @offset: bus address of the memory
* @size: size of the resource to map
*
* ioremap performs a platform specific sequence of operations to
* make bus memory CPU accessible via the readb/readw/readl/writeb/
* writew/writel functions and the other mmio helpers. The returned
* address is not guaranteed to be usable directly as a virtual
* address.
*/
#define ioremap(offset, size) \
ioremap_prot((offset), (size), _CACHE_UNCACHED)
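/*
 * Typical (hypothetical) usage in a driver probe path, with the register
 * block and the ID register at offset 0 purely illustrative:
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	id = readl(regs);		strongly ordered, uncached access
 *	...
 *	iounmap(regs);
 */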
/*
* ioremap_cache - map bus memory into CPU space
* @offset: bus address of the memory
* @size: size of the resource to map
*
* ioremap_cache performs a platform specific sequence of operations to
* make bus memory CPU accessible via the readb/readw/readl/writeb/
* writew/writel functions and the other mmio helpers. The returned
* address is not guaranteed to be usable directly as a virtual
* address.
*
* This version of ioremap ensures that the memory is marked cacheable by
* the CPU. Also enables full write-combining. Useful for some
* memory-like regions on I/O busses.
*/
#define ioremap_cache(offset, size) \
ioremap_prot((offset), (size), _page_cachable_default)
/*
* ioremap_wc - map bus memory into CPU space
* @offset: bus address of the memory
* @size: size of the resource to map
*
* ioremap_wc performs a platform specific sequence of operations to
* make bus memory CPU accessible via the readb/readw/readl/writeb/
* writew/writel functions and the other mmio helpers. The returned
* address is not guaranteed to be usable directly as a virtual
* address.
*
* This version of ioremap ensures that the memory is marked uncacheable
* but accelerated by means of the write-combining (UCA) feature. It is
* specifically useful for PCIe prefetchable windows, which may vastly
* improve communication performance. If it is determined at boot that the
* CPU's CCA set doesn't support UCA, the mapping falls back to the
* _CACHE_UNCACHED attribute (see cpu_probe()).
*/
#define ioremap_wc(offset, size) \
ioremap_prot((offset), (size), boot_cpu_data.writecombine)
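/*
 * Hypothetical usage: mapping a prefetchable PCIe BAR so that bulk MMIO
 * writes can be gathered by the UCA attribute where available:
 *
 *	void __iomem *fb = ioremap_wc(pci_resource_start(pdev, 2),
 *				      pci_resource_len(pdev, 2));
 *
 * On CPUs whose CCA set lacks UCA, boot_cpu_data.writecombine is set up
 * by cpu_probe() to fall back to _CACHE_UNCACHED, so this degrades to a
 * plain uncached mapping.
 */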
#if defined(CONFIG_CPU_CAVIUM_OCTEON)
#define war_io_reorder_wmb() wmb()
#else
#define war_io_reorder_wmb() barrier()
#endif
#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, barrier, relax, irq) \
\
static inline void pfx##write##bwlq(type val, \
volatile void __iomem *mem) \
{ \
volatile type *__mem; \
type __val; \
\
if (barrier) \
iobarrier_rw(); \
else \
war_io_reorder_wmb(); \
\
__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
\
__val = pfx##ioswab##bwlq(__mem, val); \
\
if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
*__mem = __val; \
else if (cpu_has_64bits) { \
unsigned long __flags; \
type __tmp; \
\
if (irq) \
local_irq_save(__flags); \
__asm__ __volatile__( \
".set push" "\t\t# __writeq""\n\t" \
".set arch=r4000" "\n\t" \
"dsll32 %L0, %L0, 0" "\n\t" \
"dsrl32 %L0, %L0, 0" "\n\t" \
"dsll32 %M0, %M0, 0" "\n\t" \
"or %L0, %L0, %M0" "\n\t" \
"sd %L0, %2" "\n\t" \
".set pop" "\n" \
: "=r" (__tmp) \
: "0" (__val), "m" (*__mem)); \
if (irq) \
local_irq_restore(__flags); \
} else \
BUG(); \
} \
\
static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
{ \
volatile type *__mem; \
type __val; \
\
__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
\
if (barrier) \
iobarrier_rw(); \
\
if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
__val = *__mem; \
else if (cpu_has_64bits) { \
unsigned long __flags; \
\
if (irq) \
local_irq_save(__flags); \
__asm__ __volatile__( \
".set push" "\t\t# __readq" "\n\t" \
".set arch=r4000" "\n\t" \
"ld %L0, %1" "\n\t" \
"dsra32 %M0, %L0, 0" "\n\t" \
"sll %L0, %L0, 0" "\n\t" \
".set pop" "\n" \
: "=r" (__val) \
: "m" (*__mem)); \
if (irq) \
local_irq_restore(__flags); \
} else { \
__val = 0; \
BUG(); \
} \
\
/* prevent prefetching of coherent DMA data prematurely */ \
if (!relax) \
rmb(); \
return pfx##ioswab##bwlq(__mem, __val); \
}
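/*
 * For reference, a hand expansion of __BUILD_MEMORY_SINGLE(, l, u32, 1, 0, 1)
 * (the variant instantiated for plain readl()/writel() via BUILDIO_MEM
 * below) boils down to roughly:
 *
 *	static inline void writel(u32 val, volatile void __iomem *mem)
 *	{
 *		volatile u32 *__mem;
 *
 *		iobarrier_rw();				barrier != 0
 *		__mem = (void *)__swizzle_addr_l((unsigned long)mem);
 *		*__mem = ioswabl(__mem, val);		plain 32-bit store
 *	}
 *
 *	static inline u32 readl(const volatile void __iomem *mem)
 *	{
 *		volatile u32 *__mem = (void *)__swizzle_addr_l((unsigned long)mem);
 *		u32 __val;
 *
 *		iobarrier_rw();
 *		__val = *__mem;
 *		rmb();			relax == 0: no premature DMA prefetch
 *		return ioswabl(__mem, __val);
 *	}
 *
 * The 64-bit assembly path only matters for the q accessors on 32-bit
 * kernels, where a single 8-byte access cannot be expressed in plain C.
 */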
#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, barrier, relax) \
\
static inline void pfx##out##bwlq(type val, unsigned long port) \
{ \
volatile type *__addr; \
type __val; \
\
if (barrier) \
iobarrier_rw(); \
else \
war_io_reorder_wmb(); \
\
__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
\
__val = pfx##ioswab##bwlq(__addr, val); \
\
/* Really, we want this to be atomic */ \
BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
\
*__addr = __val; \
} \
\
static inline type pfx##in##bwlq(unsigned long port) \
{ \
volatile type *__addr; \
type __val; \
\
__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
\
BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
\
if (barrier) \
iobarrier_rw(); \
\
__val = *__addr; \
\
/* prevent prefetching of coherent DMA data prematurely */ \
if (!relax) \
rmb(); \
return pfx##ioswab##bwlq(__addr, __val); \
}
#define __BUILD_MEMORY_PFX(bus, bwlq, type, relax) \
\
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1, relax, 1)
#define BUILDIO_MEM(bwlq, type) \
\
__BUILD_MEMORY_PFX(__raw_, bwlq, type, 0) \
__BUILD_MEMORY_PFX(__relaxed_, bwlq, type, 1) \
__BUILD_MEMORY_PFX(__mem_, bwlq, type, 0) \
__BUILD_MEMORY_PFX(, bwlq, type, 0)
BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_MEM(q, u64)
#else
__BUILD_MEMORY_PFX(__raw_, q, u64, 0)
__BUILD_MEMORY_PFX(__mem_, q, u64, 0)
#endif
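/*
 * On 32-bit kernels plain readq()/writeq() are therefore left undefined
 * here; a portable driver that needs 64-bit MMIO includes one of the
 * generic non-atomic fallbacks, e.g.:
 *
 *	#include <linux/io-64-nonatomic-lo-hi.h>
 *
 *	u64 v = readq(regs);	two 32-bit reads (low word first) on 32-bit
 *
 * which splits the access into two 32-bit operations on configurations
 * without native 64-bit accessors.
 */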
#define __BUILD_IOPORT_PFX(bus, bwlq, type) \
__BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0)
#define BUILDIO_IOPORT(bwlq, type) \
__BUILD_IOPORT_PFX(_, bwlq, type) \
__BUILD_IOPORT_PFX(__mem_, bwlq, type)
BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif
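/*
 * Hypothetical port I/O usage: the driver-facing inX()/outX() wrappers are
 * provided by <asm-generic/io.h> (included at the end of this file) on top
 * of the _inX()/_outX() accessors generated above, and simply offset the
 * port number by mips_io_port_base:
 *
 *	outb(c, 0x3f8);			write THR of a legacy 16550
 *	u8 lsr = inb(0x3f8 + 5);	read its line status register
 *
 * The 0x3f8 port base is the conventional PC value and purely
 * illustrative on MIPS.
 */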
#define __BUILDIO(bwlq, type) \
\
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 1, 0, 0)
__BUILDIO(q, u64)
#define readb_relaxed __relaxed_readb
#define readw_relaxed __relaxed_readw
#define readl_relaxed __relaxed_readl
#ifdef CONFIG_64BIT
#define readq_relaxed __relaxed_readq
#endif
#define writeb_relaxed __relaxed_writeb
#define writew_relaxed __relaxed_writew
#define writel_relaxed __relaxed_writel
#ifdef CONFIG_64BIT
#define writeq_relaxed __relaxed_writeq
#endif
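/*
 * Illustrative sketch: the relaxed read accessors skip the trailing rmb()
 * that the plain accessors issue to keep coherent DMA data from being
 * prefetched early, so callers that go on to touch DMA'd memory must add
 * the barrier themselves.  The register layout and "done" bit below are
 * hypothetical.
 */
static inline u32 example_poll_done_relaxed(void __iomem *status_reg)
{
	u32 val;

	do {
		val = readl_relaxed(status_reg);
	} while (!(val & 0x80000000u));	/* hypothetical completion flag */

	rmb();	/* order the flag read before reads of the DMA buffer */
	return val;
}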
#define readb_be(addr) \
__raw_readb((__force unsigned *)(addr))
#define readw_be(addr) \
be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
#define readl_be(addr) \
be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
#define readq_be(addr) \
be64_to_cpu(__raw_readq((__force unsigned *)(addr)))
#define writeb_be(val, addr) \
__raw_writeb((val), (__force unsigned *)(addr))
#define writew_be(val, addr) \
__raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
#define writel_be(val, addr) \
__raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
#define writeq_be(val, addr) \
__raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))
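/*
 * Illustrative sketch (register offset and bit hypothetical): the _be
 * accessors above wrap the raw accessors with explicit big-endian
 * conversion, which is convenient for peripherals whose registers are
 * wired big-endian irrespective of the CPU's endianness.
 */
static inline void example_enable_be_device(void __iomem *regs)
{
	u32 ctrl = readl_be(regs + 0x10);	/* hypothetical control register */

	writel_be(ctrl | 0x1, regs + 0x10);	/* hypothetical enable bit */
}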
#define __BUILD_MEMORY_STRING(bwlq, type) \
\
static inline void writes##bwlq(volatile void __iomem *mem, \
const void *addr, unsigned int count) \
{ \
const volatile type *__addr = addr; \
\
while (count--) { \
__mem_write##bwlq(*__addr, mem); \
__addr++; \
} \
} \
\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr, \
unsigned int count) \
{ \
volatile type *__addr = addr; \
\
while (count--) { \
*__addr = __mem_read##bwlq(mem); \
__addr++; \
} \
}
#define __BUILD_IOPORT_STRING(bwlq, type) \
\
static inline void outs##bwlq(unsigned long port, const void *addr, \
unsigned int count) \
{ \
const volatile type *__addr = addr; \
\
while (count--) { \
__mem_out##bwlq(*__addr, port); \
__addr++; \
} \
} \
\
static inline void ins##bwlq(unsigned long port, void *addr, \
unsigned int count) \
{ \
volatile type *__addr = addr; \
\
while (count--) { \
*__addr = __mem_in##bwlq(port); \
__addr++; \
} \
}
#define BUILDSTRING(bwlq, type) \
\
__BUILD_MEMORY_STRING(bwlq, type) \
__BUILD_IOPORT_STRING(bwlq, type)
BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif
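/*
 * Illustrative sketch: the string accessors above repeatedly access one
 * and the same MMIO location, advancing only the memory buffer, which is
 * what hardware data FIFOs expect.  The FIFO register argument is
 * hypothetical.
 */
static inline void example_drain_fifo(void __iomem *fifo_reg, u32 *buf,
				      unsigned int words)
{
	readsl(fifo_reg, buf, words);	/* buf[0..words-1] <- FIFO register */
}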
/*
* The caches on some architectures aren't DMA-coherent, so coherency has
* to be handled in software.  There are three types of operations that
* can be applied to DMA buffers.
*
*  - dma_cache_wback_inv(start, size) makes the caches and memory coherent
*    by writing the content of the caches back to memory, if necessary.
*    The function also invalidates the affected part of the caches as
*    necessary before DMA transfers from a device to memory.
*  - dma_cache_wback(start, size) makes the caches and memory coherent by
*    writing the content of the caches back to memory, if necessary,
*    before DMA transfers from memory to a device.
*  - dma_cache_inv(start, size) invalidates the affected parts of the
*    caches.  Dirty lines of the caches may be written back or simply
*    be discarded.  This operation is necessary before DMA transfers
*    from a device to memory.
*
* This API used to be exported; it now is for arch code internal use only.
*/
#ifdef CONFIG_DMA_NONCOHERENT
extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
#define dma_cache_wback_inv(start, size) _dma_cache_wback_inv(start, size)
#define dma_cache_wback(start, size) _dma_cache_wback(start, size)
#define dma_cache_inv(start, size) _dma_cache_inv(start, size)
#else /* Sane hardware */
#define dma_cache_wback_inv(start,size) \
do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size) \
do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start,size) \
do { (void) (start); (void) (size); } while (0)
#endif /* CONFIG_DMA_NONCOHERENT */
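/*
 * Illustrative sketch of how the arch-internal hooks above map onto DMA
 * directions; device drivers must use the generic DMA mapping API rather
 * than calling these directly.
 */
static inline void example_sync_for_device(unsigned long start,
					   unsigned long size, bool to_device)
{
	if (to_device)
		dma_cache_wback(start, size);	/* flush CPU writes to memory */
	else
		dma_cache_inv(start, size);	/* discard possibly stale lines */
}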
/*
* Read a 32-bit register that requires a 64-bit read cycle on the bus.
* Avoid interrupt mucking, just adjust the address for 4-byte access.
* Assume the addresses are 8-byte aligned.
*/
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif
#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
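/*
 * Illustrative sketch: csr_in32()/csr_out32() hide the endian-dependent
 * offset of a 32-bit register that the bus decodes only as a 64-bit,
 * 8-byte-aligned location.
 */
static inline u32 example_read_wide_csr(void __iomem *csr)
{
	return csr_in32(csr);	/* __CSR_32_ADJUST picks the correct 32-bit half */
}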
#define __raw_readb __raw_readb
#define __raw_readw __raw_readw
#define __raw_readl __raw_readl
#ifdef CONFIG_64BIT
#define __raw_readq __raw_readq
#endif
#define __raw_writeb __raw_writeb
#define __raw_writew __raw_writew
#define __raw_writel __raw_writel
#ifdef CONFIG_64BIT
#define __raw_writeq __raw_writeq
#endif
#define readb readb
#define readw readw
#define readl readl
#ifdef CONFIG_64BIT
#define readq readq
#endif
#define writeb writeb
#define writew writew
#define writel writel
#ifdef CONFIG_64BIT
#define writeq writeq
#endif
#define readsb readsb
#define readsw readsw
#define readsl readsl
#ifdef CONFIG_64BIT
#define readsq readsq
#endif
#define writesb writesb
#define writesw writesw
#define writesl writesl
#ifdef CONFIG_64BIT
#define writesq writesq
#endif
#define _inb _inb
#define _inw _inw
#define _inl _inl
#define insb insb
#define insw insw
#define insl insl
#define _outb _outb
#define _outw _outw
#define _outl _outl
#define outsb outsb
#define outsw outsw
#define outsl outsl
void __ioread64_copy(void *to, const void __iomem *from, size_t count);
#include <asm-generic/io.h>
static inline void *isa_bus_to_virt(unsigned long address)
{
return phys_to_virt(address);
}
#endif /* _ASM_IO_H */