/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 */

#ifndef EFX_IO_H
#define EFX_IO_H

#include <linux/io.h>
#include <linux/spinlock.h>

/**************************************************************************
 *
 * NIC register I/O
 *
 **************************************************************************
 *
 * The EF10 architecture exposes very few registers to the host and
 * most of them are only 32 bits wide. The only exceptions are the MC
 * doorbell register pair, which has its own latching, and
 * TX_DESC_UPD.
 *
 * The TX_DESC_UPD DMA descriptor pointer is 128 bits wide, but is a
 * special case in the BIU to avoid the need for locking in the host:
 *
 * - It is write-only.
 * - The semantics of writing to this register are such that
 *   replacing the low 96 bits with zero does not affect functionality.
 * - If the host writes to the last dword address of the register
 *   (i.e. the high 32 bits) the underlying register will always be
 *   written. If the collector and the current write together do not
 *   provide values for all 128 bits of the register, the low 96 bits
 *   will be written as zero.
 */

#if BITS_PER_LONG == 64
#define EFX_USE_QWORD_IO 1
#endif

/* Hardware issue requires that only 64-bit naturally aligned writes
 * are seen by hardware. It's not strictly necessary to restrict to
 * x86_64 arch, but done for safety since unusual write-combining behaviour
 * can break PIO.
 */
#ifdef CONFIG_X86_64
/* PIO is a win only if write-combining is possible */
#ifdef ioremap_wc
#define EFX_USE_PIO 1
#endif
#endif

static inline u32 efx_reg(struct efx_nic *efx, unsigned int reg)
{
	return efx->reg_base + reg;
}

#ifdef EFX_USE_QWORD_IO
/* Raw accessors: __raw_writeq()/__raw_readq() and friends perform no
 * byte-swapping and no memory barriers; values are little-endian as the
 * hardware expects, and callers provide any ordering they need.
 */
static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
			       unsigned int reg)
{
	__raw_writeq((__force u64)value, efx->membase + reg);
}
static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg)
{
	return (__force __le64)__raw_readq(efx->membase + reg);
}
#endif

static inline void _efx_writed(struct efx_nic *efx, __le32 value,
			       unsigned int reg)
{
	__raw_writel((__force u32)value, efx->membase + reg);
}
static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
{
	return (__force __le32)__raw_readl(efx->membase + reg);
}

/* Write a normal 128-bit CSR, locking as appropriate. */
static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value,
			      unsigned int reg)
{
	unsigned long flags __attribute__ ((unused));

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing register %x with " EFX_OWORD_FMT "\n", reg,
		   EFX_OWORD_VAL(*value));

	spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EFX_USE_QWORD_IO
	_efx_writeq(efx, value->u64[0], reg + 0);
	_efx_writeq(efx, value->u64[1], reg + 8);
#else
	_efx_writed(efx, value->u32[0], reg + 0);
	_efx_writed(efx, value->u32[1], reg + 4);
	_efx_writed(efx, value->u32[2], reg + 8);
	_efx_writed(efx, value->u32[3], reg + 12);
#endif
	spin_unlock_irqrestore(&efx->biu_lock, flags);
}
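
/* Illustrative sketch, not part of the driver API: a caller builds a
 * 128-bit value and writes it with efx_writeo().  The register address
 * and dword values below are placeholders, not real CSR definitions.
 */
static inline void efx_io_example_writeo(struct efx_nic *efx)
{
	efx_oword_t reg;

	EFX_ZERO_OWORD(reg);			/* clear all 128 bits */
	reg.u32[0] = cpu_to_le32(0x1);		/* placeholder field values */
	reg.u32[3] = cpu_to_le32(0x80000000);
	efx_writeo(efx, &reg, 0x830);		/* placeholder address */
}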

/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
static inline void efx_writed(struct efx_nic *efx, const efx_dword_t *value,
			      unsigned int reg)
{
	netif_vdbg(efx, hw, efx->net_dev,
		   "writing register %x with " EFX_DWORD_FMT "\n",
		   reg, EFX_DWORD_VAL(*value));

	/* No lock required */
	_efx_writed(efx, value->u32[0], reg);
}

/* Read a 128-bit CSR, locking as appropriate. */
static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
			     unsigned int reg)
{
	unsigned long flags __attribute__ ((unused));

	spin_lock_irqsave(&efx->biu_lock, flags);
	value->u32[0] = _efx_readd(efx, reg + 0);
	value->u32[1] = _efx_readd(efx, reg + 4);
	value->u32[2] = _efx_readd(efx, reg + 8);
	value->u32[3] = _efx_readd(efx, reg + 12);
	spin_unlock_irqrestore(&efx->biu_lock, flags);

	netif_vdbg(efx, hw, efx->net_dev,
		   "read from register %x, got " EFX_OWORD_FMT "\n", reg,
		   EFX_OWORD_VAL(*value));
}

/* Read a 32-bit CSR or SRAM */
static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
			     unsigned int reg)
{
	value->u32[0] = _efx_readd(efx, reg);
	netif_vdbg(efx, hw, efx->net_dev,
		   "read from register %x, got " EFX_DWORD_FMT "\n",
		   reg, EFX_DWORD_VAL(*value));
}
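
/* Illustrative sketch, not part of the driver API: a read-modify-write
 * of a 128-bit CSR.  efx_reado()/efx_writeo() each take biu_lock around
 * their four dword accesses, but the sequence as a whole is not atomic;
 * callers needing that must add their own serialisation.  The address
 * and bit below are placeholders.
 */
static inline void efx_io_example_rmw(struct efx_nic *efx)
{
	efx_oword_t reg;

	efx_reado(efx, &reg, 0x280);		/* placeholder address */
	reg.u32[0] |= cpu_to_le32(1);		/* placeholder bit */
	efx_writeo(efx, &reg, 0x280);
}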

/* Write a 128-bit CSR forming part of a table */
static inline void
efx_writeo_table(struct efx_nic *efx, const efx_oword_t *value,
		 unsigned int reg, unsigned int index)
{
	efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
}

/* Read a 128-bit CSR forming part of a table */
static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
				   unsigned int reg, unsigned int index)
{
	efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
}
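
/* Worked example of the table addressing above: entries are one oword
 * (16 bytes) apart, so for a hypothetical table based at 0xf00000,
 * index 5 resolves to 0xf00000 + 5 * 16 = 0xf00050.
 */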

/* default VI stride (step between per-VI registers) is 8K on EF10 and
 * 64K on EF100
 */
#define EFX_DEFAULT_VI_STRIDE 0x2000
#define EF100_DEFAULT_VI_STRIDE 0x10000

/* Calculate offset to page-mapped register */
static inline unsigned int efx_paged_reg(struct efx_nic *efx, unsigned int page,
					 unsigned int reg)
{
	return page * efx->vi_stride + reg;
}
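
/* Worked example: with the default EF10 stride of 0x2000, reg 0x400 on
 * page 2 maps to 2 * 0x2000 + 0x400 = 0x4400; with the EF100 stride of
 * 0x10000 the same register lands at 0x20400.
 */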

/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
				    unsigned int reg, unsigned int page)
{
	reg = efx_paged_reg(efx, page, reg);

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing register %x with " EFX_OWORD_FMT "\n", reg,
		   EFX_OWORD_VAL(*value));

#ifdef EFX_USE_QWORD_IO
	_efx_writeq(efx, value->u64[0], reg + 0);
	_efx_writeq(efx, value->u64[1], reg + 8);
#else
	_efx_writed(efx, value->u32[0], reg + 0);
	_efx_writed(efx, value->u32[1], reg + 4);
	_efx_writed(efx, value->u32[2], reg + 8);
	_efx_writed(efx, value->u32[3], reg + 12);
#endif
}
#define efx_writeo_page(efx, value, reg, page)				\
	_efx_writeo_page(efx, value,					\
			 reg +						\
			 BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
			 page)
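
/* The BUILD_BUG_ON_ZERO() term above is a compile-time guard: it
 * evaluates to 0 (leaving the register address unchanged) when the
 * condition is false, and breaks the build when it is true.  For
 * example, efx_writeo_page(efx, &txd, 0xa10, page) compiles, while a
 * hypothetical efx_writeo_page(efx, &txd, 0xa20, page) would fail to
 * build, because 0xa20 is not one of the two registers this path
 * supports.
 */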

/* Write a page-mapped 32-bit CSR (EVQ_RPTR, EVQ_TMR (EF10), or the
 * high bits of RX_DESC_UPD or TX_DESC_UPD)
 */
static inline void
_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
		 unsigned int reg, unsigned int page)
{
	efx_writed(efx, value, efx_paged_reg(efx, page, reg));
}
#define efx_writed_page(efx, value, reg, page)				\
	_efx_writed_page(efx, value,					\
			 reg +						\
			 BUILD_BUG_ON_ZERO((reg) != 0x180 &&		\
					   (reg) != 0x200 &&		\
					   (reg) != 0x400 &&		\
					   (reg) != 0x420 &&		\
					   (reg) != 0x830 &&		\
					   (reg) != 0x83c &&		\
					   (reg) != 0xa18 &&		\
					   (reg) != 0xa1c),		\
			 page)
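
/* Illustrative sketch, not part of the driver API: ringing a TX
 * doorbell by writing only the last dword of the 128-bit TX_DESC_UPD
 * register (0xa10 + 12 = 0xa1c), relying on the BIU semantics described
 * at the top of this file: a write to the high dword always pushes the
 * register, with unsupplied low bits written as zero.  The write
 * pointer encoding is a placeholder; real callers use the generated
 * hardware register definitions.
 */
static inline void efx_io_example_tx_doorbell(struct efx_nic *efx,
					      unsigned int write_ptr,
					      unsigned int page)
{
	efx_dword_t reg;

	reg.u32[0] = cpu_to_le32(write_ptr);	/* placeholder encoding */
	efx_writed_page(efx, &reg, 0xa1c, page);
}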

/* Write TIMER_COMMAND.  This is a page-mapped 32-bit CSR, but a bug
 * in the BIU means that writes to TIMER_COMMAND[0] invalidate the
 * collector register.
 */
static inline void _efx_writed_page_locked(struct efx_nic *efx,
					   const efx_dword_t *value,
					   unsigned int reg,
					   unsigned int page)
{
	unsigned long flags __attribute__ ((unused));

	if (page == 0) {
		spin_lock_irqsave(&efx->biu_lock, flags);
		efx_writed(efx, value, efx_paged_reg(efx, page, reg));
		spin_unlock_irqrestore(&efx->biu_lock, flags);
	} else {
		efx_writed(efx, value, efx_paged_reg(efx, page, reg));
	}
}
#define efx_writed_page_locked(efx, value, reg, page)			\
	_efx_writed_page_locked(efx, value,				\
				reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
				page)

#endif /* EFX_IO_H */