/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/map.h
 *
 *  Copyright (C) 1999-2000 Russell King
 *
 *  Page table mapping constructs and function prototypes
 */
|
2012-02-29 18:10:58 -06:00
|
|
|
#ifndef __ASM_MACH_MAP_H
|
|
|
|
#define __ASM_MACH_MAP_H
|
|
|
|
|
2007-05-05 20:59:27 +01:00
|
|
|
#include <asm/io.h>
|
|
|
|
|
2005-04-16 15:20:36 -07:00
|
|
|
struct map_desc {
|
|
|
|
unsigned long virtual;
|
2005-10-28 15:19:11 +01:00
|
|
|
unsigned long pfn;
|
2005-04-16 15:20:36 -07:00
|
|
|
unsigned long length;
|
|
|
|
unsigned int type;
|
|
|
|
};
|
|
|
|
|
2008-09-07 12:42:51 +01:00
|
|
|
/* types 0-3 are defined in asm/io.h */
|
2013-10-24 10:26:40 +01:00
|
|
|
enum {
|
|
|
|
MT_UNCACHED = 4,
|
|
|
|
MT_CACHECLEAN,
|
|
|
|
MT_MINICLEAN,
|
|
|
|
MT_LOW_VECTORS,
|
|
|
|
MT_HIGH_VECTORS,
|
|
|
|
MT_MEMORY_RWX,
|
2013-10-24 08:12:39 +01:00
|
|
|
MT_MEMORY_RW,
|
ARM: 9210/1: Mark the FDT_FIXED sections as shareable
commit 7a1be318f579 ("ARM: 9012/1: move device tree mapping out of linear
region") use FDT_FIXED_BASE to map the whole FDT_FIXED_SIZE memory area
which contains fdt. But it only reserves the exact physical memory that
fdt occupied. Unfortunately, this mapping is non-shareable. An illegal or
speculative read access can bring the RAM content from non-fdt zone into
cache, PIPT makes it to be hit by subsequently read access through
shareable mapping(such as linear mapping), and the cache consistency
between cores is lost due to non-shareable property.
|<---------FDT_FIXED_SIZE------>|
| |
-------------------------------
| <non-fdt> | <fdt> | <non-fdt> |
-------------------------------
1. CoreA read <non-fdt> through MT_ROM mapping, the old data is loaded
into the cache.
2. CoreB write <non-fdt> to update data through linear mapping. CoreA
received the notification to invalid the corresponding cachelines, but
the property non-shareable makes it to be ignored.
3. CoreA read <non-fdt> through linear mapping, cache hit, the old data
is read.
To eliminate this risk, add a new memory type MT_MEMORY_RO. Compared to
MT_ROM, it is shareable and non-executable.
Here's an example:
list_del corruption. prev->next should be c0ecbf74, but was c08410dc
kernel BUG at lib/list_debug.c:53!
... ...
PC is at __list_del_entry_valid+0x58/0x98
LR is at __list_del_entry_valid+0x58/0x98
psr: 60000093
sp : c0ecbf30 ip : 00000000 fp : 00000001
r10: c08410d0 r9 : 00000001 r8 : c0825e0c
r7 : 20000013 r6 : c08410d0 r5 : c0ecbf74 r4 : c0ecbf74
r3 : c0825d08 r2 : 00000000 r1 : df7ce6f4 r0 : 00000044
... ...
Stack: (0xc0ecbf30 to 0xc0ecc000)
bf20: c0ecbf74 c0164fd0 c0ecbf70 c0165170
bf40: c0eca000 c0840c00 c0840c00 c0824500 c0825e0c c0189bbc c088f404 60000013
bf60: 60000013 c0e85100 000004ec 00000000 c0ebcdc0 c0ecbf74 c0ecbf74 c0825d08
... ... < next prev >
(__list_del_entry_valid) from (__list_del_entry+0xc/0x20)
(__list_del_entry) from (finish_swait+0x60/0x7c)
(finish_swait) from (rcu_gp_kthread+0x560/0xa20)
(rcu_gp_kthread) from (kthread+0x14c/0x15c)
(kthread) from (ret_from_fork+0x14/0x24)
The faulty list node to be deleted is a local variable, its address is
c0ecbf74. The dumped stack shows that 'prev' = c0ecbf74, but its value
before lib/list_debug.c:53 is c08410dc. A large amount of printing results
in swapping out the cacheline containing the old data(MT_ROM mapping is
read only, so the cacheline cannot be dirty), and the subsequent dump
operation obtains new data from the DDR.
Fixes: 7a1be318f579 ("ARM: 9012/1: move device tree mapping out of linear region")
Suggested-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
2022-06-13 15:05:41 +01:00
|
|
|
MT_MEMORY_RO,
|
2013-10-24 10:26:40 +01:00
|
|
|
MT_ROM,
|
|
|
|
MT_MEMORY_RWX_NONCACHED,
|
|
|
|
MT_MEMORY_RW_DTCM,
|
|
|
|
MT_MEMORY_RWX_ITCM,
|
|
|
|
MT_MEMORY_RW_SO,
|
|
|
|
MT_MEMORY_DMA_READY,
|
|
|
|
};
|
2007-05-05 20:28:16 +01:00
|
|
|
|
2006-06-24 17:34:50 +01:00
|
|
|
#ifdef CONFIG_MMU
|
2005-04-16 15:20:36 -07:00
|
|
|
extern void iotable_init(struct map_desc *, int);
|
2012-02-29 18:10:58 -06:00
|
|
|
extern void vm_reserve_area_early(unsigned long addr, unsigned long size,
|
|
|
|
void *caller);
|
2015-04-29 10:04:17 +02:00
|
|
|
extern void create_mapping_late(struct mm_struct *mm, struct map_desc *md,
|
|
|
|
bool ng);
|
2009-01-28 21:32:08 +02:00
|
|
|
|
ARM: implement debug_ll_io_init()
When using DEBUG_LL, the UART's (or other HW's) registers are mapped
into early page tables based on the results of assembly macro addruart.
Later, when the page tables are replaced, the same virtual address must
remain valid. Historically, this has been ensured by using defines from
<mach/iomap.h> in both the implementation of addruart, and the machine's
.map_io() function. However, with the move to single zImage, we wish to
remove <mach/iomap.h>. To enable this, the macro addruart may be used
when constructing the late page tables too; addruart is exposed as a
C function debug_ll_addr(), and used to set up the required mapping in
debug_ll_io_init(), which may called on an opt-in basis from a machine's
.map_io() function.
Signed-off-by: Rob Herring <rob.herring@calxeda.com>
[swarren: Mask map.virtual with PAGE_MASK. Checked for NULL results from
debug_ll_addr (e.g. when selected UART isn't valid). Fixed compile when
either !CONFIG_DEBUG_LL or CONFIG_DEBUG_SEMIHOSTING.]
Signed-off-by: Stephen Warren <swarren@nvidia.com>
Signed-off-by: Olof Johansson <olof@lixom.net>
2012-10-22 11:42:54 -06:00
|
|
|
#ifdef CONFIG_DEBUG_LL
|
|
|
|
extern void debug_ll_addr(unsigned long *paddr, unsigned long *vaddr);
|
|
|
|
extern void debug_ll_io_init(void);
|
|
|
|
#else
|
|
|
|
static inline void debug_ll_io_init(void) {}
|
|
|
|
#endif
|
|
|
|
|
2009-01-28 21:32:08 +02:00
|
|
|
struct mem_type;
|
|
|
|
extern const struct mem_type *get_mem_type(unsigned int type);
|
|
|
|
/*
|
|
|
|
* external interface to remap single page with appropriate type
|
|
|
|
*/
|
|
|
|
extern int ioremap_page(unsigned long virt, unsigned long phys,
|
|
|
|
const struct mem_type *mtype);
|
2006-06-24 17:34:50 +01:00
|
|
|
#else
|
|
|
|
#define iotable_init(map,num) do { } while (0)
|
2012-02-29 18:10:58 -06:00
|
|
|
#define vm_reserve_area_early(a,s,c) do { } while (0)
|
|
|
|
#endif
|
|
|
|
|
2006-06-24 17:34:50 +01:00
|
|
|
#endif
|