/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/pgtable-3level.h
 *
 * Copyright (C) 2011 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */
#ifndef _ASM_PGTABLE_3LEVEL_H
#define _ASM_PGTABLE_3LEVEL_H

/*
 * With LPAE, there are 3 levels of page tables. Each level has 512 entries of
 * 8 bytes each, occupying a 4K page. The first level table covers a range of
 * 512GB, each entry representing 1GB. Since we are limited to a 4GB input
 * address range, only 4 entries in the PGD are used.
 *
 * There are enough spare bits in a page table entry for the kernel-specific
 * state.
 */
#define PTRS_PER_PTE		512
#define PTRS_PER_PMD		512
#define PTRS_PER_PGD		4
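
/*
 * Illustrative sanity check (a sketch, not part of this header): each
 * level is sized so that one table occupies exactly one 4K page:
 *
 *	BUILD_BUG_ON(PTRS_PER_PTE * sizeof(u64) != SZ_4K);
 *	BUILD_BUG_ON(PTRS_PER_PMD * sizeof(u64) != SZ_4K);
 *
 * (BUILD_BUG_ON and SZ_4K are the generic helpers from
 * <linux/build_bug.h> and <linux/sizes.h>.)
 */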

#define PTE_HWTABLE_PTRS	(0)
#define PTE_HWTABLE_OFF		(0)
#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u64))

#define MAX_POSSIBLE_PHYSMEM_BITS	40

/*
 * PGDIR_SHIFT determines the size a top-level page table entry can map.
 */
#define PGDIR_SHIFT		30

/*
 * PMD_SHIFT determines the size a middle-level page table entry can map.
 */
#define PMD_SHIFT		21

#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~((1 << PMD_SHIFT) - 1))
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~((1 << PGDIR_SHIFT) - 1))
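
/*
 * Worked example (illustrative only): with the shifts above, a 32-bit
 * virtual address decomposes as
 *
 *	bits [31:30]	PGD index (4 entries)
 *	bits [29:21]	PMD index (512 entries)
 *	bits [20:12]	PTE index (512 entries)
 *	bits [11:0]	offset within the 4K page
 *
 * so each PGD entry maps PGDIR_SIZE = 1UL << 30 = 1GB and each PMD entry
 * maps PMD_SIZE = 1UL << 21 = 2MB.
 */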

/*
 * section address mask and size definitions.
 */
#define SECTION_SHIFT		21
#define SECTION_SIZE		(1UL << SECTION_SHIFT)
#define SECTION_MASK		(~((1 << SECTION_SHIFT) - 1))

#define USER_PTRS_PER_PGD	(PAGE_OFFSET / PGDIR_SIZE)

/*
 * Hugetlb definitions.
 */
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
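
/*
 * Example (illustrative only): huge pages are PMD-sized, so with 4K base
 * pages HPAGE_SIZE = 1UL << 21 = 2MB and HUGETLB_PAGE_ORDER = 21 - 12 = 9,
 * i.e. one huge page covers 512 base pages.
 */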

/*
 * "Linux" PTE definitions for LPAE.
 *
 * These bits overlap with the hardware bits but the naming is preserved for
 * consistency with the classic page table format.
 */
#define L_PTE_VALID		(_AT(pteval_t, 1) << 0)		/* Valid */
#define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Present */
#define L_PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
#define L_PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
#define L_PTE_YOUNG		(_AT(pteval_t, 1) << 10)	/* AF */
#define L_PTE_XN		(_AT(pteval_t, 1) << 54)	/* XN */
#define L_PTE_DIRTY		(_AT(pteval_t, 1) << 55)
#define L_PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
#define L_PTE_NONE		(_AT(pteval_t, 1) << 57)	/* PROT_NONE */
#define L_PTE_RDONLY		(_AT(pteval_t, 1) << 58)	/* READ ONLY */
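
/*
 * Example (illustrative only): the software-managed bits sit in the upper
 * word of the 64-bit descriptor, above the 40-bit output address, so they
 * must be manipulated with pteval_t (u64) arithmetic, e.g.
 *
 *	pteval_t v = pte_val(pte);
 *	bool ro = v & L_PTE_RDONLY;	bit 58, needs 64-bit math
 *
 * From 32-bit assembly the high bits are reached via the *_HIGH
 * definitions further down.
 */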

/* We borrow bit 7 to store the exclusive marker in swap PTEs. */
#define L_PTE_SWP_EXCLUSIVE	(_AT(pteval_t, 1) << 7)

#define L_PMD_SECT_VALID	(_AT(pmdval_t, 1) << 0)
#define L_PMD_SECT_DIRTY	(_AT(pmdval_t, 1) << 55)
#define L_PMD_SECT_NONE		(_AT(pmdval_t, 1) << 57)
#define L_PMD_SECT_RDONLY	(_AT(pmdval_t, 1) << 58)

/*
 * To be used in assembly code with the upper page attributes.
 */
#define L_PTE_XN_HIGH		(1 << (54 - 32))
#define L_PTE_DIRTY_HIGH	(1 << (55 - 32))

/*
 * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
 */
#define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0) << 2)	/* strongly ordered */
#define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 1) << 2)	/* normal non-cacheable */
#define L_PTE_MT_WRITETHROUGH	(_AT(pteval_t, 2) << 2)	/* normal inner write-through */
#define L_PTE_MT_WRITEBACK	(_AT(pteval_t, 3) << 2)	/* normal inner write-back */
#define L_PTE_MT_WRITEALLOC	(_AT(pteval_t, 7) << 2)	/* normal inner write-alloc */
#define L_PTE_MT_DEV_SHARED	(_AT(pteval_t, 4) << 2)	/* device */
#define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 4) << 2)	/* device */
#define L_PTE_MT_DEV_WC		(_AT(pteval_t, 1) << 2)	/* normal non-cacheable */
#define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 3) << 2)	/* normal inner write-back */
#define L_PTE_MT_MASK		(_AT(pteval_t, 7) << 2)
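
/*
 * Example (illustrative only): the memory type is the 3-bit AttrIndx
 * field, so a hypothetical check for a write-back mapping masks first:
 *
 *	if ((pte_val(pte) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK)
 *		...
 */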

/*
 * Software PGD flags.
 */
#define L_PGD_SWAPPER		(_AT(pgdval_t, 1) << 55)	/* swapper_pg_dir entry */

#ifndef __ASSEMBLY__

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & 2))
#define pud_present(pud)	(pud_val(pud))
#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
						 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
						 PMD_TYPE_SECT)
#define pmd_large(pmd)		pmd_sect(pmd)
#define pmd_leaf(pmd)		pmd_sect(pmd)

#define pud_clear(pudp)			\
	do {				\
		*pudp = __pud(0);	\
		clean_pmd_entry(pudp);	\
	} while (0)

#define set_pud(pudp, pud)		\
	do {				\
		*pudp = pud;		\
		flush_pmd_entry(pudp);	\
	} while (0)

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}
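
/*
 * Note (illustrative): PAGE_MASK is a 32-bit quantity, so the (s32) cast
 * above sign-extends it to 64 bits before it is applied to the up-to-40-bit
 * physical address: 0xfffff000 becomes 0xfffffffffffff000 instead of
 * zeroing bits [39:32].
 */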

#define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))

#define copy_pmd(pmdpd,pmdps)		\
	do {				\
		*pmdpd = *pmdps;	\
		flush_pmd_entry(pmdpd);	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		*pmdp = __pmd(0);	\
		clean_pmd_entry(pmdp);	\
	} while (0)

/*
 * For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes
 * that are written to a page table but not for ptes created with mk_pte.
 *
 * In hugetlb_no_page, a new huge pte (new_pte) is generated and passed to
 * hugetlb_cow, where it is compared with an entry in a page table.
 * This comparison test fails erroneously, leading ultimately to a memory leak.
 *
 * To correct this behaviour, we mask off PTE_EXT_NG for any pte that is
 * present before running the comparison.
 */
#define __HAVE_ARCH_PTE_SAME
#define pte_same(pte_a,pte_b)	((pte_present(pte_a) ? pte_val(pte_a) & ~PTE_EXT_NG	\
					: pte_val(pte_a))				\
				== (pte_present(pte_b) ? pte_val(pte_b) & ~PTE_EXT_NG	\
					: pte_val(pte_b)))
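
/*
 * Sketch of the effect (illustrative; ptep_get() is the generic accessor):
 * a user pte read back from the page table carries PTE_EXT_NG while one
 * freshly built with mk_pte() does not, so
 *
 *	pte_same(ptep_get(ptep), new_pte)
 *
 * compares only the meaningful fields once both present ptes have the NG
 * bit masked off.
 */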

#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))

#define pte_huge(pte)		(pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

#define pmd_isset(pmd, val)	((u32)(val) == (val) ? pmd_val(pmd) & (val)	\
						: !!(pmd_val(pmd) & (val)))
#define pmd_isclear(pmd, val)	(!(pmd_val(pmd) & (val)))
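
/*
 * Note on pmd_isset() (illustrative): pmd_val() is 64-bit. For flags above
 * bit 31 the (u32) test fails and the result is collapsed to 0/1 with !!
 * so nothing is lost if the caller truncates to 32 bits; low flags keep
 * the cheaper plain mask. E.g. pmd_isset(pmd, L_PMD_SECT_DIRTY) (bit 55)
 * takes the !! path, pmd_isset(pmd, PMD_SECT_AF) (bit 10) does not.
 */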

#define pmd_present(pmd)	(pmd_isset((pmd), L_PMD_SECT_VALID))
#define pmd_young(pmd)		(pmd_isset((pmd), PMD_SECT_AF))
#define pte_special(pte)	(pte_isset((pte), L_PTE_SPECIAL))
static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= L_PTE_SPECIAL;
	return pte;
}

#define pmd_write(pmd)		(pmd_isclear((pmd), L_PMD_SECT_RDONLY))
#define pmd_dirty(pmd)		(pmd_isset((pmd), L_PMD_SECT_DIRTY))

#define pmd_hugewillfault(pmd)	(!pmd_young(pmd) || !pmd_write(pmd))
#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !pmd_table(pmd))
#endif

#define PMD_BIT_FUNC(fn,op) \
static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
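
/*
 * Expansion example (illustrative): PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF)
 * below becomes
 *
 *	static inline pmd_t pmd_mkold(pmd_t pmd)
 *	{
 *		pmd_val(pmd) &= ~PMD_SECT_AF;
 *		return pmd;
 *	}
 */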

PMD_BIT_FUNC(wrprotect,	|= L_PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkold,	&= ~PMD_SECT_AF);
PMD_BIT_FUNC(mkwrite,	&= ~L_PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkdirty,	|= L_PMD_SECT_DIRTY);
PMD_BIT_FUNC(mkclean,	&= ~L_PMD_SECT_DIRTY);
PMD_BIT_FUNC(mkyoung,	|= PMD_SECT_AF);

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
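
/*
 * Round-trip example (illustrative): for a PMD-aligned pfn (huge mappings
 * are PMD_SIZE-aligned), pfn_pmd() places the pfn in the output-address
 * field and pmd_pfn() recovers it:
 *
 *	pmd_pfn(pfn_pmd(pfn, prot)) == pfn
 *
 * assuming prot sets only attribute bits outside PMD_MASK & PHYS_MASK.
 */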

/* No hardware dirty/accessed bits -- generic_pmdp_establish() fits */
#define pmdp_establish generic_pmdp_establish

/* represent a not-present pmd by a faulting entry; this is used by pmdp_invalidate */
static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | L_PMD_SECT_RDONLY |
				L_PMD_SECT_VALID | L_PMD_SECT_NONE;
	pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
	return pmd;
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	BUG_ON(addr >= TASK_SIZE);

	/* create a faulting entry if PROT_NONE protected */
	if (pmd_val(pmd) & L_PMD_SECT_NONE)
		pmd_val(pmd) &= ~L_PMD_SECT_VALID;

	/*
	 * There is no hardware dirty bit; AP2 doubles as the read-only
	 * bit, so keep clean entries write-protected and let the first
	 * write fault to mark them dirty in software.
	 */
	if (pmd_write(pmd) && pmd_dirty(pmd))
		pmd_val(pmd) &= ~PMD_SECT_AP2;
	else
		pmd_val(pmd) |= PMD_SECT_AP2;

	*pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG);
	flush_pmd_entry(pmdp);
}

#endif /* __ASSEMBLY__ */

#endif /* _ASM_PGTABLE_3LEVEL_H */