mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-10-31 08:44:41 +00:00

commit be43d72835
Use one of the software-defined PTE bits to indicate that a mapping is
intended for an IO address. On native hardware this is irrelevant, since
a physical address is a physical address. But in a virtual environment,
physical addresses are also virtualized, so there needs to be some way
to distinguish pseudo-physical addresses from actual hardware addresses;
_PAGE_IOMAP indicates this intent.

By default, __supported_pte_mask masks out _PAGE_IOMAP, so it doesn't
even appear in the final pagetable.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
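For illustration, a minimal sketch of the intended behaviour; make_io_pte()
and pte_is_iomap() are hypothetical helpers, not part of this commit, and
assume only the definitions from the header below (pfn_pte(), pte_flags(),
PAGE_KERNEL_IO, __supported_pte_mask):

/*
 * Hypothetical sketch, not part of this commit: build a kernel IO
 * mapping and test for the intent bit.
 */
static inline pte_t make_io_pte(unsigned long pfn)
{
	/*
	 * PAGE_KERNEL_IO is __PAGE_KERNEL | _PAGE_IOMAP (defined below).
	 * pfn_pte() ANDs the protection bits with __supported_pte_mask,
	 * so _PAGE_IOMAP survives only where the platform opts in.
	 */
	return pfn_pte(pfn, PAGE_KERNEL_IO);
}

static inline int pte_is_iomap(pte_t pte)
{
	/*
	 * Always 0 on native hardware, where __supported_pte_mask
	 * clears _PAGE_IOMAP; a paravirt backend (e.g. Xen) can add
	 * _PAGE_IOMAP to __supported_pte_mask at boot so it can tell
	 * machine addresses from pseudo-physical ones.
	 */
	return !!(pte_flags(pte) & _PAGE_IOMAP);
}

Because the bit is filtered at pte-construction time rather than at every
use site, callers can request the intent unconditionally and pay nothing
for it on native hardware.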
556 lines · 17 KiB · C
#ifndef ASM_X86__PGTABLE_H
#define ASM_X86__PGTABLE_H

#define FIRST_USER_ADDRESS	0

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_FILE		6
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT		7	/* on 4KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_IOMAP		10	/* flag used to indicate IO mapping */
#define _PAGE_BIT_UNUSED3	11
#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_UNUSED1	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
#define _PAGE_IOMAP	(_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
#define _PAGE_UNUSED3	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define __HAVE_ARCH_PTE_SPECIAL

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	(_AT(pteval_t, 0))
#endif

/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	_PAGE_DIRTY	/* nonlinear file mapping,
					 * saved PTE; unset:swap */
#define _PAGE_PROTNONE	_PAGE_PSE	/* if the user mapped it with PROT_NONE;
					   pte_present gives true */

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
			 _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
			 _PAGE_DIRTY)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB		(0)
#define _PAGE_CACHE_WC		(_PAGE_PWT)
#define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)
#define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
				 _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
					 _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)

#define __PAGE_KERNEL_EXEC						\
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_EXEC_NOCACHE	(__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_WC		(__PAGE_KERNEL | _PAGE_CACHE_WC)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_NOCACHE	(__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define __PAGE_KERNEL_IO		(__PAGE_KERNEL | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_NOCACHE	(__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_UC_MINUS	(__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_WC		(__PAGE_KERNEL_WC | _PAGE_IOMAP)

#define PAGE_KERNEL			__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			__pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			__pgprot(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_WC			__pgprot(__PAGE_KERNEL_WC)
#define PAGE_KERNEL_NOCACHE		__pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_UC_MINUS		__pgprot(__PAGE_KERNEL_UC_MINUS)
#define PAGE_KERNEL_EXEC_NOCACHE	__pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
#define PAGE_KERNEL_LARGE		__pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_NOCACHE	__pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
#define PAGE_KERNEL_LARGE_EXEC		__pgprot(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		__pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL_NOCACHE	__pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)

#define PAGE_KERNEL_IO			__pgprot(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE		__pgprot(__PAGE_KERNEL_IO_NOCACHE)
#define PAGE_KERNEL_IO_UC_MINUS		__pgprot(__PAGE_KERNEL_IO_UC_MINUS)
#define PAGE_KERNEL_IO_WC		__pgprot(__PAGE_KERNEL_IO_WC)

/*         xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC

/*
 * early identity mapping pte attrib macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC
#else
#define PTE_IDENT_ATTR	 0x003		/* PRESENT+RW */
#define PDE_IDENT_ATTR	 0x063		/* PRESENT+RW+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR	 0x001		/* PRESENT (no other attributes) */
#endif

#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

extern pteval_t __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= pgprot_val(newprot) & (~_PAGE_CHG_MASK) & __supported_pte_mask;

	return __pte(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)

#ifndef __ASSEMBLY__
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t *vma_prot);
#endif

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_setup_start(pgd_t *base);
extern void native_pagetable_setup_done(pgd_t *base);
#else
static inline void native_pagetable_setup_start(pgd_t *base) {}
static inline void native_pagetable_setup_done(pgd_t *base) {}
#endif

extern int arch_report_meminfo(char *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_present(mm, addr, ptep, pte)				\
	native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)

static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
{
	native_pagetable_setup_start(base);
}

static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
{
	native_pagetable_setup_done(base);
}
#endif	/* CONFIG_PARAVIRT */

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))


#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

enum {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.  The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}


#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* ASM_X86__PGTABLE_H */