tile: fix some issues in hugepage support
First, in huge_pte_offset(), we were erroneously checking pgd_present(), which is always true, rather than pud_present(), which is the thing that tells us if there is a top-level (L0) PTE. Fixing this means we properly look up huge page entries only when the Present bit is actually set in the PTE.

Second, use the standard pte_alloc_map() instead of the hand-rolled pte_alloc_hugetlb() routine that basically was written to avoid worrying about CONFIG_HIGHPTE. However, we no longer plan to support HIGHPTE, so a separate routine was just unnecessary code duplication.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
commit a0bd12d718
parent 6b940606d9
1 changed file with 3 additions and 35 deletions
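For context on the first fix: tile has at most three page-table levels, so the pud level is folded into the pgd (note the __PAGETABLE_PUD_FOLDED check in the last hunk below). With that folding, the generic stubs in include/asm-generic/pgtable-nopud.h hard-code the top-level predicates, which is why pgd_present() can never filter out an empty entry. A minimal sketch of that pattern, quoted approximately rather than verbatim:

        /* nopud folding: the pgd can never be "none" or "bad" and is
         * always "present" -- the real Present bit lives one level
         * down, which is exactly what the patch below starts testing. */
        static inline int pgd_none(pgd_t pgd)    { return 0; }
        static inline int pgd_bad(pgd_t pgd)     { return 0; }
        static inline int pgd_present(pgd_t pgd) { return 1; }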
arch/tile/mm/hugetlbpage.c

@@ -49,38 +49,6 @@ int huge_shift[HUGE_SHIFT_ENTRIES] = {
 #endif
 };
 
-/*
- * This routine is a hybrid of pte_alloc_map() and pte_alloc_kernel().
- * It assumes that L2 PTEs are never in HIGHMEM (we don't support that).
- * It locks the user pagetable, and bumps up the mm->nr_ptes field,
- * but otherwise allocate the page table using the kernel versions.
- */
-static pte_t *pte_alloc_hugetlb(struct mm_struct *mm, pmd_t *pmd,
-                                unsigned long address)
-{
-        pte_t *new;
-
-        if (pmd_none(*pmd)) {
-                new = pte_alloc_one_kernel(mm, address);
-                if (!new)
-                        return NULL;
-
-                smp_wmb(); /* See comment in __pte_alloc */
-
-                spin_lock(&mm->page_table_lock);
-                if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
-                        mm->nr_ptes++;
-                        pmd_populate_kernel(mm, pmd, new);
-                        new = NULL;
-                } else
-                        VM_BUG_ON(pmd_trans_splitting(*pmd));
-                spin_unlock(&mm->page_table_lock);
-                if (new)
-                        pte_free_kernel(mm, new);
-        }
-
-        return pte_offset_kernel(pmd, address);
-}
 #endif
 
 pte_t *huge_pte_alloc(struct mm_struct *mm,
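The routine removed above is a hand-rolled copy of the usual "allocate outside the lock, recheck under the lock" idiom that the generic allocator already provides; with HIGHPTE out of the picture there is nothing tile-specific left to justify it. Distilled, the idiom looks like this (an illustrative sketch; alloc_pte() and free_pte() are placeholder names, not kernel APIs):

        if (pmd_none(*pmd)) {                 /* unlocked fast-path check */
                pte_t *new = alloc_pte();     /* may sleep; no lock held yet */
                if (!new)
                        return NULL;
                smp_wmb();                    /* order table init before publishing it */
                spin_lock(&mm->page_table_lock);
                if (pmd_none(*pmd)) {         /* recheck: did another thread race us? */
                        mm->nr_ptes++;
                        pmd_populate_kernel(mm, pmd, new);
                        new = NULL;           /* ownership transferred to the pmd */
                }
                spin_unlock(&mm->page_table_lock);
                if (new)
                        free_pte(new);        /* we lost the race; discard ours */
        }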
@@ -109,7 +77,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
         else {
                 if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE])
                         panic("Unexpected page size %#lx\n", sz);
-                return pte_alloc_hugetlb(mm, pmd, addr);
+                return pte_alloc_map(mm, NULL, pmd, addr);
         }
 }
 #else
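For reference, pte_alloc_map() in kernels of this era is roughly the following macro from include/linux/mm.h (quoted approximately): allocate a PTE page via __pte_alloc() only when the pmd is still empty, then map and return the entry. That subsumes everything pte_alloc_hugetlb() did; the NULL second argument is the vma, which __pte_alloc() only needs for transparent-hugepage splitting checks:

        #define pte_alloc_map(mm, vma, pmd, address)                   \
                ((unlikely(pmd_none(*(pmd))) &&                        \
                  __pte_alloc(mm, vma, pmd, address)) ?                \
                        NULL : pte_offset_map(pmd, address))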
@@ -144,14 +112,14 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 
         /* Get the top-level page table entry. */
         pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);
-        if (!pgd_present(*pgd))
-                return NULL;
 
         /* We don't have four levels. */
         pud = pud_offset(pgd, addr);
 #ifndef __PAGETABLE_PUD_FOLDED
 # error support fourth page table level
 #endif
+        if (!pud_present(*pud))
+                return NULL;
 
         /* Check for an L0 huge PTE, if we have three levels. */
 #ifndef __PAGETABLE_PMD_FOLDED
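Net effect of this last hunk: the presence test moves from the pgd, where the stubs quoted earlier make it pass unconditionally, down to the pud. Because of the same folding, pud_offset() is just a cast of the pgd pointer, so *pud is the real top-level entry and pud_present() finally tests an actual Present bit. The relevant nopud stub, again quoted approximately:

        /* nopud folding: the "pud" is the pgd slot itself, so a
         * pud_present(*pud) check examines the real top-level entry. */
        static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
        {
                return (pud_t *)pgd;
        }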