MIPS: Shrink the size of tlb handler

By combining swapper_pg_dir and module_pg_dir, several conditionals can
be eliminated from the TLB exception handler.  The two can be combined
because the virtual addresses returned by vmalloc() fall at the bottom
of the shared page directory's range, while those returned by
module_alloc() fall at the top.  This also fixes a bug in vmalloc() that
was triggered when the returned address was not covered by the first
pgd entry: pgd_offset_k() used to index that first entry regardless of
the address.

Signed-off-by: Wu Fei <at.wufei@gmail.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Wu Fei, 2009-09-03 22:29:53 +08:00, committed by Ralf Baechle
commit e0cc87f594, parent a7bcb1ae60
4 changed files with 3 additions and 63 deletions
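
To make the address arithmetic behind this concrete, the sketch below mirrors the pgd-slot computation the refill handler's vmalloc slow path sets up after this change: the faulting address is taken relative to VMALLOC_START, shifted down to pgd granularity and masked to the table size. The constants are assumptions for a typical 4KB-page, three-level configuration (PGDIR_SHIFT = 30, PTRS_PER_PGD = 1024) with the usual segment bases; pgd_slot() is an illustrative helper, not kernel code.

/* Standalone sketch, not kernel code: shows why vmalloc() and module_alloc()
 * addresses can share one pgd.  Constants assume a 4KB-page, three-level
 * layout; other page sizes change the numbers but not the picture. */
#include <stdio.h>

#define PGDIR_SHIFT	30			/* each pgd slot maps 1 GiB here */
#define PTRS_PER_PGD	1024UL
#define VMALLOC_START	0xc000000000000000UL	/* MAP_BASE, mapped kernel segment */
#define MODULE_START	0xffffffffc0000000UL	/* CKSSEG */

/* Slot picked by the vmalloc slow path: offset from VMALLOC_START,
 * shifted down to pgd granularity, wrapped to the table size. */
static unsigned long pgd_slot(unsigned long badvaddr)
{
	return ((badvaddr - VMALLOC_START) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
}

int main(void)
{
	/* vmalloc() addresses sit just above VMALLOC_START: low slots. */
	printf("vmalloc slot: %lu\n", pgd_slot(VMALLOC_START + (5UL << 30)));	/* 5 */
	/* module_alloc() addresses live in CKSSEG: they wrap to the top slot. */
	printf("module  slot: %lu\n", pgd_slot(MODULE_START));			/* 1023 */
	/* VMALLOC_END is pulled down by (1UL << 32), i.e. the last four 1 GiB
	 * slots, so vmalloc() can never collide with the module area. */
	printf("slots reserved for modules: %lu\n", (1UL << 32) >> PGDIR_SHIFT);	/* 4 */
	return 0;
}

With that layout vmalloc() is confined to the lower slots, the sign-extended 32-bit compatibility segments (including the module area) map to the top four, and a single swapper_pg_dir serves both.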

@@ -109,13 +109,13 @@
 #define VMALLOC_START	MAP_BASE
 #define VMALLOC_END	\
-	(VMALLOC_START + PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE)
+	(VMALLOC_START + \
+	 PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE - (1UL << 32))
 #if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
 	VMALLOC_START != CKSSEG
 /* Load modules into 32bit-compatible segment. */
 #define MODULE_START	CKSSEG
 #define MODULE_END	(FIXADDR_START-2*PAGE_SIZE)
-extern pgd_t module_pg_dir[PTRS_PER_PGD];
 #endif

 #define pte_ERROR(e) \
@@ -188,12 +188,7 @@ static inline void pud_clear(pud_t *pudp)
 #define __pmd_offset(address)	pmd_index(address)

 /* to find an entry in a kernel page-table-directory */
-#ifdef MODULE_START
-#define pgd_offset_k(address) \
-	((address) >= MODULE_START ? module_pg_dir : pgd_offset(&init_mm, 0UL))
-#else
-#define pgd_offset_k(address) pgd_offset(&init_mm, 0UL)
-#endif
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)

 #define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
 #define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
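
The pgd_offset_k() change just above is the vmalloc() fix mentioned in the commit message: the old macro ignored its argument and always returned the first slot of swapper_pg_dir (or module_pg_dir for module addresses), so a vmalloc() address beyond the first pgd entry was looked up in the wrong slot. A minimal before/after model, reusing the assumed constants from the earlier sketch (the old_/new_ helper names are hypothetical, not kernel code):

/* Illustration only: models the old and new pgd_offset_k() as slot
 * indices into swapper_pg_dir (assumed 4KB-page layout as above). */
#include <stdio.h>

#define PGDIR_SHIFT	30
#define PTRS_PER_PGD	1024UL
#define pgd_index(a)	(((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/* Old macro: pgd_offset(&init_mm, 0UL) -- the address was never used,
 * so every non-module lookup hit slot 0. */
static unsigned long old_pgd_offset_k(unsigned long addr)
{
	(void)addr;
	return pgd_index(0UL);
}

/* New macro: pgd_offset(&init_mm, address) -- indexed by the real address. */
static unsigned long new_pgd_offset_k(unsigned long addr)
{
	return pgd_index(addr);
}

int main(void)
{
	/* A vmalloc() address 5 GiB above VMALLOC_START belongs in slot 5,
	 * the same slot the TLB refill handler will walk. */
	unsigned long addr = 0xc000000000000000UL + (5UL << 30);

	printf("old: slot %lu, new: slot %lu\n",
	       old_pgd_offset_k(addr), new_pgd_offset_k(addr));	/* old: 0, new: 5 */
	return 0;
}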


@@ -475,9 +475,6 @@ unsigned long pgd_current[NR_CPUS];
  */
 pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
 #ifdef CONFIG_64BIT
-#ifdef MODULE_START
-pgd_t module_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
-#endif
 pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
 #endif
 pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);

@@ -59,9 +59,6 @@ void __init pagetable_init(void)

 	/* Initialize the entire pgd. */
 	pgd_init((unsigned long)swapper_pg_dir);
-#ifdef MODULE_START
-	pgd_init((unsigned long)module_pg_dir);
-#endif
 	pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);

 	pgd_base = swapper_pg_dir;

@@ -499,11 +499,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	 * The vmalloc handling is not in the hotpath.
 	 */
 	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
-#ifdef MODULE_START
-	uasm_il_bltz(p, r, tmp, label_module_alloc);
-#else
 	uasm_il_bltz(p, r, tmp, label_vmalloc);
-#endif
 	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */

 #ifdef CONFIG_SMP
@@ -556,52 +552,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 {
 	long swpd = (long)swapper_pg_dir;

-#ifdef MODULE_START
-	long modd = (long)module_pg_dir;
-
-	uasm_l_module_alloc(l, *p);
-	/*
-	 * Assumption:
-	 *  VMALLOC_START >= 0xc000000000000000UL
-	 *  MODULE_START >= 0xe000000000000000UL
-	 */
-	UASM_i_SLL(p, ptr, bvaddr, 2);
-	uasm_il_bgez(p, r, ptr, label_vmalloc);
-
-	if (uasm_in_compat_space_p(MODULE_START) &&
-	    !uasm_rel_lo(MODULE_START)) {
-		uasm_i_lui(p, ptr, uasm_rel_hi(MODULE_START)); /* delay slot */
-	} else {
-		/* unlikely configuration */
-		uasm_i_nop(p); /* delay slot */
-		UASM_i_LA(p, ptr, MODULE_START);
-	}
-	uasm_i_dsubu(p, bvaddr, bvaddr, ptr);
-
-	if (uasm_in_compat_space_p(modd) && !uasm_rel_lo(modd)) {
-		uasm_il_b(p, r, label_vmalloc_done);
-		uasm_i_lui(p, ptr, uasm_rel_hi(modd));
-	} else {
-		UASM_i_LA_mostly(p, ptr, modd);
-		uasm_il_b(p, r, label_vmalloc_done);
-		if (uasm_in_compat_space_p(modd))
-			uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(modd));
-		else
-			uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(modd));
-	}
-
 	uasm_l_vmalloc(l, *p);
-	if (uasm_in_compat_space_p(MODULE_START) &&
-	    !uasm_rel_lo(MODULE_START) &&
-	    MODULE_START << 32 == VMALLOC_START)
-		uasm_i_dsll32(p, ptr, ptr, 0);	/* typical case */
-	else
-		UASM_i_LA(p, ptr, VMALLOC_START);
-#else
-	uasm_l_vmalloc(l, *p);
 	UASM_i_LA(p, ptr, VMALLOC_START);
-#endif
 	uasm_i_dsubu(p, bvaddr, bvaddr, ptr);

 	if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
 		uasm_il_b(p, r, label_vmalloc_done);