remove unused flush_tlb_pgtables

Nobody uses flush_tlb_pgtables anymore; this patch removes all
remaining traces of it from all architectures.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author:     Benjamin Herrenschmidt
AuthorDate: 2007-10-18 23:39:14 -07:00
Committer:  Linus Torvalds
parent 22124c9999
commit 1c7037db50
26 changed files with 2 additions and 195 deletions
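
For context: every definition of flush_tlb_pgtables() removed below is a
no-op (or a BUG() stub on the no-MMU ports), and the interface has a
single generic call site, at the end of free_pgd_range().  The following
is a minimal sketch of the pattern being deleted, assembled from the
hunks below; it is illustrative, not a complete quotation of any one
file.

    /* Typical per-arch stub (avr32, sh, s390, i386, ...): */
    static inline void flush_tlb_pgtables(struct mm_struct *mm,
                                          unsigned long start, unsigned long end)
    {
            /* Nothing to do */
    }

    /* The sole generic caller, removed from mm/memory.c by this patch;
     * 'tlb' is the mmu_gather for the unmap operation: */
    if (!(*tlb)->fullmm)
            flush_tlb_pgtables((*tlb)->mm, start, end);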


@@ -87,30 +87,7 @@ changes occur:
 
 	This is used primarily during fault processing.
 
-5) void flush_tlb_pgtables(struct mm_struct *mm,
-			   unsigned long start, unsigned long end)
-
-	The software page tables for address space 'mm' for virtual
-	addresses in the range 'start' to 'end-1' are being torn down.
-
-	Some platforms cache the lowest level of the software page tables
-	in a linear virtually mapped array, to make TLB miss processing
-	more efficient.  On such platforms, since the TLB is caching the
-	software page table structure, it needs to be flushed when parts
-	of the software page table tree are unlinked/freed.
-
-	Sparc64 is one example of a platform which does this.
-
-	Usually, when munmap()'ing an area of user virtual address
-	space, the kernel leaves the page table parts around and just
-	marks the individual pte's as invalid.  However, if very large
-	portions of the address space are unmapped, the kernel frees up
-	those portions of the software page tables to prevent potential
-	excessive kernel memory usage caused by erratic mmap/mmunmap
-	sequences.  It is at these times that flush_tlb_pgtables will
-	be invoked.
-
-6) void update_mmu_cache(struct vm_area_struct *vma,
+5) void update_mmu_cache(struct vm_area_struct *vma,
 			 unsigned long address, pte_t pte)
 
 	At the end of every page fault, this routine is invoked to
@@ -123,7 +100,7 @@ changes occur:
 	translations for software managed TLB configurations.
 	The sparc64 port currently does this.
 
-7) void tlb_migrate_finish(struct mm_struct *mm)
+6) void tlb_migrate_finish(struct mm_struct *mm)
 
 	This interface is called at the end of an explicit
 	process migration.  This interface provides a hook
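
The "linear virtually mapped array" of page tables that the removed
documentation above describes can be pictured with the sketch below.
It is purely illustrative and belongs to no port; EX_PAGE_SHIFT,
EX_VPTE_BASE, and ex_vpte_addr() are invented for the example.

    #define EX_PAGE_SHIFT 13                    /* assumed 8 KB pages */
    #define EX_VPTE_BASE  0xfffffffe00000000UL  /* hypothetical base of the linear PTE array */

    /* The PTE for 'vaddr' lives at a virtual address computed from
     * vaddr itself, so a software TLB-miss handler can fetch it with a
     * single load.  Because that load is itself translated by the TLB,
     * stale translations of this region had to be flushed when page
     * tables were freed; that was the job of flush_tlb_pgtables(). */
    static inline unsigned long *ex_vpte_addr(unsigned long vaddr)
    {
            return (unsigned long *)(EX_VPTE_BASE +
                    (vaddr >> EX_PAGE_SHIFT) * sizeof(unsigned long));
    }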


@@ -92,17 +92,6 @@ flush_tlb_other(struct mm_struct *mm)
 	if (*mmc) *mmc = 0;
 }
 
-/* Flush a specified range of user mapping page tables from TLB.
-   Although Alpha uses VPTE caches, this can be a nop, as Alpha does
-   not have finegrained tlb flushing, so it will flush VPTE stuff
-   during next flush_tlb_range.  */
-static inline void
-flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
-		   unsigned long end)
-{
-}
-
 #ifndef CONFIG_SMP
 /* Flush everything (kernel mapping may also have changed
    due to vmalloc/vfree). */


@@ -463,11 +463,6 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
  */
 extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
 
-/*
- * ARM processors do not cache TLB tables in RAM.
- */
-#define flush_tlb_pgtables(mm,start,end)	do { } while (0)
-
 #endif
 
 #endif /* CONFIG_MMU */


@@ -19,7 +19,6 @@
  * - flush_tlb_page(vma, vmaddr) flushes one page
  * - flush_tlb_range(vma, start, end) flushes a range of pages
  * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  */
 extern void flush_tlb(void);
 extern void flush_tlb_all(void);
@@ -29,12 +28,6 @@ extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
 extern void __flush_tlb_page(unsigned long asid, unsigned long page);
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	/* Nothing to do */
-}
-
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #endif /* __ASM_AVR32_TLBFLUSH_H */


@@ -53,10 +53,4 @@ static inline void flush_tlb_kernel_page(unsigned long addr)
 	BUG();
 }
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	BUG();
-}
-
 #endif


@@ -38,13 +38,6 @@ static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long st
 	flush_tlb_mm(vma->vm_mm);
 }
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	/* CRIS does not keep any page table caches in TLB */
-}
-
 static inline void flush_tlb(void)
 {
 	flush_tlb_mm(current->mm);


@@ -57,7 +57,6 @@ do { \
 #define __flush_tlb_global()			flush_tlb_all()
 #define flush_tlb()				flush_tlb_all()
 #define flush_tlb_kernel_range(start, end)	flush_tlb_all()
-#define flush_tlb_pgtables(mm,start,end)	do { } while(0)
 
 #else
 
@@ -66,7 +65,6 @@ do { \
 #define flush_tlb_mm(mm)			BUG()
 #define flush_tlb_page(vma,addr)		BUG()
 #define flush_tlb_range(mm,start,end)		BUG()
-#define flush_tlb_pgtables(mm,start,end)	BUG()
 #define flush_tlb_kernel_range(start, end)	BUG()
 
 #endif


@@ -52,10 +52,4 @@ static inline void flush_tlb_kernel_page(unsigned long addr)
 	BUG();
 }
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	BUG();
-}
-
 #endif /* _H8300_TLBFLUSH_H */


@@ -83,19 +83,6 @@ flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
 #endif
 }
 
-/*
- * Flush the TLB entries mapping the virtually mapped linear page
- * table corresponding to address range [START-END).
- */
-static inline void
-flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
-{
-	/*
-	 * Deprecated.  The virtual page table is now flushed via the normal gather/flush
-	 * interface (see tlb.h).
-	 */
-}
-
 /*
  * Flush the local TLB.  Invoked from another cpu using an IPI.
  */


@@ -12,7 +12,6 @@
  * - flush_tlb_page(vma, vmaddr) flushes one page
  * - flush_tlb_range(vma, start, end) flushes a range of pages
  * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  */
 
 extern void local_flush_tlb_all(void);
@@ -93,8 +92,6 @@ static __inline__ void __flush_tlb_all(void)
 	);
 }
 
-#define flush_tlb_pgtables(mm, start, end)	do { } while (0)
-
 extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
 
 #endif /* _ASM_M32R_TLBFLUSH_H */


@@ -92,11 +92,6 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
 	flush_tlb_all();
 }
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-}
-
 #else
 
@@ -219,11 +214,6 @@ static inline void flush_tlb_kernel_page (unsigned long addr)
 	sun3_put_segmap (addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
 }
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-}
-
 #endif
 
 #endif /* _M68K_TLBFLUSH_H */


@@ -52,10 +52,4 @@ static inline void flush_tlb_kernel_page(unsigned long addr)
 	BUG();
 }
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	BUG();
-}
-
 #endif /* _M68KNOMMU_TLBFLUSH_H */


@@ -11,7 +11,6 @@
  * - flush_tlb_page(vma, vmaddr) flushes one page
  * - flush_tlb_range(vma, start, end) flushes a range of pages
  * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  */
 extern void local_flush_tlb_all(void);
 extern void local_flush_tlb_mm(struct mm_struct *mm);
@@ -45,10 +44,4 @@ extern void flush_tlb_one(unsigned long vaddr);
 
 #endif /* CONFIG_SMP */
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	/* Nothing to do on MIPS. */
-}
-
 #endif /* __ASM_TLBFLUSH_H */


@@ -57,10 +57,6 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 #endif
 }
 
-extern __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
-{
-}
-
 static inline void flush_tlb_page(struct vm_area_struct *vma,
 	unsigned long addr)
 {


@@ -8,7 +8,6 @@
  * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
  * - flush_tlb_range(vma, start, end) flushes a range of pages
  * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -174,15 +173,5 @@ extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
  */
 extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
 
-/*
- * This is called in munmap when we have freed up some page-table
- * pages.  We don't need to do anything here, there's nothing special
- * about our page-table pages. -- paulus
- */
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-}
-
 #endif /*__KERNEL__ */
 #endif /* _ASM_POWERPC_TLBFLUSH_H */


@@ -14,7 +14,6 @@
  * - flush_tlb_page(vma, vmaddr) flushes one page
  * - flush_tlb_range(vma, start, end) flushes a range of pages
  * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  */
 
 /*
@@ -152,10 +151,4 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 
 #endif
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	/* S/390 does not keep any page table caches in TLB */
-}
-
 #endif /* _S390_TLBFLUSH_H */


@@ -9,7 +9,6 @@
  * - flush_tlb_page(vma, vmaddr) flushes one page
  * - flush_tlb_range(vma, start, end) flushes a range of pages
  * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  */
 extern void local_flush_tlb_all(void);
 extern void local_flush_tlb_mm(struct mm_struct *mm);
@@ -47,9 +46,4 @@ extern void flush_tlb_one(unsigned long asid, unsigned long page);
 
 #endif /* CONFIG_SMP */
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	/* Nothing to do */
-}
-
 #endif /* __ASM_SH_TLBFLUSH_H */


@@ -20,10 +20,6 @@ extern void flush_tlb_mm(struct mm_struct *mm);
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			    unsigned long end);
 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-}
-
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);


@@ -13,7 +13,6 @@
  * - flush_tlb_page(vma, vmaddr) flushes one page
  * - flush_tlb_range(vma, start, end) flushes a range of pages
  * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  */
 
 #ifdef CONFIG_SMP
 
@@ -42,11 +41,6 @@ BTFIXUPDEF_CALL(void, flush_tlb_mm, struct mm_struct *)
 BTFIXUPDEF_CALL(void, flush_tlb_range, struct vm_area_struct *, unsigned long, unsigned long)
 BTFIXUPDEF_CALL(void, flush_tlb_page, struct vm_area_struct *, unsigned long)
 
-// Thanks to Anton Blanchard, our pagetables became uncached in 2.4. Wee!
-// extern void flush_tlb_pgtables(struct mm_struct *mm,
-//                                unsigned long start, unsigned long end);
-#define flush_tlb_pgtables(mm, start, end)	do{ }while(0)
-
 #define flush_tlb_all() BTFIXUP_CALL(flush_tlb_all)()
 #define flush_tlb_mm(mm) BTFIXUP_CALL(flush_tlb_mm)(mm)
 #define flush_tlb_range(vma,start,end) BTFIXUP_CALL(flush_tlb_range)(vma,start,end)


@@ -41,11 +41,4 @@ do { flush_tsb_kernel_range(start,end); \
 
 #endif /* ! CONFIG_SMP */
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
-{
-	/* We don't use virtual page tables for TLB miss processing
-	 * any more.  Nowadays we use the TSB.
-	 */
-}
-
 #endif /* _SPARC64_TLBFLUSH_H */


@@ -17,7 +17,6 @@
  * - flush_tlb_page(vma, vmaddr) flushes one page
  * - flush_tlb_kernel_vm() flushes the kernel vm area
  * - flush_tlb_range(vma, start, end) flushes a range of pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  */
 
 extern void flush_tlb_all(void);
@@ -29,9 +28,4 @@ extern void flush_tlb_kernel_vm(void);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 extern void __flush_tlb_one(unsigned long addr);
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-}
-
 #endif


@@ -61,10 +61,4 @@ static inline void flush_tlb_kernel_page(unsigned long addr)
 	BUG ();
 }
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	BUG ();
-}
-
 #endif /* __V850_TLBFLUSH_H__ */


@@ -78,7 +78,6 @@
  * - flush_tlb_page(vma, vmaddr) flushes one page
  * - flush_tlb_range(vma, start, end) flushes a range of pages
  * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  * - flush_tlb_others(cpumask, mm, va) flushes a TLBs on other cpus
  *
  * ..but the i386 has somewhat limited tlb flushing capabilities,
@@ -166,10 +165,4 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 	flush_tlb_all();
 }
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	/* i386 does not keep any page table caches in TLB */
-}
-
 #endif /* _I386_TLBFLUSH_H */


@@ -31,7 +31,6 @@ static inline void __flush_tlb_all(void)
  * - flush_tlb_page(vma, vmaddr) flushes one page
  * - flush_tlb_range(vma, start, end) flushes a range of pages
  * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  *
  * x86-64 can only flush individual pages or full VMs. For a range flush
  * we always do the full VM. Might be worth trying if for a small
@@ -98,12 +97,4 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 	flush_tlb_all();
 }
 
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	/* x86_64 does not keep any page table caches in a software TLB.
-	   The CPUs do in their hardware TLBs, but they are handled
-	   by the normal TLB flushing algorithms. */
-}
-
 #endif /* _X8664_TLBFLUSH_H */


@@ -41,17 +41,6 @@ extern void flush_tlb_range(struct vm_area_struct*,unsigned long,unsigned long);
 
 #define flush_tlb_kernel_range(start,end) flush_tlb_all()
 
-/* This is calld in munmap when we have freed up some page-table pages.
- * We don't need to do anything here, there's nothing special about our
- * page-table pages.
- */
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-}
-
 /* TLB operations. */
 
 static inline unsigned long itlb_probe(unsigned long addr)


@@ -259,9 +259,6 @@ void free_pgd_range(struct mmu_gather **tlb,
 			continue;
 		free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
 	} while (pgd++, addr = next, addr != end);
-
-	if (!(*tlb)->fullmm)
-		flush_tlb_pgtables((*tlb)->mm, start, end);
 }
 
 void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,