mirror of
https://github.com/adulau/aha.git
synced 2024-12-28 03:36:19 +00:00
sh: Add kmap_coherent()/kunmap_coherent() interface for SH-4.
This wires up kmap_coherent() and kunmap_coherent() on SH-4, and moves away from the p3map_mutex and reserved P3 space, opting to use fixmaps for colouring instead. The copy_user_page()/clear_user_page() implementations are moved to this, which fixes the nasty blowups with spinlock debugging as a result of having some of these calls nested under the page table lock. Signed-off-by: Paul Mundt <lethal@linux-sh.org>
This commit is contained in:
parent
f695baf2df
commit
8cf1a74305
3 changed files with 39 additions and 55 deletions
|
@@ -77,16 +77,8 @@ static void __init emit_cache_params(void)
|
|||
/*
|
||||
* SH-4 has virtually indexed and physically tagged cache.
|
||||
*/
|
||||
|
||||
/* Worst case assumed to be 64k cache, direct-mapped i.e. 4 synonym bits. */
|
||||
#define MAX_P3_MUTEXES 16
|
||||
|
||||
struct mutex p3map_mutex[MAX_P3_MUTEXES];
|
||||
|
||||
void __init p3_cache_init(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
compute_alias(&current_cpu_data.icache);
|
||||
compute_alias(&current_cpu_data.dcache);
|
||||
|
||||
|
@@ -109,9 +101,6 @@ void __init p3_cache_init(void)
|
|||
|
||||
if (ioremap_page_range(P3SEG, P3SEG + (PAGE_SIZE * 4), 0, PAGE_KERNEL))
|
||||
panic("%s failed.", __FUNCTION__);
|
||||
|
||||
for (i = 0; i < current_cpu_data.dcache.n_aliases; i++)
|
||||
mutex_init(&p3map_mutex[i]);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@@ -2,7 +2,7 @@
|
|||
* arch/sh/mm/pg-sh4.c
|
||||
*
|
||||
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka
|
||||
* Copyright (C) 2002 - 2005 Paul Mundt
|
||||
* Copyright (C) 2002 - 2007 Paul Mundt
|
||||
*
|
||||
* Released under the terms of the GNU GPL v2.0.
|
||||
*/
|
||||
|
@@ -11,10 +11,35 @@
|
|||
#include <asm/mmu_context.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
extern struct mutex p3map_mutex[];
|
||||
|
||||
#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)
|
||||
|
||||
/*
 * kmap_coherent() - map @page at a kernel virtual address whose cache
 * colour matches the user address @addr, so that accesses through the
 * returned pointer hit the same SH-4 dcache alias lines as the user
 * mapping (the cache is virtually indexed, physically tagged — see the
 * comment earlier in this diff).
 *
 * Disables preemption for the lifetime of the mapping; every call must
 * be paired with kunmap_coherent().
 *
 * NOTE(review): the pte built here is handed straight to
 * update_mmu_cache() and never written to a page table, so the mapping
 * appears to live only in the TLB — confirm against this port's
 * update_mmu_cache()/fixmap implementation.
 */
static inline void *kmap_coherent(struct page *page, unsigned long addr)
|
||||
{
|
||||
enum fixed_addresses idx;
|
||||
unsigned long vaddr, flags;
|
||||
pte_t pte;
|
||||
|
||||
/* Pin the task to keep the per-colour fixmap slot ours until kunmap. */
inc_preempt_count();
|
||||
|
||||
/* Cache colour of @addr, extracted via the dcache alias mask. */
idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
|
||||
/* Fixmap slot reserved for this colour (slots counted down from FIX_CMAP_END). */
vaddr = __fix_to_virt(FIX_CMAP_END - idx);
|
||||
pte = mk_pte(page, PAGE_KERNEL);
|
||||
|
||||
/*
 * Evict any stale translation left at this slot by a previous
 * mapping of the same colour. IRQs are masked around the flush —
 * presumably to keep the get_asid()/flush pair atomic; verify.
 */
local_irq_save(flags);
|
||||
flush_tlb_one(get_asid(), vaddr);
|
||||
local_irq_restore(flags);
|
||||
|
||||
/* Install the new translation (NULL vma: kernel mapping). */
update_mmu_cache(NULL, vaddr, pte);
|
||||
|
||||
return (void *)vaddr;
|
||||
}
|
||||
|
||||
/*
 * kunmap_coherent() - tear down a kmap_coherent() mapping.
 *
 * Note: the @page argument is unused (callers below actually pass the
 * vaddr returned by kmap_coherent()); nothing is unmapped here — the
 * fixmap TLB entry is simply left behind to be flushed by the next
 * kmap_coherent() of the same colour. NOTE(review): confirm a stale
 * entry is harmless in the interim.
 */
static inline void kunmap_coherent(struct page *page)
|
||||
{
|
||||
/* Balance the inc_preempt_count() in kmap_coherent(). */
dec_preempt_count();
|
||||
/* Preemption is legal again; run any reschedule that became pending. */
preempt_check_resched();
|
||||
}
|
||||
|
||||
/*
|
||||
* clear_user_page
|
||||
* @to: P1 address
|
||||
|
@@ -27,25 +52,9 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
|
|||
if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
|
||||
clear_page(to);
|
||||
else {
|
||||
unsigned long phys_addr = PHYSADDR(to);
|
||||
unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
|
||||
pgd_t *pgd = pgd_offset_k(p3_addr);
|
||||
pud_t *pud = pud_offset(pgd, p3_addr);
|
||||
pmd_t *pmd = pmd_offset(pud, p3_addr);
|
||||
pte_t *pte = pte_offset_kernel(pmd, p3_addr);
|
||||
pte_t entry;
|
||||
unsigned long flags;
|
||||
|
||||
entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
|
||||
mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
|
||||
set_pte(pte, entry);
|
||||
local_irq_save(flags);
|
||||
flush_tlb_one(get_asid(), p3_addr);
|
||||
local_irq_restore(flags);
|
||||
update_mmu_cache(NULL, p3_addr, entry);
|
||||
__clear_user_page((void *)p3_addr, to);
|
||||
pte_clear(&init_mm, p3_addr, pte);
|
||||
mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
|
||||
void *vto = kmap_coherent(page, address);
|
||||
__clear_user_page(vto, to);
|
||||
kunmap_coherent(vto);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -63,25 +72,9 @@ void copy_user_page(void *to, void *from, unsigned long address,
|
|||
if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
|
||||
copy_page(to, from);
|
||||
else {
|
||||
unsigned long phys_addr = PHYSADDR(to);
|
||||
unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
|
||||
pgd_t *pgd = pgd_offset_k(p3_addr);
|
||||
pud_t *pud = pud_offset(pgd, p3_addr);
|
||||
pmd_t *pmd = pmd_offset(pud, p3_addr);
|
||||
pte_t *pte = pte_offset_kernel(pmd, p3_addr);
|
||||
pte_t entry;
|
||||
unsigned long flags;
|
||||
|
||||
entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
|
||||
mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
|
||||
set_pte(pte, entry);
|
||||
local_irq_save(flags);
|
||||
flush_tlb_one(get_asid(), p3_addr);
|
||||
local_irq_restore(flags);
|
||||
update_mmu_cache(NULL, p3_addr, entry);
|
||||
__copy_user_page((void *)p3_addr, from, to);
|
||||
pte_clear(&init_mm, p3_addr, pte);
|
||||
mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
|
||||
void *vfrom = kmap_coherent(page, address);
|
||||
__copy_user_page(vfrom, from, to);
|
||||
kunmap_coherent(vfrom);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -46,6 +46,9 @@
|
|||
* fix-mapped?
|
||||
*/
|
||||
enum fixed_addresses {
|
||||
#define FIX_N_COLOURS 16
|
||||
FIX_CMAP_BEGIN,
|
||||
FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS,
|
||||
#ifdef CONFIG_HIGHMEM
|
||||
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
|
||||
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
|
||||
|
@@ -53,8 +56,8 @@ enum fixed_addresses {
|
|||
__end_of_fixed_addresses
|
||||
};
|
||||
|
||||
extern void __set_fixmap (enum fixed_addresses idx,
|
||||
unsigned long phys, pgprot_t flags);
|
||||
extern void __set_fixmap(enum fixed_addresses idx,
|
||||
unsigned long phys, pgprot_t flags);
|
||||
|
||||
#define set_fixmap(idx, phys) \
|
||||
__set_fixmap(idx, phys, PAGE_KERNEL)
|
||||
|
@@ -106,5 +109,4 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
|
|||
BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
|
||||
return __virt_to_fix(vaddr);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
Loading…
Reference in a new issue