mirror of https://github.com/adulau/aha.git (synced 2024-12-28 11:46:19 +00:00)
x86: change_page_attr bandaids
- Disable CLFLUSH again; it is still broken. Always do WBINVD.
- Always flush in the i386 case, not only when there are deferred pages.

These are both brute-force inefficient fixes, to be improved next
release cycle.

The changes to i386 are a little more extensive than strictly needed
(some dead code added), but it is more similar to the x86-64 version
now and the dead code will be used soon.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:

parent 55181000cd
commit 018d2ad0cc

2 changed files with 22 additions and 15 deletions
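For orientation before the diff: the patch is choosing between two x86 cache-flush primitives. A minimal sketch of the two (helper names invented here, not part of the commit): wbinvd is privileged and writes back and invalidates the entire cache of the executing CPU, while clflush evicts a single cache line, which is why it is the cheap, targeted option the commit message wants to return to once the high-level code is ready.

    /* Sketch, GNU C on x86. wbinvd needs ring 0; clflush does not. */
    static inline void flush_whole_cache(void)
    {
            asm volatile("wbinvd" ::: "memory");            /* all lines, this CPU */
    }

    static inline void flush_one_line(const void *p)
    {
            asm volatile("clflush (%0)" :: "r" (p) : "memory");     /* one line */
    }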
@@ -68,14 +68,23 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
 	return base;
 }
 
-static void flush_kernel_map(void *arg)
+static void cache_flush_page(struct page *p)
 {
-	unsigned long adr = (unsigned long)arg;
+	unsigned long adr = (unsigned long)page_address(p);
+	int i;
+	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+		asm volatile("clflush (%0)" :: "r" (adr + i));
+}
 
-	if (adr && cpu_has_clflush) {
-		int i;
-		for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-			asm volatile("clflush (%0)" :: "r" (adr + i));
+static void flush_kernel_map(void *arg)
+{
+	struct list_head *lh = (struct list_head *)arg;
+	struct page *p;
+
+	/* High level code is not ready for clflush yet */
+	if (0 && cpu_has_clflush) {
+		list_for_each_entry (p, lh, lru)
+			cache_flush_page(p);
 	} else if (boot_cpu_data.x86_model >= 4)
 		wbinvd();
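The new cache_flush_page() above walks one page a cache line at a time. A user-space analogue of that loop, compilable with GCC or Clang on x86 (PAGE_SIZE and the 64-byte line size below are assumptions standing in for the kernel's boot_cpu_data.x86_clflush_size):

    #include <stdint.h>

    #define PAGE_SIZE       4096
    #define CLFLUSH_SIZE    64      /* assumed cache-line size */

    static void flush_page(void *page)
    {
            uintptr_t adr = (uintptr_t)page;
            unsigned int i;

            /* one clflush per cache line covers the whole page */
            for (i = 0; i < PAGE_SIZE; i += CLFLUSH_SIZE)
                    asm volatile("clflush (%0)" :: "r" (adr + i) : "memory");
    }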
@@ -181,9 +190,9 @@ __change_page_attr(struct page *page, pgprot_t prot)
 	return 0;
 }
 
-static inline void flush_map(void *adr)
+static inline void flush_map(struct list_head *l)
 {
-	on_each_cpu(flush_kernel_map, adr, 1, 1);
+	on_each_cpu(flush_kernel_map, l, 1, 1);
 }
 
 /*
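flush_map() now broadcasts the whole deferred list instead of a single address; the trailing 1, 1 are the retry and wait flags of the four-argument on_each_cpu() this kernel generation uses, so the caller blocks until every CPU has run flush_kernel_map. The list travels through the opaque info pointer and is cast back inside the callback. A tiny sketch of that hand-off (struct and callback invented for illustration):

    struct list_head { struct list_head *next, *prev; };

    static void flush_callback(void *info)
    {
            struct list_head *lh = info;    /* recover the typed list head */
            (void)lh;                       /* walk the deferred pages here */
    }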
@@ -225,11 +234,8 @@ void global_flush_tlb(void)
 	spin_lock_irq(&cpa_lock);
 	list_replace_init(&df_list, &l);
 	spin_unlock_irq(&cpa_lock);
-	if (!cpu_has_clflush)
-		flush_map(NULL);
+	flush_map(&l);
 	list_for_each_entry_safe(pg, next, &l, lru) {
-		if (cpu_has_clflush)
-			flush_map(page_address(pg));
 		__free_page(pg);
 	}
 }
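With flush_map() taking the list, global_flush_tlb() follows a classic detach-then-process shape: cut the deferred list loose under cpa_lock with list_replace_init(), flush once for the whole batch, then free the pages with no lock held. A user-space sketch of that pattern (pthread mutex and a plain singly linked list stand in for the spinlock and struct list_head):

    #include <pthread.h>
    #include <stdlib.h>

    struct page_node { struct page_node *next; };

    static pthread_mutex_t cpa_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct page_node *df_list;       /* deferred pages, lock-protected */

    static void flush_batch(struct page_node *l) { (void)l; }   /* flush_map(&l) analogue */

    static void global_flush(void)
    {
            struct page_node *l, *next;

            /* detach the whole list under the lock (list_replace_init analogue) */
            pthread_mutex_lock(&cpa_lock);
            l = df_list;
            df_list = NULL;
            pthread_mutex_unlock(&cpa_lock);

            flush_batch(l);                 /* one flush for the whole batch */
            for (; l != NULL; l = next) {   /* free outside the lock */
                    next = l->next;
                    free(l);
            }
    }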
@@ -74,10 +74,11 @@ static void flush_kernel_map(void *arg)
 	struct page *pg;
 
 	/* When clflush is available always use it because it is
-	   much cheaper than WBINVD */
-	if (!cpu_has_clflush)
+	   much cheaper than WBINVD. Disable clflush for now because
+	   the high level code is not ready yet */
+	if (1 || !cpu_has_clflush)
 		asm volatile("wbinvd" ::: "memory");
-	list_for_each_entry(pg, l, lru) {
+	else list_for_each_entry(pg, l, lru) {
 		void *adr = page_address(pg);
 		if (cpu_has_clflush)
 			cache_flush_page(adr);
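This last hunk is the x86-64 copy of flush_kernel_map(), in the second of the two changed files. Its if (1 || !cpu_has_clflush), like the i386 side's if (0 && cpu_has_clflush), is a deliberate kill switch rather than an #if 0: the clflush branch still compiles and stays type-checked as dead code, so re-enabling it next cycle only means deleting the constant. A trivial self-contained illustration (CLFLUSH_READY is a hypothetical stand-in for "the high level code is ready"):

    #include <stdio.h>

    #define CLFLUSH_READY 0

    int main(void)
    {
            if (1 || !CLFLUSH_READY)        /* always true: take the safe path */
                    puts("wbinvd path");
            else
                    puts("clflush path");   /* still compiled, never executed */
            return 0;
    }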