x86: PAT phys_mem_access_prot_allowed for dev/mem mmap
Introduce phys_mem_access_prot_allowed(), which checks whether the requested mapping is possible without any aliasing conflicts and returns success or failure accordingly. phys_mem_access_prot() by itself has no way to report failure; the ability to return an error is needed for PAT, where we may have aliasing conflicts. On x86, set up __HAVE_PHYS_MEM_ACCESS_PROT and move the x86-specific code out of /dev/mem into the arch-specific area.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent e045fb2a98
commit f0970c13b6
3 changed files with 59 additions and 30 deletions
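The mechanism this patch relies on is easy to miss when reading the hunks below: drivers/char/mem.c gains a weak default phys_mem_access_prot_allowed() that always allows the mapping, and an architecture can override it with a strong definition that vetoes the mmap or rewrites the protection bits, which is what the new x86 PAT code does. The following is a minimal userspace sketch of that weak-default/strong-override pattern, not kernel code: it assumes a GCC/Clang build on an ELF target, pgprot_t is a simplified stand-in struct, and mmap_mem_like() is a hypothetical caller modelled on the new check in mmap_mem().

/*
 * Editorial sketch, not kernel code: the weak-default / strong-override
 * pattern used by phys_mem_access_prot_allowed().  pgprot_t is a
 * simplified stand-in and mmap_mem_like() is a hypothetical caller.
 */
#include <errno.h>
#include <stdio.h>

typedef struct { unsigned long val; } pgprot_t;

/*
 * Weak default, mirroring the one added to drivers/char/mem.c: allow
 * every mapping and leave the protection bits untouched.  An arch
 * overrides it by providing a strong definition of the same symbol in
 * another object file, the way arch/x86/mm/pat.c now does.
 */
int __attribute__((weak))
phys_mem_access_prot_allowed(unsigned long pfn, unsigned long size,
			     pgprot_t *vma_prot)
{
	(void)pfn; (void)size; (void)vma_prot;
	return 1;			/* 1 == mapping allowed */
}

/* Caller shaped like the new check in mmap_mem(). */
static int mmap_mem_like(unsigned long pgoff, unsigned long size,
			 pgprot_t *page_prot)
{
	if (!phys_mem_access_prot_allowed(pgoff, size, page_prot))
		return -EINVAL;		/* aliasing conflict: refuse the mmap */
	/* ...would go on to apply phys_mem_access_prot() and remap... */
	return 0;
}

int main(void)
{
	pgprot_t prot = { 0 };

	printf("mmap_mem_like() -> %d\n", mmap_mem_like(0x1000, 4096, &prot));
	return 0;
}

With only the weak definition linked in, the call always succeeds; linking in a strong definition changes the outcome without touching the generic /dev/mem code, which is why the patch can move the x86 policy into arch code while other architectures keep the old behaviour.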
arch/x86/mm/pat.c
@@ -419,3 +419,42 @@ int free_memtype(u64 start, u64 end)
 
 	return err;
 }
+
+/* /dev/mem interface. Use the previous mapping */
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+				unsigned long size, pgprot_t vma_prot)
+{
+	return vma_prot;
+}
+
+int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
+				unsigned long size, pgprot_t *vma_prot)
+{
+
+	if (file->f_flags & O_SYNC) {
+		*vma_prot = pgprot_noncached(*vma_prot);
+		return 1;
+	}
+
+#ifdef CONFIG_X86_32
+	/*
+	 * On the PPro and successors, the MTRRs are used to set
+	 * memory types for physical addresses outside main memory,
+	 * so blindly setting UC or PWT on those pages is wrong.
+	 * For Pentiums and earlier, the surround logic should disable
+	 * caching for the high addresses through the KEN pin, but
+	 * we maintain the tradition of paranoia in this code.
+	 */
+	if (!pat_wc_enabled &&
+	    ! ( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
+		test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
+		test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
+		test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability)) &&
+	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
+		*vma_prot = pgprot_noncached(*vma_prot);
+		return 1;
+	}
+#endif
+
+	return 1;
+}
drivers/char/mem.c
@@ -41,36 +41,7 @@
  */
 static inline int uncached_access(struct file *file, unsigned long addr)
 {
-#if defined(__i386__) && !defined(__arch_um__)
-	/*
-	 * On the PPro and successors, the MTRRs are used to set
-	 * memory types for physical addresses outside main memory,
-	 * so blindly setting PCD or PWT on those pages is wrong.
-	 * For Pentiums and earlier, the surround logic should disable
-	 * caching for the high addresses through the KEN pin, but
-	 * we maintain the tradition of paranoia in this code.
-	 */
-	if (file->f_flags & O_SYNC)
-		return 1;
-	return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
-		  test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
-		  test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
-		  test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
-	  && addr >= __pa(high_memory);
-#elif defined(__x86_64__) && !defined(__arch_um__)
-	/*
-	 * This is broken because it can generate memory type aliases,
-	 * which can cause cache corruptions
-	 * But it is only available for root and we have to be bug-to-bug
-	 * compatible with i386.
-	 */
-	if (file->f_flags & O_SYNC)
-		return 1;
-	/* same behaviour as i386. PAT always set to cached and MTRRs control the
-	   caching behaviour.
-	   Hopefully a full PAT implementation will fix that soon. */
-	return 0;
-#elif defined(CONFIG_IA64)
+#if defined(CONFIG_IA64)
 	/*
 	 * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
 	 */
@@ -283,6 +254,12 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
 	return written;
 }
 
+int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
+	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
+{
+	return 1;
+}
+
 #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
 static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				     unsigned long size, pgprot_t vma_prot)
@@ -336,6 +313,10 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma)
 	if (!range_is_allowed(vma->vm_pgoff, size))
 		return -EPERM;
 
+	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
+						&vma->vm_page_prot))
+		return -EINVAL;
+
 	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
 						 size,
 						 vma->vm_page_prot);
include/asm-x86/pgtable.h
@@ -289,6 +289,15 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
 
+#ifndef __ASSEMBLY__
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+				unsigned long size, pgprot_t vma_prot);
+int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
+				unsigned long size, pgprot_t *vma_prot);
+#endif
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else  /* !CONFIG_PARAVIRT */