Move FAULT_FLAG_xyz into handle_mm_fault() callers
This allows the callers to now pass down the full set of FAULT_FLAG_xyz flags to handle_mm_fault(). All callers have been (mechanically) converted to the new calling convention; there's almost certainly room for architectures to clean up their code and then add FAULT_FLAG_RETRY when that support is added.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit d06063cc22
parent 30c9f3a9fa

25 changed files with 30 additions and 30 deletions
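Every caller below follows the same mechanical pattern: the boolean write-access argument becomes a FAULT_FLAG_xyz bitmask that, for now, carries only FAULT_FLAG_WRITE. A minimal caller-side sketch of that pattern (mm, vma, address, the write predicate and the out_of_memory label are assumed from the surrounding fault handler, which is elided here):

	unsigned int flags = write ? FAULT_FLAG_WRITE : 0;	/* was: a bare write boolean */
	int fault;

	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
	}
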
@@ -146,7 +146,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	/* If for any reason at all we couldn't handle the fault,
 	   make sure we exit gracefully rather than endlessly redo
 	   the fault.  */
-	fault = handle_mm_fault(mm, vma, address, cause > 0);
+	fault = handle_mm_fault(mm, vma, address, cause > 0 ? FAULT_FLAG_WRITE : 0);
 	up_read(&mm->mmap_sem);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)

@@ -208,7 +208,7 @@ good_area:
 	 * than endlessly redo the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, fsr & (1 << 11));
+	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & (1 << 11)) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;

@@ -133,7 +133,7 @@ good_area:
 	 * fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;

@@ -163,7 +163,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	 * the fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, writeaccess & 1);
+	fault = handle_mm_fault(mm, vma, address, (writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;

@@ -163,7 +163,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, ear0, write);
+	fault = handle_mm_fault(mm, vma, ear0, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;

@@ -154,7 +154,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0);
+	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		/*
 		 * We ran out of memory, or some other thing happened

@@ -196,7 +196,7 @@ survive:
 	 */
 	addr = (address & PAGE_MASK);
 	set_thread_fault_code(error_code);
-	fault = handle_mm_fault(mm, vma, addr, write);
+	fault = handle_mm_fault(mm, vma, addr, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;

@@ -155,7 +155,7 @@ good_area:
 	 */
 
 survive:
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 #ifdef DEBUG
 	printk("handle_mm_fault returns %d\n",fault);
 #endif

@@ -232,7 +232,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, is_write);
+	fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;

@@ -102,7 +102,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;

@@ -258,7 +258,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;

@@ -202,7 +202,7 @@ good_area:
 	 * fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) != 0);
+	fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		/*
 		 * We hit a shared mapping outside of the file, or some

@@ -302,7 +302,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	ret = handle_mm_fault(mm, vma, address, is_write);
+	ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(ret & VM_FAULT_ERROR)) {
 		if (ret & VM_FAULT_OOM)
 			goto out_of_memory;

@@ -70,7 +70,7 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 	}
 
 	ret = 0;
-	*flt = handle_mm_fault(mm, vma, ea, is_write);
+	*flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(*flt & VM_FAULT_ERROR)) {
 		if (*flt & VM_FAULT_OOM) {
 			ret = -ENOMEM;

@@ -66,7 +66,7 @@ static int __handle_fault(struct mm_struct *mm, unsigned long address,
 	}
 
 survive:
-	fault = handle_mm_fault(mm, vma, address, write_access);
+	fault = handle_mm_fault(mm, vma, address, write_access ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;

@@ -352,7 +352,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM) {
 			up_read(&mm->mmap_sem);

@@ -133,7 +133,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;

@@ -187,7 +187,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;

@@ -241,7 +241,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;

@@ -484,7 +484,7 @@ good_area:
 		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
 	}
-	switch (handle_mm_fault(mm, vma, address, write)) {
+	switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
 	case VM_FAULT_SIGBUS:
 	case VM_FAULT_OOM:
 		goto do_sigbus;

@@ -398,7 +398,7 @@ good_area:
 			goto bad_area;
 	}
 
-	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE));
+	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;

@@ -65,7 +65,7 @@ good_area:
 	do {
 		int fault;
 
-		fault = handle_mm_fault(mm, vma, address, is_write);
+		fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
 		if (unlikely(fault & VM_FAULT_ERROR)) {
 			if (fault & VM_FAULT_OOM) {
 				goto out_of_memory;

@@ -1113,7 +1113,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault:
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		mm_fault_error(regs, error_code, address, fault);

@@ -106,7 +106,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, is_write);
+	fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;

@@ -810,11 +810,11 @@ extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
 
 #ifdef CONFIG_MMU
 extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-			unsigned long address, int write_access);
+			unsigned long address, unsigned int flags);
 #else
 static inline int handle_mm_fault(struct mm_struct *mm,
 			struct vm_area_struct *vma, unsigned long address,
-			int write_access)
+			unsigned int flags)
 {
 	/* should never happen if there's no MMU */
 	BUG();

@@ -1310,8 +1310,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			cond_resched();
 			while (!(page = follow_page(vma, start, foll_flags))) {
 				int ret;
-				ret = handle_mm_fault(mm, vma, start,
-					foll_flags & FOLL_WRITE);
+
+				/* FOLL_WRITE matches FAULT_FLAG_WRITE! */
+				ret = handle_mm_fault(mm, vma, start, foll_flags & FOLL_WRITE);
 				if (ret & VM_FAULT_ERROR) {
 					if (ret & VM_FAULT_OOM)
 						return i ? i : -ENOMEM;

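The hunk above leans on FOLL_WRITE having the same bit value as FAULT_FLAG_WRITE, which is what the new comment records. Purely as an illustration, and not part of this commit, a caller wanting that assumption checked at build time could add inside the function body:

	/* Not in this commit: assert the FOLL_WRITE/FAULT_FLAG_WRITE
	 * correspondence that the comment above depends on. */
	BUILD_BUG_ON(FOLL_WRITE != FAULT_FLAG_WRITE);
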
@@ -2958,13 +2959,12 @@ unlock:
  * By the time we get here, we already hold the mm semaphore
  */
 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, int write_access)
+		unsigned long address, unsigned int flags)
 {
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
-	unsigned int flags = write_access ? FAULT_FLAG_WRITE : 0;
 
 	__set_current_state(TASK_RUNNING);
 