clarify get_user_pages() prototype
Currently the 4th parameter of get_user_pages() is called len, but it's in pages, not bytes. Rename it to nr_pages to avoid future confusion.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ab420e6d9c
commit 9d73777e50

3 changed files with 18 additions and 22 deletions
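To make the unit change concrete, here is a minimal, hypothetical caller sketch (not part of this commit; the helper name pin_user_buffer and its parameters are illustrative only). It pins a user buffer whose length is given in bytes, using the 2.6.31-era prototype shown in the diff below, and converts the byte length into the page count that the renamed nr_pages argument expects.

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Hypothetical example, not from this commit: pin "size" bytes of user
 * memory starting at uaddr.  get_user_pages() counts pages, not bytes,
 * so the byte length is converted to a page count first.
 */
static int pin_user_buffer(unsigned long uaddr, size_t size,
                           struct page **pages)
{
        unsigned long first, last;
        int nr_pages, pinned;

        if (!size)
                return 0;

        first = uaddr >> PAGE_SHIFT;
        last = (uaddr + size - 1) >> PAGE_SHIFT;
        nr_pages = last - first + 1;            /* pages, not bytes */

        down_read(&current->mm->mmap_sem);
        pinned = get_user_pages(current, current->mm,
                                uaddr & PAGE_MASK,      /* page-aligned start */
                                nr_pages,               /* number of pages to pin */
                                1,                      /* write */
                                0,                      /* force */
                                pages, NULL);
        up_read(&current->mm->mmap_sem);

        return pinned;  /* may be fewer than nr_pages, or -errno */
}

As the kernel-doc changed below spells out, the return value is the number of pages actually pinned, which may be fewer than nr_pages, or -errno if nothing was pinned; each returned page must eventually be released with put_page().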
include/linux/mm.h (2 changed lines)

@@ -826,7 +826,7 @@ extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
 
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-                        unsigned long start, int len, int write, int force,
+                        unsigned long start, int nr_pages, int write, int force,
                         struct page **pages, struct vm_area_struct **vmas);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                         struct page **pages);
mm/memory.c (26 changed lines)

@@ -1207,8 +1207,8 @@ static inline int use_zero_page(struct vm_area_struct *vma)
 
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-                     unsigned long start, int len, int flags,
-                     struct page **pages, struct vm_area_struct **vmas)
+                     unsigned long start, int nr_pages, int flags,
+                     struct page **pages, struct vm_area_struct **vmas)
 {
         int i;
         unsigned int vm_flags = 0;
@@ -1217,7 +1217,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
         int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
         int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
 
-        if (len <= 0)
+        if (nr_pages <= 0)
                 return 0;
         /*
          * Require read or write permissions.
@@ -1269,7 +1269,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                         vmas[i] = gate_vma;
                 i++;
                 start += PAGE_SIZE;
-                len--;
+                nr_pages--;
                 continue;
         }
 
@@ -1280,7 +1280,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 
                 if (is_vm_hugetlb_page(vma)) {
                         i = follow_hugetlb_page(mm, vma, pages, vmas,
-                                        &start, &len, i, write);
+                                        &start, &nr_pages, i, write);
                         continue;
                 }
 
@@ -1357,9 +1357,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                 vmas[i] = vma;
                         i++;
                         start += PAGE_SIZE;
-                        len--;
-                } while (len && start < vma->vm_end);
-        } while (len);
+                        nr_pages--;
+                } while (nr_pages && start < vma->vm_end);
+        } while (nr_pages);
         return i;
 }
 
@@ -1368,7 +1368,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  * @tsk: task_struct of target task
  * @mm: mm_struct of target mm
  * @start: starting user address
- * @len: number of pages from start to pin
+ * @nr_pages: number of pages from start to pin
  * @write: whether pages will be written to by the caller
  * @force: whether to force write access even if user mapping is
  *         readonly. This will result in the page being COWed even
@@ -1380,7 +1380,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  *         Or NULL if the caller does not require them.
  *
  * Returns number of pages pinned. This may be fewer than the number
- * requested. If len is 0 or negative, returns 0. If no pages
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
  * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
@@ -1414,7 +1414,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 * See also get_user_pages_fast, for performance critical applications.
 */
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-                unsigned long start, int len, int write, int force,
+                unsigned long start, int nr_pages, int write, int force,
                 struct page **pages, struct vm_area_struct **vmas)
 {
         int flags = 0;
@@ -1424,9 +1424,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
         if (force)
                 flags |= GUP_FLAGS_FORCE;
 
-        return __get_user_pages(tsk, mm,
-                                start, len, flags,
-                                pages, vmas);
+        return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
 
 EXPORT_SYMBOL(get_user_pages);
mm/nommu.c (12 changed lines)

@@ -173,8 +173,8 @@ unsigned int kobjsize(const void *objp)
 }
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-                     unsigned long start, int len, int flags,
-                     struct page **pages, struct vm_area_struct **vmas)
+                     unsigned long start, int nr_pages, int flags,
+                     struct page **pages, struct vm_area_struct **vmas)
 {
         struct vm_area_struct *vma;
         unsigned long vm_flags;
@@ -189,7 +189,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
         vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
         vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
 
-        for (i = 0; i < len; i++) {
+        for (i = 0; i < nr_pages; i++) {
                 vma = find_vma(mm, start);
                 if (!vma)
                         goto finish_or_fault;
@@ -224,7 +224,7 @@ finish_or_fault:
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-        unsigned long start, int len, int write, int force,
+        unsigned long start, int nr_pages, int write, int force,
         struct page **pages, struct vm_area_struct **vmas)
 {
         int flags = 0;
@@ -234,9 +234,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
         if (force)
                 flags |= GUP_FLAGS_FORCE;
 
-        return __get_user_pages(tsk, mm,
-                                start, len, flags,
-                                pages, vmas);
+        return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
 EXPORT_SYMBOL(get_user_pages);