[PATCH] __user annotations: futex
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 28c4dadd3a
commit ba46df984b
4 changed files with 16 additions and 15 deletions
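For context on what the patch does: __user marks a pointer as referring to user-space memory. To an ordinary compiler the macro expands to nothing; under sparse (kernel builds with make C=1, which defines __CHECKER__) it places the pointer in a separate address space and marks it non-dereferenceable, so direct dereferences and kernel/user pointer mix-ups are reported. A minimal standalone sketch of that mechanism, reduced from the kernel's compiler headers and not part of this patch:

    /*
     * Standalone sketch of the __user wiring.  Under sparse (__CHECKER__)
     * the pointer is tagged as address space 1 and noderef; a normal
     * compiler sees an empty macro, so there is no run-time cost.
     */
    #ifdef __CHECKER__
    # define __user __attribute__((noderef, address_space(1)))
    #else
    # define __user
    #endif

    struct robust_list;

    /* Keeping the qualifier through a cast, as the patch does in futex.c: */
    static struct robust_list __user *strip_pi_bit(unsigned long uentry)
    {
            return (struct robust_list __user *)(uentry & ~1UL);
    }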
include/linux/compat.h

@@ -163,7 +163,7 @@ asmlinkage long
 compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
                            compat_size_t len);
 asmlinkage long
-compat_sys_get_robust_list(int pid, compat_uptr_t *head_ptr,
+compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
                            compat_size_t __user *len_ptr);

 long compat_sys_semctl(int first, int second, int third, void __user *uptr);
include/linux/syscalls.h

@@ -593,7 +593,7 @@ asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags);
 asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
                                         unsigned int flags);
 asmlinkage long sys_get_robust_list(int pid,
-                                    struct robust_list_head __user **head_ptr,
+                                    struct robust_list_head __user * __user *head_ptr,
                                     size_t __user *len_ptr);
 asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
                                     size_t len);
kernel/futex.c

@@ -1612,10 +1612,10 @@ sys_set_robust_list(struct robust_list_head __user *head,
  * @len_ptr: pointer to a length field, the kernel fills in the header size
  */
 asmlinkage long
-sys_get_robust_list(int pid, struct robust_list_head __user **head_ptr,
+sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
                     size_t __user *len_ptr)
 {
-        struct robust_list_head *head;
+        struct robust_list_head __user *head;
         unsigned long ret;

         if (!pid)
@@ -1694,14 +1694,15 @@ retry:
  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
  */
 static inline int fetch_robust_entry(struct robust_list __user **entry,
-                                     struct robust_list __user **head, int *pi)
+                                     struct robust_list __user * __user *head,
+                                     int *pi)
 {
         unsigned long uentry;

-        if (get_user(uentry, (unsigned long *)head))
+        if (get_user(uentry, (unsigned long __user *)head))
                 return -EFAULT;

-        *entry = (void *)(uentry & ~1UL);
+        *entry = (void __user *)(uentry & ~1UL);
         *pi = uentry & 1;

         return 0;
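The fetch_robust_entry() change above is the heart of the annotation work: the list word is read as an integer because user space stores the PI flag in bit 0 of the pointer, and both the get_user() cast and the conversion back to a pointer now keep the __user qualifier, so sparse no longer sees a kernel pointer being manufactured from a user address. A hypothetical standalone analogue of the tagged-pointer part, with made-up names and no kernel accessors:

    #include <stdint.h>

    struct robust_entry;    /* stand-in for struct robust_list */

    /*
     * Userspace analogue of the masking in fetch_robust_entry(): the
     * fetched word carries a flag in bit 0, so it is handled as an
     * integer first, then split into a pointer and a flag.
     */
    static void split_tagged_word(uintptr_t word,
                                  struct robust_entry **entry, int *pi)
    {
            *entry = (struct robust_entry *)(word & ~(uintptr_t)1);
            *pi = (int)(word & 1);
    }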
@@ -1739,7 +1740,7 @@ void exit_robust_list(struct task_struct *curr)
                 return;

         if (pending)
-                handle_futex_death((void *)pending + futex_offset, curr, pip);
+                handle_futex_death((void __user *)pending + futex_offset, curr, pip);

         while (entry != &head->list) {
                 /*
@@ -1747,7 +1748,7 @@ void exit_robust_list(struct task_struct *curr)
                  * don't process it twice:
                  */
                 if (entry != pending)
                         if (handle_futex_death((void *)entry + futex_offset,
-                        if (handle_futex_death((void *)entry + futex_offset,
+                        if (handle_futex_death((void __user *)entry + futex_offset,
                                                 curr, pi))
                                 return;
                 /*
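In exit_robust_list() the list node the kernel walks is embedded in a user-space lock structure, and futex_offset (registered by the task via set_robust_list()) is the distance from that node to the 32-bit futex word, so the address handed to handle_futex_death() is node pointer plus offset. Casting to void __user * before the addition keeps the address-space tag through the byte arithmetic (arithmetic on void * being a GCC extension the kernel relies on). A hypothetical user-side picture of that layout:

    #include <stdint.h>
    #include <stddef.h>

    struct robust_list { struct robust_list *next; };

    /*
     * Hypothetical lock layout on the user side: the robust_list node is
     * embedded in the lock, and the futex word sits at a fixed offset
     * from it.  That offset is what the task registers as futex_offset,
     * here offsetof(struct my_robust_lock, futex_word).
     */
    struct my_robust_lock {
            struct robust_list list;
            uint32_t futex_word;
    };

    /*
     * The exit path's address computation, written portably with char *
     * (the kernel does the same arithmetic on void __user *).
     */
    static uint32_t *futex_word_of(struct robust_list *node, long futex_offset)
    {
            return (uint32_t *)((char *)node + futex_offset);
    }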
kernel/futex_compat.c

@@ -18,7 +18,7 @@
  */
 static inline int
 fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
-                   compat_uptr_t *head, int *pi)
+                   compat_uptr_t __user *head, int *pi)
 {
         if (get_user(*uentry, head))
                 return -EFAULT;
@@ -62,7 +62,7 @@ void compat_exit_robust_list(struct task_struct *curr)
                                &head->list_op_pending, &pip))
                 return;
         if (upending)
-                handle_futex_death((void *)pending + futex_offset, curr, pip);
+                handle_futex_death((void __user *)pending + futex_offset, curr, pip);

         while (compat_ptr(uentry) != &head->list) {
                 /*
@@ -70,7 +70,7 @@ void compat_exit_robust_list(struct task_struct *curr)
                  * dont process it twice:
                  */
                 if (entry != pending)
-                        if (handle_futex_death((void *)entry + futex_offset,
+                        if (handle_futex_death((void __user *)entry + futex_offset,
                                                 curr, pi))
                                 return;

@@ -78,7 +78,7 @@ void compat_exit_robust_list(struct task_struct *curr)
                  * Fetch the next entry in the list:
                  */
                 if (fetch_robust_entry(&uentry, &entry,
-                        (compat_uptr_t *)&entry->next, &pi))
+                        (compat_uptr_t __user *)&entry->next, &pi))
                         return;
                 /*
                  * Avoid excessively long or circular lists:
@@ -103,10 +103,10 @@ compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
 }

 asmlinkage long
-compat_sys_get_robust_list(int pid, compat_uptr_t *head_ptr,
+compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
                            compat_size_t __user *len_ptr)
 {
-        struct compat_robust_list_head *head;
+        struct compat_robust_list_head __user *head;
         unsigned long ret;

         if (!pid)
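Both get_robust_list() variants return their results through user memory: head_ptr and len_ptr point at caller-owned slots that the kernel fills with put_user(), which is why the native prototype ends up with the double annotation (struct robust_list_head __user * __user *) and the compat one with compat_uptr_t __user *. For reference, a hypothetical sketch of the caller's side using the raw syscall interface (glibc does not provide a dedicated wrapper for this call), assuming the usual Linux UAPI headers:

    /*
     * Hypothetical caller's view of get_robust_list(): both out-parameters
     * live in caller (user) memory, matching the __user annotations in the
     * kernel prototype.
     */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/futex.h>        /* struct robust_list_head */

    int main(void)
    {
            struct robust_list_head *head;  /* kernel stores a user pointer here */
            size_t len;

            /* pid 0 means the calling thread. */
            if (syscall(SYS_get_robust_list, 0, &head, &len) == 0)
                    printf("robust list head %p, header size %zu\n",
                           (void *)head, len);
            return 0;
    }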