futex: separate futex_wait_queue_me() logic from futex_wait()
Refactor futex_wait() in preparation for futex_wait_requeue_pi(). In
order to reuse a good chunk of the futex_wait() code for the upcoming
futex_wait_requeue_pi() function, this patch breaks out the
queue-to-wakeup section of futex_wait() into futex_wait_queue_me().

Signed-off-by: Darren Hart <dvhltc@us.ibm.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
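For context, futex_wait() is the kernel half of the FUTEX_WAIT operation. The userspace sketch below is not part of this patch; it only demonstrates the two outcomes the hunks below deal with: the -EWOULDBLOCK value-mismatch check and the -ETIMEDOUT timeout path. It assumes Linux with <linux/futex.h> and uses a raw syscall(2), since glibc provides no futex() wrapper.

/* Userspace demo (illustrative, not from this patch). Build: gcc -o futex_demo futex_demo.c */
#include <errno.h>
#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static uint32_t futex_word;

/* FUTEX_WAIT sleeps only while *uaddr == val; the timeout here is relative. */
static long futex_wait_user(uint32_t *uaddr, uint32_t val,
			    const struct timespec *timeout)
{
	return syscall(SYS_futex, uaddr, FUTEX_WAIT, val, timeout, NULL, 0);
}

int main(void)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

	/*
	 * *uaddr != val: futex_wait() unlocks the hash bucket and returns
	 * -EWOULDBLOCK (seen as EAGAIN) without ever queueing.
	 */
	futex_word = 1;
	if (futex_wait_user(&futex_word, 0, &ts) == -1)
		printf("value mismatch: %s\n", strerror(errno));

	/*
	 * *uaddr == val and nobody calls FUTEX_WAKE: the hrtimer armed in
	 * futex_wait_queue_me() fires and the syscall fails with ETIMEDOUT.
	 */
	futex_word = 0;
	if (futex_wait_user(&futex_word, 0, &ts) == -1)
		printf("timed out: %s\n", strerror(errno));

	return 0;
}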
This commit is contained in:
parent 6bb597507f
commit ca5f9524d6
1 changed file with 76 additions and 62 deletions
138	kernel/futex.c
@@ -1115,24 +1115,87 @@ handle_fault:
 
 static long futex_wait_restart(struct restart_block *restart);
 
+/**
+ * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
+ * @hb:      the futex hash bucket, must be locked by the caller
+ * @q:       the futex_q to queue up on
+ * @timeout: the prepared hrtimer_sleeper, or null for no timeout
+ * @wait:    the wait_queue to add to the futex_q after queueing in the hb
+ */
+static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
+				struct hrtimer_sleeper *timeout,
+				wait_queue_t *wait)
+{
+	queue_me(q, hb);
+
+	/*
+	 * There might have been scheduling since the queue_me(), as we
+	 * cannot hold a spinlock across the get_user() in case it
+	 * faults, and we cannot just set TASK_INTERRUPTIBLE state when
+	 * queueing ourselves into the futex hash. This code thus has to
+	 * rely on the futex_wake() code removing us from hash when it
+	 * wakes us up.
+	 */
+
+	/* add_wait_queue is the barrier after __set_current_state. */
+	__set_current_state(TASK_INTERRUPTIBLE);
+
+	/*
+	 * Add current as the futex_q waiter. We don't remove ourselves from
+	 * the wait_queue because we are the only user of it.
+	 */
+	add_wait_queue(&q->waiter, wait);
+
+	/* Arm the timer */
+	if (timeout) {
+		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
+		if (!hrtimer_active(&timeout->timer))
+			timeout->task = NULL;
+	}
+
+	/*
+	 * !plist_node_empty() is safe here without any lock.
+	 * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
+	 */
+	if (likely(!plist_node_empty(&q->list))) {
+		/*
+		 * If the timer has already expired, current will already be
+		 * flagged for rescheduling. Only call schedule if there
+		 * is no timeout, or if it has yet to expire.
+		 */
+		if (!timeout || timeout->task)
+			schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+}
+
 static int futex_wait(u32 __user *uaddr, int fshared,
 		      u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
 {
-	struct task_struct *curr = current;
+	struct hrtimer_sleeper timeout, *to = NULL;
+	DECLARE_WAITQUEUE(wait, current);
 	struct restart_block *restart;
-	DECLARE_WAITQUEUE(wait, curr);
 	struct futex_hash_bucket *hb;
 	struct futex_q q;
 	u32 uval;
 	int ret;
-	struct hrtimer_sleeper t;
-	int rem = 0;
 
 	if (!bitset)
 		return -EINVAL;
 
 	q.pi_state = NULL;
 	q.bitset = bitset;
+
+	if (abs_time) {
+		to = &timeout;
+
+		hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
+				      CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+		hrtimer_init_sleeper(to, current);
+		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
+					     current->timer_slack_ns);
+	}
+
 retry:
 	q.key = FUTEX_KEY_INIT;
 	ret = get_futex_key(uaddr, fshared, &q.key);
@@ -1178,75 +1241,22 @@ retry_private:
 		goto retry;
 	}
 	ret = -EWOULDBLOCK;
+
+	/* Only actually queue if *uaddr contained val. */
 	if (unlikely(uval != val)) {
 		queue_unlock(&q, hb);
 		goto out_put_key;
 	}
 
-	/* Only actually queue if *uaddr contained val. */
-	queue_me(&q, hb);
-
-	/*
-	 * There might have been scheduling since the queue_me(), as we
-	 * cannot hold a spinlock across the get_user() in case it
-	 * faults, and we cannot just set TASK_INTERRUPTIBLE state when
-	 * queueing ourselves into the futex hash. This code thus has to
-	 * rely on the futex_wake() code removing us from hash when it
-	 * wakes us up.
-	 */
-
-	/* add_wait_queue is the barrier after __set_current_state. */
-	__set_current_state(TASK_INTERRUPTIBLE);
-	add_wait_queue(&q.waiter, &wait);
-	/*
-	 * !plist_node_empty() is safe here without any lock.
-	 * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
-	 */
-	if (likely(!plist_node_empty(&q.list))) {
-		if (!abs_time)
-			schedule();
-		else {
-			hrtimer_init_on_stack(&t.timer,
-					      clockrt ? CLOCK_REALTIME :
-					      CLOCK_MONOTONIC,
-					      HRTIMER_MODE_ABS);
-			hrtimer_init_sleeper(&t, current);
-			hrtimer_set_expires_range_ns(&t.timer, *abs_time,
-						     current->timer_slack_ns);
-
-			hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
-			if (!hrtimer_active(&t.timer))
-				t.task = NULL;
-
-			/*
-			 * the timer could have already expired, in which
-			 * case current would be flagged for rescheduling.
-			 * Don't bother calling schedule.
-			 */
-			if (likely(t.task))
-				schedule();
-
-			hrtimer_cancel(&t.timer);
-
-			/* Flag if a timeout occured */
-			rem = (t.task == NULL);
-
-			destroy_hrtimer_on_stack(&t.timer);
-		}
-	}
-	__set_current_state(TASK_RUNNING);
-
-	/*
-	 * NOTE: we don't remove ourselves from the waitqueue because
-	 * we are the only user of it.
-	 */
+	/* queue_me and wait for wakeup, timeout, or a signal. */
+	futex_wait_queue_me(hb, &q, to, &wait);
 
 	/* If we were woken (and unqueued), we succeeded, whatever. */
 	ret = 0;
 	if (!unqueue_me(&q))
 		goto out_put_key;
 	ret = -ETIMEDOUT;
-	if (rem)
+	if (to && !to->task)
 		goto out_put_key;
 
 	/*
@@ -1275,6 +1285,10 @@ retry_private:
 out_put_key:
 	put_futex_key(fshared, &q.key);
 out:
+	if (to) {
+		hrtimer_cancel(&to->timer);
+		destroy_hrtimer_on_stack(&to->timer);
+	}
 	return ret;
 }
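The point of the split is that other wait-flavored futex operations can now share the queue/sleep/wake sequence. Below is a rough sketch of the shape a caller such as the planned futex_wait_requeue_pi() might take; futex_wait_variant() is hypothetical, and the key lookup, value check, and restart handling are elided.

/*
 * Hypothetical kernel-side caller, sketching the reuse the commit message
 * describes. Illustrative only, not from this patch.
 */
static int futex_wait_variant(struct futex_hash_bucket *hb, struct futex_q *q,
			      struct hrtimer_sleeper *to)
{
	DECLARE_WAITQUEUE(wait, current);

	/* hb->lock is held here, matching the contract futex_wait() relies on. */
	futex_wait_queue_me(hb, q, to, &wait);

	/* Awake again: decide between wakeup, timeout, and signal. */
	if (!unqueue_me(q))
		return 0;		/* woken and unqueued: success */
	if (to && !to->task)
		return -ETIMEDOUT;	/* the armed hrtimer fired first */
	return -ERESTARTSYS;		/* signal; real code sets up a restart block */
}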