Mirror of https://github.com/adulau/aha.git (synced 2024-12-27 11:16:11 +00:00)
sched: Convert rt_runtime_lock to raw_spinlock
Convert locks which cannot be sleeping locks in preempt-rt to raw_spinlocks.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
parent 05fa785cf8
commit 0986b11b12
2 changed files with 37 additions and 37 deletions
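Background, not part of the patch itself: under PREEMPT_RT a spinlock_t is substituted by a sleeping, rtmutex-based lock, whereas raw_spinlock_t and the raw_spin_*() helpers always busy-wait with preemption disabled. rt_runtime_lock is taken in contexts that must not sleep (it nests inside the runqueue lock and is reached from timer/irq paths), so it has to remain a true spinlock. The change is a mechanical 1:1 rename of the lock type and its accessors; the minimal C sketch below illustrates the pattern with a hypothetical struct, not code from the kernel:

#include <linux/types.h>
#include <linux/spinlock.h>

/* Hypothetical container used only to illustrate the rename. */
struct example_bw {
	raw_spinlock_t	lock;		/* was: spinlock_t lock; */
	u64		runtime;
};

static void example_bw_init(struct example_bw *b)
{
	raw_spin_lock_init(&b->lock);	/* was: spin_lock_init(&b->lock); */
	b->runtime = 0;
}

static void example_bw_charge(struct example_bw *b, u64 delta)
{
	raw_spin_lock(&b->lock);	/* was: spin_lock(&b->lock); */
	b->runtime += delta;		/* keep the critical section short: it now always spins */
	raw_spin_unlock(&b->lock);	/* was: spin_unlock(&b->lock); */
}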
kernel/sched.c
@@ -141,7 +141,7 @@ struct rt_prio_array {
 
 struct rt_bandwidth {
 	/* nests inside the rq lock: */
-	spinlock_t		rt_runtime_lock;
+	raw_spinlock_t		rt_runtime_lock;
 	ktime_t			rt_period;
 	u64			rt_runtime;
 	struct hrtimer		rt_period_timer;
@@ -178,7 +178,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
 	rt_b->rt_period = ns_to_ktime(period);
 	rt_b->rt_runtime = runtime;
 
-	spin_lock_init(&rt_b->rt_runtime_lock);
+	raw_spin_lock_init(&rt_b->rt_runtime_lock);
 
 	hrtimer_init(&rt_b->rt_period_timer,
 			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -200,7 +200,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 	if (hrtimer_active(&rt_b->rt_period_timer))
 		return;
 
-	spin_lock(&rt_b->rt_runtime_lock);
+	raw_spin_lock(&rt_b->rt_runtime_lock);
 	for (;;) {
 		unsigned long delta;
 		ktime_t soft, hard;
@@ -217,7 +217,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 		__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
 				HRTIMER_MODE_ABS_PINNED, 0);
 	}
-	spin_unlock(&rt_b->rt_runtime_lock);
+	raw_spin_unlock(&rt_b->rt_runtime_lock);
 }
 
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -470,7 +470,7 @@ struct rt_rq {
 	u64 rt_time;
 	u64 rt_runtime;
 	/* Nests inside the rq lock: */
-	spinlock_t rt_runtime_lock;
+	raw_spinlock_t rt_runtime_lock;
 
 #ifdef CONFIG_RT_GROUP_SCHED
 	unsigned long rt_nr_boosted;
@@ -9366,7 +9366,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 	rt_rq->rt_time = 0;
 	rt_rq->rt_throttled = 0;
 	rt_rq->rt_runtime = 0;
-	spin_lock_init(&rt_rq->rt_runtime_lock);
+	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
 
 #ifdef CONFIG_RT_GROUP_SCHED
 	rt_rq->rt_nr_boosted = 0;
@@ -10305,18 +10305,18 @@ static int tg_set_bandwidth(struct task_group *tg,
 	if (err)
 		goto unlock;
 
-	spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
 	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
 	tg->rt_bandwidth.rt_runtime = rt_runtime;
 
 	for_each_possible_cpu(i) {
 		struct rt_rq *rt_rq = tg->rt_rq[i];
 
-		spin_lock(&rt_rq->rt_runtime_lock);
+		raw_spin_lock(&rt_rq->rt_runtime_lock);
 		rt_rq->rt_runtime = rt_runtime;
-		spin_unlock(&rt_rq->rt_runtime_lock);
+		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 	}
-	spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
 unlock:
 	read_unlock(&tasklist_lock);
 	mutex_unlock(&rt_constraints_mutex);
@@ -10421,15 +10421,15 @@ static int sched_rt_global_constraints(void)
 	if (sysctl_sched_rt_runtime == 0)
 		return -EBUSY;
 
-	spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
+	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
 	for_each_possible_cpu(i) {
 		struct rt_rq *rt_rq = &cpu_rq(i)->rt;
 
-		spin_lock(&rt_rq->rt_runtime_lock);
+		raw_spin_lock(&rt_rq->rt_runtime_lock);
 		rt_rq->rt_runtime = global_rt_runtime();
-		spin_unlock(&rt_rq->rt_runtime_lock);
+		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 	}
-	spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
+	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
 
 	return 0;
 }
kernel/sched_rt.c
@@ -327,7 +327,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 
 	weight = cpumask_weight(rd->span);
 
-	spin_lock(&rt_b->rt_runtime_lock);
+	raw_spin_lock(&rt_b->rt_runtime_lock);
 	rt_period = ktime_to_ns(rt_b->rt_period);
 	for_each_cpu(i, rd->span) {
 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
@@ -336,7 +336,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 		if (iter == rt_rq)
 			continue;
 
-		spin_lock(&iter->rt_runtime_lock);
+		raw_spin_lock(&iter->rt_runtime_lock);
 		/*
 		 * Either all rqs have inf runtime and there's nothing to steal
 		 * or __disable_runtime() below sets a specific rq to inf to
@@ -358,14 +358,14 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 			rt_rq->rt_runtime += diff;
 			more = 1;
 			if (rt_rq->rt_runtime == rt_period) {
-				spin_unlock(&iter->rt_runtime_lock);
+				raw_spin_unlock(&iter->rt_runtime_lock);
 				break;
 			}
 		}
 next:
-		spin_unlock(&iter->rt_runtime_lock);
+		raw_spin_unlock(&iter->rt_runtime_lock);
 	}
-	spin_unlock(&rt_b->rt_runtime_lock);
+	raw_spin_unlock(&rt_b->rt_runtime_lock);
 
 	return more;
 }
@@ -386,8 +386,8 @@ static void __disable_runtime(struct rq *rq)
 		s64 want;
 		int i;
 
-		spin_lock(&rt_b->rt_runtime_lock);
-		spin_lock(&rt_rq->rt_runtime_lock);
+		raw_spin_lock(&rt_b->rt_runtime_lock);
+		raw_spin_lock(&rt_rq->rt_runtime_lock);
 		/*
 		 * Either we're all inf and nobody needs to borrow, or we're
 		 * already disabled and thus have nothing to do, or we have
@@ -396,7 +396,7 @@ static void __disable_runtime(struct rq *rq)
 		if (rt_rq->rt_runtime == RUNTIME_INF ||
 		    rt_rq->rt_runtime == rt_b->rt_runtime)
 			goto balanced;
-		spin_unlock(&rt_rq->rt_runtime_lock);
+		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 
 		/*
 		 * Calculate the difference between what we started out with
@@ -418,7 +418,7 @@ static void __disable_runtime(struct rq *rq)
 			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
 				continue;
 
-			spin_lock(&iter->rt_runtime_lock);
+			raw_spin_lock(&iter->rt_runtime_lock);
 			if (want > 0) {
 				diff = min_t(s64, iter->rt_runtime, want);
 				iter->rt_runtime -= diff;
@@ -427,13 +427,13 @@ static void __disable_runtime(struct rq *rq)
 				iter->rt_runtime -= want;
 				want -= want;
 			}
-			spin_unlock(&iter->rt_runtime_lock);
+			raw_spin_unlock(&iter->rt_runtime_lock);
 
 			if (!want)
 				break;
 		}
 
-		spin_lock(&rt_rq->rt_runtime_lock);
+		raw_spin_lock(&rt_rq->rt_runtime_lock);
 		/*
 		 * We cannot be left wanting - that would mean some runtime
 		 * leaked out of the system.
@@ -445,8 +445,8 @@ balanced:
 		 * runtime - in which case borrowing doesn't make sense.
 		 */
 		rt_rq->rt_runtime = RUNTIME_INF;
-		spin_unlock(&rt_rq->rt_runtime_lock);
-		spin_unlock(&rt_b->rt_runtime_lock);
+		raw_spin_unlock(&rt_rq->rt_runtime_lock);
+		raw_spin_unlock(&rt_b->rt_runtime_lock);
 	}
 }
 
@@ -472,13 +472,13 @@ static void __enable_runtime(struct rq *rq)
 	for_each_leaf_rt_rq(rt_rq, rq) {
 		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
 
-		spin_lock(&rt_b->rt_runtime_lock);
-		spin_lock(&rt_rq->rt_runtime_lock);
+		raw_spin_lock(&rt_b->rt_runtime_lock);
+		raw_spin_lock(&rt_rq->rt_runtime_lock);
 		rt_rq->rt_runtime = rt_b->rt_runtime;
 		rt_rq->rt_time = 0;
 		rt_rq->rt_throttled = 0;
-		spin_unlock(&rt_rq->rt_runtime_lock);
-		spin_unlock(&rt_b->rt_runtime_lock);
+		raw_spin_unlock(&rt_rq->rt_runtime_lock);
+		raw_spin_unlock(&rt_b->rt_runtime_lock);
 	}
 }
 
@@ -496,9 +496,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
 	int more = 0;
 
 	if (rt_rq->rt_time > rt_rq->rt_runtime) {
-		spin_unlock(&rt_rq->rt_runtime_lock);
+		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 		more = do_balance_runtime(rt_rq);
-		spin_lock(&rt_rq->rt_runtime_lock);
+		raw_spin_lock(&rt_rq->rt_runtime_lock);
 	}
 
 	return more;
@@ -528,7 +528,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 		if (rt_rq->rt_time) {
 			u64 runtime;
 
-			spin_lock(&rt_rq->rt_runtime_lock);
+			raw_spin_lock(&rt_rq->rt_runtime_lock);
 			if (rt_rq->rt_throttled)
 				balance_runtime(rt_rq);
 			runtime = rt_rq->rt_runtime;
@@ -539,7 +539,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			}
 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 				idle = 0;
-			spin_unlock(&rt_rq->rt_runtime_lock);
+			raw_spin_unlock(&rt_rq->rt_runtime_lock);
 		} else if (rt_rq->rt_nr_running)
 			idle = 0;
 
@@ -624,11 +624,11 @@ static void update_curr_rt(struct rq *rq)
 		rt_rq = rt_rq_of_se(rt_se);
 
 		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
-			spin_lock(&rt_rq->rt_runtime_lock);
+			raw_spin_lock(&rt_rq->rt_runtime_lock);
 			rt_rq->rt_time += delta_exec;
 			if (sched_rt_runtime_exceeded(rt_rq))
 				resched_task(curr);
-			spin_unlock(&rt_rq->rt_runtime_lock);
+			raw_spin_unlock(&rt_rq->rt_runtime_lock);
 		}
 	}
 }
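For reference, a condensed sketch of the lock nesting the conversion preserves, distilled from the __disable_runtime()/__enable_runtime() hunks above (the function name below is a placeholder, not something added by this commit): the group-wide rt_bandwidth lock is taken first, the per-runqueue rt_runtime_lock nests inside it, and both stay raw so neither can sleep under PREEMPT_RT.

/* Illustrative placeholder mirroring the pattern in the hunks above. */
static void refill_rt_runtime(struct rt_bandwidth *rt_b, struct rt_rq *rt_rq)
{
	raw_spin_lock(&rt_b->rt_runtime_lock);	/* outer: per-group bandwidth lock */
	raw_spin_lock(&rt_rq->rt_runtime_lock);	/* inner: nests inside, as the comments note */
	rt_rq->rt_runtime = rt_b->rt_runtime;
	raw_spin_unlock(&rt_rq->rt_runtime_lock);
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}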