[PATCH] use smp_mb/wmb/rmb where possible
Replace a number of memory barriers with smp_ variants. This means we won't take the unnecessary hit on UP machines.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 0d8d4d42f2
commit d59dd4620f
11 changed files with 31 additions and 31 deletions
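For reference, the point of the conversion: mb()/rmb()/wmb() always emit a hardware barrier instruction, while the smp_ variants only do so on CONFIG_SMP kernels and collapse to a plain compiler barrier on uniprocessor builds. A simplified sketch of the conventional mapping (the real definitions live in the per-architecture headers, so details vary by platform):

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#else
#define smp_mb()        barrier()   /* compiler barrier only on UP */
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#endif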
@@ -218,7 +218,7 @@ struct super_block *freeze_bdev(struct block_device *bdev)
         sb = get_super(bdev);
         if (sb && !(sb->s_flags & MS_RDONLY)) {
                 sb->s_frozen = SB_FREEZE_WRITE;
-                wmb();
+                smp_wmb();

                 sync_inodes_sb(sb, 0);
                 DQUOT_SYNC(sb);
@@ -235,7 +235,7 @@ struct super_block *freeze_bdev(struct block_device *bdev)
                 sync_inodes_sb(sb, 1);

                 sb->s_frozen = SB_FREEZE_TRANS;
-                wmb();
+                smp_wmb();

                 sync_blockdev(sb->s_bdev);

@@ -263,7 +263,7 @@ void thaw_bdev(struct block_device *bdev, struct super_block *sb)
                 if (sb->s_op->unlockfs)
                         sb->s_op->unlockfs(sb);
                 sb->s_frozen = SB_UNFROZEN;
-                wmb();
+                smp_wmb();
                 wake_up(&sb->s_wait_unfrozen);
                 drop_super(sb);
         }
@@ -767,7 +767,7 @@ static inline void pipelined_send(struct mqueue_inode_info *info,
         list_del(&receiver->list);
         receiver->state = STATE_PENDING;
         wake_up_process(receiver->task);
-        wmb();
+        smp_wmb();
         receiver->state = STATE_READY;
 }

@@ -786,7 +786,7 @@ static inline void pipelined_receive(struct mqueue_inode_info *info)
         list_del(&sender->list);
         sender->state = STATE_PENDING;
         wake_up_process(sender->task);
-        wmb();
+        smp_wmb();
         sender->state = STATE_READY;
 }

@@ -174,7 +174,7 @@ int kthread_stop(struct task_struct *k)

         /* Must init completion *before* thread sees kthread_stop_info.k */
         init_completion(&kthread_stop_info.done);
-        wmb();
+        smp_wmb();

         /* Now set kthread_should_stop() to true, and wake it up. */
         kthread_stop_info.k = k;
@@ -522,7 +522,7 @@ static int __init create_hash_tables(void)
         return 0;
 out_cleanup:
         prof_on = 0;
-        mb();
+        smp_mb();
         on_each_cpu(profile_nop, NULL, 0, 1);
         for_each_online_cpu(cpu) {
                 struct page *page;
@@ -135,7 +135,7 @@ int ptrace_attach(struct task_struct *task)
             (current->gid != task->sgid) ||
             (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
                 goto bad;
-        rmb();
+        smp_rmb();
         if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE))
                 goto bad;
         /* the same process cannot be attached many times */
@@ -33,7 +33,7 @@ static int stopmachine(void *cpu)
         set_cpus_allowed(current, cpumask_of_cpu((int)(long)cpu));

         /* Ack: we are alive */
-        mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
+        smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
         atomic_inc(&stopmachine_thread_ack);

         /* Simple state machine */
@@ -43,14 +43,14 @@ static int stopmachine(void *cpu)
                         local_irq_disable();
                         irqs_disabled = 1;
                         /* Ack: irqs disabled. */
-                        mb(); /* Must read state first. */
+                        smp_mb(); /* Must read state first. */
                         atomic_inc(&stopmachine_thread_ack);
                 } else if (stopmachine_state == STOPMACHINE_PREPARE
                            && !prepared) {
                         /* Everyone is in place, hold CPU. */
                         preempt_disable();
                         prepared = 1;
-                        mb(); /* Must read state first. */
+                        smp_mb(); /* Must read state first. */
                         atomic_inc(&stopmachine_thread_ack);
                 }
                 /* Yield in first stage: migration threads need to
@@ -62,7 +62,7 @@ static int stopmachine(void *cpu)
         }

         /* Ack: we are exiting. */
-        mb(); /* Must read state first. */
+        smp_mb(); /* Must read state first. */
         atomic_inc(&stopmachine_thread_ack);

         if (irqs_disabled)
@@ -77,7 +77,7 @@ static int stopmachine(void *cpu)
 static void stopmachine_set_state(enum stopmachine_state state)
 {
         atomic_set(&stopmachine_thread_ack, 0);
-        wmb();
+        smp_wmb();
         stopmachine_state = state;
         while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
                 cpu_relax();
kernel/sys.c: 20 changed lines
@@ -525,7 +525,7 @@ asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
         if (new_egid != old_egid)
         {
                 current->mm->dumpable = 0;
-                wmb();
+                smp_wmb();
         }
         if (rgid != (gid_t) -1 ||
             (egid != (gid_t) -1 && egid != old_rgid))
@@ -556,7 +556,7 @@ asmlinkage long sys_setgid(gid_t gid)
                 if(old_egid != gid)
                 {
                         current->mm->dumpable=0;
-                        wmb();
+                        smp_wmb();
                 }
                 current->gid = current->egid = current->sgid = current->fsgid = gid;
         }
@@ -565,7 +565,7 @@ asmlinkage long sys_setgid(gid_t gid)
                 if(old_egid != gid)
                 {
                         current->mm->dumpable=0;
-                        wmb();
+                        smp_wmb();
                 }
                 current->egid = current->fsgid = gid;
         }
@@ -596,7 +596,7 @@ static int set_user(uid_t new_ruid, int dumpclear)
         if(dumpclear)
         {
                 current->mm->dumpable = 0;
-                wmb();
+                smp_wmb();
         }
         current->uid = new_ruid;
         return 0;
@@ -653,7 +653,7 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
         if (new_euid != old_euid)
         {
                 current->mm->dumpable=0;
-                wmb();
+                smp_wmb();
         }
         current->fsuid = current->euid = new_euid;
         if (ruid != (uid_t) -1 ||
@@ -703,7 +703,7 @@ asmlinkage long sys_setuid(uid_t uid)
         if (old_euid != uid)
         {
                 current->mm->dumpable = 0;
-                wmb();
+                smp_wmb();
         }
         current->fsuid = current->euid = uid;
         current->suid = new_suid;
@@ -748,7 +748,7 @@ asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
                 if (euid != current->euid)
                 {
                         current->mm->dumpable = 0;
-                        wmb();
+                        smp_wmb();
                 }
                 current->euid = euid;
         }
@@ -798,7 +798,7 @@ asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
                 if (egid != current->egid)
                 {
                         current->mm->dumpable = 0;
-                        wmb();
+                        smp_wmb();
                 }
                 current->egid = egid;
         }
@@ -845,7 +845,7 @@ asmlinkage long sys_setfsuid(uid_t uid)
                 if (uid != old_fsuid)
                 {
                         current->mm->dumpable = 0;
-                        wmb();
+                        smp_wmb();
                 }
                 current->fsuid = uid;
         }
@@ -875,7 +875,7 @@ asmlinkage long sys_setfsgid(gid_t gid)
                 if (gid != old_fsgid)
                 {
                         current->mm->dumpable = 0;
-                        wmb();
+                        smp_wmb();
                 }
                 current->fsgid = gid;
                 key_fsgid_changed(current);
@@ -1007,7 +1007,7 @@ asmlinkage long sys_getppid(void)
                  * Make sure we read the pid before re-reading the
                  * parent pointer:
                  */
-                rmb();
+                smp_rmb();
                 parent = me->group_leader->real_parent;
                 if (old != parent)
                         continue;
@@ -76,7 +76,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
                 list_del(&waiter->list);
                 tsk = waiter->task;
                 /* Don't touch waiter after ->task has been NULLed */
-                mb();
+                smp_mb();
                 waiter->task = NULL;
                 wake_up_process(tsk);
                 put_task_struct(tsk);
@@ -91,7 +91,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)

                 list_del(&waiter->list);
                 tsk = waiter->task;
-                mb();
+                smp_mb();
                 waiter->task = NULL;
                 wake_up_process(tsk);
                 put_task_struct(tsk);
@@ -123,7 +123,7 @@ __rwsem_wake_one_writer(struct rw_semaphore *sem)
         list_del(&waiter->list);

         tsk = waiter->task;
-        mb();
+        smp_mb();
         waiter->task = NULL;
         wake_up_process(tsk);
         put_task_struct(tsk);
@@ -74,7 +74,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
          */
         list_del(&waiter->list);
         tsk = waiter->task;
-        mb();
+        smp_mb();
         waiter->task = NULL;
         wake_up_process(tsk);
         put_task_struct(tsk);
@@ -117,7 +117,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
                 waiter = list_entry(next, struct rwsem_waiter, list);
                 next = waiter->list.next;
                 tsk = waiter->task;
-                mb();
+                smp_mb();
                 waiter->task = NULL;
                 wake_up_process(tsk);
                 put_task_struct(tsk);
@@ -229,7 +229,7 @@ repeat_alloc:
         /* Now start performing page reclaim */
         gfp_temp = gfp_mask;
         prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
-        mb();
+        smp_mb();
         if (!pool->curr_nr)
                 io_schedule();
         finish_wait(&pool->wait, &wait);
@@ -250,7 +250,7 @@ void mempool_free(void *element, mempool_t *pool)
 {
         unsigned long flags;

-        mb();
+        smp_mb();
         if (pool->curr_nr < pool->min_nr) {
                 spin_lock_irqsave(&pool->lock, flags);
                 if (pool->curr_nr < pool->min_nr) {
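Most of the sites converted above follow the same publish/consume shape: a writer fills in data, orders it with a write barrier, then sets a flag; the reader pairs that with a read barrier before touching the data. A minimal illustrative sketch of that pairing (hypothetical variable and function names, not code from this patch):

/* writer */
shared_data = compute_value();   /* fill in the payload */
smp_wmb();                       /* order the payload before the flag */
data_ready = 1;                  /* publish */

/* reader */
if (data_ready) {
        smp_rmb();               /* order the flag read before the payload read */
        consume(shared_data);
}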