mirror of
https://github.com/adulau/aha.git
synced 2024-12-28 03:36:19 +00:00
[PATCH] taskstats: kill ->taskstats_lock in favor of ->siglock
signal_struct is (mostly) protected by ->sighand->siglock, I think we don't need ->taskstats_lock to protect ->stats. This also allows us to simplify the locking in fill_tgid(). Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru> Cc: Shailabh Nagar <nagar@watson.ibm.com> Cc: Balbir Singh <balbir@in.ibm.com> Cc: Jay Lan <jlan@sgi.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
17b02695b2
commit
b8534d7bd8
4 changed files with 13 additions and 21 deletions
|
@ -466,7 +466,6 @@ struct signal_struct {
|
||||||
struct pacct_struct pacct; /* per-process accounting information */
|
struct pacct_struct pacct; /* per-process accounting information */
|
||||||
#endif
|
#endif
|
||||||
#ifdef CONFIG_TASKSTATS
|
#ifdef CONFIG_TASKSTATS
|
||||||
spinlock_t stats_lock;
|
|
||||||
struct taskstats *stats;
|
struct taskstats *stats;
|
||||||
#endif
|
#endif
|
||||||
};
|
};
|
||||||
|
|
|
@ -23,28 +23,26 @@ static inline void taskstats_exit_free(struct taskstats *tidstats)
|
||||||
|
|
||||||
static inline void taskstats_tgid_init(struct signal_struct *sig)
|
static inline void taskstats_tgid_init(struct signal_struct *sig)
|
||||||
{
|
{
|
||||||
spin_lock_init(&sig->stats_lock);
|
|
||||||
sig->stats = NULL;
|
sig->stats = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void taskstats_tgid_alloc(struct signal_struct *sig)
|
static inline void taskstats_tgid_alloc(struct task_struct *tsk)
|
||||||
{
|
{
|
||||||
|
struct signal_struct *sig = tsk->signal;
|
||||||
struct taskstats *stats;
|
struct taskstats *stats;
|
||||||
unsigned long flags;
|
|
||||||
|
|
||||||
if (sig->stats != NULL)
|
if (sig->stats != NULL)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
/* No problem if kmem_cache_zalloc() fails */
|
||||||
stats = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
|
stats = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
|
||||||
if (!stats)
|
|
||||||
return;
|
|
||||||
|
|
||||||
spin_lock_irqsave(&sig->stats_lock, flags);
|
spin_lock_irq(&tsk->sighand->siglock);
|
||||||
if (!sig->stats) {
|
if (!sig->stats) {
|
||||||
sig->stats = stats;
|
sig->stats = stats;
|
||||||
stats = NULL;
|
stats = NULL;
|
||||||
}
|
}
|
||||||
spin_unlock_irqrestore(&sig->stats_lock, flags);
|
spin_unlock_irq(&tsk->sighand->siglock);
|
||||||
|
|
||||||
if (stats)
|
if (stats)
|
||||||
kmem_cache_free(taskstats_cache, stats);
|
kmem_cache_free(taskstats_cache, stats);
|
||||||
|
@ -59,7 +57,6 @@ static inline void taskstats_tgid_free(struct signal_struct *sig)
|
||||||
extern void taskstats_exit_alloc(struct taskstats **, unsigned int *);
|
extern void taskstats_exit_alloc(struct taskstats **, unsigned int *);
|
||||||
extern void taskstats_exit_send(struct task_struct *, struct taskstats *, int, unsigned int);
|
extern void taskstats_exit_send(struct task_struct *, struct taskstats *, int, unsigned int);
|
||||||
extern void taskstats_init_early(void);
|
extern void taskstats_init_early(void);
|
||||||
extern void taskstats_tgid_alloc(struct signal_struct *);
|
|
||||||
#else
|
#else
|
||||||
static inline void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
|
static inline void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
|
||||||
{}
|
{}
|
||||||
|
@ -71,7 +68,7 @@ static inline void taskstats_exit_send(struct task_struct *tsk,
|
||||||
{}
|
{}
|
||||||
static inline void taskstats_tgid_init(struct signal_struct *sig)
|
static inline void taskstats_tgid_init(struct signal_struct *sig)
|
||||||
{}
|
{}
|
||||||
static inline void taskstats_tgid_alloc(struct signal_struct *sig)
|
static inline void taskstats_tgid_alloc(struct task_struct *tsk)
|
||||||
{}
|
{}
|
||||||
static inline void taskstats_tgid_free(struct signal_struct *sig)
|
static inline void taskstats_tgid_free(struct signal_struct *sig)
|
||||||
{}
|
{}
|
||||||
|
|
|
@ -830,7 +830,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
|
||||||
if (clone_flags & CLONE_THREAD) {
|
if (clone_flags & CLONE_THREAD) {
|
||||||
atomic_inc(&current->signal->count);
|
atomic_inc(&current->signal->count);
|
||||||
atomic_inc(&current->signal->live);
|
atomic_inc(&current->signal->live);
|
||||||
taskstats_tgid_alloc(current->signal);
|
taskstats_tgid_alloc(current);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
|
sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
|
||||||
|
|
|
@ -241,11 +241,11 @@ static int fill_tgid(pid_t tgid, struct task_struct *tgidtsk,
|
||||||
tsk = first;
|
tsk = first;
|
||||||
read_lock(&tasklist_lock);
|
read_lock(&tasklist_lock);
|
||||||
/* Start with stats from dead tasks */
|
/* Start with stats from dead tasks */
|
||||||
if (first->signal) {
|
if (first->sighand) {
|
||||||
spin_lock_irqsave(&first->signal->stats_lock, flags);
|
spin_lock_irqsave(&first->sighand->siglock, flags);
|
||||||
if (first->signal->stats)
|
if (first->signal->stats)
|
||||||
memcpy(stats, first->signal->stats, sizeof(*stats));
|
memcpy(stats, first->signal->stats, sizeof(*stats));
|
||||||
spin_unlock_irqrestore(&first->signal->stats_lock, flags);
|
spin_unlock_irqrestore(&first->sighand->siglock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
do {
|
do {
|
||||||
|
@ -276,7 +276,7 @@ static void fill_tgid_exit(struct task_struct *tsk)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
||||||
spin_lock_irqsave(&tsk->signal->stats_lock, flags);
|
spin_lock_irqsave(&tsk->sighand->siglock, flags);
|
||||||
if (!tsk->signal->stats)
|
if (!tsk->signal->stats)
|
||||||
goto ret;
|
goto ret;
|
||||||
|
|
||||||
|
@ -288,7 +288,7 @@ static void fill_tgid_exit(struct task_struct *tsk)
|
||||||
*/
|
*/
|
||||||
delayacct_add_tsk(tsk->signal->stats, tsk);
|
delayacct_add_tsk(tsk->signal->stats, tsk);
|
||||||
ret:
|
ret:
|
||||||
spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);
|
spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -464,15 +464,10 @@ void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats,
|
||||||
size_t size;
|
size_t size;
|
||||||
int is_thread_group;
|
int is_thread_group;
|
||||||
struct nlattr *na;
|
struct nlattr *na;
|
||||||
unsigned long flags;
|
|
||||||
|
|
||||||
if (!family_registered || !tidstats)
|
if (!family_registered || !tidstats)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
spin_lock_irqsave(&tsk->signal->stats_lock, flags);
|
|
||||||
is_thread_group = tsk->signal->stats ? 1 : 0;
|
|
||||||
spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);
|
|
||||||
|
|
||||||
rc = 0;
|
rc = 0;
|
||||||
/*
|
/*
|
||||||
* Size includes space for nested attributes
|
* Size includes space for nested attributes
|
||||||
|
@ -480,6 +475,7 @@ void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats,
|
||||||
size = nla_total_size(sizeof(u32)) +
|
size = nla_total_size(sizeof(u32)) +
|
||||||
nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
|
nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
|
||||||
|
|
||||||
|
is_thread_group = (tsk->signal->stats != NULL);
|
||||||
if (is_thread_group)
|
if (is_thread_group)
|
||||||
size = 2 * size; /* PID + STATS + TGID + STATS */
|
size = 2 * size; /* PID + STATS + TGID + STATS */
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue