perf_counter: Propagate inheritance failures down the fork() path
Fail fork() when we fail inheritance for some reason (-ENOMEM most likely).

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090525124600.324656474@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 771d7cde14
commit 6ab423e0ea

3 changed files with 19 additions and 11 deletions
include/linux/perf_counter.h
@@ -566,7 +566,7 @@ extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
 extern void perf_counter_task_sched_out(struct task_struct *task,
 					struct task_struct *next, int cpu);
 extern void perf_counter_task_tick(struct task_struct *task, int cpu);
-extern void perf_counter_init_task(struct task_struct *child);
+extern int perf_counter_init_task(struct task_struct *child);
 extern void perf_counter_exit_task(struct task_struct *child);
 extern void perf_counter_do_pending(void);
 extern void perf_counter_print_debug(void);
@@ -631,7 +631,7 @@ perf_counter_task_sched_out(struct task_struct *task,
 			struct task_struct *next, int cpu) { }
 static inline void
 perf_counter_task_tick(struct task_struct *task, int cpu) { }
-static inline void perf_counter_init_task(struct task_struct *child) { }
+static inline int perf_counter_init_task(struct task_struct *child) { return 0; }
 static inline void perf_counter_exit_task(struct task_struct *child) { }
 static inline void perf_counter_do_pending(void) { }
 static inline void perf_counter_print_debug(void) { }
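The header hunks follow the usual kernel pattern for optional features: a real prototype when the feature is compiled in, and a no-op static inline stub otherwise, with the stub now returning 0 so callers can check the result unconditionally. A minimal stand-alone sketch of that pattern (CONFIG_FOO and foo_init_task are hypothetical names, not the actual kernel symbols):

/* Illustrative only: real prototype vs. static-inline stub, as in perf_counter.h. */
#include <stdio.h>

/* #define CONFIG_FOO 1 */               /* pretend this is a Kconfig option */

#ifdef CONFIG_FOO
int foo_init_task(int *task);             /* real implementation lives elsewhere */
#else
static inline int foo_init_task(int *task) { (void)task; return 0; }
#endif

int main(void)
{
	int task = 0;

	/* Callers check the return value either way; the stub never fails. */
	if (foo_init_task(&task))
		fprintf(stderr, "foo_init_task failed\n");
	return 0;
}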
kernel/fork.c
@@ -1095,7 +1095,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
 	/* Perform scheduler related setup. Assign this task to a CPU. */
 	sched_fork(p, clone_flags);
-	perf_counter_init_task(p);
+
+	retval = perf_counter_init_task(p);
+	if (retval)
+		goto bad_fork_cleanup_policy;
 
 	if ((retval = audit_alloc(p)))
 		goto bad_fork_cleanup_policy;
@@ -1295,6 +1298,7 @@ bad_fork_cleanup_semundo:
 bad_fork_cleanup_audit:
 	audit_free(p);
 bad_fork_cleanup_policy:
+	perf_counter_exit_task(p);
 #ifdef CONFIG_NUMA
 	mpol_put(p->mempolicy);
 bad_fork_cleanup_cgroup:
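copy_process() handles failures with the usual goto-based unwind: each setup step that can fail jumps to a cleanup label that tears down everything set up so far, which is why perf_counter_exit_task() is added under bad_fork_cleanup_policy. A small self-contained sketch of that pattern (setup_a/setup_b/teardown_a are hypothetical names, not the real fork code):

/* Illustrative only: the goto-unwind error handling style used in copy_process(). */
#include <errno.h>
#include <stdio.h>

static int setup_a(void) { return 0; }          /* succeeds */
static int setup_b(void) { return -ENOMEM; }    /* pretend an allocation fails */
static void teardown_a(void) { puts("teardown_a"); }

static int do_setup(void)
{
	int retval;

	retval = setup_a();
	if (retval)
		goto out;

	retval = setup_b();
	if (retval)
		goto cleanup_a;    /* undo setup_a before returning the error */

	return 0;

cleanup_a:
	teardown_a();
out:
	return retval;
}

int main(void)
{
	printf("do_setup() = %d\n", do_setup());    /* prints a negative errno (-ENOMEM) */
	return 0;
}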
kernel/perf_counter.c
@@ -3434,18 +3434,23 @@ again:
 /*
  * Initialize the perf_counter context in task_struct
  */
-void perf_counter_init_task(struct task_struct *child)
+int perf_counter_init_task(struct task_struct *child)
 {
 	struct perf_counter_context *child_ctx, *parent_ctx;
 	struct perf_counter *counter;
 	struct task_struct *parent = current;
 	int inherited_all = 1;
+	int ret = 0;
 
 	child->perf_counter_ctxp = NULL;
 
 	mutex_init(&child->perf_counter_mutex);
 	INIT_LIST_HEAD(&child->perf_counter_list);
 
+	parent_ctx = parent->perf_counter_ctxp;
+	if (likely(!parent_ctx || !parent_ctx->nr_counters))
+		return 0;
+
 	/*
 	 * This is executed from the parent task context, so inherit
 	 * counters that have been marked for cloning.
@@ -3454,11 +3459,7 @@ void perf_counter_init_task(struct task_struct *child)
 
 	child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
 	if (!child_ctx)
-		return;
-
-	parent_ctx = parent->perf_counter_ctxp;
-	if (likely(!parent_ctx || !parent_ctx->nr_counters))
-		return;
+		return -ENOMEM;
 
 	__perf_counter_init_context(child_ctx, child);
 	child->perf_counter_ctxp = child_ctx;
@@ -3482,8 +3483,9 @@ void perf_counter_init_task(struct task_struct *child)
 			continue;
 		}
 
-		if (inherit_group(counter, parent,
-				  parent_ctx, child, child_ctx)) {
+		ret = inherit_group(counter, parent, parent_ctx,
+				    child, child_ctx);
+		if (ret) {
 			inherited_all = 0;
 			break;
 		}
@@ -3505,6 +3507,8 @@ void perf_counter_init_task(struct task_struct *child)
 	}
 
 	mutex_unlock(&parent_ctx->mutex);
+
+	return ret;
 }
 
 static void __cpuinit perf_counter_init_cpu(int cpu)
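With this change an inheritance failure is no longer silently ignored: perf_counter_init_task() returns a negative errno, copy_process() unwinds, and the fork ultimately fails. From userspace that presumably looks like any other failed fork(); a plain POSIX example (nothing perf-specific) of handling it:

/* Illustrative only: a failed fork() reports the error via errno (e.g. ENOMEM). */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid < 0) {
		/* Covers ENOMEM from a failed copy_process(), among other causes. */
		fprintf(stderr, "fork failed: %s\n", strerror(errno));
		return 1;
	}
	if (pid == 0)
		_exit(0);           /* child does nothing */

	waitpid(pid, NULL, 0);      /* parent reaps the child */
	return 0;
}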