sched: Restore printk sanity
Revert the braindead pr_* crap. (Commit 663997d "sched: Use
pr_fmt() and pr_<level>()")

It's dumb and causes stupid "sched: " strings all over the place.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Mike Galbraith <efault@gmx.de>
Cc: Joe Perches <joe@perches.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
LKML-Reference: <1261315437.4314.6.camel@laptop>
[ I don't mind the pr_*() patterns that much - but Peter dislikes them with a vengeance. ]
[ - v2: remove spurious diffstat from changelog :-/ ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent dd59f6c76b
commit 3df0fc5b2e

2 changed files with 50 additions and 41 deletions
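
[ Background note, not part of the commit: the mechanism being reverted
  works as follows. Commit 663997d added a file-wide pr_fmt() to
  kernel/sched.c, and the pr_<level>() helpers splice that prefix into
  every format string. A minimal sketch, assuming the helper
  definitions of that kernel era (include/linux/kernel.h):

      /* the define removed from kernel/sched.c below */
      #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

      /* pr_err() as defined in include/linux/kernel.h */
      #define pr_err(fmt, ...) \
              printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

  Since KBUILD_MODNAME is "sched" for kernel/sched.c,

      pr_err("BUG: scheduling while atomic\n");

  expands to

      printk(KERN_ERR "sched: " "BUG: scheduling while atomic\n");

  which is where the unwanted "sched: " strings come from. Note that
  pr_cont() deliberately does not apply pr_fmt() (a continuation line
  must not restate the prefix), so the pr_cont() -> printk(KERN_CONT)
  conversions below do not change the console output at all. ]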
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -26,8 +26,6 @@
  *  Thomas Gleixner, Mike Kravetz
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/nmi.h>
@@ -5375,8 +5373,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
 {
 	struct pt_regs *regs = get_irq_regs();
 
-	pr_err("BUG: scheduling while atomic: %s/%d/0x%08x\n",
-	       prev->comm, prev->pid, preempt_count());
+	printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
+		prev->comm, prev->pid, preempt_count());
 
 	debug_show_held_locks(prev);
 	print_modules();
@@ -6940,23 +6938,23 @@ void sched_show_task(struct task_struct *p)
 	unsigned state;
 
 	state = p->state ? __ffs(p->state) + 1 : 0;
-	pr_info("%-13.13s %c", p->comm,
-		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
+	printk(KERN_INFO "%-13.13s %c", p->comm,
+		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
 #if BITS_PER_LONG == 32
 	if (state == TASK_RUNNING)
-		pr_cont(" running ");
+		printk(KERN_CONT " running ");
 	else
-		pr_cont(" %08lx ", thread_saved_pc(p));
+		printk(KERN_CONT " %08lx ", thread_saved_pc(p));
 #else
 	if (state == TASK_RUNNING)
-		pr_cont(" running task ");
+		printk(KERN_CONT " running task ");
 	else
-		pr_cont(" %016lx ", thread_saved_pc(p));
+		printk(KERN_CONT " %016lx ", thread_saved_pc(p));
 #endif
 #ifdef CONFIG_DEBUG_STACK_USAGE
 	free = stack_not_used(p);
 #endif
-	pr_cont("%5lu %5d %6d 0x%08lx\n", free,
-		task_pid_nr(p), task_pid_nr(p->real_parent),
-		(unsigned long)task_thread_info(p)->flags);
+	printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
+		task_pid_nr(p), task_pid_nr(p->real_parent),
+		(unsigned long)task_thread_info(p)->flags);
 
@@ -6968,9 +6966,11 @@ void show_state_filter(unsigned long state_filter)
 	struct task_struct *g, *p;
 
 #if BITS_PER_LONG == 32
-	pr_info("  task                PC stack   pid father\n");
+	printk(KERN_INFO
+		"  task                PC stack   pid father\n");
 #else
-	pr_info("  task                        PC stack   pid father\n");
+	printk(KERN_INFO
+		"  task                        PC stack   pid father\n");
 #endif
 	read_lock(&tasklist_lock);
 	do_each_thread(g, p) {
@@ -7828,44 +7828,48 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
 
 	if (!(sd->flags & SD_LOAD_BALANCE)) {
-		pr_cont("does not load-balance\n");
+		printk("does not load-balance\n");
 		if (sd->parent)
-			pr_err("ERROR: !SD_LOAD_BALANCE domain has parent\n");
+			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
+					" has parent");
 		return -1;
 	}
 
-	pr_cont("span %s level %s\n", str, sd->name);
+	printk(KERN_CONT "span %s level %s\n", str, sd->name);
 
 	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
-		pr_err("ERROR: domain->span does not contain CPU%d\n", cpu);
+		printk(KERN_ERR "ERROR: domain->span does not contain "
+				"CPU%d\n", cpu);
 	}
 	if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
-		pr_err("ERROR: domain->groups does not contain CPU%d\n", cpu);
+		printk(KERN_ERR "ERROR: domain->groups does not contain"
+				" CPU%d\n", cpu);
 	}
 
 	printk(KERN_DEBUG "%*s groups:", level + 1, "");
 	do {
 		if (!group) {
-			pr_cont("\n");
-			pr_err("ERROR: group is NULL\n");
+			printk("\n");
+			printk(KERN_ERR "ERROR: group is NULL\n");
 			break;
 		}
 
 		if (!group->cpu_power) {
-			pr_cont("\n");
-			pr_err("ERROR: domain->cpu_power not set\n");
+			printk(KERN_CONT "\n");
+			printk(KERN_ERR "ERROR: domain->cpu_power not "
+					"set\n");
 			break;
 		}
 
 		if (!cpumask_weight(sched_group_cpus(group))) {
-			pr_cont("\n");
-			pr_err("ERROR: empty group\n");
+			printk(KERN_CONT "\n");
+			printk(KERN_ERR "ERROR: empty group\n");
 			break;
 		}
 
 		if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
-			pr_cont("\n");
-			pr_err("ERROR: repeated CPUs\n");
+			printk(KERN_CONT "\n");
+			printk(KERN_ERR "ERROR: repeated CPUs\n");
 			break;
 		}
 
@@ -7873,21 +7877,23 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 
 		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
 
-		pr_cont(" %s", str);
+		printk(KERN_CONT " %s", str);
 		if (group->cpu_power != SCHED_LOAD_SCALE) {
-			pr_cont(" (cpu_power = %d)", group->cpu_power);
+			printk(KERN_CONT " (cpu_power = %d)",
+				group->cpu_power);
 		}
 
 		group = group->next;
 	} while (group != sd->groups);
-	pr_cont("\n");
+	printk(KERN_CONT "\n");
 
 	if (!cpumask_equal(sched_domain_span(sd), groupmask))
-		pr_err("ERROR: groups don't span domain->span\n");
+		printk(KERN_ERR "ERROR: groups don't span domain->span\n");
 
 	if (sd->parent &&
 	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
-		pr_err("ERROR: parent span is not a superset of domain->span\n");
+		printk(KERN_ERR "ERROR: parent span is not a superset "
+			"of domain->span\n");
 	return 0;
 }
 
@@ -8443,7 +8449,8 @@ static int build_numa_sched_groups(struct s_data *d,
 	sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
 			  GFP_KERNEL, num);
 	if (!sg) {
-		pr_warning("Can not alloc domain group for node %d\n", num);
+		printk(KERN_WARNING "Can not alloc domain group for node %d\n",
+		       num);
 		return -ENOMEM;
 	}
 	d->sched_group_nodes[num] = sg;
@@ -8472,8 +8479,8 @@ static int build_numa_sched_groups(struct s_data *d,
 		sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
 				  GFP_KERNEL, num);
 		if (!sg) {
-			pr_warning("Can not alloc domain group for node %d\n",
-				   j);
+			printk(KERN_WARNING
+			       "Can not alloc domain group for node %d\n", j);
 			return -ENOMEM;
 		}
 		sg->cpu_power = 0;
@@ -8701,7 +8708,7 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 	d->sched_group_nodes = kcalloc(nr_node_ids,
 				       sizeof(struct sched_group *), GFP_KERNEL);
 	if (!d->sched_group_nodes) {
-		pr_warning("Can not alloc sched group node list\n");
+		printk(KERN_WARNING "Can not alloc sched group node list\n");
 		return sa_notcovered;
 	}
 	sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
@@ -8718,7 +8725,7 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 		return sa_send_covered;
 	d->rd = alloc_rootdomain();
 	if (!d->rd) {
-		pr_warning("Cannot alloc root domain\n");
+		printk(KERN_WARNING "Cannot alloc root domain\n");
 		return sa_tmpmask;
 	}
 	return sa_rootdomain;
@@ -9700,11 +9707,13 @@ void __might_sleep(char *file, int line, int preempt_offset)
 		return;
 	prev_jiffy = jiffies;
 
-	pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
-	       file, line);
-	pr_err("in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
-	       in_atomic(), irqs_disabled(),
-	       current->pid, current->comm);
+	printk(KERN_ERR
+		"BUG: sleeping function called from invalid context at %s:%d\n",
+			file, line);
+	printk(KERN_ERR
+		"in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
+			in_atomic(), irqs_disabled(),
+			current->pid, current->comm);
 
 	debug_show_held_locks(current);
 	if (irqs_disabled())
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -35,7 +35,7 @@ static void
 dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
 {
 	raw_spin_unlock_irq(&rq->lock);
-	pr_err("bad: scheduling from the idle thread!\n");
+	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
 	dump_stack();
 	raw_spin_lock_irq(&rq->lock);
 }
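
[ Background note on KERN_CONT, which the revert leans on heavily above:
  a printk() with KERN_CONT appends to the previous record instead of
  starting a new log line, so a function such as sched_show_task() can
  build one table row out of several calls. A minimal, hypothetical demo
  module (cont_demo is an invented name, not from this commit):

      #include <linux/init.h>
      #include <linux/kernel.h>
      #include <linux/module.h>

      static int __init cont_demo_init(void)
      {
              printk(KERN_INFO "cont_demo:"); /* opens the record */
              printk(KERN_CONT " one");       /* appended to it */
              printk(KERN_CONT " two\n");     /* '\n' closes the line */
              return 0;
      }

      static void __exit cont_demo_exit(void)
      {
      }

      module_init(cont_demo_init);
      module_exit(cont_demo_exit);
      MODULE_LICENSE("GPL");

  On load, the console shows a single line: "cont_demo: one two". ]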