Mirror of https://github.com/adulau/aha.git, synced 2024-12-29 12:16:20 +00:00
Commit aa0ce5bbc2
Statistics for softirqs do not exist, although they would be as helpful as the
statistics for interrupts. This patch introduces counting of softirqs, exported
in /proc/softirqs.

When a softirq handler consumes much CPU time, /proc/stat looks like the
following:

$ while :; do cat /proc/stat | head -n1 ; sleep 10 ; done
cpu 88 0 408 739665 583 28 2 0 0
cpu 450 0 1090 740970 594 28 1294 0 0
                             ^^^^
                            softirq

In such a situation, /proc/softirqs shows us which softirq handler is being
invoked, so we can see the rate of increase of each softirq.

<before>
$ cat /proc/softirqs
               CPU0       CPU1       CPU2       CPU3
      HI          0          0          0          0
   TIMER     462850     462805     462782     462718
  NET_TX          0          0          0        365
  NET_RX       2472          2          2         40
   BLOCK          0          0        381       1164
 TASKLET          0          0          0        224
   SCHED     462654     462689     462698     462427
     RCU       3046       2423       3367       3173

<after>
$ cat /proc/softirqs
               CPU0       CPU1       CPU2       CPU3
      HI          0          0          0          0
   TIMER     463361     465077     465056     464991
  NET_TX         53          0          1        365
  NET_RX       3757          2          2         40
   BLOCK          0          0        398       1170
 TASKLET          0          0          0        224
   SCHED     463074     464318     464612     463330
     RCU       3505       2948       3947       3673

When the softirq CPU time is high, the rates of increase are the following:

 TIMER  : 220/sec     : CPU1-3
 NET_TX : 5/sec       : CPU0
 NET_RX : 120/sec     : CPU0
 SCHED  : 40-200/sec  : all CPUs
 RCU    : 45-58/sec   : all CPUs

The rates of increase when idle are the following:

 TIMER  : 250/sec
 SCHED  : 250/sec
 RCU    : 2/sec

It seems that many softirqs are invoked for receiving packets and for RCU.
This helps when checking the system.

Signed-off-by: Keika Kobayashi <kobayashi.kk@ncos.nec.co.jp>
Reviewed-by: Hiroshi Shimamoto <h-shimamoto@ct.jp.nec.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Eric Dumazet <dada1@cosmosbay.com>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
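If you would rather not compute the increase rates by hand, a small userspace
reader can do the subtraction. The sketch below is not part of the patch: the
file name softirq_rate.c, the 10-second interval, and the parsing assumptions
are purely illustrative. It samples /proc/softirqs twice, sums each row across
CPUs, and prints the increase per second; it assumes each data line is a
softirq name followed by one count per CPU, so adjust the parsing if your
kernel's output format differs.

/* softirq_rate.c - minimal sketch; not part of the patch */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define MAX_ROWS 32
#define INTERVAL 10	/* seconds between samples, as in the example above */

struct snapshot {
	char name[MAX_ROWS][16];
	unsigned long long total[MAX_ROWS];	/* count summed over all CPUs */
	int rows;
};

/* Read /proc/softirqs once; returns 0 on success. */
static int read_softirqs(struct snapshot *s)
{
	char line[512];
	FILE *f = fopen("/proc/softirqs", "r");

	if (!f)
		return -1;

	s->rows = 0;
	/* The first line only carries the CPU column headers; skip it. */
	if (!fgets(line, sizeof(line), f)) {
		fclose(f);
		return -1;
	}

	while (s->rows < MAX_ROWS && fgets(line, sizeof(line), f)) {
		char *tok = strtok(line, " \t\n:");
		unsigned long long sum = 0;

		if (!tok)
			continue;
		snprintf(s->name[s->rows], sizeof(s->name[0]), "%s", tok);
		while ((tok = strtok(NULL, " \t\n")) != NULL)
			sum += strtoull(tok, NULL, 10);
		s->total[s->rows++] = sum;
	}
	fclose(f);
	return 0;
}

int main(void)
{
	struct snapshot before, after;
	int i;

	if (read_softirqs(&before))
		return 1;
	sleep(INTERVAL);
	if (read_softirqs(&after))
		return 1;

	/* Rows appear in a fixed order, so matching by index is fine. */
	for (i = 0; i < before.rows; i++)
		printf("%-8s : %llu/sec\n", before.name[i],
		       (after.total[i] - before.total[i]) / INTERVAL);
	return 0;
}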
111 lines
2.6 KiB
C
#ifndef _LINUX_KERNEL_STAT_H
#define _LINUX_KERNEL_STAT_H

#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <asm/cputime.h>

/*
 * 'kernel_stat.h' contains the definitions needed for doing
 * some kernel statistics (CPU usage, context switches ...),
 * used by rstatd/perfmeter
 */

struct cpu_usage_stat {
	cputime64_t user;
	cputime64_t nice;
	cputime64_t system;
	cputime64_t softirq;
	cputime64_t irq;
	cputime64_t idle;
	cputime64_t iowait;
	cputime64_t steal;
	cputime64_t guest;
};

struct kernel_stat {
	struct cpu_usage_stat	cpustat;
#ifndef CONFIG_GENERIC_HARDIRQS
	unsigned int irqs[NR_IRQS];
#endif
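	/* per-CPU softirq event counts, shown in /proc/softirqs */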
	unsigned int softirqs[NR_SOFTIRQS];
};

DECLARE_PER_CPU(struct kernel_stat, kstat);

#define kstat_cpu(cpu)	per_cpu(kstat, cpu)
/* Must have preemption disabled for this to be meaningful. */
#define kstat_this_cpu	__get_cpu_var(kstat)

extern unsigned long long nr_context_switches(void);

#ifndef CONFIG_GENERIC_HARDIRQS
#define kstat_irqs_this_cpu(irq) \
	(kstat_this_cpu.irqs[irq])

struct irq_desc;

static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
					    struct irq_desc *desc)
{
	kstat_this_cpu.irqs[irq]++;
}

static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	return kstat_cpu(cpu).irqs[irq];
}
#else
#include <linux/irq.h>
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
#define kstat_irqs_this_cpu(DESC) \
	((DESC)->kstat_irqs[smp_processor_id()])
#define kstat_incr_irqs_this_cpu(irqno, DESC) \
	((DESC)->kstat_irqs[smp_processor_id()]++)

#endif

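/*
 * Softirq counterparts of the irq accessors above, added by this patch:
 * the softirq dispatch path bumps kstat_incr_softirqs_this_cpu() once
 * per handler invocation, and the /proc/softirqs reader pulls the
 * per-CPU totals back out through kstat_softirqs_cpu().
 */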
static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
{
	kstat_this_cpu.softirqs[irq]++;
}

static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
{
	return kstat_cpu(cpu).softirqs[irq];
}

/*
 * Number of interrupts per specific IRQ source, since bootup
 */
static inline unsigned int kstat_irqs(unsigned int irq)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += kstat_irqs_cpu(irq, cpu);

	return sum;
}


/*
 * Lock/unlock the current runqueue - to extract task statistics:
 */
extern unsigned long long task_delta_exec(struct task_struct *);

extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
extern void account_steal_time(cputime_t);
extern void account_idle_time(cputime_t);

extern void account_process_tick(struct task_struct *, int user);
extern void account_steal_ticks(unsigned long ticks);
extern void account_idle_ticks(unsigned long ticks);

#endif /* _LINUX_KERNEL_STAT_H */
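For context on how these counters get filled in: the header only declares the
accessors. The companion changes in this patch teach the softirq dispatch loop
in kernel/softirq.c to bump the counter before running each handler, and add a
/proc/softirqs seq_file reader that prints kstat_softirqs_cpu(i, cpu) for every
softirq/CPU pair. Below is a simplified sketch of the __do_softirq() hunk, not
a verbatim quote of the patch:

/* kernel/softirq.c, inside __do_softirq() - simplified sketch */
h = softirq_vec;

do {
	if (pending & 1) {
		/* h - softirq_vec is the softirq number (HI, TIMER, ...) */
		kstat_incr_softirqs_this_cpu(h - softirq_vec);
		h->action(h);
	}
	h++;
	pending >>= 1;
} while (pending);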