softirq: introduce statistics for softirq
[linux-2.6-block.git] / include/linux/kernel_stat.h
#ifndef _LINUX_KERNEL_STAT_H
#define _LINUX_KERNEL_STAT_H

#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <asm/cputime.h>

/*
 * 'kernel_stat.h' contains the definitions needed for doing
 * some kernel statistics (CPU usage, context switches ...),
 * used by rstatd/perfmeter
 */

struct cpu_usage_stat {
        cputime64_t user;
        cputime64_t nice;
        cputime64_t system;
        cputime64_t softirq;
        cputime64_t irq;
        cputime64_t idle;
        cputime64_t iowait;
        cputime64_t steal;
        cputime64_t guest;
};

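/*
 * Per-CPU kernel statistics: cpustat holds the cumulative CPU time
 * buckets; softirqs[] is the per-softirq event count introduced by
 * this patch.  irqs[] is only kept here when the generic hardirq
 * layer is not in use (with CONFIG_GENERIC_HARDIRQS the per-IRQ
 * counts live in struct irq_desc instead).
 */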
struct kernel_stat {
        struct cpu_usage_stat cpustat;
#ifndef CONFIG_GENERIC_HARDIRQS
        unsigned int irqs[NR_IRQS];
#endif
        unsigned int softirqs[NR_SOFTIRQS];
};

DECLARE_PER_CPU(struct kernel_stat, kstat);

#define kstat_cpu(cpu) per_cpu(kstat, cpu)
/* Must have preemption disabled for this to be meaningful. */
#define kstat_this_cpu __get_cpu_var(kstat)

extern unsigned long long nr_context_switches(void);

#ifndef CONFIG_GENERIC_HARDIRQS
#define kstat_irqs_this_cpu(irq) \
        (kstat_this_cpu.irqs[irq])

struct irq_desc;

static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
                                            struct irq_desc *desc)
{
        kstat_this_cpu.irqs[irq]++;
}

static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        return kstat_cpu(cpu).irqs[irq];
}
#else
#include <linux/irq.h>
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
#define kstat_irqs_this_cpu(DESC) \
        ((DESC)->kstat_irqs[smp_processor_id()])
#define kstat_incr_irqs_this_cpu(irqno, DESC) \
        ((DESC)->kstat_irqs[smp_processor_id()]++)

#endif

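/*
 * Per-softirq statistics: kstat_incr_softirqs_this_cpu() bumps the
 * calling CPU's count for a softirq vector (like kstat_this_cpu it is
 * only meaningful with preemption disabled); kstat_softirqs_cpu()
 * reads the count accumulated on a given CPU.
 */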
aa0ce5bb
KK
72static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
73{
74 kstat_this_cpu.softirqs[irq]++;
75}
76
77static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
78{
79 return kstat_cpu(cpu).softirqs[irq];
80}
81
1da177e4
LT
82/*
83 * Number of interrupts per specific IRQ source, since bootup
84 */
7f95ec9e 85static inline unsigned int kstat_irqs(unsigned int irq)
1da177e4 86{
7f95ec9e
YL
87 unsigned int sum = 0;
88 int cpu;
1da177e4 89
0a945022 90 for_each_possible_cpu(cpu)
7f95ec9e 91 sum += kstat_irqs_cpu(irq, cpu);
1da177e4
LT
92
93 return sum;
94}
95
aa9c4c0f
IM
96
97/*
98 * Lock/unlock the current runqueue - to extract task statistics:
99 */
bb34d92f 100extern unsigned long long task_delta_exec(struct task_struct *);
aa9c4c0f 101
457533a7
MS
102extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
103extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
79741dd3
MS
104extern void account_steal_time(cputime_t);
105extern void account_idle_time(cputime_t);
106
107extern void account_process_tick(struct task_struct *, int user);
108extern void account_steal_ticks(unsigned long ticks);
109extern void account_idle_ticks(unsigned long ticks);
1da177e4
LT
110
111#endif /* _LINUX_KERNEL_STAT_H */