#ifndef _LINUX_KERNEL_STAT_H
#define _LINUX_KERNEL_STAT_H

#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <asm/cputime.h>

/*
 * 'kernel_stat.h' contains the definitions needed for doing
 * some kernel statistics (CPU usage, context switches ...),
 * used by rstatd/perfmeter
 */

struct cpu_usage_stat {
	cputime64_t user;
	cputime64_t nice;
	cputime64_t system;
	cputime64_t softirq;
	cputime64_t irq;
	cputime64_t idle;
	cputime64_t iowait;
	cputime64_t steal;
	cputime64_t guest;
	cputime64_t guest_nice;
};

struct kernel_stat {
	struct cpu_usage_stat	cpustat;
#ifndef CONFIG_GENERIC_HARDIRQS
	unsigned int irqs[NR_IRQS];
#endif
	unsigned long irqs_sum;
	unsigned int softirqs[NR_SOFTIRQS];
};

DECLARE_PER_CPU(struct kernel_stat, kstat);

#define kstat_cpu(cpu)	per_cpu(kstat, cpu)
/* Must have preemption disabled for this to be meaningful. */
#define kstat_this_cpu	__get_cpu_var(kstat)
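
/*
 * Usage sketch (illustrative only, not part of this header's API; the helper
 * name example_local_irqs_sum is hypothetical): reading the current CPU's
 * statistics through kstat_this_cpu.  Preemption must stay disabled while the
 * per-cpu reference is used, hence the preempt_disable()/preempt_enable()
 * pair around the access.
 */
static inline unsigned long example_local_irqs_sum(void)
{
	unsigned long sum;

	preempt_disable();		/* pin this task to the current CPU */
	sum = kstat_this_cpu.irqs_sum;	/* safe: we cannot migrate here */
	preempt_enable();

	return sum;
}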

extern unsigned long long nr_context_switches(void);

#ifndef CONFIG_GENERIC_HARDIRQS

struct irq_desc;

static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
					    struct irq_desc *desc)
{
	__this_cpu_inc(kstat.irqs[irq]);
	__this_cpu_inc(kstat.irqs_sum);
}

static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	return kstat_cpu(cpu).irqs[irq];
}
#else
#include <linux/irq.h>
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);

#define kstat_incr_irqs_this_cpu(irqno, DESC)		\
do {							\
	__this_cpu_inc(*(DESC)->kstat_irqs);		\
	__this_cpu_inc(kstat.irqs_sum);			\
} while (0)

#endif

static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
{
	__this_cpu_inc(kstat.softirqs[irq]);
}

static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
{
	return kstat_cpu(cpu).softirqs[irq];
}
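
/*
 * Usage sketch (illustrative only; the helper name example_softirqs_sum is
 * hypothetical): total number of times a given softirq has run on any CPU
 * since boot, accumulated the same way kstat_irqs() below sums hardirq
 * counts across the possible CPUs.
 */
static inline unsigned int example_softirqs_sum(unsigned int irq)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += kstat_softirqs_cpu(irq, cpu);

	return sum;
}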

/*
 * Number of interrupts per specific IRQ source, since bootup
 */
#ifndef CONFIG_GENERIC_HARDIRQS
static inline unsigned int kstat_irqs(unsigned int irq)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += kstat_irqs_cpu(irq, cpu);

	return sum;
}
#else
extern unsigned int kstat_irqs(unsigned int irq);
#endif

/*
 * Number of interrupts per cpu, since bootup
 */
static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
{
	return kstat_cpu(cpu).irqs_sum;
}
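
/*
 * Usage sketch (illustrative only; the helper name example_total_irqs_sum is
 * hypothetical): a system-wide interrupt total since boot, built from the
 * per-cpu irqs_sum counters.  This roughly mirrors how fs/proc/stat.c
 * accumulates the "intr" line of /proc/stat.
 */
static inline unsigned long example_total_irqs_sum(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += kstat_cpu_irqs_sum(cpu);

	return sum;
}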

/*
 * Lock/unlock the current runqueue - to extract task statistics:
 */
extern unsigned long long task_delta_exec(struct task_struct *);

extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
extern void account_steal_time(cputime_t);
extern void account_idle_time(cputime_t);

extern void account_process_tick(struct task_struct *, int user);
extern void account_steal_ticks(unsigned long ticks);
extern void account_idle_ticks(unsigned long ticks);

#endif /* _LINUX_KERNEL_STAT_H */