#ifndef _LINUX_KERNEL_STAT_H
#define _LINUX_KERNEL_STAT_H

#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/irq.h>
#include <asm/cputime.h>

/*
 * 'kernel_stat.h' contains the definitions needed for doing
 * some kernel statistics (CPU usage, context switches ...),
 * used by rstatd/perfmeter
 */

enum cpu_usage_stat {
	CPUTIME_USER,
	CPUTIME_NICE,
	CPUTIME_SYSTEM,
	CPUTIME_SOFTIRQ,
	CPUTIME_IRQ,
	CPUTIME_IDLE,
	CPUTIME_IOWAIT,
	CPUTIME_STEAL,
	CPUTIME_GUEST,
	CPUTIME_GUEST_NICE,
	NR_STATS,
};

struct kernel_cpustat {
	u64 cpustat[NR_STATS];
};

struct kernel_stat {
#ifndef CONFIG_GENERIC_HARDIRQS
	unsigned int irqs[NR_IRQS];
#endif
	unsigned long irqs_sum;
	unsigned int softirqs[NR_SOFTIRQS];
};

DECLARE_PER_CPU(struct kernel_stat, kstat);
DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

/* Must have preemption disabled for this to be meaningful. */
#define kstat_this_cpu (&__get_cpu_var(kstat))
#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat))
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)

extern unsigned long long nr_context_switches(void);

#ifndef CONFIG_GENERIC_HARDIRQS

struct irq_desc;

static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
					    struct irq_desc *desc)
{
	__this_cpu_inc(kstat.irqs[irq]);
	__this_cpu_inc(kstat.irqs_sum);
}

static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	return kstat_cpu(cpu).irqs[irq];
}
#else
#include <linux/irq.h>
extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);

#define kstat_incr_irqs_this_cpu(irqno, DESC)		\
do {							\
	__this_cpu_inc(*(DESC)->kstat_irqs);		\
	__this_cpu_inc(kstat.irqs_sum);			\
} while (0)

#endif

static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
{
	__this_cpu_inc(kstat.softirqs[irq]);
}

static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
{
	return kstat_cpu(cpu).softirqs[irq];
}

/*
 * Number of interrupts per specific IRQ source, since bootup
 */
#ifndef CONFIG_GENERIC_HARDIRQS
static inline unsigned int kstat_irqs(unsigned int irq)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += kstat_irqs_cpu(irq, cpu);

	return sum;
}
#else
extern unsigned int kstat_irqs(unsigned int irq);
#endif

/*
 * Number of interrupts per cpu, since bootup
 */
static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
{
	return kstat_cpu(cpu).irqs_sum;
}

/*
 * Lock/unlock the current runqueue - to extract task statistics:
 */
extern unsigned long long task_delta_exec(struct task_struct *);

extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
extern void account_steal_time(cputime_t);
extern void account_idle_time(cputime_t);

extern void account_process_tick(struct task_struct *, int user);
extern void account_steal_ticks(unsigned long ticks);
extern void account_idle_ticks(unsigned long ticks);

#endif /* _LINUX_KERNEL_STAT_H */
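
/*
 * Illustrative sketch, not part of this header: how a consumer (in the
 * style of fs/proc/stat.c) might aggregate the per-cpu counters declared
 * above.  The helper names total_cpu_time() and total_irqs() are
 * hypothetical; the accessors, CPUTIME_* indices, and for_each_possible_cpu()
 * iterator are the ones provided by kernel_stat.h and cpumask.h.
 */

#include <linux/kernel_stat.h>
#include <linux/cpumask.h>

/* Sum one cpustat slot (e.g. CPUTIME_USER) over all possible CPUs. */
static u64 total_cpu_time(enum cpu_usage_stat idx)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += kcpustat_cpu(cpu).cpustat[idx];

	return sum;
}

/* Total hardirqs since boot, accumulated by kstat_incr_irqs_this_cpu(). */
static unsigned long total_irqs(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += kstat_cpu_irqs_sum(cpu);

	return sum;
}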