#ifndef _LINUX_KERNEL_VTIME_H
#define _LINUX_KERNEL_VTIME_H

/*
 * Virtual CPU time accounting (vtime).
 *
 * Declares the hooks used to charge CPU time to system/user/idle buckets
 * and to hard/soft IRQ time.  Each group of externs is paired with empty
 * static inline stubs so that callers compile to nothing when the
 * corresponding config option is disabled.
 */

struct task_struct;	/* opaque here; only pointers are passed through */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void vtime_task_switch(struct task_struct *prev);
extern void vtime_account_system(struct task_struct *tsk);
extern void vtime_account_system_irqsafe(struct task_struct *tsk);
extern void vtime_account_idle(struct task_struct *tsk);
extern void vtime_account_user(struct task_struct *tsk);
extern void vtime_account(struct task_struct *tsk);
#else
/*
 * Stubs for !CONFIG_VIRT_CPU_ACCOUNTING.  Only the functions that are
 * called from generic (non config-gated) code need stubs here;
 * vtime_account_idle()/vtime_account_user() are referenced solely from
 * config-gated callers and so have none.
 */
static inline void vtime_task_switch(struct task_struct *prev) { }
static inline void vtime_account_system(struct task_struct *tsk) { }
static inline void vtime_account_system_irqsafe(struct task_struct *tsk) { }
static inline void vtime_account(struct task_struct *tsk) { }
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
extern void irqtime_account_irq(struct task_struct *tsk);
#else
static inline void irqtime_account_irq(struct task_struct *tsk) { }
#endif

/*
 * Called on hard/soft IRQ entry: flush pending vtime and note the IRQ
 * entry timestamp for irqtime accounting.
 */
static inline void vtime_account_irq_enter(struct task_struct *tsk)
{
	/*
	 * A hardirq can interrupt the idle task at any time, so we need
	 * vtime_account(), which performs the idle check under
	 * CONFIG_VIRT_CPU_ACCOUNTING.
	 * A softirq can also interrupt the idle task directly if it calls
	 * local_bh_enable().  Such a case probably doesn't exist, but we
	 * never know.
	 * Ksoftirqd is not a concern because idle time is flushed on
	 * context switch.  Softirqs run at the end of hardirqs are also
	 * not a problem because the idle time has already been flushed on
	 * hardirq entry.
	 */
	vtime_account(tsk);
	irqtime_account_irq(tsk);
}

/*
 * Called on hard/soft IRQ exit: charge the elapsed time and close the
 * irqtime interval opened by vtime_account_irq_enter().
 */
static inline void vtime_account_irq_exit(struct task_struct *tsk)
{
	/* On hard|softirq exit we always account to hard|softirq cputime */
	vtime_account_system(tsk);
	irqtime_account_irq(tsk);
}

#endif /* _LINUX_KERNEL_VTIME_H */