#ifndef _LINUX_KERNEL_VTIME_H
#define _LINUX_KERNEL_VTIME_H

#include <linux/context_tracking_state.h>
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/vtime.h>
#endif


struct task_struct;

/*
 * vtime_accounting_cpu_enabled() definitions/declarations
 */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
static inline bool vtime_accounting_cpu_enabled(void) { return true; }
#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)
/*
 * Checks if vtime is enabled on some CPU. Cputime readers want to be careful
 * in that case and compute the tickless cputime.
 * For now vtime state is tied to context tracking. We might want to decouple
 * those later if necessary.
 */
static inline bool vtime_accounting_enabled(void)
{
	return context_tracking_is_enabled();
}

static inline bool vtime_accounting_cpu_enabled(void)
{
	if (vtime_accounting_enabled()) {
		if (context_tracking_cpu_is_enabled())
			return true;
	}

	return false;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
static inline bool vtime_accounting_cpu_enabled(void) { return false; }
#endif


/*
 * Common vtime APIs
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#ifdef __ARCH_HAS_VTIME_TASK_SWITCH
extern void vtime_task_switch(struct task_struct *prev);
#else
extern void vtime_common_task_switch(struct task_struct *prev);
static inline void vtime_task_switch(struct task_struct *prev)
{
	if (vtime_accounting_cpu_enabled())
		vtime_common_task_switch(prev);
}
#endif /* __ARCH_HAS_VTIME_TASK_SWITCH */

extern void vtime_account_system(struct task_struct *tsk);
extern void vtime_account_idle(struct task_struct *tsk);

#else /* !CONFIG_VIRT_CPU_ACCOUNTING */

static inline void vtime_task_switch(struct task_struct *prev) { }
static inline void vtime_account_system(struct task_struct *tsk) { }
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void arch_vtime_task_switch(struct task_struct *tsk);
extern void vtime_user_enter(struct task_struct *tsk);
extern void vtime_user_exit(struct task_struct *tsk);
extern void vtime_guest_enter(struct task_struct *tsk);
extern void vtime_guest_exit(struct task_struct *tsk);
extern void vtime_init_idle(struct task_struct *tsk, int cpu);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
static inline void vtime_user_enter(struct task_struct *tsk) { }
static inline void vtime_user_exit(struct task_struct *tsk) { }
static inline void vtime_guest_enter(struct task_struct *tsk) { }
static inline void vtime_guest_exit(struct task_struct *tsk) { }
static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
#endif

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
extern void vtime_account_irq_enter(struct task_struct *tsk);
static inline void vtime_account_irq_exit(struct task_struct *tsk)
{
	/* On hard|softirq exit we always account to hard|softirq cputime */
	vtime_account_system(tsk);
}
extern void vtime_flush(struct task_struct *tsk);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
static inline void vtime_account_irq_exit(struct task_struct *tsk) { }
static inline void vtime_flush(struct task_struct *tsk) { }
#endif


#ifdef CONFIG_IRQ_TIME_ACCOUNTING
extern void irqtime_account_irq(struct task_struct *tsk);
#else
static inline void irqtime_account_irq(struct task_struct *tsk) { }
#endif

static inline void account_irq_enter_time(struct task_struct *tsk)
{
	vtime_account_irq_enter(tsk);
	irqtime_account_irq(tsk);
}

static inline void account_irq_exit_time(struct task_struct *tsk)
{
	vtime_account_irq_exit(tsk);
	irqtime_account_irq(tsk);
}
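
/*
 * Illustrative usage sketch, not part of the upstream header: the
 * account_irq_enter_time()/account_irq_exit_time() helpers above are
 * intended for the hardirq entry/exit paths. The __irq_enter() and
 * __irq_exit() macros in <linux/hardirq.h> are the usual callers and
 * do roughly the following:
 *
 *	account_irq_enter_time(current);
 *	preempt_count_add(HARDIRQ_OFFSET);
 *	... handle the interrupt ...
 *	account_irq_exit_time(current);
 *	preempt_count_sub(HARDIRQ_OFFSET);
 *
 * Each helper then resolves to native vtime accounting, IRQ time
 * accounting, or an empty stub, depending on
 * CONFIG_VIRT_CPU_ACCOUNTING_NATIVE and CONFIG_IRQ_TIME_ACCOUNTING.
 */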

#endif /* _LINUX_KERNEL_VTIME_H */