/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_ENTRYKVM_H
#define __LINUX_ENTRYKVM_H

#include <linux/static_call_types.h>
#include <linux/resume_user_mode.h>
#include <linux/syscalls.h>
#include <linux/seccomp.h>
#include <linux/sched.h>
#include <linux/tick.h>

/* Transfer to guest mode work */
#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK

#ifndef ARCH_XFER_TO_GUEST_MODE_WORK
# define ARCH_XFER_TO_GUEST_MODE_WORK	(0)
#endif

#define XFER_TO_GUEST_MODE_WORK						\
	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | _TIF_SIGPENDING |\
	 _TIF_NOTIFY_SIGNAL | _TIF_NOTIFY_RESUME |			\
	 ARCH_XFER_TO_GUEST_MODE_WORK)

struct kvm_vcpu;

/**
 * arch_xfer_to_guest_mode_handle_work - Architecture specific xfer to guest
 *					 mode work handling function.
 * @vcpu:	Pointer to current's VCPU data
 * @ti_work:	Cached TIF flags gathered in xfer_to_guest_mode_handle_work()
 *
 * Invoked from xfer_to_guest_mode_handle_work(). Defaults to NOOP. Can be
 * replaced by architecture specific code.
 */
static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
						      unsigned long ti_work);

#ifndef arch_xfer_to_guest_mode_work
static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
						      unsigned long ti_work)
{
	return 0;
}
#endif

/**
 * xfer_to_guest_mode_handle_work - Check and handle pending work which needs
 *				    to be handled before going to guest mode
 * @vcpu:	Pointer to current's VCPU data
 *
 * Returns: 0 or an error code
 */
int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);

/**
 * xfer_to_guest_mode_prepare - Perform last minute preparation work that
 *				needs to be handled while IRQs are disabled
 *				upon entering the guest.
 *
 * Has to be invoked with interrupts disabled before the last call
 * to xfer_to_guest_mode_work_pending().
 */
static inline void xfer_to_guest_mode_prepare(void)
{
	lockdep_assert_irqs_disabled();
	tick_nohz_user_enter_prepare();
}

/**
 * __xfer_to_guest_mode_work_pending - Check if work is pending
 *
 * Returns: True if work pending, False otherwise.
 *
 * Bare variant of xfer_to_guest_mode_work_pending(). Can be called from
 * interrupt enabled code for racy quick checks with care.
 */
static inline bool __xfer_to_guest_mode_work_pending(void)
{
	unsigned long ti_work = read_thread_flags();

	return !!(ti_work & XFER_TO_GUEST_MODE_WORK);
}

/**
 * xfer_to_guest_mode_work_pending - Check if work is pending which needs to be
 *				     handled before returning to guest mode
 *
 * Returns: True if work pending, False otherwise.
 *
 * Has to be invoked with interrupts disabled before the transition to
 * guest mode.
 */
static inline bool xfer_to_guest_mode_work_pending(void)
{
	lockdep_assert_irqs_disabled();
	return __xfer_to_guest_mode_work_pending();
}
#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */

#endif
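
/*
 * Illustrative sketch (an assumption, not part of this header): how an
 * architecture's KVM vcpu run loop would typically use the helpers above.
 * my_arch_vcpu_run() and my_arch_enter_guest() are hypothetical names; the
 * sequence follows the documented contract: handle pending work while
 * interrupts are still enabled, then prepare and re-check with interrupts
 * disabled right before the transition to guest mode.
 *
 *	static int my_arch_vcpu_run(struct kvm_vcpu *vcpu)
 *	{
 *		int ret;
 *
 *		for (;;) {
 *			// Racy quick check, interrupts still enabled.
 *			if (__xfer_to_guest_mode_work_pending()) {
 *				ret = xfer_to_guest_mode_handle_work(vcpu);
 *				if (ret)
 *					return ret;
 *			}
 *
 *			local_irq_disable();
 *			xfer_to_guest_mode_prepare();
 *
 *			// Final check with interrupts disabled; if work
 *			// showed up in the meantime, go around again.
 *			if (xfer_to_guest_mode_work_pending()) {
 *				local_irq_enable();
 *				continue;
 *			}
 *
 *			my_arch_enter_guest(vcpu);	// hypothetical arch hook
 *			local_irq_enable();
 *		}
 *	}
 */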