/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022, Microsoft Corporation.
 *
 * Authors:
 *   Beau Belgrave <[email protected]>
 */

#ifndef _LINUX_USER_EVENTS_H
#define _LINUX_USER_EVENTS_H

#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/mm_types.h>
#include <linux/workqueue.h>
#include <uapi/linux/user_events.h>

#ifdef CONFIG_USER_EVENTS
struct user_event_mm {
	struct list_head	mms_link;
	struct list_head	enablers;
	struct mm_struct	*mm;
	/* Used for one-shot lists, protected by event_mutex */
	struct user_event_mm	*next;
	refcount_t		refcnt;
	refcount_t		tasks;
	struct rcu_work		put_rwork;
};

extern void user_event_mm_dup(struct task_struct *t,
			      struct user_event_mm *old_mm);

extern void user_event_mm_remove(struct task_struct *t);

static inline void user_events_fork(struct task_struct *t,
				    unsigned long clone_flags)
{
	struct user_event_mm *old_mm;

	if (!t || !current->user_event_mm)
		return;

	old_mm = current->user_event_mm;

	if (clone_flags & CLONE_VM) {
		t->user_event_mm = old_mm;
		refcount_inc(&old_mm->tasks);
		return;
	}

	user_event_mm_dup(t, old_mm);
}

static inline void user_events_execve(struct task_struct *t)
{
	if (!t || !t->user_event_mm)
		return;

	user_event_mm_remove(t);
}

static inline void user_events_exit(struct task_struct *t)
{
	if (!t || !t->user_event_mm)
		return;

	user_event_mm_remove(t);
}
#else
static inline void user_events_fork(struct task_struct *t,
				    unsigned long clone_flags)
{
}

static inline void user_events_execve(struct task_struct *t)
{
}

static inline void user_events_exit(struct task_struct *t)
{
}
#endif /* CONFIG_USER_EVENTS */

#endif /* _LINUX_USER_EVENTS_H */