/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

extern void mmdrop(struct mm_struct *mm);

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/* Same as above, but performs the slow path from async context.
 * Can be called from atomic context as well.
 */
void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
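/*
 * Example (an illustrative sketch, not an existing kernel function):
 * inspect_task_mm() and do_bounded_walk() below are hypothetical and only
 * show the intended pairing of the helpers declared above.
 *
 *	static int inspect_task_mm(struct task_struct *task)
 *	{
 *		struct mm_struct *mm = get_task_mm(task);
 *
 *		if (!mm)
 *			return -ESRCH;		// kernel thread or exiting task
 *		do_bounded_walk(mm);		// short, bounded use of the address space
 *		mmput(mm);			// release the mm_users reference
 *		return 0;
 *	}
 *
 * get_task_mm() takes an mm_users reference, so this pattern is only suitable
 * for short, bounded work. Code that needs to remember the mm indefinitely
 * should pin mm_count with mmgrab()/mmdrop() instead and upgrade with
 * mmget_not_zero() around each actual access to the address space.
 */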
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * We need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be a false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	/*
	 * NOIO implies both NOIO and NOFS and it is a weaker context
	 * so always make sure it takes precedence.
	 */
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~(__GFP_IO | __GFP_FS);
	else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
		flags &= ~__GFP_FS;
	return flags;
}

#ifdef CONFIG_LOCKDEP
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}

static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}

static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}
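/*
 * Example (an illustrative sketch): bracketing a section with
 * memalloc_nofs_save()/memalloc_nofs_restore() makes every allocation inside
 * it behave as if __GFP_FS were cleared, because the allocator applies
 * current_gfp_context() (above) to the caller's flags. scoped_nofs_alloc()
 * is hypothetical and assumes <linux/slab.h> for kmalloc()/kfree().
 *
 *	static void scoped_nofs_alloc(void)
 *	{
 *		unsigned int nofs_flags = memalloc_nofs_save();
 *		void *p = kmalloc(128, GFP_KERNEL);	// treated as GFP_NOFS here
 *
 *		kfree(p);
 *		memalloc_nofs_restore(nofs_flags);
 *	}
 *
 * The value returned by a *_save() helper must be handed back to the
 * matching *_restore() so that nested scopes unwind to the previous state
 * rather than clearing the flag unconditionally.
 */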
#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY		= (1U << 0),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED			= (1U << 1),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY			= (1U << 2),
	MEMBARRIER_STATE_GLOBAL_EXPEDITED			= (1U << 3),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY	= (1U << 4),
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE		= (1U << 5),
};

enum {
	MEMBARRIER_FLAG_SYNC_CORE	= (1U << 0),
};

#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
#include <asm/membarrier.h>
#endif

static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
	if (likely(!(atomic_read(&mm->membarrier_state) &
		     MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE)))
		return;
	sync_core_before_usermode();
}

static inline void membarrier_execve(struct task_struct *t)
{
	atomic_set(&t->mm->membarrier_state, 0);
}
#else
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS
static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
					     struct mm_struct *next,
					     struct task_struct *tsk)
{
}
#endif
static inline void membarrier_execve(struct task_struct *t)
{
}
static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */
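/*
 * Example (an illustrative userspace sketch, not part of this header): the
 * SYNC_CORE state bits above are set once a process registers for
 * core-serializing membarriers via the membarrier(2) syscall, e.g. a JIT
 * that needs every thread to execute a core-serializing instruction before
 * running freshly written code:
 *
 *	syscall(__NR_membarrier,
 *		MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE, 0);
 *	// later, after patching code:
 *	syscall(__NR_membarrier,
 *		MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0);
 *
 * On the kernel side, membarrier_mm_sync_core_before_usermode() is the hook
 * that issues sync_core_before_usermode() when switching back to an mm that
 * registered for SYNC_CORE membarriers.
 */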