/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>
#include <linux/sync_core.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

extern void __mmdrop(struct mm_struct *mm);

static inline void mmdrop(struct mm_struct *mm)
{
	/*
	 * The implicit full barrier implied by atomic_dec_and_test() is
	 * required by the membarrier system call before returning to
	 * user-space, after storing to rq->curr.
	 */
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.rst> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/*
 * Same as above, but performs the slow path from async context.
 * Can be called from atomic context as well.
 */
void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
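
/*
 * Usage sketch (illustrative only; @task and the work done under the
 * lock are assumed to be supplied by the caller): the common pattern
 * for safely accessing another task's address space is
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		mmap_read_lock(mm);
 *		// mm_users is elevated here, so the address space
 *		// cannot be torn down underneath us.
 *		mmap_read_unlock(mm);
 *		mmput(mm);
 *	}
 *
 * For a long-lived pin, take an mm_count reference with mmgrab() and
 * drop it with mmdrop(), upgrading via mmget_not_zero() only for the
 * short windows in which the address space itself is touched.
 */
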
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access() with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct on exit() */
extern void exit_mm_release(struct task_struct *, struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct on exec() */
extern void exec_mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm,
				  struct rlimit *rlim_stack);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm,
					 struct rlimit *rlim_stack) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * We need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM.
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}

/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	unsigned int pflags = READ_ONCE(current->flags);

	if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
		/*
		 * NOIO implies both NOIO and NOFS and it is a weaker context
		 * so always make sure it takes precedence.
		 */
		if (pflags & PF_MEMALLOC_NOIO)
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (pflags & PF_MEMALLOC_NOFS)
			flags &= ~__GFP_FS;
	}
	return flags;
}
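
/*
 * Usage sketch (illustrative only; @page is a caller-supplied local):
 * the scoped NOFS helpers defined further down combine with
 * current_gfp_context() so that reclaim honours the caller's context:
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *
 *	// Within this scope, current_gfp_context(GFP_KERNEL) evaluates
 *	// to GFP_KERNEL & ~__GFP_FS, so the page allocator will not
 *	// recurse into filesystem reclaim while fs locks are held.
 *	page = alloc_page(GFP_KERNEL);
 *
 *	memalloc_nofs_restore(nofs_flags);
 */
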
#ifdef CONFIG_LOCKDEP
extern void __fs_reclaim_acquire(void);
extern void __fs_reclaim_release(void);
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void __fs_reclaim_acquire(void) { }
static inline void __fs_reclaim_release(void) { }
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif

/**
 * might_alloc - Mark possible allocation sites
 * @gfp_mask: gfp_t flags that would be used to allocate
 *
 * Similar to might_sleep() and other annotations, this can be used in
 * functions that might allocate, but often don't. Compiles to nothing
 * without CONFIG_LOCKDEP. Includes a conditional might_sleep() if
 * @gfp_mask allows blocking.
 */
static inline void might_alloc(gfp_t gfp_mask)
{
	fs_reclaim_acquire(gfp_mask);
	fs_reclaim_release(gfp_mask);

	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
}

/**
 * memalloc_noio_save - Marks implicit GFP_NOIO allocation scope.
 *
 * This function marks the beginning of the GFP_NOIO allocation scope.
 * All further allocations will implicitly drop the __GFP_IO flag and so
 * they are safe for the IO critical section from the allocation recursion
 * point of view. Use memalloc_noio_restore() to end the scope with flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

/**
 * memalloc_noio_restore - Ends the implicit GFP_NOIO scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOIO scope started by memalloc_noio_save().
 * Always make sure that @flags is the return value from the pairing
 * memalloc_noio_save() call.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}

/**
 * memalloc_nofs_save - Marks implicit GFP_NOFS allocation scope.
 *
 * This function marks the beginning of the GFP_NOFS allocation scope.
 * All further allocations will implicitly drop the __GFP_FS flag and so
 * they are safe for the FS critical section from the allocation recursion
 * point of view. Use memalloc_nofs_restore() to end the scope with flags
 * returned by this function.
 *
 * This function is safe to be used from any context.
 */
static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

/**
 * memalloc_nofs_restore - Ends the implicit GFP_NOFS scope.
 * @flags: Flags to restore.
 *
 * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save().
 * Always make sure that @flags is the return value from the pairing
 * memalloc_nofs_save() call.
 */
static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}

static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}

#ifdef CONFIG_CMA
static inline unsigned int memalloc_nocma_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOCMA;

	current->flags |= PF_MEMALLOC_NOCMA;
	return flags;
}

static inline void memalloc_nocma_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags;
}
#else
static inline unsigned int memalloc_nocma_save(void)
{
	return 0;
}

static inline void memalloc_nocma_restore(unsigned int flags)
{
}
#endif

#ifdef CONFIG_MEMCG
DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
/**
 * set_active_memcg - Starts the remote memcg charging scope.
 * @memcg: memcg to charge.
 *
 * This function marks the beginning of the remote memcg charging scope. All the
 * __GFP_ACCOUNT allocations till the end of the scope will be charged to the
 * given memcg.
 *
 * NOTE: This function can nest. Users must save the return value and
 * reset the previous value after their own charging scope is over.
 */
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	struct mem_cgroup *old;

	if (in_interrupt()) {
		old = this_cpu_read(int_active_memcg);
		this_cpu_write(int_active_memcg, memcg);
	} else {
		old = current->active_memcg;
		current->active_memcg = memcg;
	}

	return old;
}
#else
static inline struct mem_cgroup *
set_active_memcg(struct mem_cgroup *memcg)
{
	return NULL;
}
#endif
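
/*
 * Usage sketch (illustrative only; assumes the caller holds a reference
 * on @memcg and that @ptr and @size are its own locals): charging an
 * allocation to a remote memcg instead of the current task's:
 *
 *	struct mem_cgroup *old_memcg = set_active_memcg(memcg);
 *
 *	// __GFP_ACCOUNT allocations in this window are charged to
 *	// @memcg rather than to the memcg of the current task.
 *	ptr = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT);
 *
 *	set_active_memcg(old_memcg);
 */
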
302 * 303 * This function marks the beginning of the remote memcg charging scope. All the 304 * __GFP_ACCOUNT allocations till the end of the scope will be charged to the 305 * given memcg. 306 * 307 * NOTE: This function can nest. Users must save the return value and 308 * reset the previous value after their own charging scope is over. 309 */ 310 static inline struct mem_cgroup * 311 set_active_memcg(struct mem_cgroup *memcg) 312 { 313 struct mem_cgroup *old; 314 315 if (in_interrupt()) { 316 old = this_cpu_read(int_active_memcg); 317 this_cpu_write(int_active_memcg, memcg); 318 } else { 319 old = current->active_memcg; 320 current->active_memcg = memcg; 321 } 322 323 return old; 324 } 325 #else 326 static inline struct mem_cgroup * 327 set_active_memcg(struct mem_cgroup *memcg) 328 { 329 return NULL; 330 } 331 #endif 332 333 #ifdef CONFIG_MEMBARRIER 334 enum { 335 MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0), 336 MEMBARRIER_STATE_PRIVATE_EXPEDITED = (1U << 1), 337 MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = (1U << 2), 338 MEMBARRIER_STATE_GLOBAL_EXPEDITED = (1U << 3), 339 MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = (1U << 4), 340 MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = (1U << 5), 341 MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ_READY = (1U << 6), 342 MEMBARRIER_STATE_PRIVATE_EXPEDITED_RSEQ = (1U << 7), 343 }; 344 345 enum { 346 MEMBARRIER_FLAG_SYNC_CORE = (1U << 0), 347 MEMBARRIER_FLAG_RSEQ = (1U << 1), 348 }; 349 350 #ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS 351 #include <asm/membarrier.h> 352 #endif 353 354 static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm) 355 { 356 if (current->mm != mm) 357 return; 358 if (likely(!(atomic_read(&mm->membarrier_state) & 359 MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE))) 360 return; 361 sync_core_before_usermode(); 362 } 363 364 extern void membarrier_exec_mmap(struct mm_struct *mm); 365 366 extern void membarrier_update_current_mm(struct mm_struct *next_mm); 367 368 #else 369 #ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS 370 static inline void membarrier_arch_switch_mm(struct mm_struct *prev, 371 struct mm_struct *next, 372 struct task_struct *tsk) 373 { 374 } 375 #endif 376 static inline void membarrier_exec_mmap(struct mm_struct *mm) 377 { 378 } 379 static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm) 380 { 381 } 382 static inline void membarrier_update_current_mm(struct mm_struct *next_mm) 383 { 384 } 385 #endif 386 387 #endif /* _LINUX_SCHED_MM_H */ 388