xref: /linux-6.15/include/linux/mman.h (revision b6fb293f)
#ifndef _LINUX_MMAN_H
#define _LINUX_MMAN_H

#include <linux/mm.h>
#include <linux/percpu_counter.h>

#include <linux/atomic.h>
#include <uapi/linux/mman.h>

/*
 * Arrange for legacy / undefined architecture specific flags to be
 * ignored by mmap handling code.
 */
#ifndef MAP_32BIT
#define MAP_32BIT 0
#endif
#ifndef MAP_HUGE_2MB
#define MAP_HUGE_2MB 0
#endif
#ifndef MAP_HUGE_1GB
#define MAP_HUGE_1GB 0
#endif
#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0
#endif
#ifndef MAP_SYNC
#define MAP_SYNC 0
#endif
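/*
 * Example: on an architecture that does not define MAP_32BIT, the
 * fallback above makes "flags & MAP_32BIT" expand to "flags & 0", so
 * the flag is silently ignored and masks such as LEGACY_MAP_MASK below
 * still build without per-arch #ifdefs.
 */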

/*
 * The historical set of flags that all mmap implementations implicitly
 * support when a ->mmap_validate() op is not provided in file_operations.
 */
#define LEGACY_MAP_MASK (MAP_SHARED \
		| MAP_PRIVATE \
		| MAP_FIXED \
		| MAP_ANONYMOUS \
		| MAP_DENYWRITE \
		| MAP_EXECUTABLE \
		| MAP_UNINITIALIZED \
		| MAP_GROWSDOWN \
		| MAP_LOCKED \
		| MAP_NORESERVE \
		| MAP_POPULATE \
		| MAP_NONBLOCK \
		| MAP_STACK \
		| MAP_HUGETLB \
		| MAP_32BIT \
		| MAP_HUGE_2MB \
		| MAP_HUGE_1GB)
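/*
 * Rough usage sketch (hypothetical caller, for illustration only): mmap
 * flag validation can reject anything outside this mask when no per-file
 * validation hook is available, e.g.:
 *
 *	if (flags & ~LEGACY_MAP_MASK)
 *		return -EOPNOTSUPP;
 *
 * The exact call site and error code are details of the core mmap path,
 * not dictated by this header.
 */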

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;
extern struct percpu_counter vm_committed_as;

#ifdef CONFIG_SMP
extern s32 vm_committed_as_batch;
#else
#define vm_committed_as_batch 0
#endif

unsigned long vm_memory_committed(void);

static inline void vm_acct_memory(long pages)
{
	percpu_counter_add_batch(&vm_committed_as, pages, vm_committed_as_batch);
}

static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}
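/*
 * Typical (illustrative) pairing: charge the commitment first, then undo
 * it if the rest of the operation fails, e.g.:
 *
 *	vm_acct_memory(npages);
 *	if (setup_mapping() < 0)	(hypothetical failure path)
 *		vm_unacct_memory(npages);
 *
 * The per-CPU batch keeps vm_committed_as cheap to update on SMP.
 */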

/*
 * Allow architectures to handle additional protection bits
 */

#ifndef arch_calc_vm_prot_bits
#define arch_calc_vm_prot_bits(prot, pkey) 0
#endif

#ifndef arch_vm_get_page_prot
#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
#endif

#ifndef arch_validate_prot
/*
 * This is called from mprotect().  PROT_GROWSDOWN and PROT_GROWSUP have
 * already been masked out.
 *
 * Returns true if the prot flags are valid
 */
static inline bool arch_validate_prot(unsigned long prot)
{
	return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
}
#define arch_validate_prot arch_validate_prot
#endif
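/*
 * For example, the generic arch_validate_prot() above accepts
 * PROT_READ | PROT_WRITE but rejects any unknown bit; an architecture
 * with extra protection flags can supply its own definition (and the
 * matching #define) before this point to widen the accepted set.
 */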

/*
 * Optimisation macro.  It is equivalent to:
 *      (x & bit1) ? bit2 : 0
 * but this version is faster.
 * ("bit1" and "bit2" must be single bits)
 */
#define _calc_vm_trans(x, bit1, bit2) \
  ((!(bit1) || !(bit2)) ? 0 : \
  ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
   : ((x) & (bit1)) / ((bit1) / (bit2))))
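/*
 * Worked example (values are illustrative; the constants fold at compile
 * time): with bit1 == 0x1 and bit2 == 0x8, the macro expands to
 * ((x) & 0x1) * 8, i.e. a mask and a multiply/shift instead of a branch;
 * with bit1 == 0x8 and bit2 == 0x1 it becomes ((x) & 0x8) / 8.  If either
 * bit is 0 (a flag compiled out above), the whole term is the constant 0.
 */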

/*
 * Combine the mmap "prot" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_READ ) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC) |
	       arch_calc_vm_prot_bits(prot, pkey);
}
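/*
 * E.g. calc_vm_prot_bits(PROT_READ | PROT_WRITE, 0) evaluates to
 * VM_READ | VM_WRITE, plus whatever arch_calc_vm_prot_bits() adds
 * (such as protection-key bits on architectures that support them).
 */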

/*
 * Combine the mmap "flags" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_flag_bits(unsigned long flags)
{
	return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
	       _calc_vm_trans(flags, MAP_DENYWRITE,  VM_DENYWRITE ) |
	       _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    ) |
	       _calc_vm_trans(flags, MAP_SYNC,	     VM_SYNC      );
}
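/*
 * E.g. calc_vm_flag_bits(MAP_PRIVATE | MAP_LOCKED) yields VM_LOCKED;
 * flags with no VM_* counterpart here (MAP_PRIVATE, MAP_FIXED, ...)
 * simply contribute nothing, and if MAP_SYNC was compiled out above its
 * term is the constant 0.
 */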

unsigned long vm_commit_limit(void);
#endif /* _LINUX_MMAN_H */