#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
        struct anon_vma *root;          /* Root of this anon_vma tree */
        struct rw_semaphore rwsem;      /* W: modification, R: walking the list */
        /*
         * The refcount is taken on an anon_vma when there is no
         * guarantee that the vma or its page tables will exist for
         * the duration of the operation. A caller that takes the
         * reference is responsible for freeing the anon_vma if it
         * turns out to be the last user on release.
         */
        atomic_t refcount;

        /*
         * Count of child anon_vmas and VMAs which point to this anon_vma.
         *
         * This counter is used when deciding whether to reuse an anon_vma
         * instead of forking a new one. See the comments in anon_vma_clone().
         */
        unsigned degree;

        struct anon_vma *parent;        /* Parent of this anon_vma */

        /*
         * NOTE: the LSB of the rb_root.rb_node is set by
         * mm_take_all_locks() _after_ taking the above lock. So the
         * rb_root must only be read/written after taking the above lock
         * to be sure to see a valid next pointer. The LSB itself
         * is serialized by a system-wide lock only visible to
         * mm_take_all_locks() (mm_all_locks_mutex).
         */
        struct rb_root rb_root;         /* Interval tree of private "related" vmas */
};
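/*
 * Illustrative sketch (not part of the upstream header): the typical
 * refcount pattern. A caller that cannot rely on the vma staying alive
 * pins the anon_vma via page_get_anon_vma() and drops the pin with
 * put_anon_vma(), both declared below; the final put frees the object:
 *
 *      struct anon_vma *anon_vma = page_get_anon_vma(page);
 *      if (anon_vma) {
 *              anon_vma_lock_read(anon_vma);
 *              ... walk the interval tree in anon_vma->rb_root ...
 *              anon_vma_unlock_read(anon_vma);
 *              put_anon_vma(anon_vma);
 *      }
 */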
/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "rb" field indexes on an interval tree the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
        struct vm_area_struct *vma;
        struct anon_vma *anon_vma;
        struct list_head same_vma;      /* locked by mmap_sem & page_table_lock */
        struct rb_node rb;              /* locked by anon_vma->rwsem */
        unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
        unsigned long cached_vma_start, cached_vma_last;
#endif
};

enum ttu_flags {
        TTU_UNMAP = 1,                  /* unmap mode */
        TTU_MIGRATION = 2,              /* migration mode */
        TTU_MUNLOCK = 4,                /* munlock mode */

        TTU_IGNORE_MLOCK = (1 << 8),    /* ignore mlock */
        TTU_IGNORE_ACCESS = (1 << 9),   /* don't age */
        TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
        TTU_BATCH_FLUSH = (1 << 11),    /* Batch TLB flushes where possible
                                         * and caller guarantees they will
                                         * do a final flush if necessary */
};

#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
        atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
        if (atomic_dec_and_test(&anon_vma->refcount))
                __put_anon_vma(anon_vma);
}

static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        if (anon_vma)
                down_write(&anon_vma->root->rwsem);
}

static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        if (anon_vma)
                up_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
        down_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
        up_write(&anon_vma->root->rwsem);
}

static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
        down_read(&anon_vma->root->rwsem);
}

static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
        up_read(&anon_vma->root->rwsem);
}
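/*
 * Illustrative sketch (not part of the upstream header): a ttu_flags
 * value combines exactly one mode from the low bits with optional
 * modifier flags from bit 8 upward, e.g. a migration unmap that does
 * not bail out on mlocked pages, passed to try_to_unmap() below:
 *
 *      ret = try_to_unmap(page, TTU_MIGRATION | TTU_IGNORE_MLOCK);
 */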
/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);       /* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline void anon_vma_merge(struct vm_area_struct *vma,
                                  struct vm_area_struct *next)
{
        VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
        unlink_anon_vmas(next);
}

struct anon_vma *page_get_anon_vma(struct page *page);

/*
 * rmap interfaces called when adding or removing the pte of a page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
                           unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
                            unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
                                unsigned long);

static inline void page_dup_rmap(struct page *page)
{
        atomic_inc(&page->_mapcount);
}

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
                    struct mem_cgroup *memcg, unsigned long *vm_flags);

/* The unmap mode occupies the low byte of ttu_flags; modifiers start at bit 8. */
#define TTU_ACTION_MASK 0xff
#define TTU_ACTION(x)   ((x) & TTU_ACTION_MASK)

int try_to_unmap(struct page *, enum ttu_flags flags);

/*
 * Used by uprobes to replace a userspace page safely.
 */
pte_t *__page_check_address(struct page *, struct mm_struct *,
                            unsigned long, spinlock_t **, int);

static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
                                        unsigned long address,
                                        spinlock_t **ptlp, int sync)
{
        pte_t *ptep;

        __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
                                                       ptlp, sync));
        return ptep;
}

/*
 * Used by swapoff to help locate where the page is expected in the vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings (and since clean PTEs should also
 * be read-only, write-protects them too).
 *
 * Returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

/*
 * Called in the munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */
int try_to_munlock(struct page *);
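/*
 * Illustrative sketch (not part of the upstream header): a typical
 * page_check_address() caller looks up the pte that maps the page at a
 * known address, works on it under the returned page table lock, then
 * releases both the pte mapping and the lock with pte_unmap_unlock():
 *
 *      spinlock_t *ptl;
 *      pte_t *pte = page_check_address(page, mm, address, &ptl, 0);
 *      if (pte) {
 *              ... read or update the pte while ptl is held ...
 *              pte_unmap_unlock(pte, ptl);
 *      }
 */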
/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page);
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * rmap_walk_control: control the rmap traversal for a specific caller's needs
 *
 * arg: passed to rmap_one() and invalid_vma()
 * rmap_one: executed on each vma where the page is mapped
 * done: checks the termination condition for the traversal
 * anon_lock: takes the anon_vma lock in an optimized way rather than the default
 * invalid_vma: skips vmas the caller is not interested in
 */
struct rmap_walk_control {
        void *arg;
        int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
                        unsigned long addr, void *arg);
        int (*done)(struct page *page);
        struct anon_vma *(*anon_lock)(struct page *page);
        bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};

int rmap_walk(struct page *page, struct rmap_walk_control *rwc);

#else   /* !CONFIG_MMU */

#define anon_vma_init()         do {} while (0)
#define anon_vma_prepare(vma)   (0)
#define anon_vma_link(vma)      do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
                                  struct mem_cgroup *memcg,
                                  unsigned long *vm_flags)
{
        *vm_flags = 0;
        return 0;
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
        return 0;
}

#endif  /* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS    0
#define SWAP_AGAIN      1
#define SWAP_FAIL       2
#define SWAP_MLOCK      3

#endif  /* _LINUX_RMAP_H */
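/*
 * Illustrative sketch (not part of the upstream header): a minimal rmap
 * walk using rmap_walk_control above. The names my_rmap_one and my_cookie
 * are made up for the example. rmap_one returns SWAP_AGAIN to continue
 * visiting mappings; any other value terminates the walk early:
 *
 *      static int my_rmap_one(struct page *page, struct vm_area_struct *vma,
 *                             unsigned long addr, void *arg)
 *      {
 *              ... handle one mapping of page at addr in vma ...
 *              return SWAP_AGAIN;
 *      }
 *
 *      struct rmap_walk_control rwc = {
 *              .rmap_one = my_rmap_one,
 *              .arg = &my_cookie,
 *      };
 *      rmap_walk(page, &rwc);
 */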