#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	spinlock_t lock;	/* Serialize access to vma list */
#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)

	/*
	 * The external_refcount is taken by either KSM or page migration
	 * to take a reference to an anon_vma when there is no
	 * guarantee that the vma or its page tables will exist for
	 * the duration of the operation. A caller that takes
	 * the reference is responsible for freeing the
	 * anon_vma if it is the last user on release.
	 */
	atomic_t external_refcount;
#endif
	/*
	 * NOTE: the LSB of head.next is set by
	 * mm_take_all_locks() _after_ taking the above lock. So the
	 * head must only be read/written after taking the above lock
	 * to be sure to see a valid next pointer. The LSB itself
	 * is serialized by a system wide lock only visible to
	 * mm_take_all_locks() (mm_all_locks_mutex).
	 */
	struct list_head head;	/* Chain of private "related" vmas */
};
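/*
 * Illustrative sketch only (kept as a comment, not part of this
 * header's API): the pin/release pattern a subsystem such as page
 * migration can use with external_refcount, assuming the anon_vma is
 * known to be valid at the moment the reference is taken:
 *
 *	atomic_inc(&anon_vma->external_refcount);
 *	... operate on the page while its vmas may come and go ...
 *	if (atomic_dec_and_lock(&anon_vma->external_refcount,
 *				&anon_vma->lock)) {
 *		int empty = list_empty(&anon_vma->head);
 *		spin_unlock(&anon_vma->lock);
 *		if (empty)
 *			anon_vma_free(anon_vma);
 *	}
 *
 * The atomic_dec_and_lock() step is what makes the "last user frees"
 * rule above safe against a racing unlink of the final vma.
 */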
/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "same_anon_vma" list contains the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
	struct vm_area_struct *vma;
	struct anon_vma *anon_vma;
	struct list_head same_vma;	/* locked by mmap_sem & page_table_lock */
	struct list_head same_anon_vma;	/* locked by anon_vma->lock */
};

#ifdef CONFIG_MMU
#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
{
	atomic_set(&anon_vma->external_refcount, 0);
}

static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
{
	return atomic_read(&anon_vma->external_refcount);
}
#else
static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
{
}

static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
{
	return 0;
}
#endif /* CONFIG_KSM || CONFIG_MIGRATION */

static inline struct anon_vma *page_anon_vma(struct page *page)
{
	if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
					    PAGE_MAPPING_ANON)
		return NULL;
	return page_rmapping(page);
}

static inline void anon_vma_lock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_lock(&anon_vma->lock);
}

static inline void anon_vma_unlock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_unlock(&anon_vma->lock);
}

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);
void anon_vma_free(struct anon_vma *);

static inline void anon_vma_merge(struct vm_area_struct *vma,
				  struct vm_area_struct *next)
{
	VM_BUG_ON(vma->anon_vma != next->anon_vma);
	unlink_anon_vmas(next);
}

/*
 * rmap interfaces called when adding or removing a pte of a page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);

static inline void page_dup_rmap(struct page *page)
{
	atomic_inc(&page->_mapcount);
}

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
			struct mem_cgroup *cnt, unsigned long *vm_flags);
int page_referenced_one(struct page *, struct vm_area_struct *,
	unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);

enum ttu_flags {
	TTU_UNMAP = 0,			/* unmap mode */
	TTU_MIGRATION = 1,		/* migration mode */
	TTU_MUNLOCK = 2,		/* munlock mode */
	TTU_ACTION_MASK = 0xff,

	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
	TTU_IGNORE_HWPOISON = (1 << 10),	/* corrupted page is recoverable */
};
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)

int try_to_unmap(struct page *, enum ttu_flags flags);
int try_to_unmap_one(struct page *, struct vm_area_struct *,
			unsigned long address, enum ttu_flags flags);
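/*
 * Illustrative sketch only (kept as a comment): callers combine one
 * action from the low byte with modifier bits above it, and the action
 * is recovered with TTU_ACTION(). A memory-failure style caller, for
 * example, can look roughly like:
 *
 *	enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK |
 *			     TTU_IGNORE_ACCESS;
 *	int ret = try_to_unmap(page, ttu);
 *	if (TTU_ACTION(ttu) == TTU_UNMAP && ret != SWAP_SUCCESS)
 *		... the page is still mapped somewhere ...
 *
 * (SWAP_SUCCESS and friends are the return values defined at the
 * bottom of this header.)
 */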
/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *page_check_address(struct page *, struct mm_struct *,
				unsigned long, spinlock_t **, int);

/*
 * Used by swapoff to help locate where a page is expected in a vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * Returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

/*
 * Called in the munlock()/munmap() path to check for other vmas
 * holding the page mlocked.
 */
int try_to_munlock(struct page *);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma(struct page *page);
void page_unlock_anon_vma(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * Called by migrate.c to remove migration ptes, but might be used more later.
 */
int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg);

#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *cnt,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return 0;
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
	return 0;
}

#endif	/* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2
#define SWAP_MLOCK	3

#endif	/* _LINUX_RMAP_H */
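/*
 * Illustrative sketch only (kept as a comment): the shape of an
 * rmap_walk() callback. The name example_rmap_one is hypothetical,
 * not declared anywhere in the tree. rmap_walk() invokes the callback
 * once per vma that maps the page, and stops walking as soon as the
 * callback returns anything other than SWAP_AGAIN:
 *
 *	static int example_rmap_one(struct page *page,
 *			struct vm_area_struct *vma, unsigned long addr,
 *			void *arg)
 *	{
 *		... inspect or fix up the pte mapping page at addr ...
 *		return SWAP_AGAIN;	(continue to the next vma)
 *	}
 *
 *	rmap_walk(page, example_rmap_one, NULL);
 */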