#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	spinlock_t lock;	/* Serialize access to vma list */
	/*
	 * NOTE: the LSB of head.next is set by mm_take_all_locks()
	 * _after_ taking the above lock. So the head must only be
	 * read/written after taking the above lock, to be sure to
	 * see a valid next pointer. The LSB itself is serialized by
	 * a system-wide lock only visible to mm_take_all_locks()
	 * (mm_all_locks_mutex).
	 */
	struct list_head head;	/* List of private "related" vmas */
};

#ifdef CONFIG_MMU

static inline void anon_vma_lock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_lock(&anon_vma->lock);
}

static inline void anon_vma_unlock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_unlock(&anon_vma->lock);
}

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
void anon_vma_unlink(struct vm_area_struct *);
void anon_vma_link(struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);

/*
 * rmap interfaces called when adding or removing a pte of a page
 */
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);

#ifdef CONFIG_DEBUG_VM
void page_dup_rmap(struct page *page, struct vm_area_struct *vma,
		   unsigned long address);
#else
static inline void page_dup_rmap(struct page *page,
				 struct vm_area_struct *vma,
				 unsigned long address)
{
	atomic_inc(&page->_mapcount);
}
#endif

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
		    struct mem_cgroup *cnt, unsigned long *vm_flags);
int try_to_unmap(struct page *, int ignore_refs);

/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *page_check_address(struct page *, struct mm_struct *,
			  unsigned long, spinlock_t **, int);

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
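
/*
 * Illustrative sketch only (not part of this header's API): a typical
 * caller, modeled on page_referenced_one() in mm/rmap.c, looks up and
 * locks the pte with page_check_address(), and must release it again
 * with pte_unmap_unlock() when done:
 *
 *	spinlock_t *ptl;
 *	pte_t *pte;
 *
 *	pte = page_check_address(page, mm, address, &ptl, 0);
 *	if (pte) {
 *		if (ptep_clear_flush_young_notify(vma, address, pte))
 *			referenced++;
 *		pte_unmap_unlock(pte, ptl);
 *	}
 */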
/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * Returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

/*
 * Called in the munlock()/munmap() path to check for other vmas
 * holding the page mlocked.
 */
int try_to_munlock(struct page *);

#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *cnt,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return TestClearPageReferenced(page);
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
	return 0;
}

#endif	/* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2
#define SWAP_MLOCK	3

#endif	/* _LINUX_RMAP_H */
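
/*
 * Illustrative sketch only: mm/vmscan.c consumes the SWAP_* return
 * values above roughly like this (abbreviated from shrink_page_list();
 * the goto targets are labels inside that function, shown here purely
 * to indicate the intended handling):
 *
 *	switch (try_to_unmap(page, 0)) {
 *	case SWAP_FAIL:
 *		goto activate_locked;
 *	case SWAP_AGAIN:
 *		goto keep_locked;
 *	case SWAP_MLOCK:
 *		goto cull_mlocked;
 *	case SWAP_SUCCESS:
 *		;	(fall through and try to free the page)
 *	}
 */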