#ifndef _LINUX_PAGE_REF_H
#define _LINUX_PAGE_REF_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>

/*
 * Tracepoints for page refcount changes; the __page_ref_* hooks below
 * fire them when CONFIG_DEBUG_PAGE_REF is enabled.
 */
extern struct tracepoint __tracepoint_page_ref_set;
extern struct tracepoint __tracepoint_page_ref_mod;
extern struct tracepoint __tracepoint_page_ref_mod_and_test;
extern struct tracepoint __tracepoint_page_ref_mod_and_return;
extern struct tracepoint __tracepoint_page_ref_mod_unless;
extern struct tracepoint __tracepoint_page_ref_freeze;
extern struct tracepoint __tracepoint_page_ref_unfreeze;

#ifdef CONFIG_DEBUG_PAGE_REF

/*
 * Ideally we would use the trace_<tracepoint>_enabled() helpers here,
 * but header include dependencies make that infeasible, so the static
 * key check is open-coded instead.
 *
 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
 */
#define page_ref_tracepoint_active(t) static_key_false(&(t).key)

extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_mod_unless(struct page *page, int v, int u);
extern void __page_ref_freeze(struct page *page, int v, int ret);
extern void __page_ref_unfreeze(struct page *page, int v);

#else

#define page_ref_tracepoint_active(t) false

/* No-op stubs: the tracepoint calls compile away entirely */
static inline void __page_ref_set(struct page *page, int v)
{
}
static inline void __page_ref_mod(struct page *page, int v)
{
}
static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline void __page_ref_unfreeze(struct page *page, int v)
{
}

#endif /* CONFIG_DEBUG_PAGE_REF */

/* Refcount of the exact page passed in, not of its compound head */
static inline int page_ref_count(struct page *page)
{
	return atomic_read(&page->_refcount);
}

/* Refcount of the compound head that covers @page */
static inline int page_count(struct page *page)
{
	return atomic_read(&compound_head(page)->_refcount);
}

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_refcount, v);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_set))
		__page_ref_set(page, v);
}

/*
 * Set up the page count before the page is freed into the page
 * allocator for the first time (boot or memory hotplug).
 */
static inline void init_page_count(struct page *page)
{
	set_page_count(page, 1);
}

static inline void page_ref_add(struct page *page, int nr)
{
	atomic_add(nr, &page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, nr);
}

static inline void page_ref_sub(struct page *page, int nr)
{
	atomic_sub(nr, &page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, -nr);
}

static inline void page_ref_inc(struct page *page)
{
	atomic_inc(&page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, 1);
}

static inline void page_ref_dec(struct page *page)
{
	atomic_dec(&page->_refcount);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, -1);
}

/* Returns true if the resulting refcount is zero */
static inline int page_ref_sub_and_test(struct page *page, int nr)
{
	int ret = atomic_sub_and_test(nr, &page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -nr, ret);
	return ret;
}

static inline int page_ref_inc_return(struct page *page)
{
	int ret = atomic_inc_return(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
		__page_ref_mod_and_return(page, 1, ret);
	return ret;
}

/* Returns true if the resulting refcount is zero */
static inline int page_ref_dec_and_test(struct page *page)
{
	int ret = atomic_dec_and_test(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -1, ret);
	return ret;
}

static inline int page_ref_dec_return(struct page *page)
{
	int ret = atomic_dec_return(&page->_refcount);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
		__page_ref_mod_and_return(page, -1, ret);
	return ret;
}

/* Adds @nr unless the refcount is @u; returns true if the add happened */
static inline int page_ref_add_unless(struct page *page, int nr, int u)
{
	int ret = atomic_add_unless(&page->_refcount, nr, u);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless))
		__page_ref_mod_unless(page, nr, ret);
	return ret;
}
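
/*
 * Usage sketch (illustrative only; "expected_count" is a hypothetical
 * local, not part of this header): a caller that accounts for every
 * reference to a page can take exclusive ownership by freezing the
 * refcount to zero, which makes concurrent page_ref_add_unless(page,
 * nr, 0) callers back off:
 *
 *	if (!page_ref_freeze(page, expected_count))
 *		return -EAGAIN;		(someone else holds a reference)
 *	... _refcount is 0 here, so the page cannot be re-pinned ...
 *	page_ref_unfreeze(page, expected_count);
 *
 * Page migration, for example, freezes the old page's refcount while
 * it transfers the page cache mapping to the new page.
 */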

static inline int page_ref_freeze(struct page *page, int count)
{
	int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze))
		__page_ref_freeze(page, count, ret);
	return ret;
}

static inline void page_ref_unfreeze(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	/*
	 * Order the stores done while the page was frozen before the
	 * refcount becomes visible as non-zero again.
	 */
	smp_mb();
	atomic_set(&page->_refcount, count);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
		__page_ref_unfreeze(page, count);
}

#endif /* _LINUX_PAGE_REF_H */