#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
#endif

#include <asm/kmap_types.h>

#if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_TRACE_IRQFLAGS_SUPPORT)

void debug_kmap_atomic(enum km_type type);

#else

static inline void debug_kmap_atomic(enum km_type type)
{
}

#endif

#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

#define kunmap(page) do { (void) (page); } while (0)

static inline void *kmap_atomic(struct page *page, enum km_type idx)
{
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)

#define kunmap_atomic(addr, idx)	do { pagefault_enable(); } while (0)
#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while (0)
#endif

#endif /* CONFIG_HIGHMEM */

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr, KM_USER0);
}
#endif
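/*
 * Usage sketch (illustrative only, not part of this header): the caller
 * may not sleep between kmap_atomic() and the matching kunmap_atomic(),
 * and concurrent mappings in the same context need distinct km_type
 * slots (clear_user_highpage() above uses KM_USER0).  A typical
 * single-page write, with @page, @offset, @buf and @len supplied by a
 * hypothetical caller, looks like:
 *
 *	void *kaddr = kmap_atomic(page, KM_USER0);
 *	memcpy(kaddr + offset, buf, len);
 *	kunmap_atomic(kaddr, KM_USER0);
 *	flush_dcache_page(page);
 *
 * The trailing flush_dcache_page() matters on architectures with
 * aliasing caches when the page may also be mapped into user space.
 */
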
#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}

static inline void __deprecated memclear_highpage_flush(struct page *page,
			unsigned int offset, unsigned int size)
{
	zero_user(page, offset, size);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_page(vto, vfrom);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}

#endif /* _LINUX_HIGHMEM_H */
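
/*
 * Usage sketch (illustrative only, not part of the API above): a fault
 * handler populating an anonymous mapping might use the movable
 * allocation helper roughly like this, where @vma and @address come
 * from a hypothetical caller:
 *
 *	struct page *page;
 *
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 *	... install the pre-zeroed, __GFP_MOVABLE page into the
 *	    page tables ...
 *
 * The page comes back already cleared via clear_user_highpage(), so
 * the caller must not zero it again.
 */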