#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
#endif

#ifdef CONFIG_HIGHMEM

#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

#define kunmap(page) do { (void) (page); } while (0)

#include <asm/kmap_types.h>

static inline void *kmap_atomic(struct page *page, enum km_type idx)
{
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)

#define kunmap_atomic(addr, idx)	do { pagefault_enable(); } while (0)
#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while (0)
#endif

#endif /* CONFIG_HIGHMEM */

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr, KM_USER0);
}
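/*
 * Illustrative sketch (not part of this header): kmap_atomic() above
 * creates a short-lived kernel mapping of a (possibly highmem) page.
 * The caller must not sleep between kmap_atomic() and kunmap_atomic(),
 * and nested mappings must be released in LIFO order using distinct
 * km_type slots.  A typical page fill, assuming a hypothetical caller
 * that already holds a struct page *page, might look like:
 *
 *	void *kaddr = kmap_atomic(page, KM_USER0);
 *	memset(kaddr, 0, PAGE_SIZE);
 *	kunmap_atomic(kaddr, KM_USER0);
 *
 * When CONFIG_HIGHMEM is not set, this reduces to page_address() plus
 * pagefault_disable()/pagefault_enable(), as defined above.
 */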

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing its own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
			unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}

static inline void __deprecated memclear_highpage_flush(struct page *page,
			unsigned int offset, unsigned int size)
{
	zero_user(page, offset, size);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_page(vto, vfrom);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}

#endif /* _LINUX_HIGHMEM_H */
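
/*
 * Illustrative sketch (not part of this header): zero_user_segments()
 * is the primitive that zero_user_segment() and zero_user() wrap.  A
 * caller keeping only the bytes [from, to) of a page valid, e.g. when
 * zeroing around a partial write ('from' and 'to' are hypothetical
 * names), might do:
 *
 *	zero_user_segments(page, 0, from, to, PAGE_SIZE);
 *
 * This maps the page once with kmap_atomic(), memset()s both ranges,
 * and finishes with flush_dcache_page() so virtually indexed data
 * caches observe the zeroed bytes.
 */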