#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
#endif

#ifdef CONFIG_HIGHMEM

#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0

#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

#define kunmap(page) do { (void) (page); } while (0)

#include <asm/kmap_types.h>

static inline void *kmap_atomic(struct page *page, enum km_type idx)
{
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)

#define kunmap_atomic(addr, idx)	do { pagefault_enable(); } while (0)
#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while (0)
#endif

#endif /* CONFIG_HIGHMEM */

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr, KM_USER0);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the page's future ability to move like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
 * implementation.
 */
static inline struct page *
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif
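
/*
 * Illustrative sketch only (not part of this header): how an anonymous
 * fault path might use the helper above.  The vma/address variables and
 * the VM_FAULT_OOM return value are assumed to come from the caller's
 * fault-handling context, in the style of mm/memory.c.
 *
 *	struct page *page;
 *
 *	page = __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, address);
 *	if (!page)
 *		return VM_FAULT_OOM;
 *	...	page is already zeroed; map it into the user address space
 */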
/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
			unsigned long vaddr)
{
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);
	clear_page(kaddr);
	kunmap_atomic(kaddr, KM_USER0);
}

static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
}

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}

static inline void __deprecated memclear_highpage_flush(struct page *page,
			unsigned int offset, unsigned int size)
{
	zero_user(page, offset, size);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}

#endif

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_page(vto, vfrom);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}

#endif /* _LINUX_HIGHMEM_H */
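
/*
 * Illustrative sketch only (not part of this header): the usual pattern
 * for the atomic kmap helpers above, mirroring zero_user_segments().
 * kmap_atomic() disables pagefaults, so the code between map and unmap
 * must not sleep, and the same km_type slot must be passed to both
 * calls.  The page, offset and len names below are assumed to be
 * supplied by the caller.
 *
 *	void *kaddr = kmap_atomic(page, KM_USER0);
 *
 *	memset(kaddr + offset, 0, len);	...	no sleeping in here
 *	kunmap_atomic(kaddr, KM_USER0);
 *	flush_dcache_page(page);	...	keep the d-cache coherent
 */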