#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP	0x00000001	/* ioremap() and friends */
#define VM_ALLOC	0x00000002	/* vmalloc() */
#define VM_MAP		0x00000004	/* vmap()ed pages */
#define VM_USERMAP	0x00000008	/* suitable for remap_vmalloc_range */
#define VM_VPAGES	0x00000010	/* buffer for pages was vmalloc'ed */
#define VM_UNLIST	0x00000020	/* vm_struct is not listed in vmlist */
/* bits [20..32] reserved for arch-specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by an arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct list_head purge_list;	/* "lazy purge" list */
	struct vm_struct *vm;
	struct rcu_head rcu_head;
};

/*
 *	High-level APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count,
				int node, pgprot_t prot);
extern void vm_unmap_aliases(void);

#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
#else
static inline void vmalloc_init(void)
{
}
#endif

extern void *vmalloc(unsigned long size);
extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
extern void *vmalloc_exec(unsigned long size);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, int node, const void *caller);
extern void vfree(const void *addr);

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
extern void vunmap(const void *addr);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);
void vmalloc_sync_all(void);

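/*
 * Usage sketch (editorial addition, not part of the original header; the
 * helper names below are hypothetical).  The typical driver-side pattern
 * is to allocate a large, virtually contiguous buffer with vmalloc() or
 * vzalloc() and to release it with vfree(); the allocation calls may
 * sleep, so they must not be used in atomic context:
 *
 *	static unsigned long *example_alloc_table(unsigned long nents)
 *	{
 *		return vzalloc(nents * sizeof(unsigned long));
 *	}
 *
 *	static void example_free_table(unsigned long *table)
 *	{
 *		vfree(table);
 *	}
 *
 * To map an already allocated page array instead, use vmap()/vunmap(),
 * or vm_map_ram()/vm_unmap_ram() for short-lived mappings.
 */
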
/*
 *	Low-level APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	/* return the usable size, i.e. the size without the guard page */
	return area->size - PAGE_SIZE;
}

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
					unsigned long start, unsigned long end);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);

extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
			struct page ***pages);
#ifdef CONFIG_MMU
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
				    pgprot_t prot, struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
#else
static inline int
map_kernel_range_noflush(unsigned long start, unsigned long size,
			pgprot_t prot, struct page **pages)
{
	return size >> PAGE_SHIFT;
}
static inline void
unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
}
static inline void
unmap_kernel_range(unsigned long addr, unsigned long size)
{
}
#endif

/* Allocate/destroy a 'vmalloc' VM area. */
extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
extern void free_vm_area(struct vm_struct *area);

/* for /dev/kmem */
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * Internals.  Don't use.
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

struct vmalloc_info {
	unsigned long	used;
	unsigned long	largest_chunk;
};

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
extern void get_vmalloc_info(struct vmalloc_info *vmi);
#else

#define VMALLOC_TOTAL 0UL
#define get_vmalloc_info(vmi)			\
do {						\
	(vmi)->used = 0;			\
	(vmi)->largest_chunk = 0;		\
} while (0)
#endif

#endif /* _LINUX_VMALLOC_H */
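
/*
 * Low-level usage sketch (editorial addition, not part of the original
 * header; my_vmap() is a hypothetical helper).  A mapping is built by
 * reserving a range of vmalloc address space with get_vm_area() and then
 * installing page table entries for an existing page array with
 * map_vm_area(), which is essentially what vmap() does internally:
 *
 *	static void *my_vmap(struct page **pages, unsigned int count)
 *	{
 *		struct vm_struct *area;
 *		struct page **p = pages;
 *
 *		area = get_vm_area((unsigned long)count << PAGE_SHIFT, VM_MAP);
 *		if (!area)
 *			return NULL;
 *		if (map_vm_area(area, PAGE_KERNEL, &p)) {
 *			free_vm_area(area);
 *			return NULL;
 *		}
 *		return area->addr;
 *	}
 *
 * The reserved range carries one extra guard page, which is why
 * get_vm_area_size() above reports area->size - PAGE_SIZE.
 */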