#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <asm/page.h>		/* pgprot_t */

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP	0x00000001	/* ioremap() and friends */
#define VM_ALLOC	0x00000002	/* vmalloc() */
#define VM_MAP		0x00000004	/* vmap()ed pages */
#define VM_USERMAP	0x00000008	/* suitable for remap_vmalloc_range */
#define VM_VPAGES	0x00000010	/* buffer for pages was vmalloc'ed */
#define VM_UNLIST	0x00000020	/* vm_struct is not listed in vmlist */
/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	void			*caller;
};

/*
 *	Highlevel APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count,
				int node, pgprot_t prot);
extern void vm_unmap_aliases(void);

#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
#else
static inline void vmalloc_init(void)
{
}
#endif

extern void *vmalloc(unsigned long size);
extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
extern void *vmalloc_exec(unsigned long size);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, int node, void *caller);
extern void vfree(const void *addr);

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
extern void vunmap(const void *addr);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);
void vmalloc_sync_all(void);
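
/*
 * Illustrative usage sketch of the highlevel API above (hypothetical driver
 * code, not part of this header's interface): allocate a zeroed, virtually
 * contiguous buffer and release it again.  vzalloc() may sleep and must not
 * be called from atomic context; vfree(NULL) is a no-op.
 *
 *	static void *example_buf;
 *
 *	static int example_setup(unsigned long nbytes)
 *	{
 *		example_buf = vzalloc(nbytes);
 *		if (!example_buf)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 *	static void example_teardown(void)
 *	{
 *		vfree(example_buf);
 *		example_buf = NULL;
 *	}
 */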

/*
 *	Lowlevel-APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	/* return actual size without guard page */
	return area->size - PAGE_SIZE;
}

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, void *caller);
extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
					unsigned long start, unsigned long end);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					void *caller);
extern struct vm_struct *remove_vm_area(const void *addr);

extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
			struct page ***pages);
#ifdef CONFIG_MMU
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
				    pgprot_t prot, struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
#else
static inline int
map_kernel_range_noflush(unsigned long start, unsigned long size,
			pgprot_t prot, struct page **pages)
{
	return size >> PAGE_SHIFT;
}
static inline void
unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
}
static inline void
unmap_kernel_range(unsigned long addr, unsigned long size)
{
}
#endif

/* Allocate/destroy a 'vmalloc' VM area. */
extern struct vm_struct *alloc_vm_area(size_t size);
extern void free_vm_area(struct vm_struct *area);

/* for /dev/kmem */
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 *	Internals.  Don't use.
 */
extern rwlock_t vmlist_lock;
extern struct vm_struct *vmlist;
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#endif	/* _LINUX_VMALLOC_H */
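
/*
 * Illustrative sketch of the lowlevel helpers declared above (hypothetical
 * code, not part of this header): reserve a range of vmalloc address space
 * with get_vm_area_caller() and map caller-provided pages into it, roughly
 * what vmap() does internally.  PAGE_KERNEL comes from the arch pgtable
 * headers, and map_vm_area() advances the page cursor it is given.  The
 * resulting mapping is later torn down with vunmap().
 *
 *	static void *example_map_pages(struct page **pages, unsigned int count)
 *	{
 *		struct vm_struct *area;
 *
 *		area = get_vm_area_caller(count << PAGE_SHIFT, VM_MAP,
 *					  __builtin_return_address(0));
 *		if (!area)
 *			return NULL;
 *
 *		if (map_vm_area(area, PAGE_KERNEL, &pages)) {
 *			vunmap(area->addr);
 *			return NULL;
 *		}
 *		return area->addr;
 *	}
 */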