#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

#ifdef CONFIG_HAVE_MEMBLOCK
/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>

#include <asm/memblock.h>

#define INIT_MEMBLOCK_REGIONS	128
#define MEMBLOCK_ERROR		0

struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
};

struct memblock_type {
	unsigned long cnt;	/* number of regions */
	unsigned long max;	/* size of the allocated array */
	struct memblock_region *regions;
};

struct memblock {
	phys_addr_t current_limit;
	phys_addr_t memory_size;	/* Updated by memblock_analyze() */
	struct memblock_type memory;
	struct memblock_type reserved;
};

extern struct memblock memblock;
extern int memblock_debug;
extern int memblock_can_resize;

#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

u64 memblock_find_in_range(u64 start, u64 end, u64 size, u64 align);
int memblock_free_reserved_regions(void);
int memblock_reserve_reserved_regions(void);

extern void memblock_init(void);
extern void memblock_analyze(void);
extern long memblock_add(phys_addr_t base, phys_addr_t size);
extern long memblock_remove(phys_addr_t base, phys_addr_t size);
extern long memblock_free(phys_addr_t base, phys_addr_t size);
extern long memblock_reserve(phys_addr_t base, phys_addr_t size);

/* The numa aware allocator is only available if
 * CONFIG_ARCH_POPULATES_NODE_MAP is set
 */
extern phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align,
				      int nid);
extern phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
					  int nid);

extern phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);

/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0

extern phys_addr_t memblock_alloc_base(phys_addr_t size,
				       phys_addr_t align,
				       phys_addr_t max_addr);
extern phys_addr_t __memblock_alloc_base(phys_addr_t size,
					 phys_addr_t align,
					 phys_addr_t max_addr);
extern phys_addr_t memblock_phys_mem_size(void);
extern phys_addr_t memblock_end_of_DRAM(void);
extern void memblock_enforce_memory_limit(phys_addr_t memory_limit);
extern int memblock_is_memory(phys_addr_t addr);
extern int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
extern int memblock_is_reserved(phys_addr_t addr);
extern int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void memblock_dump_all(void);

/* Provided by the architecture */
extern phys_addr_t memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid);
extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
					phys_addr_t addr2, phys_addr_t size2);

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                              limiting allocations to what is currently
 *                              accessible during boot
 * @limit: New limit value (physical address)
 */
extern void memblock_set_current_limit(phys_addr_t limit);
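
/*
 * Illustrative sketch (not part of this header): a typical early-boot call
 * sequence on an architecture using memblock. All names below
 * (my_arch_memblock_setup, dt_mem_base, dt_mem_size, initrd_start,
 * initrd_size) are hypothetical and only show the intended ordering:
 * register RAM, reserve ranges already in use, analyze, then allocate.
 *
 *	static void __init my_arch_memblock_setup(void)
 *	{
 *		phys_addr_t early_buf;
 *
 *		memblock_init();
 *		memblock_add(dt_mem_base, dt_mem_size);		// RAM reported by firmware
 *		memblock_reserve(initrd_start, initrd_size);	// keep the initrd safe
 *		memblock_analyze();				// recompute memblock.memory_size
 *
 *		// only allocate from memory that is accessible this early
 *		memblock_set_current_limit(dt_mem_base + dt_mem_size);
 *		early_buf = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *	}
 */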

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - Return the end_pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - Return the end_pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}

#define for_each_memblock(memblock_type, region)					\
	for (region = memblock.memblock_type.regions;					\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
	     region++)


#ifdef ARCH_DISCARD_MEMBLOCK
#define __init_memblock __init
#define __initdata_memblock __initdata
#else
#define __init_memblock
#define __initdata_memblock
#endif

#endif /* CONFIG_HAVE_MEMBLOCK */

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */
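
/*
 * Illustrative sketch (not part of this header): walking the registered
 * memory regions with for_each_memblock() and converting them to page frame
 * numbers with the accessors above. The function name my_arch_count_pages is
 * hypothetical.
 *
 *	static unsigned long __init my_arch_count_pages(void)
 *	{
 *		struct memblock_region *reg;
 *		unsigned long pages = 0;
 *
 *		for_each_memblock(memory, reg) {
 *			unsigned long start_pfn = memblock_region_memory_base_pfn(reg);
 *			unsigned long end_pfn = memblock_region_memory_end_pfn(reg);
 *
 *			if (end_pfn > start_pfn)
 *				pages += end_pfn - start_pfn;
 *		}
 *		return pages;
 *	}
 *
 * Note the asymmetric rounding: memory regions round the base up and the end
 * down, so only fully present pages are counted, while reserved regions round
 * the base down and the end up, so any partially covered page is still
 * treated as reserved.
 */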