#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

#ifdef CONFIG_HAVE_MEMBLOCK
/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>

#define INIT_MEMBLOCK_REGIONS	128
#define INIT_PHYSMEM_REGIONS	4

/* Definition of memblock flags. */
enum {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};

struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	unsigned long flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
};

struct memblock_type {
	unsigned long cnt;	/* number of regions */
	unsigned long max;	/* size of the allocated array */
	phys_addr_t total_size;	/* size of all regions */
	struct memblock_region *regions;
	char *name;
};

struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
};

extern struct memblock memblock;
extern int memblock_debug;
#ifdef CONFIG_MOVABLE_NODE
/* If movable_node boot option specified */
extern bool movable_node_enabled;
#endif /* CONFIG_MOVABLE_NODE */
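/*
 * Usage sketch (illustrative, not from the original header): each
 * memblock_type is a flat, sorted array of regions, so it can be
 * walked directly. The iterator macros defined later in this file
 * are the preferred interface.
 *
 *	unsigned long i;
 *
 *	for (i = 0; i < memblock.memory.cnt; i++)
 *		pr_info("memory[%lu]: %pa + %pa\n", i,
 *			&memblock.memory.regions[i].base,
 *			&memblock.memory.regions[i].size);
 */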
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
#else
#define __init_memblock
#define __initdata_memblock
#endif

#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					int nid, ulong flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
ulong choose_memblock_flags(void);

/* Low level functions */
int memblock_add_range(struct memblock_type *type,
		       phys_addr_t base, phys_addr_t size,
		       int nid, unsigned long flags);

void __next_mem_range(u64 *idx, int nid, ulong flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);

/**
 * for_each_mem_range - iterate through memblock areas from type_a that are
 * not included in type_b, or just type_a if type_b is %NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))
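/*
 * Example (illustrative, not from the original header): walk all memory
 * that is not reserved; this pairing is exactly how
 * for_each_free_mem_range() below is built.
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_mem_range(i, &memblock.memory, &memblock.reserved,
 *			   NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL)
 *		pr_info("free: %pa..%pa\n", &start, &end);
 */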
/**
 * for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a that are not included in type_b, or just type_a if type_b is %NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		     __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
					  p_start, p_end, p_nid);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))

#ifdef CONFIG_MOVABLE_NODE
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool __init_memblock movable_node_is_enabled(void)
{
	return movable_node_enabled;
}
#else
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return false;
}
static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}
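/*
 * Example (illustrative, not from the original header): report every
 * reserved range; usable as soon as memblock itself is initialized.
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		pr_info("reserved: %pa..%pa\n", &start, &end);
 */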
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);
unsigned long memblock_next_valid_pfn(unsigned long pfn, unsigned long max_pfn);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
			       nid, flags, p_start, p_end, p_nid)

static inline void memblock_set_region_flags(struct memblock_region *r,
					     unsigned long flags)
{
	r->flags |= flags;
}

static inline void memblock_clear_region_flags(struct memblock_region *r,
					       unsigned long flags)
{
	r->flags &= ~flags;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
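/*
 * Example (illustrative, not from the original header): grab one
 * page-aligned page of boot memory. The block is carved out of
 * "memory" and recorded in "reserved", so later iterations over free
 * ranges skip it.
 *
 *	phys_addr_t pa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 */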
#ifdef CONFIG_MOVABLE_NODE
/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up or not.
 * If this returns true, memblock will allocate memory in the
 * bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}
#else
static inline void __init memblock_set_bottom_up(bool enable) {}
static inline bool memblock_bottom_up(void) { return false; }
#endif

/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags);
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				  phys_addr_t max_addr);
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
int memblock_is_map_memory(phys_addr_t addr);
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);


phys_addr_t memblock_get_current_limit(void);
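/*
 * Example (illustrative, not from the original header): early arch code
 * often restricts allocations to memory it has already mapped, then
 * lifts the restriction once the full direct map is up. "mapped_end"
 * is a hypothetical placeholder.
 *
 *	memblock_set_current_limit(mapped_end);
 *	...
 *	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 */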
/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - Return the end_pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - Return the end_pfn of this region
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}

#define for_each_memblock(memblock_type, region)					\
	for (region = memblock.memblock_type.regions;					\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
	     region++)

/* Note: relies on an "idx" variable declared in the calling scope. */
#define for_each_memblock_type(memblock_type, rgn)			\
	for (idx = 0, rgn = &memblock_type->regions[0];			\
	     idx < memblock_type->cnt;					\
	     idx++, rgn = &memblock_type->regions[idx])

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return 0;
}

#endif /* CONFIG_HAVE_MEMBLOCK */

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */