/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
 */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};

/**
 * struct memblock_region - represents a memory region
 * @base: base address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 */
struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
};

extern struct memblock memblock;
extern int memblock_debug;

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif

#define memblock_dbg(fmt, ...) \
	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
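/*
 * Illustrative sketch (not part of this header): a typical early boot
 * path reports usable RAM and then carves out firmware-owned areas with
 * the interfaces declared below. The addresses and sizes are made up.
 *
 *	memblock_add(0x80000000, SZ_512M);	// usable DRAM bank
 *	memblock_reserve(0x80000000, SZ_1M);	// firmware tables
 */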
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);

unsigned long memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);

/* Low level functions */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
				phys_addr_t *out_end);

void __memblock_free_late(phys_addr_t base, phys_addr_t size);

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
					phys_addr_t *out_start,
					phys_addr_t *out_end)
{
	extern struct memblock_type physmem;

	__next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,
			 out_start, out_end, NULL);
}

/**
 * for_each_physmem_range - iterate through physmem areas not included in type.
 * @i: u64 used as loop variable
 * @type: ptr to memblock_type whose regions are excluded from the iteration,
 *	  can be %NULL
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_physmem_range(i, type, p_start, p_end)			\
	for (i = 0, __next_physmem_range(&i, type, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_physmem_range(&i, type, p_start, p_end))
#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */
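/*
 * Illustrative sketch (assumes CONFIG_HAVE_MEMBLOCK_PHYS_MAP=y): dump
 * every physical range the firmware reported, including ranges later
 * removed from the memory map. A %NULL @type excludes nothing.
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_physmem_range(i, NULL, &start, &end)
 *		pr_info("physmem: [%pa-%pa]\n", &start, &end);
 */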
/**
 * for_each_mem_range - iterate through memblock areas from type_a that are
 * not included in type_b, or just type_a if type_b is %NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type whose regions are excluded from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))

/**
 * for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a that are not included in type_b, or just type_a if type_b is %NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type whose regions are excluded from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);
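/*
 * Illustrative sketch (not part of this header): report everything that
 * memblock currently considers reserved, e.g. from an early debug hook.
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		pr_info("reserved: [%pa-%pa]\n", &start, &end);
 */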
/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
				  unsigned long *out_spfn,
				  unsigned long *out_epfn);
/**
 * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone are initialized. The main
 * assumption is that the zone start, end, and pgdat have been associated.
 * This way we can use the zone to determine the NUMA node, and whether a
 * given part of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)	\
	for (i = 0,							\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);	\
	     i != U64_MAX;						\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

/**
 * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from the current position. Available as soon as memblock
 * is initialized.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
	for (; i != U64_MAX;						  \
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)
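/*
 * Illustrative sketch (not part of this header): walk all free ranges on
 * any node; %MEMBLOCK_NONE places no restriction on region attributes.
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free: [%pa-%pa]\n", &start, &end);
 */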
/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
			       nid, flags, p_start, p_end, p_nid)

int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

#ifdef CONFIG_NEED_MULTIPLE_NODES
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_NEED_MULTIPLE_NODES */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE		(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
#define MEMBLOCK_ALLOC_KASAN		1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
				      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
				     phys_addr_t align, phys_addr_t start,
				     phys_addr_t end, int nid, bool exact_nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
					      phys_addr_t align)
{
	return memblock_phys_alloc_range(size, align, 0,
					 MEMBLOCK_ALLOC_ACCESSIBLE);
}

void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
				   phys_addr_t min_addr, phys_addr_t max_addr,
				   int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
			     phys_addr_t min_addr, phys_addr_t max_addr,
			     int nid);

static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_raw(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
					  MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_from(phys_addr_t size,
						phys_addr_t align,
						phys_addr_t min_addr)
{
	return memblock_alloc_try_nid(size, align, min_addr,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_low(phys_addr_t size,
					       phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void * __init memblock_alloc_node(phys_addr_t size,
						phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

static inline void __init memblock_free_early(phys_addr_t base,
					      phys_addr_t size)
{
	memblock_free(base, size);
}
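/*
 * Illustrative sketch (the struct name is made up): allocating a zeroed,
 * page-aligned table before the buddy allocator is up. memblock_alloc()
 * returns a virtual address, or NULL on failure; error handling is left
 * to the caller.
 *
 *	struct boot_table *tbl;
 *
 *	tbl = memblock_alloc(sizeof(*tbl), PAGE_SIZE);
 *	if (!tbl)
 *		panic("%s: failed to allocate boot table\n", __func__);
 */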
static inline void __init memblock_free_early_nid(phys_addr_t base,
						  phys_addr_t size, int nid)
{
	memblock_free(base, size);
}

static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	__memblock_free_late(base, size);
}

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline void __init memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up. If this returns true,
 * memblock allocates memory in the bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}

phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

extern void __memblock_dump_all(void);

static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                              limiting allocations to what is currently
 *                              accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);
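/*
 * Illustrative sketch (not part of this header): an architecture whose
 * early mappings cover only low memory can clamp allocations until the
 * full linear map exists, then lift the cap.
 *
 *	memblock_set_current_limit(SZ_1G);
 *	...	// set up page tables covering all of RAM
 *	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 */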
/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}

#define for_each_memblock(memblock_type, region)				\
	for (region = memblock.memblock_type.regions;				\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
	     region++)

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit);

#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
					 * shift passed via *_hash_shift */
#define HASH_ZERO	0x00000004	/* Zero allocated hash table */

/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */