/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_MEMORY_MODEL_H
#define __ASM_MEMORY_MODEL_H

#include <linux/pfn.h>

#ifndef __ASSEMBLY__

/*
 * Supports three memory models: FLATMEM, SPARSEMEM_VMEMMAP and SPARSEMEM.
 */
#if defined(CONFIG_FLATMEM)

#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET		(0UL)
#endif

#define __pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page)	((unsigned long)((page) - mem_map) + \
				 ARCH_PFN_OFFSET)
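/*
 * Example: with FLATMEM there is a single mem_map[] array covering pfns
 * from ARCH_PFN_OFFSET upwards, so both conversions are plain pointer
 * arithmetic, e.g. with ARCH_PFN_OFFSET == 0x100:
 *
 *   __pfn_to_page(0x105)       == &mem_map[0x105 - 0x100] == &mem_map[5]
 *   __page_to_pfn(&mem_map[5]) == 5 + 0x100               == 0x105
 */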

/* avoid <linux/mm.h> include hell */
extern unsigned long max_mapnr;

#ifndef pfn_valid
static inline int pfn_valid(unsigned long pfn)
{
	unsigned long pfn_offset = ARCH_PFN_OFFSET;

	return pfn >= pfn_offset && (pfn - pfn_offset) < max_mapnr;
}
#define pfn_valid pfn_valid
#endif

#elif defined(CONFIG_SPARSEMEM_VMEMMAP)

/* memmap is virtually contiguous.  */
#define __pfn_to_page(pfn)	(vmemmap + (pfn))
#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
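/*
 * Since the whole struct page array is virtually contiguous and indexed
 * directly by pfn, no per-section lookup or offset is needed here:
 * pfn_to_page() is a single addition to vmemmap and page_to_pfn() a
 * single subtraction.
 */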

#elif defined(CONFIG_SPARSEMEM)
/*
 * Note: a section's mem_map is encoded to reflect its start_pfn:
 * section[i].section_mem_map == address of the section's mem_map - start_pfn;
 */
#define __page_to_pfn(pg)					\
({	const struct page *__pg = (pg);				\
	int __sec = page_to_section(__pg);			\
	(unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec)));	\
})

#define __pfn_to_page(pfn)				\
({	unsigned long __pfn = (pfn);			\
	struct mem_section *__sec = __pfn_to_section(__pfn);	\
	__section_mem_map_addr(__sec) + __pfn;		\
})
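/*
 * Example of how the encoding above cancels out: for a page in a section
 * with start_pfn S and struct page array mem_map, __section_mem_map_addr()
 * returns (mem_map - S), so
 *
 *   __pfn_to_page(pfn)  == (mem_map - S) + pfn  == &mem_map[pfn - S]
 *   __page_to_pfn(page) == page - (mem_map - S) == pfn
 */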
#endif /* CONFIG_FLATMEM/SPARSEMEM */

/*
 * Convert a physical address to a Page Frame Number and back
 */
#define	__phys_to_pfn(paddr)	PHYS_PFN(paddr)
#define	__pfn_to_phys(pfn)	PFN_PHYS(pfn)
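/*
 * PHYS_PFN() and PFN_PHYS() (from <linux/pfn.h>) shift by PAGE_SHIFT,
 * e.g. with 4K pages (PAGE_SHIFT == 12):
 *
 *   __phys_to_pfn(0x12345000) == 0x12345
 *   __pfn_to_phys(0x12345)    == 0x12345000
 */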

#define page_to_pfn __page_to_pfn
#define pfn_to_page __pfn_to_page

#ifdef CONFIG_DEBUG_VIRTUAL
#define page_to_phys(page)						\
({									\
	unsigned long __pfn = page_to_pfn(page);			\
									\
	WARN_ON_ONCE(!pfn_valid(__pfn));				\
	PFN_PHYS(__pfn);						\
})
#else
#define page_to_phys(page)	PFN_PHYS(page_to_pfn(page))
#endif /* CONFIG_DEBUG_VIRTUAL */
#define phys_to_page(phys)	pfn_to_page(PHYS_PFN(phys))

#endif /* __ASSEMBLY__ */

#endif