xref: /linux-6.15/include/linux/cma.h (revision 9320fa27)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
2a254129eSJoonsoo Kim #ifndef __CMA_H__
3a254129eSJoonsoo Kim #define __CMA_H__
4a254129eSJoonsoo Kim 
5d5e6eff2SThierry Reding #include <linux/init.h>
6d5e6eff2SThierry Reding #include <linux/types.h>
78676af1fSAslan Bakirov #include <linux/numa.h>
8d5e6eff2SThierry Reding 
9a254129eSJoonsoo Kim #ifdef CONFIG_CMA_AREAS
1073307523SAnshuman Khandual #define MAX_CMA_AREAS	CONFIG_CMA_AREAS
11a254129eSJoonsoo Kim #endif
12a254129eSJoonsoo Kim 
132281f797SBarry Song #define CMA_MAX_NAME 64
142281f797SBarry Song 
/*
 * CMA areas need only pageblock granularity: as long as the buddy --
 * especially pageblock merging and alloc_contig_range() -- can deal
 * with only some pageblocks of a higher-order page being
 * MIGRATE_CMA, we can use pageblock_nr_pages as the minimum alignment.
 */
2011ac3e87SZi Yan #define CMA_MIN_ALIGNMENT_PAGES pageblock_nr_pages
21e16faf26SDavid Hildenbrand #define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES)
22e16faf26SDavid Hildenbrand 
23a254129eSJoonsoo Kim struct cma;
24a254129eSJoonsoo Kim 
25e48322abSPintu Kumar extern unsigned long totalcma_pages;
26ac173824SSasha Levin extern phys_addr_t cma_get_base(const struct cma *cma);
27ac173824SSasha Levin extern unsigned long cma_get_size(const struct cma *cma);
28f318dd08SLaura Abbott extern const char *cma_get_name(const struct cma *cma);
29a254129eSJoonsoo Kim 
308676af1fSAslan Bakirov extern int __init cma_declare_contiguous_nid(phys_addr_t base,
31dda02fd6SWeijie Yang 			phys_addr_t size, phys_addr_t limit,
32a254129eSJoonsoo Kim 			phys_addr_t alignment, unsigned int order_per_bit,
338676af1fSAslan Bakirov 			bool fixed, const char *name, struct cma **res_cma,
348676af1fSAslan Bakirov 			int nid);
cma_declare_contiguous(phys_addr_t base,phys_addr_t size,phys_addr_t limit,phys_addr_t alignment,unsigned int order_per_bit,bool fixed,const char * name,struct cma ** res_cma)358676af1fSAslan Bakirov static inline int __init cma_declare_contiguous(phys_addr_t base,
368676af1fSAslan Bakirov 			phys_addr_t size, phys_addr_t limit,
378676af1fSAslan Bakirov 			phys_addr_t alignment, unsigned int order_per_bit,
388676af1fSAslan Bakirov 			bool fixed, const char *name, struct cma **res_cma)
398676af1fSAslan Bakirov {
408676af1fSAslan Bakirov 	return cma_declare_contiguous_nid(base, size, limit, alignment,
418676af1fSAslan Bakirov 			order_per_bit, fixed, name, res_cma, NUMA_NO_NODE);
428676af1fSAslan Bakirov }
43c009da42SFrank van der Linden extern int __init cma_declare_contiguous_multi(phys_addr_t size,
44c009da42SFrank van der Linden 			phys_addr_t align, unsigned int order_per_bit,
45c009da42SFrank van der Linden 			const char *name, struct cma **res_cma, int nid);
46ac173824SSasha Levin extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
47ac173824SSasha Levin 					unsigned int order_per_bit,
48f318dd08SLaura Abbott 					const char *name,
49de9e14eeSMarek Szyprowski 					struct cma **res_cma);
5078fa5150SMinchan Kim extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align,
5165182029SMarek Szyprowski 			      bool no_warn);
529871e2deSMike Kravetz extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count);
5378fa5150SMinchan Kim extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count);
54e4231bcdSLaura Abbott 
55e4231bcdSLaura Abbott extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
56624ab90bSFrank van der Linden extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end);
5727d121d0SHari Bathini 
5827d121d0SHari Bathini extern void cma_reserve_pages_on_error(struct cma *cma);
59463586e9SYu Zhao 
60463586e9SYu Zhao #ifdef CONFIG_CMA
61463586e9SYu Zhao struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
62463586e9SYu Zhao bool cma_free_folio(struct cma *cma, const struct folio *folio);
63*9320fa27SFrank van der Linden bool cma_validate_zones(struct cma *cma);
64463586e9SYu Zhao #else
/* !CONFIG_CMA stub: no CMA areas exist, so allocation always fails. */
static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp)
{
	return NULL;
}
69463586e9SYu Zhao 
cma_free_folio(struct cma * cma,const struct folio * folio)70463586e9SYu Zhao static inline bool cma_free_folio(struct cma *cma, const struct folio *folio)
71463586e9SYu Zhao {
72463586e9SYu Zhao 	return false;
73463586e9SYu Zhao }
cma_validate_zones(struct cma * cma)74*9320fa27SFrank van der Linden static inline bool cma_validate_zones(struct cma *cma)
75*9320fa27SFrank van der Linden {
76*9320fa27SFrank van der Linden 	return false;
77*9320fa27SFrank van der Linden }
78463586e9SYu Zhao #endif
79463586e9SYu Zhao 
80a254129eSJoonsoo Kim #endif
81