// SPDX-License-Identifier: GPL-2.0+
/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <[email protected]>
 *	Michal Nazarewicz <[email protected]>
 *
 * Contiguous Memory Allocator
 *
 *   The Contiguous Memory Allocator (CMA) makes it possible to
 *   allocate big contiguous chunks of memory after the system has
 *   booted.
 *
 * Why is it needed?
 *
 *   Various devices on embedded systems have no scatter-gather and/or
 *   IO map support and require contiguous blocks of memory to
 *   operate.  They include devices such as cameras, hardware video
 *   codecs, etc.
 *
 *   Such devices often require big memory buffers (a full HD frame
 *   is, for instance, more than 2 megapixels large, i.e. more than 6
 *   MB of memory), which makes mechanisms such as kmalloc() or
 *   alloc_page() ineffective.
 *
 *   At the same time, a solution where a big memory region is
 *   reserved for a device is suboptimal since often more memory is
 *   reserved than strictly required and, moreover, the memory is
 *   inaccessible to the page allocator even if device drivers don't
 *   use it.
 *
 *   CMA tries to solve this issue by operating on memory regions
 *   where only movable pages can be allocated from.  This way, the
 *   kernel can use the memory for pagecache, and when a device driver
 *   requests it, the allocated pages can be migrated.
 */

#define pr_fmt(fmt) "cma: " fmt

#include <asm/page.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/dma-map-ops.h>
#include <linux/cma.h>
#include <linux/nospec.h>

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

struct cma *dma_contiguous_default_area;

/*
 * The default global CMA area size can be defined in the kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their system
 * should use the cma= kernel parameter.
 */
#define size_bytes ((phys_addr_t)CMA_SIZE_MBYTES * SZ_1M)
static phys_addr_t size_cmdline __initdata = -1;
static phys_addr_t base_cmdline __initdata;
static phys_addr_t limit_cmdline __initdata;

static int __init early_cma(char *p)
{
	if (!p) {
		pr_err("Config string not provided\n");
		return -EINVAL;
	}

	size_cmdline = memparse(p, &p);
	if (*p != '@')
		return 0;
	base_cmdline = memparse(p + 1, &p);
	if (*p != '-') {
		limit_cmdline = base_cmdline + size_cmdline;
		return 0;
	}
	limit_cmdline = memparse(p + 1, &p);

	return 0;
}
early_param("cma", early_cma);
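
/*
 * Illustrative example (values are arbitrary, not a recommendation): the
 * parser above accepts "cma=<size>[@<base>[-<limit>]]", so a command line
 * containing
 *
 *	cma=64M@0x10000000-0x20000000
 *
 * asks for a 64 MiB global area placed between 0x10000000 and 0x20000000,
 * while a plain "cma=64M" leaves the placement up to the allocator.
 */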

#ifdef CONFIG_DMA_NUMA_CMA

static struct cma *dma_contiguous_numa_area[MAX_NUMNODES];
static phys_addr_t numa_cma_size[MAX_NUMNODES] __initdata;
static struct cma *dma_contiguous_pernuma_area[MAX_NUMNODES];
static phys_addr_t pernuma_size_bytes __initdata;

static int __init early_numa_cma(char *p)
{
	int nid, count = 0;
	unsigned long tmp;
	char *s = p;

	while (*s) {
		if (sscanf(s, "%lu%n", &tmp, &count) != 1)
			break;

		if (s[count] == ':') {
			if (tmp >= MAX_NUMNODES)
				break;
			nid = array_index_nospec(tmp, MAX_NUMNODES);

			s += count + 1;
			tmp = memparse(s, &s);
			numa_cma_size[nid] = tmp;

			if (*s == ',')
				s++;
			else
				break;
		} else
			break;
	}

	return 0;
}
early_param("numa_cma", early_numa_cma);
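
/*
 * Illustrative example (node numbers and sizes are arbitrary): the parser
 * above reads "numa_cma=<node>:<size>[,<node>:<size>...]", so
 *
 *	numa_cma=0:128M,1:256M
 *
 * requests a 128 MiB CMA area on node 0 and a 256 MiB area on node 1,
 * provided those nodes are online when the reservation runs.
 */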

static int __init early_cma_pernuma(char *p)
{
	pernuma_size_bytes = memparse(p, &p);
	return 0;
}
early_param("cma_pernuma", early_cma_pernuma);
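
/*
 * Illustrative example (size is arbitrary): "cma_pernuma=16M" requests a
 * separate 16 MiB CMA area on every online NUMA node, reserved by
 * dma_numa_cma_reserve() below.
 */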
#endif

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	unsigned long total_pages = PHYS_PFN(memblock_phys_mem_size());

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif
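
/*
 * Worked example (illustrative, assumes 4 KiB pages): with 4 GiB of
 * memblock-reported memory (0x100000 pages) and CONFIG_CMA_SIZE_PERCENTAGE
 * set to 10, cma_early_percent_memory() returns 104857 pages worth of
 * bytes, i.e. roughly 409 MiB.
 */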

#ifdef CONFIG_DMA_NUMA_CMA
static void __init dma_numa_cma_reserve(void)
{
	int nid;

	for_each_node(nid) {
		int ret;
		char name[CMA_MAX_NAME];
		struct cma **cma;

		if (!node_online(nid)) {
			if (pernuma_size_bytes || numa_cma_size[nid])
				pr_warn("invalid node %d specified\n", nid);
			continue;
		}

		if (pernuma_size_bytes) {
			cma = &dma_contiguous_pernuma_area[nid];
			snprintf(name, sizeof(name), "pernuma%d", nid);
			ret = cma_declare_contiguous_nid(0, pernuma_size_bytes, 0, 0,
							 0, false, name, cma, nid);
			if (ret)
				pr_warn("%s: reservation failed: err %d, node %d\n",
					__func__, ret, nid);
		}

		if (numa_cma_size[nid]) {
			cma = &dma_contiguous_numa_area[nid];
			snprintf(name, sizeof(name), "numa%d", nid);
			ret = cma_declare_contiguous_nid(0, numa_cma_size[nid], 0, 0, 0, false,
							 name, cma, nid);
			if (ret)
				pr_warn("%s: reservation failed: err %d, node %d\n",
					__func__, ret, nid);
		}
	}
}
#else
static inline void __init dma_numa_cma_reserve(void)
{
}
#endif

/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock) has been
 * activated and all other subsystems have already allocated/reserved memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;
	phys_addr_t selected_base = 0;
	phys_addr_t selected_limit = limit;
	bool fixed = false;

	dma_numa_cma_reserve();

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
		selected_base = base_cmdline;
		selected_limit = min_not_zero(limit_cmdline, limit);
		if (base_cmdline + size_cmdline == limit_cmdline)
			fixed = true;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size && !dma_contiguous_default_area) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);

		dma_contiguous_reserve_area(selected_size, selected_base,
					    selected_limit,
					    &dma_contiguous_default_area,
					    fixed);
	}
}
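
/*
 * Call-site sketch (the limit expression is hypothetical): architecture
 * init code typically calls this once memblock is ready, e.g.
 *
 *	dma_contiguous_reserve(min(arch_dma_limit, arch_lowmem_limit));
 *
 * where the limit caps the reservation to memory the platform's DMA
 * masters can reach; passing 0 places no upper bound.
 */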

void __weak
dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
}

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock) has been
 * activated and all other subsystems have already allocated/reserved memory.
 * This function allows the creation of custom reserved areas for specific
 * devices.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	int ret;

	ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
					"reserved", res_cma);
	if (ret)
		return ret;

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(cma_get_base(*res_cma),
				cma_get_size(*res_cma));

	return 0;
}
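
/*
 * Minimal usage sketch (function and variable names are hypothetical):
 * arch or platform setup code could reserve a dedicated 16 MiB area once
 * memblock is up:
 *
 *	static struct cma *foo_cma;
 *
 *	void __init foo_reserve_cma(void)
 *	{
 *		if (dma_contiguous_reserve_area(SZ_16M, 0, 0, &foo_cma, false))
 *			pr_warn("foo: CMA reservation failed\n");
 *	}
 */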

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation.
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device-specific contiguous memory area if available, or the default
 * global one. Requires the dev_get_cma_area() helper function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int align, bool no_warn)
{
	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	return cma_alloc(dev_get_cma_area(dev), count, align, no_warn);
}

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to a contiguous
 * area, and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	return cma_release(dev_get_cma_area(dev), pages, count);
}

static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)
{
	unsigned int align = min(get_order(size), CONFIG_CMA_ALIGNMENT);

	return cma_alloc(cma, size >> PAGE_SHIFT, align, gfp & __GFP_NOWARN);
}

/**
 * dma_alloc_contiguous() - allocate contiguous pages
 * @dev:   Pointer to device for which the allocation is performed.
 * @size:  Requested allocation size.
 * @gfp:   Allocation flags.
 *
 * Tries to use the device-specific contiguous memory area if available,
 * then the per-NUMA CMA area; if those allocations fail, it falls back to
 * the default global area.
 *
 * Note that single-page allocations bypass the per-NUMA and global areas,
 * since the addresses within one page are always contiguous; this avoids
 * wasting CMA pages on such requests and also helps reduce fragmentation.
 */
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
{
#ifdef CONFIG_DMA_NUMA_CMA
	int nid = dev_to_node(dev);
#endif

	/* CMA can be used only in the context which permits sleeping */
	if (!gfpflags_allow_blocking(gfp))
		return NULL;
	if (dev->cma_area)
		return cma_alloc_aligned(dev->cma_area, size, gfp);
	if (size <= PAGE_SIZE)
		return NULL;

#ifdef CONFIG_DMA_NUMA_CMA
	if (nid != NUMA_NO_NODE && !(gfp & (GFP_DMA | GFP_DMA32))) {
		struct cma *cma = dma_contiguous_pernuma_area[nid];
		struct page *page;

		if (cma) {
			page = cma_alloc_aligned(cma, size, gfp);
			if (page)
				return page;
		}

		cma = dma_contiguous_numa_area[nid];
		if (cma) {
			page = cma_alloc_aligned(cma, size, gfp);
			if (page)
				return page;
		}
	}
#endif
	if (!dma_contiguous_default_area)
		return NULL;

	return cma_alloc_aligned(dma_contiguous_default_area, size, gfp);
}

/**
 * dma_free_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @page:  Pointer to the allocated pages.
 * @size:  Size of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_contiguous(). Since
 * cma_release() returns false when the provided pages do not belong to a
 * contiguous area and true otherwise, this function falls back to
 * __free_pages() when cma_release() returns false.
 */
void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* if dev has its own cma, free page from there */
	if (dev->cma_area) {
		if (cma_release(dev->cma_area, page, count))
			return;
	} else {
		/*
		 * otherwise, page is from either per-numa cma or default cma
		 */
#ifdef CONFIG_DMA_NUMA_CMA
		if (cma_release(dma_contiguous_pernuma_area[page_to_nid(page)],
					page, count))
			return;
		if (cma_release(dma_contiguous_numa_area[page_to_nid(page)],
					page, count))
			return;
#endif
		if (cma_release(dma_contiguous_default_area, page, count))
			return;
	}

	/* not in any cma, free from buddy */
	__free_pages(page, get_order(size));
}
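
/*
 * Pairing sketch (simplified; error handling omitted): a caller such as
 * the dma-direct allocator combines the two helpers above roughly like
 * this, and dma_free_contiguous() copes with pages from either path
 * because it falls back to __free_pages() when no CMA area owns the page:
 *
 *	struct page *page;
 *
 *	page = dma_alloc_contiguous(dev, size, gfp);
 *	if (!page)
 *		page = alloc_pages_node(dev_to_node(dev), gfp,
 *					get_order(size));
 *	...
 *	dma_free_contiguous(dev, page, size);
 */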

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#undef pr_fmt
#define pr_fmt(fmt) fmt

static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	dev->cma_area = rmem->priv;
	return 0;
}

static void rmem_cma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	dev->cma_area = NULL;
}

static const struct reserved_mem_ops rmem_cma_ops = {
	.device_init	= rmem_cma_device_init,
	.device_release = rmem_cma_device_release,
};

static int __init rmem_cma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;
	bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
	struct cma *cma;
	int err;

	if (size_cmdline != -1 && default_cma) {
		pr_info("Reserved memory: bypass %s node, using cmdline CMA params instead\n",
			rmem->name);
		return -EBUSY;
	}

	if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if (!IS_ALIGNED(rmem->base | rmem->size, CMA_MIN_ALIGNMENT_BYTES)) {
		pr_err("Reserved memory: incorrect alignment of CMA region\n");
		return -EINVAL;
	}

	err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
	if (err) {
		pr_err("Reserved memory: unable to setup CMA region\n");
		return err;
	}
	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(rmem->base, rmem->size);

	if (default_cma)
		dma_contiguous_default_area = cma;

	rmem->ops = &rmem_cma_ops;
	rmem->priv = cma;

	pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);

	return 0;
}
RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
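
/*
 * Illustrative device-tree node (property values are arbitrary) that this
 * handler would pick up and, thanks to "linux,cma-default", install as the
 * default CMA area:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		linux,cma {
 *			compatible = "shared-dma-pool";
 *			reusable;
 *			size = <0x4000000>;
 *			linux,cma-default;
 *		};
 *	};
 */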
#endif