xref: /linux-6.15/kernel/dma/direct.c (revision 8324993f)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
u64 zone_dma_limit __ro_after_init = DMA_BIT_MASK(24);

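/*
 * Translate a CPU physical address to a DMA address, using the unencrypted
 * alias when the device requires unencrypted DMA.
 */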
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return phys_to_dma_unencrypted(dev, phys);
	return phys_to_dma(dev, phys);
}

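/* Look up the struct page backing a direct-mapped DMA address. */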
static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

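/*
 * Report the smallest all-ones DMA mask that covers the DMA address of the
 * highest page of system memory.
 */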
u64 dma_direct_get_required_mask(struct device *dev)
{
	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma_direct(dev, phys);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
{
	u64 dma_limit = min_not_zero(
		dev->coherent_dma_mask,
		dev->bus_dma_limit);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first.  If that returns memory that isn't actually addressable
	 * we will fall back to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no-ops without the corresponding
	 * zones.
	 */
	*phys_limit = dma_to_phys(dev, dma_limit);
	if (*phys_limit <= zone_dma_limit)
		return GFP_DMA;
	if (*phys_limit <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}

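/*
 * Check that the buffer at @phys is addressable within the device's coherent
 * DMA mask and bus limit.
 */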
bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

	if (dma_addr == DMA_MAPPING_ERROR)
		return false;
	return dma_addr + size - 1 <=
		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

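/*
 * Mark the kernel mapping of a freshly allocated buffer as decrypted when the
 * device requires unencrypted DMA; dma_set_encrypted() undoes this on free.
 */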
static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
	if (!force_dma_unencrypted(dev))
		return 0;
	return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
}

static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
{
	int ret;

	if (!force_dma_unencrypted(dev))
		return 0;
	ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
	if (ret)
		pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
	return ret;
}

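/*
 * Return pages to the swiotlb pool if they came from it, otherwise hand them
 * back to the contiguous allocator.
 */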
static void __dma_direct_free_pages(struct device *dev, struct page *page,
				    size_t size)
{
	if (swiotlb_free(dev, page, size))
		return;
	dma_free_contiguous(dev, page, size);
}

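/*
 * Allocate from the device's restricted DMA pool and make sure the result is
 * actually addressable by the device.
 */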
static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
{
	struct page *page = swiotlb_alloc(dev, size);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		swiotlb_free(dev, page, size);
		return NULL;
	}

	return page;
}

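/*
 * Allocate contiguous pages the device can address: try CMA or the preferred
 * zone first, then retry with GFP_DMA32 and GFP_DMA when the result is not
 * within the device's DMA limits.
 */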
static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp, bool allow_highmem)
{
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_limit;

	WARN_ON_ONCE(!PAGE_ALIGNED(size));

	if (is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_swiotlb(dev, size);

	gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
	page = dma_alloc_contiguous(dev, size, gfp);
	if (page) {
		if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
		    (!allow_highmem && PageHighMem(page))) {
			dma_free_contiguous(dev, page, size);
			page = NULL;
		}
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		__free_pages(page, get_order(size));
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}

/*
 * Check if a potentially blocking operation needs to dip into the atomic
 * pools for the given device/gfp.
 */
static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{
	return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}

static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;
	u64 phys_limit;
	void *ret;

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
		return NULL;

	gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
	page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
	if (!page)
		return NULL;
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;
}

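/*
 * DMA_ATTR_NO_KERNEL_MAPPING allocations skip the kernel mapping entirely and
 * return the struct page as an opaque cookie.
 */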
static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;

	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
	if (!page)
		return NULL;

	/* remove any dirty cache lines on the kernel alias */
	if (!PageHighMem(page))
		arch_dma_prep_coherent(page, size);

	/* return the page pointer as the opaque cookie */
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
}

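/* Coherent allocation entry point for the direct mapping. */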
void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	bool remap = false, set_uncached = false;
	struct page *page;
	void *ret;

	size = PAGE_ALIGN(size);
	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);

	if (!dev_is_dma_coherent(dev)) {
		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
		    !is_swiotlb_for_alloc(dev))
			return arch_dma_alloc(dev, size, dma_handle, gfp,
					      attrs);

		/*
		 * If there is a global pool, always allocate from it for
		 * non-coherent devices.
		 */
		if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
			return dma_alloc_from_global_coherent(dev, size,
					dma_handle);

		/*
		 * Otherwise we require the architecture to either be able to
		 * mark arbitrary parts of the kernel direct mapping uncached,
		 * or to remap it uncached.
		 */
		set_uncached = IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED);
		remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
		if (!set_uncached && !remap) {
			pr_warn_once("coherent DMA allocations not supported on this platform.\n");
			return NULL;
		}
	}

	/*
	 * Remapping or decrypting memory may block, allocate the memory from
	 * the atomic pools instead if we aren't allowed to block.
	 */
	if ((remap || force_dma_unencrypted(dev)) &&
	    dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	/* we always manually zero the memory once we are done */
	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
	if (!page)
		return NULL;

	/*
	 * dma_alloc_contiguous can return highmem pages depending on a
	 * combination of the cma= arguments and per-arch setup.  These need to
	 * be remapped to return a kernel virtual address.
	 */
	if (PageHighMem(page)) {
		remap = true;
		set_uncached = false;
	}

	if (remap) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		if (force_dma_unencrypted(dev))
			prot = pgprot_decrypted(prot);

		/* remove any dirty cache lines on the kernel alias */
		arch_dma_prep_coherent(page, size);

		/* create a coherent mapping */
		ret = dma_common_contiguous_remap(page, size, prot,
				__builtin_return_address(0));
		if (!ret)
			goto out_free_pages;
	} else {
		ret = page_address(page);
		if (dma_set_decrypted(dev, ret, size))
			goto out_leak_pages;
	}

	memset(ret, 0, size);

	if (set_uncached) {
		arch_dma_prep_coherent(page, size);
		ret = arch_dma_set_uncached(ret, size);
		if (IS_ERR(ret))
			goto out_encrypt_pages;
	}

	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;

out_encrypt_pages:
	if (dma_set_encrypted(dev, page_address(page), size))
		return NULL;
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
out_leak_pages:
	return NULL;
}

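/*
 * Free a buffer allocated by dma_direct_alloc(), undoing any remapping,
 * uncached alias, or decryption done at allocation time.
 */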
void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		dma_free_contiguous(dev, cpu_addr, size);
		return;
	}

	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
	    !dev_is_dma_coherent(dev) &&
	    !is_swiotlb_for_alloc(dev)) {
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
		return;
	}

	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
	    !dev_is_dma_coherent(dev)) {
		if (!dma_release_from_global_coherent(page_order, cpu_addr))
			WARN_ON_ONCE(1);
		return;
	}

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
		return;

	if (is_vmalloc_addr(cpu_addr)) {
		vunmap(cpu_addr);
	} else {
		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
			arch_dma_clear_uncached(cpu_addr, size);
		if (dma_set_encrypted(dev, cpu_addr, size))
			return;
	}

	__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
}

struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page;
	void *ret;

	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	page = __dma_direct_alloc_pages(dev, size, gfp, false);
	if (!page)
		return NULL;

	ret = page_address(page);
	if (dma_set_decrypted(dev, ret, size))
		goto out_leak_pages;
	memset(ret, 0, size);
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
out_leak_pages:
	return NULL;
}

void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir)
{
	void *vaddr = page_address(page);

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, vaddr, size))
		return;

	if (dma_set_encrypted(dev, vaddr, size))
		return;
	__dma_direct_free_pages(dev, page, size);
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		swiotlb_sync_single_for_device(dev, paddr, sg->length, dir);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length,
					dir);
	}
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		swiotlb_sync_single_for_cpu(dev, paddr, sg->length, dir);

		if (dir == DMA_FROM_DEVICE)
			arch_dma_mark_clean(paddr, sg->length);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}

/*
 * Unmaps segments, except for ones marked as pci_p2pdma which do not
 * require any further action as they contain a bus address.
 */
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl,  sg, nents, i) {
		if (sg_dma_is_bus_address(sg))
			sg_dma_unmark_bus_address(sg);
		else
			dma_direct_unmap_page(dev, sg->dma_address,
					      sg_dma_len(sg), dir, attrs);
	}
}
#endif

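/*
 * Maps the scatterlist for DMA, returning the number of mapped entries on
 * success or a negative error code.  P2PDMA segments already mapped to a bus
 * address need no further translation here.
 */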
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct pci_p2pdma_map_state p2pdma_state = {};
	enum pci_p2pdma_map_type map;
	struct scatterlist *sg;
	int i, ret;

	for_each_sg(sgl, sg, nents, i) {
		if (is_pci_p2pdma_page(sg_page(sg))) {
			map = pci_p2pdma_map_segment(&p2pdma_state, dev, sg);
			switch (map) {
			case PCI_P2PDMA_MAP_BUS_ADDR:
				continue;
			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
				/*
				 * Any P2P mapping that traverses the PCI
				 * host bridge must be mapped with CPU physical
				 * address and not PCI bus addresses. This is
				 * done with dma_direct_map_page() below.
				 */
				break;
			default:
				ret = -EREMOTEIO;
				goto out_unmap;
			}
		}

		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR) {
			ret = -EIO;
			goto out_unmap;
		}
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return ret;
}

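/*
 * Map an MMIO resource: the physical address is used as the DMA address
 * directly, checked only against the device's addressing limits.
 */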
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
		dev_err_once(dev,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		WARN_ON_ONCE(1);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_direct_to_page(dev, dma_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

bool dma_direct_can_mmap(struct device *dev)
{
	return dev_is_dma_coherent(dev) ||
		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
	if (force_dma_unencrypted(dev))
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;
	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
		return ret;

	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}

int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask >= DMA_BIT_MASK(32))
		return 1;

	/*
	 * This check needs to be against the actual bit mask value, so use
	 * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = min_t(u64, min_mask, zone_dma_limit);
	return mask >= phys_to_dma_unencrypted(dev, min_mask);
}

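/*
 * Find the bus_dma_region entry in the device's dma_range_map that covers
 * start_pfn, or return NULL if none does.
 */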
static const struct bus_dma_region *dma_find_range(struct device *dev,
						   unsigned long start_pfn)
{
	const struct bus_dma_region *m;

	for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
		unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);

		if (start_pfn >= cpu_start_pfn &&
		    start_pfn - cpu_start_pfn < PFN_DOWN(m->size))
			return m;
	}

	return NULL;
}

/*
 * Check whether all RAM resource ranges are covered by the dma_range_map.
 * Returns 0 when further checking is needed,
 * 1 if some RAM range can't be covered by the dma_range_map.
 */
static int check_ram_in_range_map(unsigned long start_pfn,
				  unsigned long nr_pages, void *data)
{
	unsigned long end_pfn = start_pfn + nr_pages;
	struct device *dev = data;

	while (start_pfn < end_pfn) {
		const struct bus_dma_region *bdr;

		bdr = dma_find_range(dev, start_pfn);
		if (!bdr)
			return 1;

		start_pfn = PFN_DOWN(bdr->cpu_start) + PFN_DOWN(bdr->size);
	}

	return 0;
}

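/* Return true if the device's dma_range_map (if any) covers all system RAM. */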
bool dma_direct_all_ram_mapped(struct device *dev)
{
	if (!dev->dma_range_map)
		return true;
	return !walk_system_ram_range(0, PFN_DOWN(ULONG_MAX) + 1, dev,
				      check_ram_in_range_map);
}

size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active(dev) &&
	    (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev)))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}

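/*
 * Syncs are needed for non-coherent devices or when the buffer was bounced
 * through swiotlb.
 */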
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return !dev_is_dma_coherent(dev) ||
	       swiotlb_find_pool(dev, dma_to_phys(dev, dma_addr));
}

/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev:	device pointer; needed to "own" the allocated memory.
 * @cpu_start:  beginning of memory region covered by this offset.
 * @dma_start:  beginning of DMA/PCI region covered by this offset.
 * @size:	size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug.  The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
			 dma_addr_t dma_start, u64 size)
{
	struct bus_dma_region *map;
	u64 offset = (u64)cpu_start - (u64)dma_start;

	if (dev->dma_range_map) {
		dev_err(dev, "attempt to add DMA range to existing map\n");
		return -EINVAL;
	}

	if (!offset)
		return 0;

	map = kcalloc(2, sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;
	map[0].cpu_start = cpu_start;
	map[0].dma_start = dma_start;
	map[0].size = size;
	dev->dma_range_map = map;
	return 0;
}