xref: /linux-6.15/kernel/dma/mapping.c (revision cae5572e)
1cf65a0f6SChristoph Hellwig // SPDX-License-Identifier: GPL-2.0
2cf65a0f6SChristoph Hellwig /*
3cf65a0f6SChristoph Hellwig  * arch-independent dma-mapping routines
4cf65a0f6SChristoph Hellwig  *
5cf65a0f6SChristoph Hellwig  * Copyright (c) 2006  SUSE Linux Products GmbH
6cf65a0f6SChristoph Hellwig  * Copyright (c) 2006  Tejun Heo <[email protected]>
7cf65a0f6SChristoph Hellwig  */
805887cb6SChristoph Hellwig #include <linux/memblock.h> /* for max_pfn */
9cf65a0f6SChristoph Hellwig #include <linux/acpi.h>
100a0f0d8bSChristoph Hellwig #include <linux/dma-map-ops.h>
11cf65a0f6SChristoph Hellwig #include <linux/export.h>
12cf65a0f6SChristoph Hellwig #include <linux/gfp.h>
13b5c58b2fSLeon Romanovsky #include <linux/iommu-dma.h>
147ade4f10SAlexander Potapenko #include <linux/kmsan.h>
15cf65a0f6SChristoph Hellwig #include <linux/of_device.h>
16cf65a0f6SChristoph Hellwig #include <linux/slab.h>
17cf65a0f6SChristoph Hellwig #include <linux/vmalloc.h>
18a1fd09e8SChristoph Hellwig #include "debug.h"
1919c65c3dSChristoph Hellwig #include "direct.h"
20cf65a0f6SChristoph Hellwig 
21038eb433SSean Anderson #define CREATE_TRACE_POINTS
22038eb433SSean Anderson #include <trace/events/dma.h>
23038eb433SSean Anderson 
24fe4e5efaSJiaxun Yang #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
25fe4e5efaSJiaxun Yang 	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
26fe4e5efaSJiaxun Yang 	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
271d3f56b2SJiaxun Yang bool dma_default_coherent = IS_ENABLED(CONFIG_ARCH_DMA_DEFAULT_COHERENT);
28fe4e5efaSJiaxun Yang #endif
296d4e9a8eSChristoph Hellwig 
30cf65a0f6SChristoph Hellwig /*
31cf65a0f6SChristoph Hellwig  * Managed DMA API
32cf65a0f6SChristoph Hellwig  */
33cf65a0f6SChristoph Hellwig struct dma_devres {
34cf65a0f6SChristoph Hellwig 	size_t		size;
35cf65a0f6SChristoph Hellwig 	void		*vaddr;
36cf65a0f6SChristoph Hellwig 	dma_addr_t	dma_handle;
37cf65a0f6SChristoph Hellwig 	unsigned long	attrs;
38cf65a0f6SChristoph Hellwig };
39cf65a0f6SChristoph Hellwig 
40cf65a0f6SChristoph Hellwig static void dmam_release(struct device *dev, void *res)
41cf65a0f6SChristoph Hellwig {
42cf65a0f6SChristoph Hellwig 	struct dma_devres *this = res;
43cf65a0f6SChristoph Hellwig 
44cf65a0f6SChristoph Hellwig 	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
45cf65a0f6SChristoph Hellwig 			this->attrs);
46cf65a0f6SChristoph Hellwig }
47cf65a0f6SChristoph Hellwig 
48cf65a0f6SChristoph Hellwig static int dmam_match(struct device *dev, void *res, void *match_data)
49cf65a0f6SChristoph Hellwig {
50cf65a0f6SChristoph Hellwig 	struct dma_devres *this = res, *match = match_data;
51cf65a0f6SChristoph Hellwig 
52cf65a0f6SChristoph Hellwig 	if (this->vaddr == match->vaddr) {
53cf65a0f6SChristoph Hellwig 		WARN_ON(this->size != match->size ||
54cf65a0f6SChristoph Hellwig 			this->dma_handle != match->dma_handle);
55cf65a0f6SChristoph Hellwig 		return 1;
56cf65a0f6SChristoph Hellwig 	}
57cf65a0f6SChristoph Hellwig 	return 0;
58cf65a0f6SChristoph Hellwig }
59cf65a0f6SChristoph Hellwig 
60cf65a0f6SChristoph Hellwig /**
61cf65a0f6SChristoph Hellwig  * dmam_free_coherent - Managed dma_free_coherent()
62cf65a0f6SChristoph Hellwig  * @dev: Device to free coherent memory for
63cf65a0f6SChristoph Hellwig  * @size: Size of allocation
64cf65a0f6SChristoph Hellwig  * @vaddr: Virtual address of the memory to free
65cf65a0f6SChristoph Hellwig  * @dma_handle: DMA handle of the memory to free
66cf65a0f6SChristoph Hellwig  *
67cf65a0f6SChristoph Hellwig  * Managed dma_free_coherent().
68cf65a0f6SChristoph Hellwig  */
69cf65a0f6SChristoph Hellwig void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
70cf65a0f6SChristoph Hellwig 			dma_addr_t dma_handle)
71cf65a0f6SChristoph Hellwig {
72cf65a0f6SChristoph Hellwig 	struct dma_devres match_data = { size, vaddr, dma_handle };
73cf65a0f6SChristoph Hellwig 
74cf65a0f6SChristoph Hellwig 	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
7528e8b740SLance Richardson 	dma_free_coherent(dev, size, vaddr, dma_handle);
76cf65a0f6SChristoph Hellwig }
77cf65a0f6SChristoph Hellwig EXPORT_SYMBOL(dmam_free_coherent);
78cf65a0f6SChristoph Hellwig 
79cf65a0f6SChristoph Hellwig /**
80cf65a0f6SChristoph Hellwig  * dmam_alloc_attrs - Managed dma_alloc_attrs()
81cf65a0f6SChristoph Hellwig  * @dev: Device to allocate non-coherent memory for
82cf65a0f6SChristoph Hellwig  * @size: Size of allocation
83cf65a0f6SChristoph Hellwig  * @dma_handle: Out argument for allocated DMA handle
84cf65a0f6SChristoph Hellwig  * @gfp: Allocation flags
85cf65a0f6SChristoph Hellwig  * @attrs: Flags in the DMA_ATTR_* namespace.
86cf65a0f6SChristoph Hellwig  *
87cf65a0f6SChristoph Hellwig  * Managed dma_alloc_attrs().  Memory allocated using this function will be
88cf65a0f6SChristoph Hellwig  * automatically released on driver detach.
89cf65a0f6SChristoph Hellwig  *
90cf65a0f6SChristoph Hellwig  * RETURNS:
91cf65a0f6SChristoph Hellwig  * Pointer to allocated memory on success, NULL on failure.
92cf65a0f6SChristoph Hellwig  */
93cf65a0f6SChristoph Hellwig void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
94cf65a0f6SChristoph Hellwig 		gfp_t gfp, unsigned long attrs)
95cf65a0f6SChristoph Hellwig {
96cf65a0f6SChristoph Hellwig 	struct dma_devres *dr;
97cf65a0f6SChristoph Hellwig 	void *vaddr;
98cf65a0f6SChristoph Hellwig 
99cf65a0f6SChristoph Hellwig 	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
100cf65a0f6SChristoph Hellwig 	if (!dr)
101cf65a0f6SChristoph Hellwig 		return NULL;
102cf65a0f6SChristoph Hellwig 
103cf65a0f6SChristoph Hellwig 	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
104cf65a0f6SChristoph Hellwig 	if (!vaddr) {
105cf65a0f6SChristoph Hellwig 		devres_free(dr);
106cf65a0f6SChristoph Hellwig 		return NULL;
107cf65a0f6SChristoph Hellwig 	}
108cf65a0f6SChristoph Hellwig 
109cf65a0f6SChristoph Hellwig 	dr->vaddr = vaddr;
110cf65a0f6SChristoph Hellwig 	dr->dma_handle = *dma_handle;
111cf65a0f6SChristoph Hellwig 	dr->size = size;
112cf65a0f6SChristoph Hellwig 	dr->attrs = attrs;
113cf65a0f6SChristoph Hellwig 
114cf65a0f6SChristoph Hellwig 	devres_add(dev, dr);
115cf65a0f6SChristoph Hellwig 
116cf65a0f6SChristoph Hellwig 	return vaddr;
117cf65a0f6SChristoph Hellwig }
118cf65a0f6SChristoph Hellwig EXPORT_SYMBOL(dmam_alloc_attrs);
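/*
 * Illustrative sketch (not part of this file): a hypothetical driver probe
 * using the managed allocator above.  The buffer is released automatically
 * by devres on driver detach, so no dmam_free_coherent() is needed on the
 * normal teardown path.  "foo_probe" and the 4 KiB size are made up.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		dma_addr_t dma;
 *		void *buf;
 *
 *		buf = dmam_alloc_attrs(&pdev->dev, SZ_4K, &dma, GFP_KERNEL, 0);
 *		if (!buf)
 *			return -ENOMEM;
 *		// use buf / dma for the lifetime of the binding
 *		return 0;
 *	}
 */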
119cf65a0f6SChristoph Hellwig 
120d35834c6SChristoph Hellwig static bool dma_go_direct(struct device *dev, dma_addr_t mask,
121d35834c6SChristoph Hellwig 		const struct dma_map_ops *ops)
122d3fa60d7SChristoph Hellwig {
123b5c58b2fSLeon Romanovsky 	if (use_dma_iommu(dev))
124b5c58b2fSLeon Romanovsky 		return false;
125b5c58b2fSLeon Romanovsky 
126d35834c6SChristoph Hellwig 	if (likely(!ops))
127d35834c6SChristoph Hellwig 		return true;
128b5c58b2fSLeon Romanovsky 
129d35834c6SChristoph Hellwig #ifdef CONFIG_DMA_OPS_BYPASS
130d35834c6SChristoph Hellwig 	if (dev->dma_ops_bypass)
131d35834c6SChristoph Hellwig 		return min_not_zero(mask, dev->bus_dma_limit) >=
132d35834c6SChristoph Hellwig 			    dma_direct_get_required_mask(dev);
133d35834c6SChristoph Hellwig #endif
134d35834c6SChristoph Hellwig 	return false;
135d35834c6SChristoph Hellwig }
136d35834c6SChristoph Hellwig 
137d35834c6SChristoph Hellwig 
138d35834c6SChristoph Hellwig /*
139d35834c6SChristoph Hellwig  * Check if the device uses a direct mapping for streaming DMA operations.
140d35834c6SChristoph Hellwig  * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
141d35834c6SChristoph Hellwig  * enough.
142d35834c6SChristoph Hellwig  */
143d35834c6SChristoph Hellwig static inline bool dma_alloc_direct(struct device *dev,
144d35834c6SChristoph Hellwig 		const struct dma_map_ops *ops)
145d35834c6SChristoph Hellwig {
146d35834c6SChristoph Hellwig 	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
147d35834c6SChristoph Hellwig }
148d35834c6SChristoph Hellwig 
149d35834c6SChristoph Hellwig static inline bool dma_map_direct(struct device *dev,
150d35834c6SChristoph Hellwig 		const struct dma_map_ops *ops)
151d35834c6SChristoph Hellwig {
152d35834c6SChristoph Hellwig 	return dma_go_direct(dev, *dev->dma_mask, ops);
153d3fa60d7SChristoph Hellwig }
154d3fa60d7SChristoph Hellwig 
155d3fa60d7SChristoph Hellwig dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
156d3fa60d7SChristoph Hellwig 		size_t offset, size_t size, enum dma_data_direction dir,
157d3fa60d7SChristoph Hellwig 		unsigned long attrs)
158d3fa60d7SChristoph Hellwig {
159d3fa60d7SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
160d3fa60d7SChristoph Hellwig 	dma_addr_t addr;
161d3fa60d7SChristoph Hellwig 
162d3fa60d7SChristoph Hellwig 	BUG_ON(!valid_dma_direction(dir));
163f959dcd6SThomas Tai 
164f959dcd6SThomas Tai 	if (WARN_ON_ONCE(!dev->dma_mask))
165f959dcd6SThomas Tai 		return DMA_MAPPING_ERROR;
166f959dcd6SThomas Tai 
1678d8d53cfSAlexey Kardashevskiy 	if (dma_map_direct(dev, ops) ||
1688d8d53cfSAlexey Kardashevskiy 	    arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
169d3fa60d7SChristoph Hellwig 		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
170b5c58b2fSLeon Romanovsky 	else if (use_dma_iommu(dev))
171b5c58b2fSLeon Romanovsky 		addr = iommu_dma_map_page(dev, page, offset, size, dir, attrs);
172d3fa60d7SChristoph Hellwig 	else
173d3fa60d7SChristoph Hellwig 		addr = ops->map_page(dev, page, offset, size, dir, attrs);
1747ade4f10SAlexander Potapenko 	kmsan_handle_dma(page, offset, size, dir);
175038eb433SSean Anderson 	trace_dma_map_page(dev, page_to_phys(page) + offset, addr, size, dir,
176038eb433SSean Anderson 			   attrs);
177c2bbf9d1SHamza Mahfooz 	debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);
178d3fa60d7SChristoph Hellwig 
179d3fa60d7SChristoph Hellwig 	return addr;
180d3fa60d7SChristoph Hellwig }
181d3fa60d7SChristoph Hellwig EXPORT_SYMBOL(dma_map_page_attrs);
182d3fa60d7SChristoph Hellwig 
183d3fa60d7SChristoph Hellwig void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
184d3fa60d7SChristoph Hellwig 		enum dma_data_direction dir, unsigned long attrs)
185d3fa60d7SChristoph Hellwig {
186d3fa60d7SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
187d3fa60d7SChristoph Hellwig 
188d3fa60d7SChristoph Hellwig 	BUG_ON(!valid_dma_direction(dir));
1898d8d53cfSAlexey Kardashevskiy 	if (dma_map_direct(dev, ops) ||
1908d8d53cfSAlexey Kardashevskiy 	    arch_dma_unmap_page_direct(dev, addr + size))
191d3fa60d7SChristoph Hellwig 		dma_direct_unmap_page(dev, addr, size, dir, attrs);
192b5c58b2fSLeon Romanovsky 	else if (use_dma_iommu(dev))
193b5c58b2fSLeon Romanovsky 		iommu_dma_unmap_page(dev, addr, size, dir, attrs);
194f69e342eSLeon Romanovsky 	else
195d3fa60d7SChristoph Hellwig 		ops->unmap_page(dev, addr, size, dir, attrs);
196038eb433SSean Anderson 	trace_dma_unmap_page(dev, addr, size, dir, attrs);
197d3fa60d7SChristoph Hellwig 	debug_dma_unmap_page(dev, addr, size, dir);
198d3fa60d7SChristoph Hellwig }
199d3fa60d7SChristoph Hellwig EXPORT_SYMBOL(dma_unmap_page_attrs);
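/*
 * Illustrative sketch (not part of this file): the usual streaming-mapping
 * pattern built on the two helpers above, through the dma_map_single()/
 * dma_unmap_single() wrappers from <linux/dma-mapping.h>.  "dev", "buf" and
 * "len" are assumed to come from the caller.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	// ... start the transfer using "dma" as the device-visible address ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */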
200d3fa60d7SChristoph Hellwig 
201fffe3cc8SLogan Gunthorpe static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
202fffe3cc8SLogan Gunthorpe 	 int nents, enum dma_data_direction dir, unsigned long attrs)
203d3fa60d7SChristoph Hellwig {
204d3fa60d7SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
205d3fa60d7SChristoph Hellwig 	int ents;
206d3fa60d7SChristoph Hellwig 
207d3fa60d7SChristoph Hellwig 	BUG_ON(!valid_dma_direction(dir));
208f959dcd6SThomas Tai 
209f959dcd6SThomas Tai 	if (WARN_ON_ONCE(!dev->dma_mask))
210f959dcd6SThomas Tai 		return 0;
211f959dcd6SThomas Tai 
2128d8d53cfSAlexey Kardashevskiy 	if (dma_map_direct(dev, ops) ||
2138d8d53cfSAlexey Kardashevskiy 	    arch_dma_map_sg_direct(dev, sg, nents))
214d3fa60d7SChristoph Hellwig 		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
215b5c58b2fSLeon Romanovsky 	else if (use_dma_iommu(dev))
216b5c58b2fSLeon Romanovsky 		ents = iommu_dma_map_sg(dev, sg, nents, dir, attrs);
217d3fa60d7SChristoph Hellwig 	else
218d3fa60d7SChristoph Hellwig 		ents = ops->map_sg(dev, sg, nents, dir, attrs);
219fffe3cc8SLogan Gunthorpe 
2207ade4f10SAlexander Potapenko 	if (ents > 0) {
2217ade4f10SAlexander Potapenko 		kmsan_handle_dma_sg(sg, nents, dir);
222038eb433SSean Anderson 		trace_dma_map_sg(dev, sg, nents, ents, dir, attrs);
223c2bbf9d1SHamza Mahfooz 		debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
2247ade4f10SAlexander Potapenko 	} else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
2257ade4f10SAlexander Potapenko 				ents != -EIO && ents != -EREMOTEIO)) {
22668b6dbf1SSean Anderson 		trace_dma_map_sg_err(dev, sg, nents, ents, dir, attrs);
227fffe3cc8SLogan Gunthorpe 		return -EIO;
2287ade4f10SAlexander Potapenko 	}
229d3fa60d7SChristoph Hellwig 
230d3fa60d7SChristoph Hellwig 	return ents;
231d3fa60d7SChristoph Hellwig }
232fffe3cc8SLogan Gunthorpe 
233fffe3cc8SLogan Gunthorpe /**
234fffe3cc8SLogan Gunthorpe  * dma_map_sg_attrs - Map the given buffer for DMA
235fffe3cc8SLogan Gunthorpe  * @dev:	The device for which to perform the DMA operation
236fffe3cc8SLogan Gunthorpe  * @sg:		The sg_table object describing the buffer
237a61cb601SChristoph Hellwig  * @nents:	Number of entries to map
238fffe3cc8SLogan Gunthorpe  * @dir:	DMA direction
239fffe3cc8SLogan Gunthorpe  * @attrs:	Optional DMA attributes for the map operation
240fffe3cc8SLogan Gunthorpe  *
241fffe3cc8SLogan Gunthorpe  * Maps a buffer described by a scatterlist passed in the sg argument with
242fffe3cc8SLogan Gunthorpe  * nents segments for the @dir DMA operation by the @dev device.
243fffe3cc8SLogan Gunthorpe  *
244fffe3cc8SLogan Gunthorpe  * Returns the number of mapped entries (which can be less than nents)
245fffe3cc8SLogan Gunthorpe  * on success. Zero is returned for any error.
246fffe3cc8SLogan Gunthorpe  *
247fffe3cc8SLogan Gunthorpe  * dma_unmap_sg_attrs() should be used to unmap the buffer with the
248fffe3cc8SLogan Gunthorpe  * original sg and original nents (not the value returned by this function).
249fffe3cc8SLogan Gunthorpe  */
2502a047e06SChristoph Hellwig unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
251fffe3cc8SLogan Gunthorpe 		    int nents, enum dma_data_direction dir, unsigned long attrs)
252fffe3cc8SLogan Gunthorpe {
253fffe3cc8SLogan Gunthorpe 	int ret;
254fffe3cc8SLogan Gunthorpe 
255fffe3cc8SLogan Gunthorpe 	ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs);
256fffe3cc8SLogan Gunthorpe 	if (ret < 0)
257fffe3cc8SLogan Gunthorpe 		return 0;
258fffe3cc8SLogan Gunthorpe 	return ret;
259fffe3cc8SLogan Gunthorpe }
260d3fa60d7SChristoph Hellwig EXPORT_SYMBOL(dma_map_sg_attrs);
261d3fa60d7SChristoph Hellwig 
262fffe3cc8SLogan Gunthorpe /**
263fffe3cc8SLogan Gunthorpe  * dma_map_sgtable - Map the given buffer for DMA
264fffe3cc8SLogan Gunthorpe  * @dev:	The device for which to perform the DMA operation
265fffe3cc8SLogan Gunthorpe  * @sgt:	The sg_table object describing the buffer
266fffe3cc8SLogan Gunthorpe  * @dir:	DMA direction
267fffe3cc8SLogan Gunthorpe  * @attrs:	Optional DMA attributes for the map operation
268fffe3cc8SLogan Gunthorpe  *
269fffe3cc8SLogan Gunthorpe  * Maps a buffer described by a scatterlist stored in the given sg_table
270fffe3cc8SLogan Gunthorpe  * object for the @dir DMA operation by the @dev device. After success, the
271fffe3cc8SLogan Gunthorpe  * ownership for the buffer is transferred to the DMA domain.  One has to
272fffe3cc8SLogan Gunthorpe  * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
273fffe3cc8SLogan Gunthorpe  * ownership of the buffer back to the CPU domain before the CPU touches
274fffe3cc8SLogan Gunthorpe  * the buffer.
275fffe3cc8SLogan Gunthorpe  *
276fffe3cc8SLogan Gunthorpe  * Returns 0 on success or a negative error code on error. The following
277fffe3cc8SLogan Gunthorpe  * error codes are supported with the given meaning:
278fffe3cc8SLogan Gunthorpe  *
279011a9ce8SLogan Gunthorpe  *   -EINVAL		An invalid argument, unaligned access or other error
280fffe3cc8SLogan Gunthorpe  *			in usage. Will not succeed if retried.
281011a9ce8SLogan Gunthorpe  *   -ENOMEM		Insufficient resources (like memory or IOVA space) to
282fffe3cc8SLogan Gunthorpe  *			complete the mapping. Should succeed if retried later.
283011a9ce8SLogan Gunthorpe  *   -EIO		Legacy error code with an unknown meaning, e.g. this is
28484197024SLogan Gunthorpe  *			returned if a lower level call returned
28584197024SLogan Gunthorpe  *			DMA_MAPPING_ERROR.
28684197024SLogan Gunthorpe  *   -EREMOTEIO		The DMA device cannot access P2PDMA memory specified
28784197024SLogan Gunthorpe  *			in the sg_table. This will not succeed if retried.
288fffe3cc8SLogan Gunthorpe  */
289fffe3cc8SLogan Gunthorpe int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
290fffe3cc8SLogan Gunthorpe 		    enum dma_data_direction dir, unsigned long attrs)
291fffe3cc8SLogan Gunthorpe {
292fffe3cc8SLogan Gunthorpe 	int nents;
293fffe3cc8SLogan Gunthorpe 
294fffe3cc8SLogan Gunthorpe 	nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
295fffe3cc8SLogan Gunthorpe 	if (nents < 0)
296fffe3cc8SLogan Gunthorpe 		return nents;
297fffe3cc8SLogan Gunthorpe 	sgt->nents = nents;
298fffe3cc8SLogan Gunthorpe 	return 0;
299fffe3cc8SLogan Gunthorpe }
300fffe3cc8SLogan Gunthorpe EXPORT_SYMBOL_GPL(dma_map_sgtable);
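/*
 * Illustrative sketch (not part of this file): mapping a pre-built sg_table
 * with dma_map_sgtable() and walking the resulting DMA segments, then
 * unmapping with dma_unmap_sgtable(), which reuses the original nents.
 * "sgt" is assumed to have been filled by the caller and
 * program_hw_segment() is a stand-in for the driver's own hardware setup.
 *
 *	struct scatterlist *sg;
 *	int i, ret;
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
 *	if (ret)
 *		return ret;
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		program_hw_segment(sg_dma_address(sg), sg_dma_len(sg));
 *	// ... run the transfer ...
 *	dma_unmap_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
 */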
301fffe3cc8SLogan Gunthorpe 
302d3fa60d7SChristoph Hellwig void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
303d3fa60d7SChristoph Hellwig 				      int nents, enum dma_data_direction dir,
304d3fa60d7SChristoph Hellwig 				      unsigned long attrs)
305d3fa60d7SChristoph Hellwig {
306d3fa60d7SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
307d3fa60d7SChristoph Hellwig 
308d3fa60d7SChristoph Hellwig 	BUG_ON(!valid_dma_direction(dir));
309038eb433SSean Anderson 	trace_dma_unmap_sg(dev, sg, nents, dir, attrs);
310d3fa60d7SChristoph Hellwig 	debug_dma_unmap_sg(dev, sg, nents, dir);
3118d8d53cfSAlexey Kardashevskiy 	if (dma_map_direct(dev, ops) ||
3128d8d53cfSAlexey Kardashevskiy 	    arch_dma_unmap_sg_direct(dev, sg, nents))
313d3fa60d7SChristoph Hellwig 		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
314b5c58b2fSLeon Romanovsky 	else if (use_dma_iommu(dev))
315b5c58b2fSLeon Romanovsky 		iommu_dma_unmap_sg(dev, sg, nents, dir, attrs);
316b5c58b2fSLeon Romanovsky 	else if (ops->unmap_sg)
317d3fa60d7SChristoph Hellwig 		ops->unmap_sg(dev, sg, nents, dir, attrs);
318d3fa60d7SChristoph Hellwig }
319d3fa60d7SChristoph Hellwig EXPORT_SYMBOL(dma_unmap_sg_attrs);
320d3fa60d7SChristoph Hellwig 
321d3fa60d7SChristoph Hellwig dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
322d3fa60d7SChristoph Hellwig 		size_t size, enum dma_data_direction dir, unsigned long attrs)
323d3fa60d7SChristoph Hellwig {
324d3fa60d7SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
325d3fa60d7SChristoph Hellwig 	dma_addr_t addr = DMA_MAPPING_ERROR;
326d3fa60d7SChristoph Hellwig 
327d3fa60d7SChristoph Hellwig 	BUG_ON(!valid_dma_direction(dir));
328d3fa60d7SChristoph Hellwig 
329f959dcd6SThomas Tai 	if (WARN_ON_ONCE(!dev->dma_mask))
330f959dcd6SThomas Tai 		return DMA_MAPPING_ERROR;
331f959dcd6SThomas Tai 
332d35834c6SChristoph Hellwig 	if (dma_map_direct(dev, ops))
333d3fa60d7SChristoph Hellwig 		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
334b5c58b2fSLeon Romanovsky 	else if (use_dma_iommu(dev))
335b5c58b2fSLeon Romanovsky 		addr = iommu_dma_map_resource(dev, phys_addr, size, dir, attrs);
336d3fa60d7SChristoph Hellwig 	else if (ops->map_resource)
337d3fa60d7SChristoph Hellwig 		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
338d3fa60d7SChristoph Hellwig 
339038eb433SSean Anderson 	trace_dma_map_resource(dev, phys_addr, addr, size, dir, attrs);
340c2bbf9d1SHamza Mahfooz 	debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
341d3fa60d7SChristoph Hellwig 	return addr;
342d3fa60d7SChristoph Hellwig }
343d3fa60d7SChristoph Hellwig EXPORT_SYMBOL(dma_map_resource);
344d3fa60d7SChristoph Hellwig 
345d3fa60d7SChristoph Hellwig void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
346d3fa60d7SChristoph Hellwig 		enum dma_data_direction dir, unsigned long attrs)
347d3fa60d7SChristoph Hellwig {
348d3fa60d7SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
349d3fa60d7SChristoph Hellwig 
350d3fa60d7SChristoph Hellwig 	BUG_ON(!valid_dma_direction(dir));
351b5c58b2fSLeon Romanovsky 	if (dma_map_direct(dev, ops))
352b5c58b2fSLeon Romanovsky 		; /* nothing to do: uncached and no swiotlb */
353b5c58b2fSLeon Romanovsky 	else if (use_dma_iommu(dev))
354b5c58b2fSLeon Romanovsky 		iommu_dma_unmap_resource(dev, addr, size, dir, attrs);
355b5c58b2fSLeon Romanovsky 	else if (ops->unmap_resource)
356d3fa60d7SChristoph Hellwig 		ops->unmap_resource(dev, addr, size, dir, attrs);
357038eb433SSean Anderson 	trace_dma_unmap_resource(dev, addr, size, dir, attrs);
358d3fa60d7SChristoph Hellwig 	debug_dma_unmap_resource(dev, addr, size, dir);
359d3fa60d7SChristoph Hellwig }
360d3fa60d7SChristoph Hellwig EXPORT_SYMBOL(dma_unmap_resource);
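/*
 * Illustrative sketch (not part of this file): dma_map_resource() is meant
 * for MMIO physical addresses (e.g. another device's BAR), not for RAM.
 * "phys" is assumed to be such a physical address already known to the
 * driver; the 64 KiB size is made up.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_resource(dev, phys, SZ_64K, DMA_BIDIRECTIONAL, 0);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	// ... hand "dma" to the device for peer-to-peer style access ...
 *	dma_unmap_resource(dev, dma, SZ_64K, DMA_BIDIRECTIONAL, 0);
 */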
361d3fa60d7SChristoph Hellwig 
362fe7514b1SAlexander Lobakin #ifdef CONFIG_DMA_NEED_SYNC
363f406c8e4SAlexander Lobakin void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
364d3fa60d7SChristoph Hellwig 		enum dma_data_direction dir)
365d3fa60d7SChristoph Hellwig {
366d3fa60d7SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
367d3fa60d7SChristoph Hellwig 
368d3fa60d7SChristoph Hellwig 	BUG_ON(!valid_dma_direction(dir));
369d35834c6SChristoph Hellwig 	if (dma_map_direct(dev, ops))
370d3fa60d7SChristoph Hellwig 		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
371b5c58b2fSLeon Romanovsky 	else if (use_dma_iommu(dev))
372b5c58b2fSLeon Romanovsky 		iommu_dma_sync_single_for_cpu(dev, addr, size, dir);
373d3fa60d7SChristoph Hellwig 	else if (ops->sync_single_for_cpu)
374d3fa60d7SChristoph Hellwig 		ops->sync_single_for_cpu(dev, addr, size, dir);
375038eb433SSean Anderson 	trace_dma_sync_single_for_cpu(dev, addr, size, dir);
376d3fa60d7SChristoph Hellwig 	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
377d3fa60d7SChristoph Hellwig }
378f406c8e4SAlexander Lobakin EXPORT_SYMBOL(__dma_sync_single_for_cpu);
379d3fa60d7SChristoph Hellwig 
380f406c8e4SAlexander Lobakin void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
381d3fa60d7SChristoph Hellwig 		size_t size, enum dma_data_direction dir)
382d3fa60d7SChristoph Hellwig {
383d3fa60d7SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
384d3fa60d7SChristoph Hellwig 
385d3fa60d7SChristoph Hellwig 	BUG_ON(!valid_dma_direction(dir));
386d35834c6SChristoph Hellwig 	if (dma_map_direct(dev, ops))
387d3fa60d7SChristoph Hellwig 		dma_direct_sync_single_for_device(dev, addr, size, dir);
388b5c58b2fSLeon Romanovsky 	else if (use_dma_iommu(dev))
389b5c58b2fSLeon Romanovsky 		iommu_dma_sync_single_for_device(dev, addr, size, dir);
390d3fa60d7SChristoph Hellwig 	else if (ops->sync_single_for_device)
391d3fa60d7SChristoph Hellwig 		ops->sync_single_for_device(dev, addr, size, dir);
392038eb433SSean Anderson 	trace_dma_sync_single_for_device(dev, addr, size, dir);
393d3fa60d7SChristoph Hellwig 	debug_dma_sync_single_for_device(dev, addr, size, dir);
394d3fa60d7SChristoph Hellwig }
395f406c8e4SAlexander Lobakin EXPORT_SYMBOL(__dma_sync_single_for_device);
396d3fa60d7SChristoph Hellwig 
397f406c8e4SAlexander Lobakin void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
398d3fa60d7SChristoph Hellwig 		    int nelems, enum dma_data_direction dir)
399d3fa60d7SChristoph Hellwig {
400d3fa60d7SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
401d3fa60d7SChristoph Hellwig 
402d3fa60d7SChristoph Hellwig 	BUG_ON(!valid_dma_direction(dir));
403d35834c6SChristoph Hellwig 	if (dma_map_direct(dev, ops))
404d3fa60d7SChristoph Hellwig 		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
405b5c58b2fSLeon Romanovsky 	else if (use_dma_iommu(dev))
406b5c58b2fSLeon Romanovsky 		iommu_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
407d3fa60d7SChristoph Hellwig 	else if (ops->sync_sg_for_cpu)
408d3fa60d7SChristoph Hellwig 		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
409038eb433SSean Anderson 	trace_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
410d3fa60d7SChristoph Hellwig 	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
411d3fa60d7SChristoph Hellwig }
412f406c8e4SAlexander Lobakin EXPORT_SYMBOL(__dma_sync_sg_for_cpu);
413d3fa60d7SChristoph Hellwig 
414f406c8e4SAlexander Lobakin void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
415d3fa60d7SChristoph Hellwig 		       int nelems, enum dma_data_direction dir)
416d3fa60d7SChristoph Hellwig {
417d3fa60d7SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
418d3fa60d7SChristoph Hellwig 
419d3fa60d7SChristoph Hellwig 	BUG_ON(!valid_dma_direction(dir));
420d35834c6SChristoph Hellwig 	if (dma_map_direct(dev, ops))
421d3fa60d7SChristoph Hellwig 		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
422b5c58b2fSLeon Romanovsky 	else if (use_dma_iommu(dev))
423b5c58b2fSLeon Romanovsky 		iommu_dma_sync_sg_for_device(dev, sg, nelems, dir);
424d3fa60d7SChristoph Hellwig 	else if (ops->sync_sg_for_device)
425d3fa60d7SChristoph Hellwig 		ops->sync_sg_for_device(dev, sg, nelems, dir);
426038eb433SSean Anderson 	trace_dma_sync_sg_for_device(dev, sg, nelems, dir);
427d3fa60d7SChristoph Hellwig 	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
428d3fa60d7SChristoph Hellwig }
429f406c8e4SAlexander Lobakin EXPORT_SYMBOL(__dma_sync_sg_for_device);
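/*
 * Illustrative sketch (not part of this file): the ownership hand-off that
 * the sync helpers above implement.  A long-lived streaming mapping is given
 * back to the CPU to inspect data the device wrote and then returned to the
 * device, via the dma_sync_single_for_{cpu,device}() wrappers that end up
 * here when dma_need_sync() is true.  process_rx_data() is a stand-in for
 * driver code; "dma", "buf" and "len" come from the earlier mapping.
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	// CPU may now safely read what the device wrote
 *	process_rx_data(buf, len);
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	// ownership is back with the device; the CPU must not touch the buffer
 */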
430d3fa60d7SChristoph Hellwig 
431f406c8e4SAlexander Lobakin bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr)
432fe7514b1SAlexander Lobakin {
433fe7514b1SAlexander Lobakin 	const struct dma_map_ops *ops = get_dma_ops(dev);
434fe7514b1SAlexander Lobakin 
435fe7514b1SAlexander Lobakin 	if (dma_map_direct(dev, ops))
436f406c8e4SAlexander Lobakin 		/*
437a6016aacSAlexander Lobakin 		 * dma_skip_sync could've been reset on first SWIOTLB buffer
438f406c8e4SAlexander Lobakin 		 * mapping, but @dma_addr is not necessarily an SWIOTLB buffer.
439f406c8e4SAlexander Lobakin 		 * In this case, fall back to more granular check.
440f406c8e4SAlexander Lobakin 		 */
441fe7514b1SAlexander Lobakin 		return dma_direct_need_sync(dev, dma_addr);
442f406c8e4SAlexander Lobakin 	return true;
443fe7514b1SAlexander Lobakin }
444f406c8e4SAlexander Lobakin EXPORT_SYMBOL_GPL(__dma_need_sync);
445f406c8e4SAlexander Lobakin 
446f406c8e4SAlexander Lobakin static void dma_setup_need_sync(struct device *dev)
447f406c8e4SAlexander Lobakin {
448f406c8e4SAlexander Lobakin 	const struct dma_map_ops *ops = get_dma_ops(dev);
449f406c8e4SAlexander Lobakin 
450b5c58b2fSLeon Romanovsky 	if (dma_map_direct(dev, ops) || use_dma_iommu(dev))
451f406c8e4SAlexander Lobakin 		/*
452a6016aacSAlexander Lobakin 		 * dma_skip_sync will be reset to %false on first SWIOTLB buffer
453f406c8e4SAlexander Lobakin 		 * mapping, if any. During the device initialization, it's
454f406c8e4SAlexander Lobakin 		 * enough to check only for the DMA coherence.
455f406c8e4SAlexander Lobakin 		 */
456a6016aacSAlexander Lobakin 		dev->dma_skip_sync = dev_is_dma_coherent(dev);
457f406c8e4SAlexander Lobakin 	else if (!ops->sync_single_for_device && !ops->sync_single_for_cpu &&
458f406c8e4SAlexander Lobakin 		 !ops->sync_sg_for_device && !ops->sync_sg_for_cpu)
459f406c8e4SAlexander Lobakin 		/*
460f406c8e4SAlexander Lobakin 		 * Synchronization is not possible when none of DMA sync ops
461f406c8e4SAlexander Lobakin 		 * is set.
462f406c8e4SAlexander Lobakin 		 */
463a6016aacSAlexander Lobakin 		dev->dma_skip_sync = true;
464f406c8e4SAlexander Lobakin 	else
465a6016aacSAlexander Lobakin 		dev->dma_skip_sync = false;
466f406c8e4SAlexander Lobakin }
467f406c8e4SAlexander Lobakin #else /* !CONFIG_DMA_NEED_SYNC */
468f406c8e4SAlexander Lobakin static inline void dma_setup_need_sync(struct device *dev) { }
469f406c8e4SAlexander Lobakin #endif /* !CONFIG_DMA_NEED_SYNC */
4707249c1a5SChristoph Hellwig 
471148a97d5SDan Carpenter /*
472cf65a0f6SChristoph Hellwig  * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
473cf65a0f6SChristoph Hellwig  * that the intention is to allow exporting memory allocated via the
474cf65a0f6SChristoph Hellwig  * coherent DMA APIs through the dma_buf API, which only accepts a
475cf65a0f6SChristoph Hellwig  * scatterlist.  This presents a couple of problems:
476cf65a0f6SChristoph Hellwig  * 1. Not all memory allocated via the coherent DMA APIs is backed by
477cf65a0f6SChristoph Hellwig  *    a struct page
478cf65a0f6SChristoph Hellwig  * 2. Passing coherent DMA memory into the streaming APIs is not allowed
479cf65a0f6SChristoph Hellwig  *    as we will try to flush the memory through a different alias to that
480cf65a0f6SChristoph Hellwig  *    actually being used (and the flushes are redundant.)
481cf65a0f6SChristoph Hellwig  */
482cf65a0f6SChristoph Hellwig int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
483cf65a0f6SChristoph Hellwig 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
484cf65a0f6SChristoph Hellwig 		unsigned long attrs)
485cf65a0f6SChristoph Hellwig {
486cf65a0f6SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
487cf65a0f6SChristoph Hellwig 
488d35834c6SChristoph Hellwig 	if (dma_alloc_direct(dev, ops))
489cf65a0f6SChristoph Hellwig 		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
490cf65a0f6SChristoph Hellwig 				size, attrs);
491b5c58b2fSLeon Romanovsky 	if (use_dma_iommu(dev))
492b5c58b2fSLeon Romanovsky 		return iommu_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr,
493b5c58b2fSLeon Romanovsky 				size, attrs);
494cf65a0f6SChristoph Hellwig 	if (!ops->get_sgtable)
495cf65a0f6SChristoph Hellwig 		return -ENXIO;
496cf65a0f6SChristoph Hellwig 	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
4979406a49fSChristoph Hellwig }
4989406a49fSChristoph Hellwig EXPORT_SYMBOL(dma_get_sgtable_attrs);
499cf65a0f6SChristoph Hellwig 
5009406a49fSChristoph Hellwig #ifdef CONFIG_MMU
501cf65a0f6SChristoph Hellwig /*
502cf65a0f6SChristoph Hellwig  * Return the page attributes used for mapping dma_alloc_* memory, either in
5039406a49fSChristoph Hellwig  * kernel space if remapping is needed, or to userspace through dma_mmap_*.
5049406a49fSChristoph Hellwig  */
5059406a49fSChristoph Hellwig pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
506cf65a0f6SChristoph Hellwig {
507efa70f2fSChristoph Hellwig 	if (dev_is_dma_coherent(dev))
508cf65a0f6SChristoph Hellwig 		return prot;
509cf65a0f6SChristoph Hellwig #ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
510cf65a0f6SChristoph Hellwig 	if (attrs & DMA_ATTR_WRITE_COMBINE)
511cf65a0f6SChristoph Hellwig 		return pgprot_writecombine(prot);
512cf65a0f6SChristoph Hellwig #endif
513cf65a0f6SChristoph Hellwig 	return pgprot_dmacoherent(prot);
514cf65a0f6SChristoph Hellwig }
515cf65a0f6SChristoph Hellwig #endif /* CONFIG_MMU */
5169406a49fSChristoph Hellwig 
517cf65a0f6SChristoph Hellwig /**
518cf65a0f6SChristoph Hellwig  * dma_can_mmap - check if a given device supports dma_mmap_*
519cf65a0f6SChristoph Hellwig  * @dev: device to check
520cf65a0f6SChristoph Hellwig  *
521cf65a0f6SChristoph Hellwig  * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
522cf65a0f6SChristoph Hellwig  * map DMA allocations to userspace.
52358b04406SChristoph Hellwig  */
52458b04406SChristoph Hellwig bool dma_can_mmap(struct device *dev)
525cf65a0f6SChristoph Hellwig {
526cf65a0f6SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
527cf65a0f6SChristoph Hellwig 
528d35834c6SChristoph Hellwig 	if (dma_alloc_direct(dev, ops))
529cf65a0f6SChristoph Hellwig 		return dma_direct_can_mmap(dev);
530b5c58b2fSLeon Romanovsky 	if (use_dma_iommu(dev))
531b5c58b2fSLeon Romanovsky 		return true;
53258b04406SChristoph Hellwig 	return ops->mmap != NULL;
53358b04406SChristoph Hellwig }
534cf65a0f6SChristoph Hellwig EXPORT_SYMBOL_GPL(dma_can_mmap);
53558b04406SChristoph Hellwig 
536cf65a0f6SChristoph Hellwig /**
537cf65a0f6SChristoph Hellwig  * dma_mmap_attrs - map a coherent DMA allocation into user space
538cf65a0f6SChristoph Hellwig  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
539cf65a0f6SChristoph Hellwig  * @vma: vm_area_struct describing requested user mapping
54058b04406SChristoph Hellwig  * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
54158b04406SChristoph Hellwig  * @dma_addr: device-view address returned from dma_alloc_attrs
542cf65a0f6SChristoph Hellwig  * @size: size of memory originally requested in dma_alloc_attrs
54358b04406SChristoph Hellwig  * @attrs: attributes of mapping properties requested in dma_alloc_attrs
54458b04406SChristoph Hellwig  *
54558b04406SChristoph Hellwig  * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
54658b04406SChristoph Hellwig  * space.  The coherent DMA buffer must not be freed by the driver until the
54758b04406SChristoph Hellwig  * user space mapping has been released.
54858b04406SChristoph Hellwig  */
54958b04406SChristoph Hellwig int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
55058b04406SChristoph Hellwig 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
55158b04406SChristoph Hellwig 		unsigned long attrs)
55258b04406SChristoph Hellwig {
55358b04406SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
55458b04406SChristoph Hellwig 
555d35834c6SChristoph Hellwig 	if (dma_alloc_direct(dev, ops))
55658b04406SChristoph Hellwig 		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
55758b04406SChristoph Hellwig 				attrs);
558b5c58b2fSLeon Romanovsky 	if (use_dma_iommu(dev))
559b5c58b2fSLeon Romanovsky 		return iommu_dma_mmap(dev, vma, cpu_addr, dma_addr, size,
560b5c58b2fSLeon Romanovsky 				      attrs);
56105887cb6SChristoph Hellwig 	if (!ops->mmap)
56205887cb6SChristoph Hellwig 		return -ENXIO;
56305887cb6SChristoph Hellwig 	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
56405887cb6SChristoph Hellwig }
56505887cb6SChristoph Hellwig EXPORT_SYMBOL(dma_mmap_attrs);
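/*
 * Illustrative sketch (not part of this file): a hypothetical character
 * device exposing a coherent buffer to userspace via the dma_mmap_coherent()
 * wrapper around dma_mmap_attrs().  "struct foo" and its fields are made up
 * for the example.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo *foo = file->private_data;
 *
 *		if (!dma_can_mmap(foo->dev))
 *			return -ENXIO;
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->dma_addr, foo->size);
 *	}
 */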
56605887cb6SChristoph Hellwig 
56705887cb6SChristoph Hellwig u64 dma_get_required_mask(struct device *dev)
56805887cb6SChristoph Hellwig {
56905887cb6SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
57005887cb6SChristoph Hellwig 
571d35834c6SChristoph Hellwig 	if (dma_alloc_direct(dev, ops))
57205887cb6SChristoph Hellwig 		return dma_direct_get_required_mask(dev);
573b348b6d1SLeon Romanovsky 
574b348b6d1SLeon Romanovsky 	if (use_dma_iommu(dev))
575b348b6d1SLeon Romanovsky 		return DMA_BIT_MASK(32);
576b348b6d1SLeon Romanovsky 
57705887cb6SChristoph Hellwig 	if (ops->get_required_mask)
57805887cb6SChristoph Hellwig 		return ops->get_required_mask(dev);
57905887cb6SChristoph Hellwig 
58005887cb6SChristoph Hellwig 	/*
58105887cb6SChristoph Hellwig 	 * We require every DMA ops implementation to at least support a 32-bit
58205887cb6SChristoph Hellwig 	 * DMA mask (and use bounce buffering if that isn't supported in
58305887cb6SChristoph Hellwig 	 * hardware).  As the direct mapping code has its own routine to
58405887cb6SChristoph Hellwig 	 * actually report an optimal mask we default to 32-bit here as that
58505887cb6SChristoph Hellwig 	 * is the right thing for most IOMMUs, and at least not actively
58605887cb6SChristoph Hellwig 	 * harmful in general.
58705887cb6SChristoph Hellwig 	 */
58805887cb6SChristoph Hellwig 	return DMA_BIT_MASK(32);
58905887cb6SChristoph Hellwig }
59005887cb6SChristoph Hellwig EXPORT_SYMBOL_GPL(dma_get_required_mask);
59105887cb6SChristoph Hellwig 
59205887cb6SChristoph Hellwig void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
59305887cb6SChristoph Hellwig 		gfp_t flag, unsigned long attrs)
59405887cb6SChristoph Hellwig {
59505887cb6SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
59605887cb6SChristoph Hellwig 	void *cpu_addr;
59705887cb6SChristoph Hellwig 
59805887cb6SChristoph Hellwig 	WARN_ON_ONCE(!dev->coherent_dma_mask);
59905887cb6SChristoph Hellwig 
600ffcb7545SChristoph Hellwig 	/*
601ffcb7545SChristoph Hellwig 	 * DMA allocations can never be turned back into a page pointer, so
602ffcb7545SChristoph Hellwig 	 * requesting compound pages doesn't make sense (and can't even be
603ffcb7545SChristoph Hellwig 	 * supported at all by various backends).
604ffcb7545SChristoph Hellwig 	 */
605ffcb7545SChristoph Hellwig 	if (WARN_ON_ONCE(flag & __GFP_COMP))
606ffcb7545SChristoph Hellwig 		return NULL;
607ffcb7545SChristoph Hellwig 
60868b6dbf1SSean Anderson 	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) {
60968b6dbf1SSean Anderson 		trace_dma_alloc(dev, cpu_addr, *dma_handle, size,
61068b6dbf1SSean Anderson 				DMA_BIDIRECTIONAL, flag, attrs);
6117249c1a5SChristoph Hellwig 		return cpu_addr;
61268b6dbf1SSean Anderson 	}
6137249c1a5SChristoph Hellwig 
6147249c1a5SChristoph Hellwig 	/* let the implementation decide on the zone to allocate from: */
6157249c1a5SChristoph Hellwig 	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
6167249c1a5SChristoph Hellwig 
61768b6dbf1SSean Anderson 	if (dma_alloc_direct(dev, ops)) {
618356da6d0SChristoph Hellwig 		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
61968b6dbf1SSean Anderson 	} else if (use_dma_iommu(dev)) {
620b5c58b2fSLeon Romanovsky 		cpu_addr = iommu_dma_alloc(dev, size, dma_handle, flag, attrs);
62168b6dbf1SSean Anderson 	} else if (ops->alloc) {
622356da6d0SChristoph Hellwig 		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
62368b6dbf1SSean Anderson 	} else {
62468b6dbf1SSean Anderson 		trace_dma_alloc(dev, NULL, 0, size, DMA_BIDIRECTIONAL, flag,
62568b6dbf1SSean Anderson 				attrs);
6267249c1a5SChristoph Hellwig 		return NULL;
62768b6dbf1SSean Anderson 	}
6287249c1a5SChristoph Hellwig 
6293afff779SSean Anderson 	trace_dma_alloc(dev, cpu_addr, *dma_handle, size, DMA_BIDIRECTIONAL,
6303afff779SSean Anderson 			flag, attrs);
631c2bbf9d1SHamza Mahfooz 	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
6327249c1a5SChristoph Hellwig 	return cpu_addr;
6337249c1a5SChristoph Hellwig }
6347249c1a5SChristoph Hellwig EXPORT_SYMBOL(dma_alloc_attrs);
6357249c1a5SChristoph Hellwig 
6367249c1a5SChristoph Hellwig void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
6377249c1a5SChristoph Hellwig 		dma_addr_t dma_handle, unsigned long attrs)
6387249c1a5SChristoph Hellwig {
6397249c1a5SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
6407249c1a5SChristoph Hellwig 
6417249c1a5SChristoph Hellwig 	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
6427249c1a5SChristoph Hellwig 		return;
6437249c1a5SChristoph Hellwig 	/*
6447249c1a5SChristoph Hellwig 	 * On non-coherent platforms which implement DMA-coherent buffers via
6457249c1a5SChristoph Hellwig 	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
6467249c1a5SChristoph Hellwig 	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
6477249c1a5SChristoph Hellwig 	 * sleep on some machines, and b) an indication that the driver is
6487249c1a5SChristoph Hellwig 	 * probably misusing the coherent API anyway.
6497249c1a5SChristoph Hellwig 	 */
6507249c1a5SChristoph Hellwig 	WARN_ON(irqs_disabled());
6517249c1a5SChristoph Hellwig 
65268b6dbf1SSean Anderson 	trace_dma_free(dev, cpu_addr, dma_handle, size, DMA_BIDIRECTIONAL,
65368b6dbf1SSean Anderson 		       attrs);
654356da6d0SChristoph Hellwig 	if (!cpu_addr)
6557249c1a5SChristoph Hellwig 		return;
6567249c1a5SChristoph Hellwig 
6577249c1a5SChristoph Hellwig 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
658d35834c6SChristoph Hellwig 	if (dma_alloc_direct(dev, ops))
659356da6d0SChristoph Hellwig 		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
660b5c58b2fSLeon Romanovsky 	else if (use_dma_iommu(dev))
661b5c58b2fSLeon Romanovsky 		iommu_dma_free(dev, size, cpu_addr, dma_handle, attrs);
662356da6d0SChristoph Hellwig 	else if (ops->free)
6637249c1a5SChristoph Hellwig 		ops->free(dev, size, cpu_addr, dma_handle, attrs);
6647249c1a5SChristoph Hellwig }
6657249c1a5SChristoph Hellwig EXPORT_SYMBOL(dma_free_attrs);
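/*
 * Illustrative sketch (not part of this file): the plain coherent alloc/free
 * pair built on the two functions above, via the dma_alloc_coherent()/
 * dma_free_coherent() wrappers.  "ring", "ring_size" and "ring_dma" are
 * placeholder names for a descriptor-ring style buffer that lives for the
 * lifetime of the device.
 *
 *	ring = dma_alloc_coherent(dev, ring_size, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	// ... CPU and device share "ring" without explicit sync calls ...
 *	dma_free_coherent(dev, ring_size, ring, ring_dma);
 */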
6667249c1a5SChristoph Hellwig 
667198c50e2SChristoph Hellwig static struct page *__dma_alloc_pages(struct device *dev, size_t size,
668efa70f2fSChristoph Hellwig 		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
669efa70f2fSChristoph Hellwig {
670efa70f2fSChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
671efa70f2fSChristoph Hellwig 
672efa70f2fSChristoph Hellwig 	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
673efa70f2fSChristoph Hellwig 		return NULL;
674efa70f2fSChristoph Hellwig 	if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
675efa70f2fSChristoph Hellwig 		return NULL;
6763622b86fSChristoph Hellwig 	if (WARN_ON_ONCE(gfp & __GFP_COMP))
6773622b86fSChristoph Hellwig 		return NULL;
678efa70f2fSChristoph Hellwig 
679efa70f2fSChristoph Hellwig 	size = PAGE_ALIGN(size);
680efa70f2fSChristoph Hellwig 	if (dma_alloc_direct(dev, ops))
681198c50e2SChristoph Hellwig 		return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
682b5c58b2fSLeon Romanovsky 	if (use_dma_iommu(dev))
683b5c58b2fSLeon Romanovsky 		return dma_common_alloc_pages(dev, size, dma_handle, dir, gfp);
6848a2f1187SSuren Baghdasaryan 	if (!ops->alloc_pages_op)
685efa70f2fSChristoph Hellwig 		return NULL;
6868a2f1187SSuren Baghdasaryan 	return ops->alloc_pages_op(dev, size, dma_handle, dir, gfp);
687198c50e2SChristoph Hellwig }
688efa70f2fSChristoph Hellwig 
689198c50e2SChristoph Hellwig struct page *dma_alloc_pages(struct device *dev, size_t size,
690198c50e2SChristoph Hellwig 		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
691198c50e2SChristoph Hellwig {
692198c50e2SChristoph Hellwig 	struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);
693198c50e2SChristoph Hellwig 
694038eb433SSean Anderson 	if (page) {
695c4484ab8SSean Anderson 		trace_dma_alloc_pages(dev, page_to_virt(page), *dma_handle,
696c4484ab8SSean Anderson 				      size, dir, gfp, 0);
697c2bbf9d1SHamza Mahfooz 		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
69868b6dbf1SSean Anderson 	} else {
69968b6dbf1SSean Anderson 		trace_dma_alloc_pages(dev, NULL, 0, size, dir, gfp, 0);
700038eb433SSean Anderson 	}
701efa70f2fSChristoph Hellwig 	return page;
702efa70f2fSChristoph Hellwig }
703efa70f2fSChristoph Hellwig EXPORT_SYMBOL_GPL(dma_alloc_pages);
704efa70f2fSChristoph Hellwig 
705198c50e2SChristoph Hellwig static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
706efa70f2fSChristoph Hellwig 		dma_addr_t dma_handle, enum dma_data_direction dir)
707efa70f2fSChristoph Hellwig {
708efa70f2fSChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
709efa70f2fSChristoph Hellwig 
710efa70f2fSChristoph Hellwig 	size = PAGE_ALIGN(size);
711efa70f2fSChristoph Hellwig 	if (dma_alloc_direct(dev, ops))
712efa70f2fSChristoph Hellwig 		dma_direct_free_pages(dev, size, page, dma_handle, dir);
713b5c58b2fSLeon Romanovsky 	else if (use_dma_iommu(dev))
714b5c58b2fSLeon Romanovsky 		dma_common_free_pages(dev, size, page, dma_handle, dir);
715efa70f2fSChristoph Hellwig 	else if (ops->free_pages)
716efa70f2fSChristoph Hellwig 		ops->free_pages(dev, size, page, dma_handle, dir);
717efa70f2fSChristoph Hellwig }
718198c50e2SChristoph Hellwig 
719198c50e2SChristoph Hellwig void dma_free_pages(struct device *dev, size_t size, struct page *page,
720198c50e2SChristoph Hellwig 		dma_addr_t dma_handle, enum dma_data_direction dir)
721198c50e2SChristoph Hellwig {
722c4484ab8SSean Anderson 	trace_dma_free_pages(dev, page_to_virt(page), dma_handle, size, dir, 0);
723198c50e2SChristoph Hellwig 	debug_dma_unmap_page(dev, dma_handle, size, dir);
724198c50e2SChristoph Hellwig 	__dma_free_pages(dev, size, page, dma_handle, dir);
725198c50e2SChristoph Hellwig }
726efa70f2fSChristoph Hellwig EXPORT_SYMBOL_GPL(dma_free_pages);
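/*
 * Illustrative sketch (not part of this file): dma_alloc_pages() hands back
 * real struct pages mapped for a fixed direction; unlike dma_alloc_coherent()
 * the memory may be non-coherent, so CPU accesses are expected to be bracketed
 * by the dma_sync_*() helpers.  The 64 KiB size is made up for the example.
 *
 *	struct page *page;
 *	dma_addr_t dma;
 *	void *vaddr;
 *
 *	page = dma_alloc_pages(dev, SZ_64K, &dma, DMA_FROM_DEVICE, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	vaddr = page_address(page);
 *	// sync before the CPU reads, e.g. dma_sync_single_for_cpu(dev, dma, ...)
 *	dma_free_pages(dev, SZ_64K, page, dma, DMA_FROM_DEVICE);
 */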
727efa70f2fSChristoph Hellwig 
728eedb0b12SChristoph Hellwig int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
729eedb0b12SChristoph Hellwig 		size_t size, struct page *page)
730eedb0b12SChristoph Hellwig {
731eedb0b12SChristoph Hellwig 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
732eedb0b12SChristoph Hellwig 
733eedb0b12SChristoph Hellwig 	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
734eedb0b12SChristoph Hellwig 		return -ENXIO;
735eedb0b12SChristoph Hellwig 	return remap_pfn_range(vma, vma->vm_start,
736eedb0b12SChristoph Hellwig 			       page_to_pfn(page) + vma->vm_pgoff,
737eedb0b12SChristoph Hellwig 			       vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
738eedb0b12SChristoph Hellwig }
739eedb0b12SChristoph Hellwig EXPORT_SYMBOL_GPL(dma_mmap_pages);
740eedb0b12SChristoph Hellwig 
7417d5b5738SChristoph Hellwig static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
7427d5b5738SChristoph Hellwig 		enum dma_data_direction dir, gfp_t gfp)
7437d5b5738SChristoph Hellwig {
7447d5b5738SChristoph Hellwig 	struct sg_table *sgt;
7457d5b5738SChristoph Hellwig 	struct page *page;
7467d5b5738SChristoph Hellwig 
7477d5b5738SChristoph Hellwig 	sgt = kmalloc(sizeof(*sgt), gfp);
7487d5b5738SChristoph Hellwig 	if (!sgt)
7497d5b5738SChristoph Hellwig 		return NULL;
7507d5b5738SChristoph Hellwig 	if (sg_alloc_table(sgt, 1, gfp))
7517d5b5738SChristoph Hellwig 		goto out_free_sgt;
7527d5b5738SChristoph Hellwig 	page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);
7537d5b5738SChristoph Hellwig 	if (!page)
7547d5b5738SChristoph Hellwig 		goto out_free_table;
7557d5b5738SChristoph Hellwig 	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
7567d5b5738SChristoph Hellwig 	sg_dma_len(sgt->sgl) = sgt->sgl->length;
7577d5b5738SChristoph Hellwig 	return sgt;
7587d5b5738SChristoph Hellwig out_free_table:
7597d5b5738SChristoph Hellwig 	sg_free_table(sgt);
7607d5b5738SChristoph Hellwig out_free_sgt:
7617d5b5738SChristoph Hellwig 	kfree(sgt);
7627d5b5738SChristoph Hellwig 	return NULL;
7637d5b5738SChristoph Hellwig }
7647d5b5738SChristoph Hellwig 
7657d5b5738SChristoph Hellwig struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
7667d5b5738SChristoph Hellwig 		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
7677d5b5738SChristoph Hellwig {
7687d5b5738SChristoph Hellwig 	struct sg_table *sgt;
7697d5b5738SChristoph Hellwig 
7707d5b5738SChristoph Hellwig 	if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
7717d5b5738SChristoph Hellwig 		return NULL;
7723622b86fSChristoph Hellwig 	if (WARN_ON_ONCE(gfp & __GFP_COMP))
7733622b86fSChristoph Hellwig 		return NULL;
7747d5b5738SChristoph Hellwig 
775bb0e3919SChristoph Hellwig 	if (use_dma_iommu(dev))
776b5c58b2fSLeon Romanovsky 		sgt = iommu_dma_alloc_noncontiguous(dev, size, dir, gfp, attrs);
7777d5b5738SChristoph Hellwig 	else
7787d5b5738SChristoph Hellwig 		sgt = alloc_single_sgt(dev, size, dir, gfp);
7797d5b5738SChristoph Hellwig 
7807d5b5738SChristoph Hellwig 	if (sgt) {
7817d5b5738SChristoph Hellwig 		sgt->nents = 1;
782c4484ab8SSean Anderson 		trace_dma_alloc_sgt(dev, sgt, size, dir, gfp, attrs);
783c2bbf9d1SHamza Mahfooz 		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
78468b6dbf1SSean Anderson 	} else {
785d5bbfbadSSean Anderson 		trace_dma_alloc_sgt_err(dev, NULL, 0, size, dir, gfp, attrs);
7867d5b5738SChristoph Hellwig 	}
7877d5b5738SChristoph Hellwig 	return sgt;
7887d5b5738SChristoph Hellwig }
7897d5b5738SChristoph Hellwig EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous);
7907d5b5738SChristoph Hellwig 
7917d5b5738SChristoph Hellwig static void free_single_sgt(struct device *dev, size_t size,
7927d5b5738SChristoph Hellwig 		struct sg_table *sgt, enum dma_data_direction dir)
7937d5b5738SChristoph Hellwig {
7947d5b5738SChristoph Hellwig 	__dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
7957d5b5738SChristoph Hellwig 			 dir);
7967d5b5738SChristoph Hellwig 	sg_free_table(sgt);
7977d5b5738SChristoph Hellwig 	kfree(sgt);
7987d5b5738SChristoph Hellwig }
7997d5b5738SChristoph Hellwig 
8007d5b5738SChristoph Hellwig void dma_free_noncontiguous(struct device *dev, size_t size,
8017d5b5738SChristoph Hellwig 		struct sg_table *sgt, enum dma_data_direction dir)
8027d5b5738SChristoph Hellwig {
803c4484ab8SSean Anderson 	trace_dma_free_sgt(dev, sgt, size, dir);
8047d5b5738SChristoph Hellwig 	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
805bb0e3919SChristoph Hellwig 
806bb0e3919SChristoph Hellwig 	if (use_dma_iommu(dev))
807b5c58b2fSLeon Romanovsky 		iommu_dma_free_noncontiguous(dev, size, sgt, dir);
8087d5b5738SChristoph Hellwig 	else
8097d5b5738SChristoph Hellwig 		free_single_sgt(dev, size, sgt, dir);
8107d5b5738SChristoph Hellwig }
8117d5b5738SChristoph Hellwig EXPORT_SYMBOL_GPL(dma_free_noncontiguous);
8127d5b5738SChristoph Hellwig 
8137d5b5738SChristoph Hellwig void *dma_vmap_noncontiguous(struct device *dev, size_t size,
8147d5b5738SChristoph Hellwig 		struct sg_table *sgt)
8157d5b5738SChristoph Hellwig {
8167d5b5738SChristoph Hellwig 
817bb0e3919SChristoph Hellwig 	if (use_dma_iommu(dev))
818bb0e3919SChristoph Hellwig 		return iommu_dma_vmap_noncontiguous(dev, size, sgt);
819bb0e3919SChristoph Hellwig 
8207d5b5738SChristoph Hellwig 	return page_address(sg_page(sgt->sgl));
8217d5b5738SChristoph Hellwig }
8227d5b5738SChristoph Hellwig EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);
8237d5b5738SChristoph Hellwig 
8247d5b5738SChristoph Hellwig void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
8257d5b5738SChristoph Hellwig {
826bb0e3919SChristoph Hellwig 	if (use_dma_iommu(dev))
827bb0e3919SChristoph Hellwig 		iommu_dma_vunmap_noncontiguous(dev, vaddr);
8287d5b5738SChristoph Hellwig }
8297d5b5738SChristoph Hellwig EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);
8307d5b5738SChristoph Hellwig 
8317d5b5738SChristoph Hellwig int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
8327d5b5738SChristoph Hellwig 		size_t size, struct sg_table *sgt)
8337d5b5738SChristoph Hellwig {
834bb0e3919SChristoph Hellwig 	if (use_dma_iommu(dev))
835bb0e3919SChristoph Hellwig 		return iommu_dma_mmap_noncontiguous(dev, vma, size, sgt);
8367d5b5738SChristoph Hellwig 	return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
8377d5b5738SChristoph Hellwig }
8387d5b5738SChristoph Hellwig EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
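/*
 * Illustrative sketch (not part of this file): the non-contiguous API used
 * together, for a device that can consume a scatter/gather list but whose
 * driver also wants a contiguous kernel mapping of the buffer.  "size" is
 * assumed to come from the caller.
 *
 *	struct sg_table *sgt;
 *	void *vaddr;
 *
 *	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
 *				      GFP_KERNEL, 0);
 *	if (!sgt)
 *		return -ENOMEM;
 *	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
 *	if (!vaddr)
 *		goto free;
 *	// ... CPU access via vaddr, device access via the sgt DMA addresses ...
 *	dma_vunmap_noncontiguous(dev, vaddr);
 * free:
 *	dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
 */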
8397d5b5738SChristoph Hellwig 
8409fc18f6dSChristoph Hellwig static int dma_supported(struct device *dev, u64 mask)
8417249c1a5SChristoph Hellwig {
8427249c1a5SChristoph Hellwig 	const struct dma_map_ops *ops = get_dma_ops(dev);
8437249c1a5SChristoph Hellwig 
844a5fb217fSChristoph Hellwig 	if (use_dma_iommu(dev)) {
845a5fb217fSChristoph Hellwig 		if (WARN_ON(ops))
846b5c58b2fSLeon Romanovsky 			return false;
847f45cfab2SLeon Romanovsky 		return true;
848a5fb217fSChristoph Hellwig 	}
849a5fb217fSChristoph Hellwig 
850d35834c6SChristoph Hellwig 	/*
851a5fb217fSChristoph Hellwig 	 * ->dma_supported sets and clears the bypass flag, so ignore it here
852a5fb217fSChristoph Hellwig 	 * and always call into the method if there is one.
853d35834c6SChristoph Hellwig 	 */
854a5fb217fSChristoph Hellwig 	if (ops) {
8558b1cce9fSThierry Reding 		if (!ops->dma_supported)
856a5fb217fSChristoph Hellwig 			return true;
8577249c1a5SChristoph Hellwig 		return ops->dma_supported(dev, mask);
8587249c1a5SChristoph Hellwig 	}
8597249c1a5SChristoph Hellwig 
860a5fb217fSChristoph Hellwig 	return dma_direct_supported(dev, mask);
861a5fb217fSChristoph Hellwig }
862a5fb217fSChristoph Hellwig 
863159bf192SLogan Gunthorpe bool dma_pci_p2pdma_supported(struct device *dev)
864159bf192SLogan Gunthorpe {
865159bf192SLogan Gunthorpe 	const struct dma_map_ops *ops = get_dma_ops(dev);
866159bf192SLogan Gunthorpe 
867159bf192SLogan Gunthorpe 	/*
868159bf192SLogan Gunthorpe 	 * Note: dma_ops_bypass is not checked here because P2PDMA should
869159bf192SLogan Gunthorpe 	 * not be used with dma mapping ops that do not have support even
870159bf192SLogan Gunthorpe 	 * if the specific device is bypassing them.
871159bf192SLogan Gunthorpe 	 */
872159bf192SLogan Gunthorpe 
873b5c58b2fSLeon Romanovsky 	/* if ops is not set, dma direct and default IOMMU support P2PDMA */
874b5c58b2fSLeon Romanovsky 	return !ops;
875159bf192SLogan Gunthorpe }
876159bf192SLogan Gunthorpe EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported);
877159bf192SLogan Gunthorpe 
8787249c1a5SChristoph Hellwig int dma_set_mask(struct device *dev, u64 mask)
8797249c1a5SChristoph Hellwig {
8804a54d16fSChristoph Hellwig 	/*
8814a54d16fSChristoph Hellwig 	 * Truncate the mask to the actually supported dma_addr_t width to
8824a54d16fSChristoph Hellwig 	 * avoid generating unsupportable addresses.
8834a54d16fSChristoph Hellwig 	 */
8844a54d16fSChristoph Hellwig 	mask = (dma_addr_t)mask;
8854a54d16fSChristoph Hellwig 
8867249c1a5SChristoph Hellwig 	if (!dev->dma_mask || !dma_supported(dev, mask))
8877249c1a5SChristoph Hellwig 		return -EIO;
8887249c1a5SChristoph Hellwig 
88911ddce15SChristoph Hellwig 	arch_dma_set_mask(dev, mask);
8907249c1a5SChristoph Hellwig 	*dev->dma_mask = mask;
891f406c8e4SAlexander Lobakin 	dma_setup_need_sync(dev);
892f406c8e4SAlexander Lobakin 
8937249c1a5SChristoph Hellwig 	return 0;
8947249c1a5SChristoph Hellwig }
8957249c1a5SChristoph Hellwig EXPORT_SYMBOL(dma_set_mask);
8967249c1a5SChristoph Hellwig 
8977249c1a5SChristoph Hellwig int dma_set_coherent_mask(struct device *dev, u64 mask)
8987249c1a5SChristoph Hellwig {
8994a54d16fSChristoph Hellwig 	/*
9004a54d16fSChristoph Hellwig 	 * Truncate the mask to the actually supported dma_addr_t width to
9014a54d16fSChristoph Hellwig 	 * avoid generating unsupportable addresses.
9024a54d16fSChristoph Hellwig 	 */
9034a54d16fSChristoph Hellwig 	mask = (dma_addr_t)mask;
9044a54d16fSChristoph Hellwig 
9057249c1a5SChristoph Hellwig 	if (!dma_supported(dev, mask))
9067249c1a5SChristoph Hellwig 		return -EIO;
9077249c1a5SChristoph Hellwig 
9087249c1a5SChristoph Hellwig 	dev->coherent_dma_mask = mask;
9097249c1a5SChristoph Hellwig 	return 0;
9107249c1a5SChristoph Hellwig }
9117249c1a5SChristoph Hellwig EXPORT_SYMBOL(dma_set_coherent_mask);
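/*
 * Illustrative sketch (not part of this file): the common probe-time pattern
 * of setting both the streaming and coherent masks at once with the
 * dma_set_mask_and_coherent() wrapper, falling back to 32 bits if the wider
 * mask is rejected.
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */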
9128ddbe594SChristoph Hellwig 
9132042c352SBalbir Singh static bool __dma_addressing_limited(struct device *dev)
9148ae0e970SJia He {
915a409d960SJia He 	const struct dma_map_ops *ops = get_dma_ops(dev);
916a409d960SJia He 
917a409d960SJia He 	if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
918a409d960SJia He 			 dma_get_required_mask(dev))
919a409d960SJia He 		return true;
920a409d960SJia He 
921b348b6d1SLeon Romanovsky 	if (unlikely(ops) || use_dma_iommu(dev))
922a409d960SJia He 		return false;
923a409d960SJia He 	return !dma_direct_all_ram_mapped(dev);
9248ae0e970SJia He }
9252042c352SBalbir Singh 
926*cae5572eSBalbir Singh /**
927*cae5572eSBalbir Singh  * dma_addressing_limited - return if the device is addressing limited
928*cae5572eSBalbir Singh  * @dev:	device to check
929*cae5572eSBalbir Singh  *
930*cae5572eSBalbir Singh  * Return %true if the device's DMA mask is too small to address all memory in
931*cae5572eSBalbir Singh  * the system, else %false.  Lack of addressing bits is the prime reason for
932*cae5572eSBalbir Singh  * bounce buffering, but might not be the only one.
933*cae5572eSBalbir Singh  */
9342042c352SBalbir Singh bool dma_addressing_limited(struct device *dev)
9352042c352SBalbir Singh {
9362042c352SBalbir Singh 	if (!__dma_addressing_limited(dev))
9372042c352SBalbir Singh 		return false;
9382042c352SBalbir Singh 
9392042c352SBalbir Singh 	dev_dbg(dev, "device is DMA addressing limited\n");
9402042c352SBalbir Singh 	return true;
9412042c352SBalbir Singh }
9428ae0e970SJia He EXPORT_SYMBOL_GPL(dma_addressing_limited);
9438ae0e970SJia He 
944133d624bSJoerg Roedel size_t dma_max_mapping_size(struct device *dev)
945133d624bSJoerg Roedel {
946133d624bSJoerg Roedel 	const struct dma_map_ops *ops = get_dma_ops(dev);
947133d624bSJoerg Roedel 	size_t size = SIZE_MAX;
948133d624bSJoerg Roedel 
949d35834c6SChristoph Hellwig 	if (dma_map_direct(dev, ops))
950133d624bSJoerg Roedel 		size = dma_direct_max_mapping_size(dev);
951b5c58b2fSLeon Romanovsky 	else if (use_dma_iommu(dev))
952b5c58b2fSLeon Romanovsky 		size = iommu_dma_max_mapping_size(dev);
953133d624bSJoerg Roedel 	else if (ops && ops->max_mapping_size)
954133d624bSJoerg Roedel 		size = ops->max_mapping_size(dev);
955133d624bSJoerg Roedel 
956133d624bSJoerg Roedel 	return size;
957133d624bSJoerg Roedel }
958133d624bSJoerg Roedel EXPORT_SYMBOL_GPL(dma_max_mapping_size);
9596ba99411SYoshihiro Shimoda 
960a229cc14SJohn Garry size_t dma_opt_mapping_size(struct device *dev)
961a229cc14SJohn Garry {
962a229cc14SJohn Garry 	const struct dma_map_ops *ops = get_dma_ops(dev);
963a229cc14SJohn Garry 	size_t size = SIZE_MAX;
964a229cc14SJohn Garry 
965b5c58b2fSLeon Romanovsky 	if (use_dma_iommu(dev))
966b5c58b2fSLeon Romanovsky 		size = iommu_dma_opt_mapping_size();
967b5c58b2fSLeon Romanovsky 	else if (ops && ops->opt_mapping_size)
968a229cc14SJohn Garry 		size = ops->opt_mapping_size();
969a229cc14SJohn Garry 
970a229cc14SJohn Garry 	return min(dma_max_mapping_size(dev), size);
971a229cc14SJohn Garry }
972a229cc14SJohn Garry EXPORT_SYMBOL_GPL(dma_opt_mapping_size);
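/*
 * Illustrative sketch (not part of this file): clamping a driver's per-I/O
 * transfer limit to what a single DMA mapping can cover, so swiotlb or IOMMU
 * constraints are respected.  "max_xfer_bytes" and DRIVER_MAX_XFER are
 * hypothetical driver-side names.
 *
 *	size_t max_xfer_bytes;
 *
 *	max_xfer_bytes = min_t(size_t, DRIVER_MAX_XFER,
 *			       dma_max_mapping_size(dev));
 */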
973a229cc14SJohn Garry 
9746ba99411SYoshihiro Shimoda unsigned long dma_get_merge_boundary(struct device *dev)
9756ba99411SYoshihiro Shimoda {
9766ba99411SYoshihiro Shimoda 	const struct dma_map_ops *ops = get_dma_ops(dev);
9776ba99411SYoshihiro Shimoda 
978b5c58b2fSLeon Romanovsky 	if (use_dma_iommu(dev))
979b5c58b2fSLeon Romanovsky 		return iommu_dma_get_merge_boundary(dev);
980b5c58b2fSLeon Romanovsky 
9816ba99411SYoshihiro Shimoda 	if (!ops || !ops->get_merge_boundary)
9826ba99411SYoshihiro Shimoda 		return 0;	/* can't merge */
9836ba99411SYoshihiro Shimoda 
9846ba99411SYoshihiro Shimoda 	return ops->get_merge_boundary(dev);
9856ba99411SYoshihiro Shimoda }
9866ba99411SYoshihiro Shimoda EXPORT_SYMBOL_GPL(dma_get_merge_boundary);
987