xref: /linux-6.15/include/linux/dma-mapping.h (revision c9b19ea6)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is, reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)

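/*
 * Illustrative sketch (not part of this header): attributes are combined
 * with a bitwise OR and passed in the 'attrs' argument of the *_attrs()
 * helpers declared below.  The device, size and handle are hypothetical.
 *
 *	unsigned long attrs = DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN;
 *	void *buf = dma_alloc_attrs(dev, SZ_64K, &dma_handle, GFP_KERNEL, attrs);
 *
 *	if (buf)
 *		dma_free_attrs(dev, SZ_64K, buf, dma_handle, attrs);
 */
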
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.  It can
 * be given to a device to use as a DMA source or target.  It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code returned if a mapping fails.  It
 * should not be tested for directly in drivers; use dma_mapping_error() to
 * check for it instead.
 */
#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev,
		dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (unlikely(dma_addr == DMA_MAPPING_ERROR))
		return -ENOMEM;
	return 0;
}

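/*
 * Illustrative sketch (not part of this header): every streaming mapping
 * must be checked with dma_mapping_error() before the handle is handed to
 * the device.  'buf' is assumed to be a kmalloc()ed buffer of 'len' bytes.
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 */
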
dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs);
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
bool dma_addressing_limited(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
size_t dma_opt_mapping_size(struct device *dev);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline unsigned int dma_map_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	return -EOPNOTSUPP;
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
	return false;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline bool dma_addressing_limited(struct device *dev)
{
	return false;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline size_t dma_opt_mapping_size(struct device *dev)
{
	return 0;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
}
static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	return NULL;
}
static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
}
static inline int dma_mmap_noncontiguous(struct device *dev,
		struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
{
	return -EINVAL;
}
#endif /* CONFIG_HAS_DMA */

#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr);

static inline bool dma_dev_need_sync(const struct device *dev)
{
	/* Always call DMA sync operations when debugging is enabled */
	return !dev->dma_skip_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
	if (dma_dev_need_sync(dev))
		__dma_sync_sg_for_device(dev, sg, nelems, dir);
}

static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return dma_dev_need_sync(dev) ? __dma_need_sync(dev, dma_addr) : false;
}
#else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */
static inline bool dma_dev_need_sync(const struct device *dev)
{
	return false;
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
#endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */

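/*
 * Illustrative sketch (not part of this header): a long-lived streaming
 * mapping that is reused has to pass ownership back and forth with the sync
 * helpers above.  'rx_dma', 'rx_buf' and process_packet() are hypothetical.
 *
 *	dma_sync_single_for_cpu(dev, rx_dma, len, DMA_FROM_DEVICE);
 *	process_packet(rx_buf, len);
 *	dma_sync_single_for_device(dev, rx_dma, len, DMA_FROM_DEVICE);
 */
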
struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page);

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
	return page ? page_address(page) : NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
	dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}

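/*
 * Illustrative sketch (not part of this header): memory from
 * dma_alloc_noncoherent() may require explicit synchronization around CPU
 * accesses, using the sync helpers above.  'dma' and 'size' are hypothetical.
 *
 *	void *va = dma_alloc_noncoherent(dev, size, &dma, DMA_FROM_DEVICE,
 *					 GFP_KERNEL);
 *	if (!va)
 *		return -ENOMEM;
 *	... let the device fill the buffer ...
 *	dma_sync_single_for_cpu(dev, dma, size, DMA_FROM_DEVICE);
 *	... read the data through 'va' ...
 *	dma_free_noncoherent(dev, size, va, dma, DMA_FROM_DEVICE);
 */
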
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * returns, ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe for the CPU to access it.
 * Before doing any further DMA operations, one has to transfer ownership of
 * the buffer back to the DMA domain by calling dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}

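/*
 * Illustrative sketch (not part of this header): typical sg_table ownership
 * flow using the helpers documented above.  'sgt' is assumed to have been
 * filled by sg_alloc_table_from_pages() or a similar helper.
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 *	if (ret)
 *		return ret;
 *	... device DMA ...
 *	dma_sync_sgtable_for_cpu(dev, sgt, DMA_BIDIRECTIONAL);
 *	... CPU access ...
 *	dma_sync_sgtable_for_device(dev, sgt, DMA_BIDIRECTIONAL);
 *	... more device DMA ...
 *	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 */
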
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

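/*
 * Illustrative sketch (not part of this header): a coherent allocation for a
 * descriptor ring that both the CPU and the device access without explicit
 * syncs.  'ring', 'ring_dma' and RING_SIZE are hypothetical.
 *
 *	ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
 */
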
static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to the
 * same as, or smaller than, the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

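/*
 * Illustrative sketch (not part of this header): a probe() routine commonly
 * asks for the widest mask the device supports and falls back to 32 bits.
 *
 *	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
 *	if (ret)
 *		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */
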
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline void dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->max_segment_size = size;
}

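/*
 * Illustrative sketch (not part of this header): a driver whose device has
 * no scatter-gather segment size limit can raise the 64K default, provided
 * the bus code has already set up dev->dma_parms.
 *
 *	dma_set_max_seg_size(dev, UINT_MAX);
 */
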
static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
}

/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog2() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed; this case is just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}

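/*
 * Worked example (illustrative): with the default ULONG_MAX boundary mask
 * and a 4K IOMMU page size (page_shift == 12) on a 64-bit build,
 * dma_get_seg_boundary_nr_pages() returns (ULONG_MAX >> 12) + 1 = 2^52
 * pages; a device limited to a 4G boundary (mask 0xffffffff) gets
 * (0xffffffff >> 12) + 1 = 0x100000 pages.
 */
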
static inline void dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->segment_boundary_mask = mask;
}

static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
	if (dev->dma_parms)
		return dev->dma_parms->min_align_mask;
	return 0;
}

static inline void dma_set_min_align_mask(struct device *dev,
		unsigned int min_align_mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return;
	dev->dma_parms->min_align_mask = min_align_mask;
}

#ifndef dma_get_cache_alignment
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_HAS_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}

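/*
 * Illustrative sketch (not part of this header): write-combined memory is a
 * common choice for frame buffers that the CPU fills and userspace maps.
 * 'fb', 'fb_dma' and 'fb_size' are hypothetical driver fields.
 *
 *	fb = dma_alloc_wc(dev, fb_size, &fb_dma, GFP_KERNEL);
 *	if (!fb)
 *		return -ENOMEM;
 *
 *	in the driver's mmap() handler:
 *	return dma_mmap_wc(dev, vma, fb, fb_dma, fb_size);
 *
 *	on teardown:
 *	dma_free_wc(dev, fb_size, fb, fb_dma);
 */
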
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)           \
	({ typeof(PTR) __p __maybe_unused = PTR; 0; })
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  \
	do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
#define dma_unmap_len(PTR, LEN_NAME)             \
	({ typeof(PTR) __p __maybe_unused = PTR; 0; })
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    \
	do { typeof(PTR) __p __maybe_unused = PTR; } while (0)
#endif

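/*
 * Illustrative sketch (not part of this header): the DEFINE_DMA_UNMAP_*
 * macros let a driver keep unmap state in its own structures at zero cost
 * when CONFIG_NEED_DMA_MAP_STATE is not set.  'struct foo_tx' and 'tx' are
 * hypothetical.
 *
 *	struct foo_tx {
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(tx, addr, mapping);
 *	dma_unmap_len_set(tx, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(tx, addr),
 *			 dma_unmap_len(tx, len), DMA_TO_DEVICE);
 */
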
#endif /* _LINUX_DMA_MAPPING_H */