/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This header is for implementations of dma_map_ops and related code.
 * It should not be included in drivers just using the DMA API.
 */
#ifndef _LINUX_DMA_MAP_OPS_H
#define _LINUX_DMA_MAP_OPS_H

#include <linux/dma-mapping.h>
#include <linux/pgtable.h>
#include <linux/slab.h>

struct cma;
struct iommu_ops;

struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle, unsigned long attrs);
	struct page *(*alloc_pages_op)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, enum dma_data_direction dir,
			gfp_t gfp);
	void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
			dma_addr_t dma_handle, enum dma_data_direction dir);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t, unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg should return a negative error code on error. See
	 * dma_map_sgtable() for a list of appropriate error codes
	 * and their meanings.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
	size_t (*max_mapping_size)(struct device *dev);
	size_t (*opt_mapping_size)(void);
	unsigned long (*get_merge_boundary)(struct device *dev);
};

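/*
 * Illustrative sketch, not part of this header: an implementation fills a
 * dma_map_ops instance with the callbacks it supports. All foo_* names
 * below are hypothetical, and the 1:1 bus mapping is only an assumption
 * made for the example.
 *
 *	static dma_addr_t foo_map_page(struct device *dev, struct page *page,
 *			unsigned long offset, size_t size,
 *			enum dma_data_direction dir, unsigned long attrs)
 *	{
 *		return page_to_phys(page) + offset;	// device sees CPU physical addresses
 *	}
 *
 *	static void foo_unmap_page(struct device *dev, dma_addr_t dma_handle,
 *			size_t size, enum dma_data_direction dir,
 *			unsigned long attrs)
 *	{
 *		// nothing to tear down for a static 1:1 mapping
 *	}
 *
 *	static const struct dma_map_ops foo_dma_ops = {
 *		.map_page	= foo_map_page,
 *		.unmap_page	= foo_unmap_page,
 *	};
 *
 * Per the comment in the struct above, a map_sg callback would return the
 * number of mapped entries on success and a negative errno on failure.
 */
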
#ifdef CONFIG_ARCH_HAS_DMA_OPS
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops();
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else /* CONFIG_ARCH_HAS_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_OPS */

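/*
 * Illustrative sketch (hypothetical foo_* names): bus or IOMMU code usually
 * installs per-device ops while setting a device up and clears them again on
 * teardown, e.g.:
 *
 *	static void foo_setup_device(struct device *dev)
 *	{
 *		set_dma_ops(dev, &foo_dma_ops);
 *	}
 *
 *	static void foo_release_device(struct device *dev)
 *	{
 *		set_dma_ops(dev, NULL);		// revert to the default mapping path
 *	}
 */
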
#ifdef CONFIG_DMA_CMA
extern struct cma *dma_contiguous_default_area;

static inline struct cma *dev_get_cma_area(struct device *dev)
{
	if (dev && dev->cma_area)
		return dev->cma_area;
	return dma_contiguous_default_area;
}

void dma_contiguous_reserve(phys_addr_t addr_limit);
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
		phys_addr_t limit, struct cma **res_cma, bool fixed);

struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int order, bool no_warn);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count);
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
void dma_free_contiguous(struct device *dev, struct page *page, size_t size);

void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
#else /* CONFIG_DMA_CMA */
static inline struct cma *dev_get_cma_area(struct device *dev)
{
	return NULL;
}
static inline void dma_contiguous_reserve(phys_addr_t limit)
{
}
static inline int dma_contiguous_reserve_area(phys_addr_t size,
		phys_addr_t base, phys_addr_t limit, struct cma **res_cma,
		bool fixed)
{
	return -ENOSYS;
}
static inline struct page *dma_alloc_from_contiguous(struct device *dev,
		size_t count, unsigned int order, bool no_warn)
{
	return NULL;
}
static inline bool dma_release_from_contiguous(struct device *dev,
		struct page *pages, int count)
{
	return false;
}
/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
		gfp_t gfp)
{
	return NULL;
}
static inline void dma_free_contiguous(struct device *dev, struct page *page,
		size_t size)
{
	__free_pages(page, get_order(size));
}
#endif /* CONFIG_DMA_CMA */

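/*
 * Illustrative sketch: architecture early-boot code reserves the default
 * contiguous area, and DMA allocator paths then try CMA first with a page
 * allocator fallback, roughly as below (the SZ_4G limit and foo_* names are
 * only example values):
 *
 *	void __init foo_arch_reserve_memory(void)
 *	{
 *		dma_contiguous_reserve(SZ_4G);	// hypothetical DMA zone limit
 *	}
 *
 *	struct page *foo_alloc_pages(struct device *dev, size_t size, gfp_t gfp)
 *	{
 *		struct page *page = dma_alloc_contiguous(dev, size, gfp);
 *
 *		if (!page)
 *			page = alloc_pages(gfp, get_order(size));
 *		return page;
 *	}
 */
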
#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size);
void dma_release_coherent_memory(struct device *dev);
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret);
#else
static inline int dma_declare_coherent_memory(struct device *dev,
		phys_addr_t phys_addr, dma_addr_t device_addr, size_t size)
{
	return -ENOSYS;
}

#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
static inline void dma_release_coherent_memory(struct device *dev) { }
#endif /* CONFIG_DMA_DECLARE_COHERENT */

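/*
 * Illustrative sketch (hypothetical resource, assuming the device sees the
 * memory at its CPU physical address): a platform driver with device-local
 * memory can declare it as the device's coherent pool so that
 * dma_alloc_coherent() is satisfied from it:
 *
 *	ret = dma_declare_coherent_memory(&pdev->dev, res->start, res->start,
 *					  resource_size(res));
 *	if (ret)
 *		return ret;
 *
 *	dma_release_coherent_memory(&pdev->dev);	// in the remove path
 */
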
#ifdef CONFIG_DMA_GLOBAL_POOL
void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
		size_t size, int *ret);
int dma_init_global_coherent(phys_addr_t phys_addr, size_t size);
#else
static inline void *dma_alloc_from_global_coherent(struct device *dev,
		ssize_t size, dma_addr_t *dma_handle)
{
	return NULL;
}
static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}
static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret)
{
	return 0;
}
#endif /* CONFIG_DMA_GLOBAL_POOL */

int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
struct page *dma_common_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
		dma_addr_t dma_handle, enum dma_data_direction dir);

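/*
 * Illustrative note: the dma_common_*() helpers above have the same
 * signatures as the corresponding dma_map_ops callbacks, so an
 * implementation whose coherent buffers live in the kernel linear map could
 * wire them up directly (a sketch only; foo_dma_ops is hypothetical):
 *
 *	static const struct dma_map_ops foo_dma_ops = {
 *		.mmap		= dma_common_mmap,
 *		.get_sgtable	= dma_common_get_sgtable,
 *		.alloc_pages_op	= dma_common_alloc_pages,
 *		.free_pages	= dma_common_free_pages,
 *	};
 */
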
struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size, pgprot_t prot,
		const void *caller);
void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot,
		const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t flags,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
bool dma_free_from_pool(struct device *dev, void *start, size_t size);

int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
		dma_addr_t dma_start, u64 size);

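/*
 * Illustrative sketch (hypothetical addresses): platform setup code for a
 * device that sees system RAM at a different base than the CPU can register
 * a constant bus offset for dma-direct, e.g. CPU physical 0x80000000
 * appearing to the device at bus address 0:
 *
 *	err = dma_direct_set_offset(dev, 0x80000000, 0x00000000, SZ_1G);
 *	if (err)
 *		return err;
 */
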
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
extern bool dma_default_coherent;
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return dev->dma_coherent;
}
#else
#define dma_default_coherent true

static inline bool dev_is_dma_coherent(struct device *dev)
{
	return true;
}
#endif

static inline void dma_reset_need_sync(struct device *dev)
{
#ifdef CONFIG_DMA_NEED_SYNC
	/* Reset it only once so that the function can be called on hotpath */
	if (unlikely(dev->dma_skip_sync))
		dev->dma_skip_sync = false;
#endif
}

/*
 * Check whether potential kmalloc() buffers are safe for non-coherent DMA.
 */
static inline bool dma_kmalloc_safe(struct device *dev,
				    enum dma_data_direction dir)
{
	/*
	 * If DMA bouncing of kmalloc() buffers is disabled, the kmalloc()
	 * caches have already been aligned to a DMA-safe size.
	 */
	if (!IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC))
		return true;

	/*
	 * kmalloc() buffers are DMA-safe irrespective of size if the device
	 * is coherent or the direction is DMA_TO_DEVICE (non-destructive
	 * cache maintenance and benign cache line evictions).
	 */
	if (dev_is_dma_coherent(dev) || dir == DMA_TO_DEVICE)
		return true;

	return false;
}

/*
 * Check whether the given size, assuming it is for a kmalloc()'ed buffer, is
 * sufficiently aligned for non-coherent DMA.
 */
static inline bool dma_kmalloc_size_aligned(size_t size)
{
	/*
	 * Larger kmalloc() sizes are guaranteed to be aligned to
	 * ARCH_DMA_MINALIGN.
	 */
	if (size >= 2 * ARCH_DMA_MINALIGN ||
	    IS_ALIGNED(kmalloc_size_roundup(size), dma_get_cache_alignment()))
		return true;

	return false;
}

/*
 * Check whether the given object size may have originated from a kmalloc()
 * buffer with a slab alignment below the DMA-safe alignment and needs
 * bouncing for non-coherent DMA. The pointer alignment is not considered and
 * in-structure DMA-safe offsets are the responsibility of the caller. Such
 * code should use the static ARCH_DMA_MINALIGN for compiler annotations.
 *
 * The heuristics can have false positives, bouncing unnecessarily, though the
 * buffers would be small. False negatives are theoretically possible if, for
 * example, multiple small kmalloc() buffers are coalesced into a larger
 * buffer that passes the alignment check. There are no such known constructs
 * in the kernel.
 */
static inline bool dma_kmalloc_needs_bounce(struct device *dev, size_t size,
					    enum dma_data_direction dir)
{
	return !dma_kmalloc_safe(dev, dir) && !dma_kmalloc_size_aligned(size);
}

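/*
 * Worked example, assuming CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC=y, small
 * kmalloc caches enabled, ARCH_DMA_MINALIGN == 128 and a 128-byte DMA cache
 * alignment: a kmalloc(96) buffer rounds up to 96 bytes, which is not a
 * multiple of 128, so a DMA_FROM_DEVICE transfer to it on a non-coherent
 * device needs bouncing; kmalloc(256) rounds up to 256 and does not.
 */
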
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

#ifdef CONFIG_MMU
/*
 * Page protection so that devices that can't snoop CPU caches can use the
 * memory coherently.  We default to pgprot_noncached which is usually used
 * for ioremap as a safe bet, but architectures can override this with less
 * strict semantics if possible.
 */
#ifndef pgprot_dmacoherent
#define pgprot_dmacoherent(prot)	pgprot_noncached(prot)
#endif

pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	return prot;	/* no protection bits supported without page tables */
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */

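/*
 * Illustrative sketch (hypothetical foo_cache_* primitives): on a
 * non-coherent architecture these hooks typically become cache maintenance
 * on the physical range, roughly:
 *
 *	void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
 *			enum dma_data_direction dir)
 *	{
 *		if (dir == DMA_FROM_DEVICE)
 *			foo_cache_inv_range(paddr, paddr + size);	// drop stale lines
 *		else
 *			foo_cache_wb_range(paddr, paddr + size);	// push dirty data to RAM
 *	}
 *
 *	void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 *			enum dma_data_direction dir)
 *	{
 *		if (dir != DMA_TO_DEVICE)
 *			foo_cache_inv_range(paddr, paddr + size);	// see what the device wrote
 *	}
 */
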
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */

#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
void arch_dma_prep_coherent(struct page *page, size_t size);
#else
static inline void arch_dma_prep_coherent(struct page *page, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */

#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
#else
static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
}
#endif /* ARCH_HAS_DMA_MARK_CLEAN */

void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);

#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr);
bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle);
bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
		int nents);
#else
#define arch_dma_map_page_direct(d, a)		(false)
#define arch_dma_unmap_page_direct(d, a)	(false)
#define arch_dma_map_sg_direct(d, s, n)		(false)
#define arch_dma_unmap_sg_direct(d, s, n)	(false)
#endif

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */

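/*
 * Illustrative note: arch_setup_dma_ops() is normally invoked by firmware
 * bus configuration code (such as the OF/ACPI DMA setup helpers) with the
 * coherency derived from firmware properties, and arch_teardown_dma_ops()
 * runs when the device is torn down, roughly:
 *
 *	arch_setup_dma_ops(dev, coherent);	// during DMA configuration
 *	arch_teardown_dma_ops(dev);		// during device teardown
 */
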
#ifdef CONFIG_DMA_API_DEBUG
void dma_debug_add_bus(const struct bus_type *bus);
void debug_dma_dump_mappings(struct device *dev);
#else
static inline void dma_debug_add_bus(const struct bus_type *bus)
{
}
static inline void debug_dma_dump_mappings(struct device *dev)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

extern const struct dma_map_ops dma_dummy_ops;

enum pci_p2pdma_map_type {
	/*
	 * PCI_P2PDMA_MAP_UNKNOWN: Used internally for indicating the mapping
	 * type hasn't been calculated yet. Functions that return this enum
	 * never return this value.
	 */
	PCI_P2PDMA_MAP_UNKNOWN = 0,

	/*
	 * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will
	 * traverse the host bridge and the host bridge is not in the
	 * allowlist. DMA mapping routines should return an error when
	 * this is returned.
	 */
	PCI_P2PDMA_MAP_NOT_SUPPORTED,

	/*
	 * PCI_P2PDMA_MAP_BUS_ADDR: Indicates that two devices can talk to
	 * each other directly through a PCI switch and the transaction will
	 * not traverse the host bridge. Such a mapping should program
	 * the DMA engine with PCI bus addresses.
	 */
	PCI_P2PDMA_MAP_BUS_ADDR,

	/*
	 * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk
	 * to each other, but the transaction traverses a host bridge on the
	 * allowlist. In this case, a normal mapping either with CPU physical
	 * addresses (in the case of dma-direct) or IOVA addresses (in the
	 * case of IOMMUs) should be used to program the DMA engine.
	 */
	PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
};

struct pci_p2pdma_map_state {
	struct dev_pagemap *pgmap;
	int map;
	u64 bus_off;
};

#ifdef CONFIG_PCI_P2PDMA
enum pci_p2pdma_map_type
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
		       struct scatterlist *sg);
#else /* CONFIG_PCI_P2PDMA */
static inline enum pci_p2pdma_map_type
pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
		       struct scatterlist *sg)
{
	return PCI_P2PDMA_MAP_NOT_SUPPORTED;
}
#endif /* CONFIG_PCI_P2PDMA */

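/*
 * Illustrative sketch of how a scatterlist mapping loop might consume the
 * returned type, modelled loosely on the dma-direct and iommu-dma paths:
 *
 *	struct pci_p2pdma_map_state p2pdma_state = {};
 *	struct scatterlist *sg;
 *	int i;
 *
 *	for_each_sg(sgl, sg, nents, i) {
 *		if (is_pci_p2pdma_page(sg_page(sg))) {
 *			switch (pci_p2pdma_map_segment(&p2pdma_state, dev, sg)) {
 *			case PCI_P2PDMA_MAP_BUS_ADDR:
 *				continue;	// segment already holds a bus address
 *			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
 *				break;		// map it like ordinary memory
 *			default:
 *				return -EREMOTEIO;
 *			}
 *		}
 *		// map the segment via dma-direct or the IOMMU here
 *	}
 */
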
#endif /* _LINUX_DMA_MAP_OPS_H */