xref: /linux-6.15/include/linux/dma-mapping.h (revision dfd32cad)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute are defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)
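
/*
 * Example: a minimal, hypothetical sketch of combining attributes on an
 * allocation ("mydev" and the size are illustrative, not from this file):
 *
 *	unsigned long attrs = DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_WARN;
 *	dma_addr_t dma_handle;
 *	void *buf = dma_alloc_attrs(&mydev->dev, SZ_64K, &dma_handle,
 *				    GFP_KERNEL, attrs);
 */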

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target.  A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp,
				unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle,
			      unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
			  void *, dma_addr_t, size_t,
			  unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
};
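
/*
 * Example: a minimal, hypothetical sketch of wiring up a custom ops table
 * (only a subset of hooks is shown; the my_* names are illustrative):
 *
 *	static const struct dma_map_ops my_dma_ops = {
 *		.map_page	= my_map_page,
 *		.unmap_page	= my_unmap_page,
 *		.map_sg		= my_map_sg,
 *		.unmap_sg	= my_unmap_sg,
 *	};
 *
 * A bus or IOMMU driver would install it per device with
 * set_dma_ops(dev, &my_dma_ops) (declared below).
 */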

#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)

extern const struct dma_map_ops dma_virt_ops;
extern const struct dma_map_ops dma_dummy_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
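
/*
 * For example, DMA_BIT_MASK(32) evaluates to 0xffffffffULL. The (n) == 64
 * special case exists because shifting a 64-bit value by 64 bits is
 * undefined behaviour in C.
 */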

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
				       dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			    void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
				  size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(ssize_t size,
						   dma_addr_t *dma_handle)
{
	return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
						void *cpu_addr, size_t size,
						int *ret)
{
	return 0;
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

static inline bool dma_is_direct(const struct dma_map_ops *ops)
{
	return likely(!ops);
}

/*
 * All the dma_direct_* declarations are here just for the indirect call bypass,
 * and must not be used directly in drivers!
 */
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir);
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir);
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_direct_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr);

	return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}
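
/*
 * Example: a minimal sketch of mapping a scatterlist (hypothetical driver
 * code; "dev", "sgl" and "nents" are assumed to be set up elsewhere, and
 * program_hw() is an illustrative stand-in for programming the device):
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, 0);
 *	if (count == 0)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, count, i)
 *		program_hw(dev, sg_dma_address(sg), sg_dma_len(sg));
 *
 * Note that count may be smaller than nents if adjacent entries were
 * merged, so hardware must be programmed with count, not nents.
 */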

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_is_direct(ops))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops && ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
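
/*
 * Example: a minimal sketch of mapping another device's MMIO region for
 * DMA (hypothetical; "phys" names an MMIO physical address and must not
 * point at RAM, per the pfn_valid() check above):
 *
 *	dma_addr_t dma = dma_map_resource(dev, phys, SZ_4K,
 *					  DMA_BIDIRECTIONAL, 0);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_resource(dev, dma, SZ_4K, DMA_BIDIRECTIONAL, 0);
 */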

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
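
/*
 * Example: a minimal sketch of handing a streaming buffer back and forth
 * (hypothetical; "addr"/"size" come from an earlier mapping and process()
 * is illustrative). The sync pair transfers buffer ownership:
 *
 *	dma_sync_single_for_cpu(dev, addr, size, DMA_FROM_DEVICE);
 *	process(buf);		(CPU may now read what the device wrote)
 *	dma_sync_single_for_device(dev, addr, size, DMA_FROM_DEVICE);
 *	(the device owns the buffer again and may DMA into it)
 */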

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (dma_addr == DMA_MAPPING_ERROR)
		return -ENOMEM;
	return 0;
}
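
/*
 * Example: every streaming mapping must be checked before the address is
 * handed to hardware (a minimal sketch; "page" is illustrative):
 *
 *	dma_addr_t addr = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
 *					     DMA_FROM_DEVICE, 0);
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 */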

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_supported(struct device *dev, u64 mask)
{
	return 0;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
#endif /* CONFIG_HAS_DMA */

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
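
/*
 * Example: syncing just one descriptor inside a larger mapped block (a
 * sketch; "ring_dma", "idx" and "desc_sz" are illustrative names):
 *
 *	dma_sync_single_range_for_cpu(dev, ring_dma, idx * desc_sz,
 *				      desc_sz, DMA_FROM_DEVICE);
 */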

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
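
/*
 * Example: the usual streaming cycle via the attrs-less wrappers (a
 * minimal sketch; "buf" must be kmalloc'ed memory, not stack memory, and
 * start_hw_tx() is an illustrative device kick):
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	start_hw_tx(dev, addr, len);
 *	...
 *	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
 */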

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);

void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot);
bool dma_in_atomic_pool(void *start, size_t size);
void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
bool dma_free_from_pool(void *start, size_t size);

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		dma_addr_t dma_addr, size_t size, unsigned long attrs);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
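
/*
 * Example: a coherent (consistent) allocation for a descriptor ring (a
 * sketch; "RING_BYTES" and "struct desc" are illustrative):
 *
 *	dma_addr_t ring_dma;
 *	struct desc *ring = dma_alloc_coherent(dev, RING_BYTES,
 *					       &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */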

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same as or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
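
/*
 * Example: the typical probe()-time mask setup with a 32-bit fallback
 * (a sketch of the common idiom; "pdev" is illustrative):
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;	(no usable DMA addressing)
 */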

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask set up appropriately.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}
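
/*
 * Example: constraining scatterlist merging for a controller that can
 * neither take segments larger than 64 KiB nor cross 64 KiB boundaries
 * (a sketch; both calls return -EIO unless the bus code has allocated
 * dev->dma_parms):
 *
 *	dma_set_max_seg_size(dev, SZ_64K);
 *	dma_set_seg_boundary(dev, SZ_64K - 1);
 */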

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset;
}
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}

/* flags for the coherent memory api */
#define DMA_MEMORY_EXCLUSIVE		0x01

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return -ENOSYS;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif
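
/*
 * Example: exposing a write-combined buffer to user space from a driver's
 * mmap handler (a hypothetical sketch; the "priv" fields are illustrative
 * and assumed to come from an earlier dma_alloc_wc() call):
 *
 *	static int my_mmap(struct file *f, struct vm_area_struct *vma)
 *	{
 *		struct my_priv *priv = f->private_data;
 *
 *		return dma_mmap_wc(priv->dev, vma, priv->cpu_addr,
 *				   priv->dma_addr, priv->size);
 *	}
 */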

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)           (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)             (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
#endif
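
/*
 * Example: keeping unmap state portably, mirroring the pattern in
 * Documentation/DMA-API-HOWTO.txt (a sketch; "state" is a pointer to the
 * struct below, "addr"/"size" come from an earlier mapping):
 *
 *	struct ring_state {
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(state, mapping, addr);
 *	dma_unmap_len_set(state, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(state, mapping),
 *			 dma_unmap_len(state, len), DMA_TO_DEVICE);
 *
 * The fields compile away entirely when CONFIG_NEED_DMA_MAP_STATE is unset.
 */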

#endif /* _LINUX_DMA_MAPPING_H */