xref: /linux-6.15/include/linux/swiotlb.h (revision f96cffd7)
#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H

#include <linux/dma-direction.h>
#include <linux/init.h>
#include <linux/types.h>

struct device;
struct page;
struct scatterlist;

extern int swiotlb_force;

/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2.  What is the appropriate value?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128

/*
 * log of the size of each IO TLB slab.  The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11
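
/*
 * For reference (derived from the two constants above, not additional
 * configuration): each slab is 1 << IO_TLB_SHIFT = 2048 bytes, so the
 * largest contiguous bounce mapping a single request can receive is
 * IO_TLB_SEGSIZE * 2048 bytes = 128 * 2 KiB = 256 KiB.
 */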

extern void swiotlb_init(int verbose);
int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
extern unsigned long swiotlb_nr_tbl(void);
unsigned long swiotlb_size_or_default(void);
extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);

/*
 * Enumeration for sync targets
 */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};

/* define the last possible byte of physical address space as a mapping error */
#define SWIOTLB_MAP_ERROR (~(phys_addr_t)0x0)

extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
					  dma_addr_t tbl_dma_addr,
					  phys_addr_t phys, size_t size,
					  enum dma_data_direction dir);

extern void swiotlb_tbl_unmap_single(struct device *hwdev,
				     phys_addr_t tlb_addr,
				     size_t size, enum dma_data_direction dir);

extern void swiotlb_tbl_sync_single(struct device *hwdev,
				    phys_addr_t tlb_addr,
				    size_t size, enum dma_data_direction dir,
				    enum dma_sync_target target);
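
/*
 * Minimal usage sketch (illustrative only, not part of the upstream API):
 * one full bounce cycle through the three swiotlb_tbl_* helpers above.
 * The swiotlb_example_bounce() name is hypothetical, and tbl_dma_addr is
 * assumed to be the bus address of the bounce pool, supplied by the caller
 * (architecture code typically derives it from the pool's physical start).
 */
static inline void swiotlb_example_bounce(struct device *hwdev,
					  dma_addr_t tbl_dma_addr,
					  phys_addr_t phys, size_t size)
{
	phys_addr_t tlb_addr;

	/* Grab bounce slots and copy the source buffer into them. */
	tlb_addr = swiotlb_tbl_map_single(hwdev, tbl_dma_addr, phys, size,
					  DMA_TO_DEVICE);
	if (tlb_addr == SWIOTLB_MAP_ERROR)
		return;		/* bounce pool exhausted */

	/* Re-copy the original buffer if the CPU modified it after mapping. */
	swiotlb_tbl_sync_single(hwdev, tlb_addr, size, DMA_TO_DEVICE,
				SYNC_FOR_DEVICE);

	/* ... the device would now DMA from the bounce slots at tlb_addr ... */

	/* Release the slots; a DMA_FROM_DEVICE unmap copies data back first. */
	swiotlb_tbl_unmap_single(hwdev, tlb_addr, size, DMA_TO_DEVICE);
}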

/* Accessory functions. */
extern void
*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			dma_addr_t *dma_handle, gfp_t flags);

extern void
swiotlb_free_coherent(struct device *hwdev, size_t size,
		      void *vaddr, dma_addr_t dma_handle);

extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs);
extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs);

extern int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	       enum dma_data_direction dir);

extern void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
		 enum dma_data_direction dir);

extern int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir,
		     unsigned long attrs);

extern void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir,
		       unsigned long attrs);

extern void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir);

extern void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, enum dma_data_direction dir);

extern void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir);

extern void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, enum dma_data_direction dir);

extern int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);

extern int
swiotlb_dma_supported(struct device *hwdev, u64 mask);
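
/*
 * Illustrative sketch of the streaming-DMA wrappers above (hypothetical
 * helper name, not part of the upstream API): map a page for a device
 * read of "size" bytes, let the CPU look at the data, then unmap.
 * Architecture code normally wires these functions into its dma_map_ops
 * rather than calling them directly like this.
 */
static inline void swiotlb_example_streaming(struct device *hwdev,
					     struct page *page, size_t size)
{
	dma_addr_t dev_addr;

	/* Bounce (or map directly, if addressable) for device->memory DMA. */
	dev_addr = swiotlb_map_page(hwdev, page, 0, size, DMA_FROM_DEVICE, 0);
	if (swiotlb_dma_mapping_error(hwdev, dev_addr))
		return;		/* mapping failed, nothing to clean up */

	/* ... the device writes into the mapping at dev_addr here ... */

	/* Make the device's writes visible to the CPU before reading them. */
	swiotlb_sync_single_for_cpu(hwdev, dev_addr, size, DMA_FROM_DEVICE);

	/* Tear the mapping down (copies bounce data back for FROM_DEVICE). */
	swiotlb_unmap_page(hwdev, dev_addr, size, DMA_FROM_DEVICE, 0);
}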

#ifdef CONFIG_SWIOTLB
extern void __init swiotlb_free(void);
#else
static inline void swiotlb_free(void) { }
#endif

extern void swiotlb_print_info(void);
extern int is_swiotlb_buffer(phys_addr_t paddr);

#endif /* __LINUX_SWIOTLB_H */