/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Internals of the DMA direct mapping implementation.  Only for use by the
 * DMA mapping code and IOMMU drivers.
 */
#ifndef _LINUX_DMA_DIRECT_H
#define _LINUX_DMA_DIRECT_H 1

#include <linux/dma-mapping.h>
#include <linux/dma-noncoherent.h>
#include <linux/memblock.h> /* for min_low_pfn */
#include <linux/mem_encrypt.h>
#include <linux/swiotlb.h>

extern unsigned int zone_dma_bits;

#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#else
static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	dma_addr_t dev_addr = (dma_addr_t)paddr;

	return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}

static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
	phys_addr_t paddr = (phys_addr_t)dev_addr;

	return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
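
/*
 * Worked example (hypothetical platform, not from this file): with 4 KiB
 * pages and device bus addresses sitting 1 GiB below CPU physical
 * addresses, dma_pfn_offset would be 0x40000 (1 GiB >> PAGE_SHIFT), so
 *
 *	__phys_to_dma(dev, 0x48001000) == 0x08001000
 *	__dma_to_phys(dev, 0x08001000) == 0x48001000
 *
 * The two helpers are exact inverses; architectures with more complex
 * translations select CONFIG_ARCH_HAS_PHYS_TO_DMA and provide their own
 * versions in <asm/dma-direct.h>.
 */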

#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
bool force_dma_unencrypted(struct device *dev);
#else
static inline bool force_dma_unencrypted(struct device *dev)
{
	return false;
}
#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */

/*
 * If memory encryption is supported, phys_to_dma will set the memory encryption
 * bit in the DMA address, and dma_to_phys will clear it.  The raw __phys_to_dma
 * and __dma_to_phys versions should only be used on non-encrypted memory for
 * special occasions like DMA coherent buffers.
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return __sme_set(__phys_to_dma(dev, paddr));
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return __sme_clr(__dma_to_phys(dev, daddr));
}
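
/*
 * Illustrative sketch (the bit position is an assumption; real hardware
 * reports it via CPUID): with AMD SME and the C-bit at physical address
 * bit 47, sme_me_mask == BIT_ULL(47), so __sme_set() ORs that bit into the
 * bus address handed to the device and __sme_clr() masks it back off:
 *
 *	phys_to_dma(dev, paddr) == __phys_to_dma(dev, paddr) | BIT_ULL(47)
 *	dma_to_phys(dev, daddr) == __dma_to_phys(dev, daddr) & ~BIT_ULL(47)
 *
 * Without memory encryption sme_me_mask is zero and both wrappers degrade
 * to the raw helpers.
 */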

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
		bool is_ram)
{
	dma_addr_t end = addr + size - 1;

	if (!dev->dma_mask)
		return false;

	if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
	    min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
		return false;

	return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
}
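
/*
 * Worked example (hypothetical device, not from this file): for a device
 * with a 32-bit dma_mask, no bus_dma_limit, and a 4 KiB mapping at bus
 * address 0xfffff000, end == 0xffffffff and the mapping is accepted; the
 * same mapping starting one byte higher gives end == 0x100000000 and is
 * rejected.  The min_low_pfn check guards 32-bit dma_addr_t
 * configurations, where addr + size can wrap: after a wrap, min(addr, end)
 * falls below the bus address of the first page of RAM.
 */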

u64 dma_direct_get_required_mask(struct device *dev);
gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
				  u64 *phys_mask);
bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_can_mmap(struct device *dev);
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_direct_supported(struct device *dev, u64 mask);
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
size_t dma_direct_max_mapping_size(struct device *dev);
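
/*
 * The declarations above back the core DMA API for devices on the
 * direct-mapped path.  A minimal caller sketch, assuming a device with no
 * IOMMU and no dma_map_ops set (so the DMA core dispatches straight to the
 * direct path):
 *
 *	dma_addr_t handle;
 *	void *cpu = dma_direct_alloc(dev, size, &handle, GFP_KERNEL, 0);
 *
 *	if (!cpu)
 *		return -ENOMEM;
 *	... let the device DMA to/from handle ...
 *	dma_direct_free(dev, size, cpu, handle, 0);
 *
 * Normal drivers must use dma_alloc_coherent()/dma_map_*() instead; per
 * the header comment, only the DMA mapping code and IOMMU drivers call
 * these entry points directly.
 */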

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

static inline void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(paddr, size, dir);
}
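
/*
 * Note the ordering above: for a bounced mapping, paddr resolves to the
 * swiotlb slot, so the data is first copied from the original buffer into
 * the slot, and only then is cache maintenance done on the slot for
 * non-coherent devices, guaranteeing the device observes the fresh copy.
 */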

static inline void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(paddr, size, dir);
		arch_sync_dma_for_cpu_all();
	}

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}
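
/*
 * The mirror image of the _for_device variant: caches covering paddr are
 * invalidated (plus any CPU-wide maintenance the architecture needs)
 * before swiotlb copies the bounce slot back to the original buffer, so
 * the copy reads what the device actually wrote rather than stale lines.
 */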

static inline dma_addr_t dma_direct_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (unlikely(swiotlb_force == SWIOTLB_FORCE))
		return swiotlb_map(dev, phys, size, dir, attrs);

	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		if (swiotlb_force != SWIOTLB_NO_FORCE)
			return swiotlb_map(dev, phys, size, dir, attrs);

		dev_WARN_ONCE(dev, 1,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);
	return dma_addr;
}
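
/*
 * Mapping decision flow, in order:
 *  1. swiotlb_force == SWIOTLB_FORCE: always bounce (set e.g. by
 *     "swiotlb=force" on the kernel command line or for SEV guests).
 *  2. The address fits the device's mask and bus limit: map in place,
 *     with cache maintenance for non-coherent devices unless the caller
 *     passed DMA_ATTR_SKIP_CPU_SYNC.
 *  3. Otherwise bounce through swiotlb, or warn and return
 *     DMA_MAPPING_ERROR when bouncing is disabled (SWIOTLB_NO_FORCE).
 */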

static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
}
#endif /* _LINUX_DMA_DIRECT_H */