/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Internals of the DMA direct mapping implementation.  Only for use by the
 * DMA mapping code and IOMMU drivers.
 */
#ifndef _LINUX_DMA_DIRECT_H
#define _LINUX_DMA_DIRECT_H 1

#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include <linux/memblock.h> /* for min_low_pfn */
#include <linux/mem_encrypt.h>
#include <linux/swiotlb.h>

extern unsigned int zone_dma_bits;

/*
 * Record the mapping of CPU physical to DMA addresses for a given region.
 */
struct bus_dma_region {
	phys_addr_t	cpu_start;
	dma_addr_t	dma_start;
	u64		size;
};

static inline dma_addr_t translate_phys_to_dma(struct device *dev,
		phys_addr_t paddr)
{
	const struct bus_dma_region *m;

	for (m = dev->dma_range_map; m->size; m++) {
		u64 offset = paddr - m->cpu_start;

		if (paddr >= m->cpu_start && offset < m->size)
			return m->dma_start + offset;
	}

	/* make sure dma_capable fails when no translation is available */
	return DMA_MAPPING_ERROR;
}

static inline phys_addr_t translate_dma_to_phys(struct device *dev,
		dma_addr_t dma_addr)
{
	const struct bus_dma_region *m;

	for (m = dev->dma_range_map; m->size; m++) {
		u64 offset = dma_addr - m->dma_start;

		if (dma_addr >= m->dma_start && offset < m->size)
			return m->cpu_start + offset;
	}

	return (phys_addr_t)-1;
}
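/*
 * Illustrative sketch (not part of this header): dev->dma_range_map
 * points at an array of bus_dma_region entries terminated by a
 * zero-size entry, which is what ends the walk in the translate
 * helpers above.  A hypothetical bus that makes CPU addresses
 * starting at 0x80000000 visible at bus address 0 could be described
 * as:
 *
 *	static const struct bus_dma_region example_map[] = {
 *		{
 *			.cpu_start	= 0x80000000,
 *			.dma_start	= 0x00000000,
 *			.size		= 0x10000000,	// 256 MiB
 *		},
 *		{ }	// terminator: size == 0 ends the walk
 *	};
 *
 * With dev->dma_range_map pointing at example_map,
 * translate_phys_to_dma(dev, 0x80001000) returns 0x1000, and any
 * physical address outside the region yields DMA_MAPPING_ERROR.
 */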
#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#ifndef phys_to_dma_unencrypted
#define phys_to_dma_unencrypted		phys_to_dma
#endif
#else
static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
		phys_addr_t paddr)
{
	if (dev->dma_range_map)
		return translate_phys_to_dma(dev, paddr);
	return paddr;
}

/*
 * If memory encryption is supported, phys_to_dma will set the memory encryption
 * bit in the DMA address, and dma_to_phys will clear it.
 * phys_to_dma_unencrypted is for use on special unencrypted memory like swiotlb
 * buffers.
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return __sme_set(phys_to_dma_unencrypted(dev, paddr));
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
	phys_addr_t paddr;

	if (dev->dma_range_map)
		paddr = translate_dma_to_phys(dev, dma_addr);
	else
		paddr = dma_addr;

	return __sme_clr(paddr);
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */

#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
bool force_dma_unencrypted(struct device *dev);
#else
static inline bool force_dma_unencrypted(struct device *dev)
{
	return false;
}
#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
		bool is_ram)
{
	dma_addr_t end = addr + size - 1;

	if (addr == DMA_MAPPING_ERROR)
		return false;
	if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
	    min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
		return false;

	return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
}

u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir);
int dma_direct_supported(struct device *dev, u64 mask);
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);

#endif /* _LINUX_DMA_DIRECT_H */
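/*
 * Illustrative sketch (hypothetical driver-side code, not an API
 * defined by this header): how phys_to_dma() and dma_capable()
 * combine in a direct-mapping path before a buffer is handed to a
 * device; use_bounce_buffer() is a made-up name for a fallback path:
 *
 *	dma_addr_t dma = phys_to_dma(dev, paddr);
 *
 *	if (!dma_capable(dev, dma, size, true))
 *		return use_bounce_buffer(dev, paddr, size);	// e.g. swiotlb
 *
 * dma_capable() fails for DMA_MAPPING_ERROR (no bus translation
 * matched), for RAM ranges that wrap or fall below the DMA address of
 * the first page of RAM on 32-bit dma_addr_t configurations, and for
 * ranges ending beyond the smaller of the device's DMA mask and the
 * bus DMA limit.
 */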