/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct device;
struct page;
struct scatterlist;

#define SWIOTLB_VERBOSE	(1 << 0) /* verbose initialization */
#define SWIOTLB_FORCE	(1 << 1) /* force bounce buffering */
#define SWIOTLB_ANY	(1 << 2) /* allow any memory for the buffer */

/*
 * Maximum allowable number of contiguous slabs to map; must be a power of 2.
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128

/*
 * log of the size of each IO TLB slab. The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11
#define IO_TLB_SIZE (1 << IO_TLB_SHIFT)

/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL << 20)

unsigned long swiotlb_size_or_default(void);
void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
	int (*remap)(void *tlb, unsigned long nslabs));
int swiotlb_init_late(size_t size, gfp_t gfp_mask,
	int (*remap)(void *tlb, unsigned long nslabs));
extern void __init swiotlb_update_mem_attributes(void);

phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
		size_t mapping_size, unsigned int alloc_aligned_mask,
		enum dma_data_direction dir, unsigned long attrs);

extern void swiotlb_tbl_unmap_single(struct device *hwdev,
				     phys_addr_t tlb_addr,
				     size_t mapping_size,
				     enum dma_data_direction dir,
				     unsigned long attrs);

void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir);
void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir);
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs);

#ifdef CONFIG_SWIOTLB

/**
 * struct io_tlb_pool - IO TLB memory pool descriptor
 * @start:	The start address of the swiotlb memory pool. Used to do a
 *		quick range check to see if the memory was in fact allocated
 *		by this API.
 * @end:	The end address of the swiotlb memory pool. Used to do a
 *		quick range check to see if the memory was in fact allocated
 *		by this API.
 * @vaddr:	The vaddr of the swiotlb memory pool. The pool may be
 *		remapped in the memory-encrypted case, and @vaddr stores the
 *		virtual address used for bounce-buffer operations.
 * @nslabs:	The number of IO TLB slots between @start and @end. For the
 *		default swiotlb, this can be adjusted with a boot parameter,
 *		see setup_io_tlb_npages().
 * @late_alloc:	%true if allocated using the page allocator.
 * @nareas:	Number of areas in the pool.
 * @area_nslabs: Number of slots in each area.
 * @areas:	Array of memory area descriptors.
 * @slots:	Array of slot descriptors.
 * @node:	Member of the IO TLB memory pool list.
 * @rcu:	RCU head for swiotlb_dyn_free().
 * @transient:	%true if transient memory pool.
 */
struct io_tlb_pool {
	phys_addr_t start;
	phys_addr_t end;
	void *vaddr;
	unsigned long nslabs;
	bool late_alloc;
	unsigned int nareas;
	unsigned int area_nslabs;
	struct io_tlb_area *areas;
	struct io_tlb_slot *slots;
#ifdef CONFIG_SWIOTLB_DYNAMIC
	struct list_head node;
	struct rcu_head rcu;
	bool transient;
#endif
};
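
/*
 * A minimal worked example of the slot arithmetic implied by the macros
 * above. This helper is illustrative only and not part of the swiotlb API:
 * each slot covers IO_TLB_SIZE == (1 << 11) == 2KB, so the 64MB default
 * pool holds (64UL << 20) >> 11 == 32768 slots, and IO_TLB_SEGSIZE caps a
 * single contiguous mapping at 128 slots, i.e. 256KB.
 */
static inline unsigned long io_tlb_pool_nslabs_example(struct io_tlb_pool *pool)
{
	/* The slot count follows directly from the pool's physical extent. */
	return (unsigned long)((pool->end - pool->start) >> IO_TLB_SHIFT);
}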

/**
 * struct io_tlb_mem - Software IO TLB allocator
 * @defpool:	Default (initial) IO TLB memory pool descriptor.
 * @nslabs:	Total number of IO TLB slabs in all pools.
 * @debugfs:	The dentry to debugfs.
 * @force_bounce: %true if swiotlb bouncing is forced.
 * @for_alloc:	%true if the pool is used for memory allocation.
 * @can_grow:	%true if more pools can be allocated dynamically.
 * @phys_limit:	Maximum allowed physical address.
 * @lock:	Lock to synchronize changes to the @pools list.
 * @pools:	List of IO TLB memory pool descriptors (if dynamic).
 * @dyn_alloc:	Dynamic IO TLB pool allocation work.
 * @total_used:	The total number of slots in the pool that are currently
 *		used across all areas. Used only for calculating used_hiwater
 *		in debugfs.
 * @used_hiwater: The high water mark for total_used. Used only for
 *		reporting in debugfs.
 * @transient_nslabs: The total number of slots in all transient pools that
 *		are currently used across all areas.
 */
struct io_tlb_mem {
	struct io_tlb_pool defpool;
	unsigned long nslabs;
	struct dentry *debugfs;
	bool force_bounce;
	bool for_alloc;
#ifdef CONFIG_SWIOTLB_DYNAMIC
	bool can_grow;
	u64 phys_limit;
	spinlock_t lock;
	struct list_head pools;
	struct work_struct dyn_alloc;
#endif
#ifdef CONFIG_DEBUG_FS
	atomic_long_t total_used;
	atomic_long_t used_hiwater;
	atomic_long_t transient_nslabs;
#endif
};

#ifdef CONFIG_SWIOTLB_DYNAMIC

struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr);

#else

static inline struct io_tlb_pool *swiotlb_find_pool(struct device *dev,
						    phys_addr_t paddr)
{
	return &dev->dma_io_tlb_mem->defpool;
}

#endif

/**
 * is_swiotlb_buffer() - check if a physical address belongs to a swiotlb
 * @dev:	Device which has mapped the buffer.
 * @paddr:	Physical address within the DMA buffer.
 *
 * Check if @paddr points into a bounce buffer.
 *
 * Return:
 * * %true if @paddr points into a bounce buffer
 * * %false otherwise
 */
static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	if (!mem)
		return false;

#ifdef CONFIG_SWIOTLB_DYNAMIC
	/*
	 * All SWIOTLB buffer addresses must have been returned by
	 * swiotlb_tbl_map_single() and passed to a device driver.
	 * If a SWIOTLB address is checked on another CPU, then it was
	 * presumably loaded by the device driver from an unspecified private
	 * data structure. Make sure that this load is ordered before reading
	 * dev->dma_uses_io_tlb here and mem->pools in swiotlb_find_pool().
	 *
	 * This barrier pairs with smp_mb() in swiotlb_find_slots().
	 */
	smp_rmb();
	return READ_ONCE(dev->dma_uses_io_tlb) &&
		swiotlb_find_pool(dev, paddr);
#else
	return paddr >= mem->defpool.start && paddr < mem->defpool.end;
#endif
}
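
/*
 * Illustrative sketch of the ordering that the smp_rmb() above depends on,
 * paired with the smp_mb() in swiotlb_find_slots(). This is a sketch based
 * on the comment in is_swiotlb_buffer(), not the actual writer-side code:
 *
 *	CPU 0 (swiotlb_find_slots)	CPU 1 (is_swiotlb_buffer)
 *	publish pool to mem->pools	load tlb_addr from driver data
 *	smp_mb();			smp_rmb();
 *	WRITE_ONCE(dev->dma_uses_io_tlb,
 *		   true);		if (READ_ONCE(dev->dma_uses_io_tlb))
 *						swiotlb_find_pool(dev, paddr);
 *
 * If CPU 1 observes dma_uses_io_tlb set, the barrier pairing guarantees it
 * also observes the pool that CPU 0 published before setting the flag.
 */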

static inline bool is_swiotlb_force_bounce(struct device *dev)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && mem->force_bounce;
}

void swiotlb_init(bool addressing_limited, unsigned int flags);
void __init swiotlb_exit(void);
void swiotlb_dev_init(struct device *dev);
size_t swiotlb_max_mapping_size(struct device *dev);
bool is_swiotlb_allocated(void);
bool is_swiotlb_active(struct device *dev);
void __init swiotlb_adjust_size(unsigned long size);
phys_addr_t default_swiotlb_base(void);
phys_addr_t default_swiotlb_limit(void);
#else
static inline void swiotlb_init(bool addressing_limited, unsigned int flags)
{
}

static inline void swiotlb_dev_init(struct device *dev)
{
}

static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
	return false;
}
static inline bool is_swiotlb_force_bounce(struct device *dev)
{
	return false;
}
static inline void swiotlb_exit(void)
{
}
static inline size_t swiotlb_max_mapping_size(struct device *dev)
{
	return SIZE_MAX;
}

static inline bool is_swiotlb_allocated(void)
{
	return false;
}

static inline bool is_swiotlb_active(struct device *dev)
{
	return false;
}

static inline void swiotlb_adjust_size(unsigned long size)
{
}

static inline phys_addr_t default_swiotlb_base(void)
{
	return 0;
}

static inline phys_addr_t default_swiotlb_limit(void)
{
	return 0;
}
#endif /* CONFIG_SWIOTLB */

extern void swiotlb_print_info(void);

#ifdef CONFIG_DMA_RESTRICTED_POOL
struct page *swiotlb_alloc(struct device *dev, size_t size);
bool swiotlb_free(struct device *dev, struct page *page, size_t size);

static inline bool is_swiotlb_for_alloc(struct device *dev)
{
	return dev->dma_io_tlb_mem->for_alloc;
}
#else
static inline struct page *swiotlb_alloc(struct device *dev, size_t size)
{
	return NULL;
}
static inline bool swiotlb_free(struct device *dev, struct page *page,
				size_t size)
{
	return false;
}
static inline bool is_swiotlb_for_alloc(struct device *dev)
{
	return false;
}
#endif /* CONFIG_DMA_RESTRICTED_POOL */

#endif /* __LINUX_SWIOTLB_H */
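
/*
 * Usage sketch (illustrative only, not part of this header): a DMA mapping
 * path in the style of kernel/dma/direct.c bounces buffers the device
 * cannot address and unbounces them on unmap. dev_addr_ok() is a
 * hypothetical stand-in for a real reachability check; the swiotlb calls
 * mirror the declarations above.
 *
 *	static dma_addr_t example_map(struct device *dev, phys_addr_t phys,
 *				      size_t size, enum dma_data_direction dir,
 *				      unsigned long attrs)
 *	{
 *		if (is_swiotlb_force_bounce(dev) || !dev_addr_ok(dev, phys))
 *			return swiotlb_map(dev, phys, size, dir, attrs);
 *		return phys_to_dma(dev, phys);
 *	}
 *
 *	static void example_unmap(struct device *dev, dma_addr_t addr,
 *				  size_t size, enum dma_data_direction dir,
 *				  unsigned long attrs)
 *	{
 *		phys_addr_t phys = dma_to_phys(dev, addr);
 *
 *		if (is_swiotlb_buffer(dev, phys))
 *			swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
 *	}
 */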