#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H

#include <linux/dma-direction.h>
#include <linux/init.h>
#include <linux/types.h>

struct device;
struct page;
struct scatterlist;

enum swiotlb_force {
	SWIOTLB_NORMAL,		/* Default - depending on HW DMA mask etc. */
	SWIOTLB_FORCE,		/* swiotlb=force */
	SWIOTLB_NO_FORCE,	/* swiotlb=noforce */
};

extern enum swiotlb_force swiotlb_force;

/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2. What is the appropriate value?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128

/*
 * log of the size of each IO TLB slab. The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11

extern void swiotlb_init(int verbose);
int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
extern unsigned long swiotlb_nr_tbl(void);
unsigned long swiotlb_size_or_default(void);
extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
extern void __init swiotlb_update_mem_attributes(void);

/*
 * Enumeration for sync targets
 */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};

/* define the last possible byte of physical address space as a mapping error */
#define SWIOTLB_MAP_ERROR (~(phys_addr_t)0x0)

extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
					  dma_addr_t tbl_dma_addr,
					  phys_addr_t phys, size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs);

extern void swiotlb_tbl_unmap_single(struct device *hwdev,
				     phys_addr_t tlb_addr,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs);

extern void swiotlb_tbl_sync_single(struct device *hwdev,
				    phys_addr_t tlb_addr,
				    size_t size, enum dma_data_direction dir,
				    enum dma_sync_target target);
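
/*
 * Illustrative sketch only (not part of the upstream header): how the three
 * swiotlb_tbl_* primitives above are typically combined by a DMA mapping
 * implementation. hwdev, tbl_dma_addr, orig_phys, size, dir and attrs are
 * assumed to be supplied by the caller's mapping path:
 *
 *	map:	tlb_addr = swiotlb_tbl_map_single(hwdev, tbl_dma_addr,
 *						  orig_phys, size, dir, attrs);
 *		a return value of SWIOTLB_MAP_ERROR means no bounce slots
 *		were available and the mapping must be failed.
 *
 *	sync:	swiotlb_tbl_sync_single(hwdev, tlb_addr, size, dir,
 *					SYNC_FOR_CPU or SYNC_FOR_DEVICE);
 *
 *	unmap:	swiotlb_tbl_unmap_single(hwdev, tlb_addr, size, dir, attrs);
 */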

/* Accessory functions. */
extern void
*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			dma_addr_t *dma_handle, gfp_t flags);

extern void
swiotlb_free_coherent(struct device *hwdev, size_t size,
		      void *vaddr, dma_addr_t dma_handle);

extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs);
extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs);

extern int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir,
		     unsigned long attrs);

extern void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir,
		       unsigned long attrs);

extern void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir);

extern void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, enum dma_data_direction dir);

extern void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir);

extern void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, enum dma_data_direction dir);

extern int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);

extern int
swiotlb_dma_supported(struct device *hwdev, u64 mask);

#ifdef CONFIG_SWIOTLB
extern void __init swiotlb_free(void);
unsigned int swiotlb_max_segment(void);
#else
static inline void swiotlb_free(void) { }
static inline unsigned int swiotlb_max_segment(void) { return 0; }
#endif

extern void swiotlb_print_info(void);
extern int is_swiotlb_buffer(phys_addr_t paddr);
extern void swiotlb_set_max_segment(unsigned int);

#endif /* __LINUX_SWIOTLB_H */