#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-attrs.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target.  A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
		       dma_addr_t *dma_handle, gfp_t gfp,
		       struct dma_attrs *attrs);
	void (*free)(struct device *dev, size_t size,
		     void *vaddr, dma_addr_t dma_handle,
		     struct dma_attrs *attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
		    void *, dma_addr_t, size_t, struct dma_attrs *attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, struct dma_attrs *attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       struct dma_attrs *attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   struct dma_attrs *attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      struct dma_attrs *attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 struct dma_attrs *attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
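/*
 * Editorial note (not part of the original header): DMA_BIT_MASK(n) builds a
 * mask with the low n bits set, so DMA_BIT_MASK(32) is 0xffffffffULL.  The
 * explicit (n) == 64 case avoids the undefined behaviour of shifting a 64-bit
 * value by 64 bits.  A common driver sketch is to try a wide mask first and
 * fall back to 32-bit addressing:
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
 *		rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 */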
#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
#include <asm-generic/dma-mapping-broken.h>
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same value as, or a smaller value than, the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	return dev->dma_parms ? dev->dma_parms->max_segment_size : 65536;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	} else
		return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	return dev->dma_parms ?
		dev->dma_parms->segment_boundary_mask : 0xffffffff;
}
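/*
 * Editorial note (not part of the original header): the segment boundary
 * mask constrains how scatterlist segments may be built or merged; a merged
 * segment must not cross the boundary, so the 0xffffffff default above means
 * no segment may span a 4 GiB boundary.  A hypothetical controller limited
 * to 64 KiB segments that may not cross a 64 KiB boundary might do:
 *
 *	dma_set_max_seg_size(dev, 0x10000);
 *	dma_set_seg_boundary(dev, 0xffff);
 *
 * Both helpers require dev->dma_parms to have been set up; otherwise they
 * return -EIO, as coded above and below.
 */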
static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	} else
		return -EIO;
}

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

/* flags for the coherent memory api */
#define DMA_MEMORY_MAP			0x01
#define DMA_MEMORY_IO			0x02
#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
#define DMA_MEMORY_EXCLUSIVE		0x08

#ifndef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_handle);
#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */

#ifndef CONFIG_HAVE_DMA_ATTRS
struct dma_attrs;

#define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \
	dma_map_single(dev, cpu_addr, size, dir)

#define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \
	dma_unmap_single(dev, dma_addr, size, dir)

#define dma_map_sg_attrs(dev, sgl, nents, dir, attrs) \
	dma_map_sg(dev, sgl, nents, dir)

#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \
	dma_unmap_sg(dev, sgl, nents, dir)

#else
static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
					   dma_addr_t *dma_addr, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
}
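/*
 * Editorial note (not part of the original header): the write-combine helpers
 * are thin wrappers that pass DMA_ATTR_WRITE_COMBINE through the generic
 * *_attrs interface.  A usage sketch for a hypothetical frame buffer of
 * "size" bytes:
 *
 *	buf = dma_alloc_writecombine(dev, size, &dma, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_writecombine(dev, size, buf, dma);
 */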
static inline void dma_free_writecombine(struct device *dev, size_t size,
					 void *cpu_addr, dma_addr_t dma_addr)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
}

static inline int dma_mmap_writecombine(struct device *dev,
					struct vm_area_struct *vma,
					void *cpu_addr, dma_addr_t dma_addr,
					size_t size)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}
#endif /* CONFIG_HAVE_DMA_ATTRS */

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif

#endif /* _LINUX_DMA_MAPPING_H */
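/*
 * Editorial note (not part of the original header): the DEFINE_DMA_UNMAP_*
 * macros let a driver keep the information needed for unmapping only when
 * CONFIG_NEED_DMA_MAP_STATE is set; otherwise they compile away to nothing.
 * A hypothetical descriptor might use them like this:
 *
 *	struct my_desc {
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(&desc, addr, mapping);
 *	dma_unmap_len_set(&desc, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(&desc, addr),
 *			 dma_unmap_len(&desc, len), DMA_TO_DEVICE);
 */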