/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING (1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE (1UL << 2)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN (1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED (1UL << 9)

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform. It can
 * be given to a device to use as a DMA source or target. It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed. It should not
 * be used directly in drivers, but checked for using dma_mapping_error()
 * instead.
 */
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
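
/*
 * Example (illustrative sketch, not a definition from this header): a driver
 * can OR attributes together and pass them to dma_alloc_attrs()/
 * dma_free_attrs(), declared below.  "my_dev" and "MY_BUF_SIZE" are
 * placeholders.
 *
 *	dma_addr_t dma_handle;
 *	void *buf;
 *
 *	buf = dma_alloc_attrs(my_dev, MY_BUF_SIZE, &dma_handle, GFP_KERNEL,
 *			      DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_WARN);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_attrs(my_dev, MY_BUF_SIZE, buf, dma_handle,
 *		       DMA_ATTR_FORCE_CONTIGUOUS | DMA_ATTR_NO_WARN);
 */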

#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev,
		dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr,
		unsigned long len)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (unlikely(dma_addr == DMA_MAPPING_ERROR))
		return -ENOMEM;
	return 0;
}
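
/*
 * Example (illustrative sketch): the usual streaming-mapping pattern is to
 * map, check the result with dma_mapping_error(), and unmap once the
 * transfer has completed.  "my_dev", "buf" and "len" are placeholders;
 * dma_map_single()/dma_unmap_single() are defined later in this header.
 *
 *	dma_addr_t addr;
 *
 *	addr = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(my_dev, addr))
 *		return -ENOMEM;
 *	... start the transfer and wait for completion ...
 *	dma_unmap_single(my_dev, addr, len, DMA_TO_DEVICE);
 */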

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir);
void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
unsigned long dma_get_merge_boundary(struct device *dev);
struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir);
void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt);
void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt);
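
/*
 * Example (illustrative sketch): a device capable of scatter-gather can use
 * dma_alloc_noncontiguous() and only vmap the buffer when the CPU actually
 * needs to look at it; a dma_sync_sgtable_for_cpu() call may still be needed
 * before the CPU reads data written by the device.  "my_dev" and
 * "MY_BUF_SIZE" are placeholders.
 *
 *	struct sg_table *sgt;
 *	void *vaddr;
 *
 *	sgt = dma_alloc_noncontiguous(my_dev, MY_BUF_SIZE, DMA_FROM_DEVICE,
 *				      GFP_KERNEL, 0);
 *	if (!sgt)
 *		return -ENOMEM;
 *	vaddr = dma_vmap_noncontiguous(my_dev, MY_BUF_SIZE, sgt);
 *	if (!vaddr) {
 *		dma_free_noncontiguous(my_dev, MY_BUF_SIZE, sgt,
 *				       DMA_FROM_DEVICE);
 *		return -ENOMEM;
 *	}
 *	... run the transfer, then read the data through vaddr ...
 *	dma_vunmap_noncontiguous(my_dev, vaddr);
 *	dma_free_noncontiguous(my_dev, MY_BUF_SIZE, sgt, DMA_FROM_DEVICE);
 */
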
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline int dma_supported(struct device *dev, u64 mask)
{
	return 0;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return false;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
}
static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	return NULL;
}
static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
}
static inline int dma_mmap_noncontiguous(struct device *dev,
		struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
{
	return -EINVAL;
}
#endif /* CONFIG_HAS_DMA */

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir);
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page);

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	return page ? page_address(page) : NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
{
	dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
}
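
/*
 * Example (illustrative sketch): non-coherent memory requires explicit
 * ownership transfers around CPU accesses.  "my_dev" and "MY_BUF_SIZE" are
 * placeholders.
 *
 *	dma_addr_t dma_handle;
 *	void *vaddr;
 *
 *	vaddr = dma_alloc_noncoherent(my_dev, MY_BUF_SIZE, &dma_handle,
 *				      DMA_FROM_DEVICE, GFP_KERNEL);
 *	if (!vaddr)
 *		return -ENOMEM;
 *	... let the device fill the buffer ...
 *	dma_sync_single_for_cpu(my_dev, dma_handle, MY_BUF_SIZE,
 *				DMA_FROM_DEVICE);
 *	... the CPU may now read vaddr ...
 *	dma_free_noncoherent(my_dev, MY_BUF_SIZE, vaddr, dma_handle,
 *			     DMA_FROM_DEVICE);
 */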

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	/* DMA must never operate on areas that might be remapped. */
	if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
			  "rejecting DMA map of vmalloc memory\n"))
		return DMA_MAPPING_ERROR;
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}

/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success, the
 * ownership of the buffer is transferred to the DMA domain. One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before touching the
 * buffer by the CPU.
 *
 * Returns 0 on success or -EINVAL if the buffer could not be mapped.
 */
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents <= 0)
		return -EINVAL;
	sgt->nents = nents;
	return 0;
}

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function,
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}
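
/*
 * Example (illustrative sketch): mapping an sg_table that was built
 * beforehand, e.g. with sg_alloc_table_from_pages().  "my_dev" and "sgt"
 * are placeholders.
 *
 *	int ret;
 *
 *	ret = dma_map_sgtable(my_dev, sgt, DMA_TO_DEVICE, 0);
 *	if (ret)
 *		return ret;
 *	... program the device using the sgt->nents mapped entries ...
 *	dma_unmap_sgtable(my_dev, sgt, DMA_TO_DEVICE, 0);
 */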

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}
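
/*
 * Example (illustrative sketch): a typical probe() negotiates the widest
 * mask the device supports and falls back to 32 bits.  "my_dev" is a
 * placeholder.
 *
 *	int ret;
 *
 *	ret = dma_set_mask_and_coherent(my_dev, DMA_BIT_MASK(64));
 *	if (ret)
 *		ret = dma_set_mask_and_coherent(my_dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */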

/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev: device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false. Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
static inline bool dma_addressing_limited(struct device *dev)
{
	return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
			    dma_get_required_mask(dev);
}

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return ULONG_MAX;
}

/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog2() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed, this case is just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
		unsigned int page_shift)
{
	if (!dev)
		return (U32_MAX >> page_shift) + 1;
	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
	if (dev->dma_parms)
		return dev->dma_parms->min_align_mask;
	return 0;
}

static inline int dma_set_min_align_mask(struct device *dev,
		unsigned int min_align_mask)
{
	if (WARN_ON_ONCE(!dev->dma_parms))
		return -EIO;
	dev->dma_parms->min_align_mask = min_align_mask;
	return 0;
}

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}
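
/*
 * Example (illustrative sketch): a device-managed allocation is released
 * automatically on driver detach, so probe() needs no explicit free on its
 * error paths or in remove().  "my_dev" and "MY_RING_SIZE" are placeholders.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dmam_alloc_coherent(my_dev, MY_RING_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 */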

static inline void *dma_alloc_wc(struct device *dev, size_t size,
		dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr,
		size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			DMA_ATTR_WRITE_COMBINE);
}

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME) (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME) (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
#endif

#endif /* _LINUX_DMA_MAPPING_H */