/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is, reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN		(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: Used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)
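/*
 * Illustrative sketch, not part of the API above: a hypothetical driver can
 * OR the attribute flags together and pass them to the *_attrs variants of
 * the allocation and mapping helpers defined later in this header, keeping
 * the same attrs value for the matching release call:
 *
 *	unsigned long attrs = DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN;
 *	dma_addr_t handle;
 *	void *buf;
 *
 *	buf = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL, attrs);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_attrs(dev, size, buf, handle, attrs);
 */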
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
		       dma_addr_t *dma_handle, gfp_t gfp,
		       unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
		     void *vaddr, dma_addr_t dma_handle,
		     unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
		    void *, dma_addr_t, size_t,
		    unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
				   size_t size, enum dma_data_direction dir,
				   unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			   enum dma_data_direction direction);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
};

extern const struct dma_map_ops dma_direct_ops;
extern const struct dma_map_ops dma_noncoherent_ops;
extern const struct dma_map_ops dma_virt_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}
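/*
 * Illustrative sketch, not part of this header: an architecture, bus, or
 * IOMMU layer typically provides one dma_map_ops instance and installs it
 * with set_dma_ops() (defined below); only the hooks that the implementation
 * actually supports need to be filled in. All my_bus_* names here are
 * hypothetical:
 *
 *	static const struct dma_map_ops my_bus_dma_ops = {
 *		.alloc		= my_bus_dma_alloc,
 *		.free		= my_bus_dma_free,
 *		.map_page	= my_bus_dma_map_page,
 *		.unmap_page	= my_bus_dma_unmap_page,
 *		.map_sg		= my_bus_dma_map_sg,
 *		.unmap_sg	= my_bus_dma_unmap_sg,
 *		.dma_supported	= my_bus_dma_supported,
 *	};
 *
 *	set_dma_ops(dev, &my_bus_dma_ops);
 */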
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
				dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
				  size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(ssize_t size,
						   dma_addr_t *dma_handle)
{
	return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
						void *cpu_addr, size_t size,
						int *ret)
{
	return 0;
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else
/*
 * Define the DMA API to allow compilation of DMA-dependent code.
 * Code that depends on the dma-mapping API needs to set 'depends on HAS_DMA'
 * in its Kconfig, unless it already depends on <something> || COMPILE_TEST,
 * where <something> guarantees the availability of the dma-mapping API.
 */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
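/*
 * Illustrative sketch, not part of this header: streaming DMA for a single
 * kernel buffer in a hypothetical receive path, using the wrappers above
 * (dma_map_single()/dma_unmap_single() are the attrs-less macros and
 * dma_mapping_error() the error check defined further down):
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... start the transfer and wait for it to complete ...
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 */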
/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    size_t offset, size_t size,
					    enum dma_data_direction dir,
					    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t addr, size_t size,
					enum dma_data_direction dir,
					unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
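/*
 * Illustrative sketch, not part of this header: a hypothetical driver that
 * keeps a buffer mapped DMA_FROM_DEVICE across several transfers must hand
 * ownership back and forth with the sync helpers above before and after
 * touching the data from the CPU:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU examines the received data ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	... the buffer is owned by the device again for the next transfer ...
 */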
static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags,
				  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
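/*
 * Illustrative sketch, not part of this header: mapping a scatter-gather
 * list with the dma_map_sg() wrapper above. The returned count may be
 * smaller than nents (entries can be merged), so walk only 'count' entries
 * when programming the hardware, but pass the original nents back to
 * dma_unmap_sg(). program_hw_segment() is a hypothetical driver helper:
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, count, i)
 *		program_hw_segment(dev, sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */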
/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @handle: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size,
		      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);
	WARN_ON_ONCE(dev && !dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (!arch_dma_alloc_attrs(&dev))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_mapping_error(dev, dma_addr);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
	return 0;
}

static inline void dma_check_mask(struct device *dev, u64 mask)
{
	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}
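/*
 * Illustrative sketch, not part of this header: a hypothetical driver
 * allocating a long-lived, coherent descriptor ring with the helpers above
 * (RING_BYTES is a made-up size constant):
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... program ring_dma into the device, use 'ring' from the CPU ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */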
static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately setup.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}
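/*
 * Illustrative sketch, not part of this header: a hypothetical probe routine
 * negotiating its addressing capability with the mask helpers above, falling
 * back to 32-bit DMA when 64-bit addressing cannot be supported:
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */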
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}

/* flags for the coherent memory api */
#define DMA_MEMORY_EXCLUSIVE		0x01

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return -ENOSYS;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
int dma_configure(struct device *dev);
void dma_deconfigure(struct device *dev);
#else
static inline int dma_configure(struct device *dev)
{
	return 0;
}

static inline void dma_deconfigure(struct device *dev) {}
#endif

/*
 * Managed DMA API
 */
#ifdef CONFIG_HAS_DMA
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
#else /* !CONFIG_HAS_DMA */
static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t gfp)
{ return NULL; }
static inline void dmam_free_coherent(struct device *dev, size_t size,
				      void *vaddr, dma_addr_t dma_handle) { }
#endif /* !CONFIG_HAS_DMA */

extern void *dmam_alloc_attrs(struct device *dev, size_t size,
			      dma_addr_t *dma_handle, gfp_t gfp,
			      unsigned long attrs);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
					       phys_addr_t phys_addr,
					       dma_addr_t device_addr,
					       size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			       DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif
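/*
 * Illustrative sketch, not part of this header: a hypothetical frame-buffer
 * style driver allocating its buffer write-combined and later exporting it
 * to user space from its mmap handler with the helpers above:
 *
 *	buf = dma_alloc_wc(dev, vsize, &buf_dma, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	return dma_mmap_wc(dev, vma, buf, buf_dma, vsize);
 */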
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif

#endif /* _LINUX_DMA_MAPPING_H */
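/*
 * Illustrative sketch, not part of this header: the DEFINE_DMA_UNMAP_* and
 * dma_unmap_*() helpers above let a hypothetical driver carry unmap
 * information in its per-buffer state without wasting space when
 * CONFIG_NEED_DMA_MAP_STATE is not set:
 *
 *	struct my_tx_buffer {
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(txb, mapping, handle);
 *	dma_unmap_len_set(txb, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(txb, mapping),
 *			 dma_unmap_len(txb, len), DMA_TO_DEVICE);
 */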