/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <[email protected]>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/iova_bitmap.h>
#include <uapi/linux/iommu.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct iommu_domain_ops;
struct iommu_dirty_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;
struct iommu_dma_cookie;

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */
#define __IOMMU_DOMAIN_DMA_FQ	(1U << 3)  /* DMA-API uses flush queue    */

#define __IOMMU_DOMAIN_SVA	(1U << 4)  /* Shared process address space */
#define __IOMMU_DOMAIN_PLATFORM	(1U << 5)

#define __IOMMU_DOMAIN_NESTED	(1U << 6)  /* User-managed address space nested
					      on a stage-2 translation        */

#define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ
/*
 * These are the possible domain types:
 *
 *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *				  devices
 *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 *	IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
 *				  This flag allows IOMMU drivers to implement
 *				  certain optimizations for these domains
 *	IOMMU_DOMAIN_DMA_FQ	- As above, but definitely using batched TLB
 *				  invalidation.
 *	IOMMU_DOMAIN_SVA	- DMA addresses are shared process addresses
 *				  represented by mm_struct's.
 *	IOMMU_DOMAIN_PLATFORM	- Legacy domain for drivers that do their own
 *				  dma_api stuff. Do not use in new drivers.
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)
#define IOMMU_DOMAIN_DMA_FQ	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API |	\
				 __IOMMU_DOMAIN_DMA_FQ)
#define IOMMU_DOMAIN_SVA	(__IOMMU_DOMAIN_SVA)
#define IOMMU_DOMAIN_PLATFORM	(__IOMMU_DOMAIN_PLATFORM)
#define IOMMU_DOMAIN_NESTED	(__IOMMU_DOMAIN_NESTED)

struct iommu_domain {
	unsigned type;
	const struct iommu_domain_ops *ops;
	const struct iommu_dirty_ops *dirty_ops;

	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	struct iommu_domain_geometry geometry;
	struct iommu_dma_cookie *iova_cookie;
	enum iommu_page_response_code (*iopf_handler)(struct iommu_fault *fault,
						      void *data);
	void *fault_data;
	union {
		struct {
			iommu_fault_handler_t handler;
			void *handler_token;
		};
		struct {	/* IOMMU_DOMAIN_SVA */
			struct mm_struct *mm;
			int users;
		};
	};
};

static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
{
	return domain->type & __IOMMU_DOMAIN_DMA_API;
}

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU_CACHE is supported */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
	IOMMU_CAP_PRE_BOOT_PROTECTION,	/* Firmware says it used the IOMMU for
					   DMA protection and we should too */
	/*
	 * Per-device flag indicating if enforce_cache_coherency() will work on
	 * this device.
	 */
	IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
	/*
	 * IOMMU driver does not issue TLB maintenance during .unmap, so can
	 * usefully support the non-strict DMA flush queue.
	 */
	IOMMU_CAP_DEFERRED_FLUSH,
	IOMMU_CAP_DIRTY_TRACKING,	/* IOMMU supports dirty tracking */
};

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in device assignment use case (USB, Graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 * @free: Callback to free associated memory allocations
 */
struct iommu_resv_region {
	struct list_head list;
	phys_addr_t start;
	size_t length;
	int prot;
	enum iommu_resv_type type;
	void (*free)(struct device *dev, struct iommu_resv_region *region);
};

struct iommu_iort_rmr_data {
	struct iommu_resv_region rr;

	/* Stream IDs associated with IORT RMR entry */
	const u32 *sids;
	u32 num_sids;
};
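
/*
 * Example (illustrative sketch only, not taken from any particular driver):
 * a driver's ->get_resv_regions() callback typically allocates regions with
 * iommu_alloc_resv_region() (declared later in this header) and appends them
 * to the caller's list. The base address and size below are arbitrary
 * placeholders:
 *
 *	static void mydrv_get_resv_regions(struct device *dev,
 *					   struct list_head *head)
 *	{
 *		struct iommu_resv_region *region;
 *
 *		region = iommu_alloc_resv_region(0x8000000, SZ_1M,
 *						 IOMMU_WRITE | IOMMU_MMIO,
 *						 IOMMU_RESV_SW_MSI, GFP_KERNEL);
 *		if (region)
 *			list_add_tail(&region->list, head);
 *	}
 */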

/**
 * enum iommu_dev_features - Per device IOMMU features
 * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
 * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
 *			 enabling %IOMMU_DEV_FEAT_SVA requires
 *			 %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page
 *			 Faults themselves instead of relying on the IOMMU. When
 *			 supported, this feature must be enabled before and
 *			 disabled after %IOMMU_DEV_FEAT_SVA.
 *
 * Device drivers enable a feature using iommu_dev_enable_feature().
 */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_SVA,
	IOMMU_DEV_FEAT_IOPF,
};

#define IOMMU_NO_PASID			(0U) /* Reserved for DMA w/o PASID */
#define IOMMU_FIRST_GLOBAL_PASID	(1U) /* starting range for allocation */
#define IOMMU_PASID_INVALID		(-1U)
typedef unsigned int ioasid_t;

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (inclusive)
 * @pgsize: The interval at which to perform the flush
 * @freelist: Removed pages to free after sync
 * @queued: Indicates that the flush will be queued
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_ops before eventually being passed
 * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
 * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
 * them. @queued is set to indicate when ->iotlb_flush_all() will be called
 * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
	struct list_head	freelist;
	bool			queued;
};

/**
 * struct iommu_dirty_bitmap - Dirty IOVA bitmap state
 * @bitmap: IOVA bitmap
 * @gather: Range information for a pending IOTLB flush
 */
struct iommu_dirty_bitmap {
	struct iova_bitmap *bitmap;
	struct iommu_iotlb_gather *gather;
};

/* Read but do not clear any dirty bits */
#define IOMMU_DIRTY_NO_CLEAR (1 << 0)

/**
 * struct iommu_dirty_ops - domain specific dirty tracking operations
 * @set_dirty_tracking: Enable or Disable dirty tracking on the iommu domain
 * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled
 *                        into a bitmap, with a bit represented as a page.
 *                        Reads the dirty PTE bits and clears them from the
 *                        IO page tables.
 */
struct iommu_dirty_ops {
	int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled);
	int (*read_and_clear_dirty)(struct iommu_domain *domain,
				    unsigned long iova, size_t size,
				    unsigned long flags,
				    struct iommu_dirty_bitmap *dirty);
};

/**
 * struct iommu_user_data - iommu driver specific user space data info
 * @type: The data type of the user buffer
 * @uptr: Pointer to the user buffer for copy_from_user()
 * @len: The length of the user buffer in bytes
 *
 * The user space data is described by a uAPI structure defined in
 * include/uapi/linux/iommufd.h. @type, @uptr and @len should simply be
 * copied from the iommufd core uAPI structure.
 */
struct iommu_user_data {
	unsigned int type;
	void __user *uptr;
	size_t len;
};

/**
 * __iommu_copy_struct_from_user - Copy iommu driver specific user space data
 * @dst_data: Pointer to an iommu driver specific user data that is defined in
 *            include/uapi/linux/iommufd.h
 * @src_data: Pointer to a struct iommu_user_data for user space data info
 * @data_type: The data type of the @dst_data. Must match with @src_data.type
 * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
 * @min_len: Initial length of user data structure for backward compatibility.
 *           This should be offsetofend using the last member in the user data
 *           struct that was initially added to include/uapi/linux/iommufd.h
 */
static inline int __iommu_copy_struct_from_user(
	void *dst_data, const struct iommu_user_data *src_data,
	unsigned int data_type, size_t data_len, size_t min_len)
{
	if (WARN_ON(!dst_data || !src_data))
		return -EINVAL;
	if (src_data->type != data_type)
		return -EINVAL;
	if (src_data->len < min_len || data_len < src_data->len)
		return -EINVAL;
	return copy_struct_from_user(dst_data, data_len, src_data->uptr,
				     src_data->len);
}

/**
 * iommu_copy_struct_from_user - Copy iommu driver specific user space data
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @user_data: Pointer to a struct iommu_user_data for user space data info
 * @data_type: The data type of the @kdst. Must match with @user_data->type
 * @min_last: The last member of the data structure @kdst points to in the
 *            initial version.
 *
 * Return: 0 for success, otherwise -error.
 */
#define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
	__iommu_copy_struct_from_user(kdst, user_data, data_type,         \
				      sizeof(*kdst),                      \
				      offsetofend(typeof(*kdst), min_last))
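
/*
 * Example (illustrative sketch only): a driver's ->domain_alloc_user()
 * implementation might decode the iommufd-provided buffer like this. The
 * struct name "iommu_hwpt_mydrv_s1", the type value and its "flags" member
 * are hypothetical placeholders, not part of the real uAPI:
 *
 *	struct iommu_hwpt_mydrv_s1 arg;
 *	int ret;
 *
 *	ret = iommu_copy_struct_from_user(&arg, user_data,
 *					  IOMMU_HWPT_DATA_MYDRV_S1, flags);
 *	if (ret)
 *		return ERR_PTR(ret);
 */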

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @hw_info: report iommu hardware information. The data buffer returned by this
 *           op is allocated in the iommu driver and freed by the caller after
 *           use. The information type is one of enum iommu_hw_info_type defined
 *           in include/uapi/linux/iommufd.h.
 * @domain_alloc: allocate and return an iommu domain on success, otherwise
 *                NULL is returned. The domain is not required to be fully
 *                initialized until iommu_domain_alloc() returns to the caller.
 * @domain_alloc_user: Allocate an iommu domain corresponding to the input
 *                     parameters as defined in include/uapi/linux/iommufd.h.
 *                     Unlike @domain_alloc, it is called only by IOMMUFD and
 *                     must fully initialize the new domain before return.
 *                     Upon success, if the @user_data is valid and the @parent
 *                     points to a kernel-managed domain, the new domain must be
 *                     IOMMU_DOMAIN_NESTED type; otherwise, the @parent must be
 *                     NULL while the @user_data can be optionally provided, and
 *                     the new domain must support __IOMMU_DOMAIN_PAGING.
 *                     Upon failure, ERR_PTR must be returned.
 * @domain_alloc_paging: Allocate an iommu_domain that can be used for
 *                       UNMANAGED, DMA, and DMA_FQ domain types.
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the group's domain
 * @device_group: find iommu group for a particular device
 * @get_resv_regions: Request list of reserved regions for a device
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_enable/disable_feat: per device entries to enable/disable
 *                           iommu specific features.
 * @page_response: handle page request response
 * @def_domain_type: device default domain type, return value:
 *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *		- IOMMU_DOMAIN_DMA: must use a dma domain
 *		- 0: use the default setting
 * @default_domain_ops: the default ops for domains
 * @remove_dev_pasid: Remove any translation configurations of a specific
 *                    pasid, so that any DMA transactions with this pasid
 *                    will be blocked by the hardware.
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 * @identity_domain: An always available, always attachable identity
 *                   translation.
 * @blocked_domain: An always available, always attachable blocking
 *                  translation.
 * @default_domain: If not NULL this will always be set as the default domain.
 *                  This should be an IDENTITY/BLOCKED/PLATFORM domain.
 *                  Do not use in new drivers.
 */
struct iommu_ops {
	bool (*capable)(struct device *dev, enum iommu_cap);
	void *(*hw_info)(struct device *dev, u32 *length, u32 *type);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
	struct iommu_domain *(*domain_alloc_user)(
		struct device *dev, u32 flags, struct iommu_domain *parent,
		const struct iommu_user_data *user_data);
	struct iommu_domain *(*domain_alloc_paging)(struct device *dev);

	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
	void (*probe_finalize)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);

	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct device *dev);

	/* Per device IOMMU features */
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	int (*page_response)(struct device *dev,
			     struct iommu_fault_event *evt,
			     struct iommu_page_response *msg);

	int (*def_domain_type)(struct device *dev);
	void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid);

	const struct iommu_domain_ops *default_domain_ops;
	unsigned long pgsize_bitmap;
	struct module *owner;
	struct iommu_domain *identity_domain;
	struct iommu_domain *blocked_domain;
	struct iommu_domain *default_domain;
};

/**
 * struct iommu_domain_ops - domain specific operations
 * @attach_dev: attach an iommu domain to a device
 *  Return:
 * * 0		- success
 * * EINVAL	- can indicate that device and domain are incompatible due to
 *		  some previous configuration of the domain, in which case the
 *		  driver shouldn't log an error, since it is legitimate for a
 *		  caller to test reuse of existing domains. Otherwise, it may
 *		  still represent some other fundamental problem
 * * ENOMEM	- out of memory
 * * ENOSPC	- non-ENOMEM type of resource allocation failures
 * * EBUSY	- device is attached to a domain and cannot be changed
 * * ENODEV	- device specific errors, not able to be attached
 * * <others>	- treated as ENODEV by the caller. Use is discouraged
 * @set_dev_pasid: set an iommu domain to a pasid of device
 * @map_pages: map a physically contiguous set of pages of the same size to
 *             an iommu domain.
 * @unmap_pages: unmap a number of pages of the same size from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map_pages to the
 *                  hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *              queue
 * @iova_to_phys: translate iova to physical address
 * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
 *                           including no-snoop TLPs on PCIe or other platform
 *                           specific mechanisms.
 * @enable_nesting: Enable nesting
 * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
 * @free: Release the domain after use.
 */
struct iommu_domain_ops {
	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
			     ioasid_t pasid);

	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);

	void (*flush_iotlb_all)(struct iommu_domain *domain);
	int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
			      size_t size);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);

	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
				    dma_addr_t iova);

	bool (*enforce_cache_coherency)(struct iommu_domain *domain);
	int (*enable_nesting)(struct iommu_domain *domain);
	int (*set_pgtable_quirks)(struct iommu_domain *domain,
				  unsigned long quirks);

	void (*free)(struct iommu_domain *domain);
};
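
/*
 * Example (illustrative sketch only): the -EINVAL convention documented for
 * @attach_dev above lets a caller probe whether an already-allocated domain
 * happens to be compatible with another device, without the driver logging
 * errors for the expected failure:
 *
 *	ret = iommu_attach_group(existing_domain, group);
 *	if (ret == -EINVAL) {
 *		// incompatible with this group; fall back to a new domain
 *		new_domain = iommu_domain_alloc(dev->bus);
 *		...
 *	}
 */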

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *                       instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware handle for this iommu
 * @dev: struct device for sysfs handling
 * @singleton_group: Used internally for drivers that have only one group
 * @max_pasids: number of supported PASIDs
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
	struct iommu_group *singleton_group;
	u32 max_pasids;
};

/**
 * struct iommu_fault_event - Generic fault event
 *
 * Can represent recoverable faults such as page requests or
 * unrecoverable faults such as DMA or IRQ remapping faults.
 *
 * @fault: fault descriptor
 * @list: pending fault event list, used for tracking responses
 */
struct iommu_fault_event {
	struct iommu_fault fault;
	struct list_head list;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @handler: Callback function to handle IOMMU faults at device level
 * @data: handler private data
 * @faults: holds the pending faults which need a response
 * @lock: protect pending faults list
 */
struct iommu_fault_param {
	iommu_dev_fault_handler_t handler;
	void *data;
	struct list_head faults;
	struct mutex lock;
};

/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @lock: protects the members of this structure
 * @fault_param: IOMMU detected device fault reporting data
 * @iopf_param:	 I/O Page Fault queue and data
 * @fwspec:	 IOMMU fwspec data
 * @iommu_dev:	 IOMMU device this device is linked to
 * @priv:	 IOMMU Driver private data
 * @max_pasids:  number of PASIDs this device can consume
 * @attach_deferred: the dma domain attachment is deferred
 * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
 * @require_direct: device requires IOMMU_RESV_DIRECT regions
 * @shadow_on_flush: IOTLB flushes are used to sync shadow tables
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 */
struct dev_iommu {
	struct mutex lock;
	struct iommu_fault_param	*fault_param;
	struct iopf_device_param	*iopf_param;
	struct iommu_fwspec		*fwspec;
	struct iommu_device		*iommu_dev;
	void				*priv;
	u32				max_pasids;
	u32				attach_deferred:1;
	u32				pci_32bit_workaround:1;
	u32				require_direct:1;
	u32				shadow_on_flush:1;
};

int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops,
			  struct device *hwdev);
void iommu_device_unregister(struct iommu_device *iommu);
int iommu_device_sysfs_add(struct iommu_device *iommu,
			   struct device *parent,
			   const struct attribute_group **groups,
			   const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
		.freelist = LIST_HEAD_INIT(gather->freelist),
	};
}

extern int bus_iommu_probe(const struct bus_type *bus);
extern bool iommu_present(const struct bus_type *bus);
extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
extern struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents,
			    int prot, gfp_t gfp);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type, gfp_t gfp);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);
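
/*
 * Example (illustrative sketch only): a typical IOMMU-API consumer, e.g. a
 * VFIO-style driver, allocates an unmanaged domain, attaches a group and maps
 * memory explicitly. The iova/paddr values and SZ_4K size are placeholders:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	if (!domain)
 *		return -ENODEV;
 *	ret = iommu_attach_group(domain, group);
 *	if (!ret)
 *		ret = iommu_map(domain, iova, paddr, SZ_4K,
 *				IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
 *	...
 *	iommu_unmap(domain, iova, SZ_4K);
 *	iommu_detach_group(domain, group);
 *	iommu_domain_free(domain);
 */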

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data);

extern int iommu_unregister_device_fault_handler(struct device *dev);

extern int iommu_report_device_fault(struct device *dev,
				     struct iommu_fault_event *evt);
extern int iommu_page_response(struct device *dev,
			       struct iommu_page_response *msg);

extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

int iommu_enable_nesting(struct iommu_domain *domain);
int iommu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirks);

void iommu_set_dma_strict(void);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

/**
 * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
 *
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to check whether a new range and the gathered range
 * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
 * than merging the two, which might lead to unnecessary invalidations.
 */
static inline
bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size - 1;

	return gather->end != 0 &&
		(end + 1 < gather->start || start > gather->end + 1);
}


/**
 * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
 * where only the address range matters, and simply minimising intermediate
 * syncs is preferred.
 */
static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
						unsigned long iova, size_t size)
{
	unsigned long end = iova + size - 1;

	if (gather->start > iova)
		gather->start = iova;
	if (gather->end < end)
		gather->end = end;
}
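
/*
 * Example (illustrative sketch only): an IOMMU driver whose invalidation is
 * purely range-based might record what its ->unmap_pages() just removed so
 * that the core can batch the TLB invalidation. The "mydrv" names are
 * placeholders for driver-internal helpers:
 *
 *	static size_t mydrv_unmap_pages(struct iommu_domain *domain,
 *					unsigned long iova, size_t pgsize,
 *					size_t pgcount,
 *					struct iommu_iotlb_gather *gather)
 *	{
 *		size_t unmapped = mydrv_clear_ptes(domain, iova,
 *						   pgsize * pgcount);
 *
 *		iommu_iotlb_gather_add_range(gather, iova, unmapped);
 *		return unmapped;
 *	}
 */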

/**
 * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
 * @domain: IOMMU domain to be invalidated
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build invalidation commands based on individual
 * pages, or with page size/table level hints which cannot be gathered if they
 * differ.
 */
static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if ((gather->pgsize && gather->pgsize != size) ||
	    iommu_iotlb_gather_is_disjoint(gather, iova, size))
		iommu_iotlb_sync(domain, gather);

	gather->pgsize = size;
	iommu_iotlb_gather_add_range(gather, iova, size);
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return gather && gather->queued;
}

static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
					   struct iova_bitmap *bitmap,
					   struct iommu_iotlb_gather *gather)
{
	if (gather)
		iommu_iotlb_gather_init(gather);

	dirty->bitmap = bitmap;
	dirty->gather = gather;
}

static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
					     unsigned long iova,
					     unsigned long length)
{
	if (dirty->bitmap)
		iova_bitmap_set(dirty->bitmap, iova, length);

	if (dirty->gather)
		iommu_iotlb_gather_add_range(dirty->gather, iova, length);
}

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);
extern struct iommu_group *generic_single_device_group(struct device *dev);
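
/*
 * Example (illustrative sketch only): a driver's ->read_and_clear_dirty()
 * implementation reports each dirty IOVA range it finds through
 * iommu_dirty_bitmap_record() above. The "mydrv" PTE walk is a placeholder:
 *
 *	if (mydrv_pte_test_and_clear_dirty(pte))
 *		iommu_dirty_bitmap_record(dirty, iova, pgsize);
 */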

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 *
 * Note that the IDs (and any other information) stored in this structure are
 * considered private to the IOMMU device driver and must not be used directly
 * by IOMMU consumers.
 */
struct iommu_fwspec {
	const struct iommu_ops	*ops;
	struct fwnode_handle	*iommu_fwnode;
	u32			flags;
	unsigned int		num_ids;
	u32			ids[];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)

/**
 * struct iommu_sva - handle to a device-mm bond
 * @dev: device bound to the mm
 * @domain: SVA domain backing the bond
 */
struct iommu_sva {
	struct device			*dev;
	struct iommu_domain		*domain;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->fwspec;
	else
		return NULL;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu->fwspec = fwspec;
}

static inline void *dev_iommu_priv_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->priv;
	else
		return NULL;
}

static inline void dev_iommu_priv_set(struct device *dev, void *priv)
{
	dev->iommu->priv = priv;
}

int iommu_probe_device(struct device *dev);

int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);

int iommu_device_use_default_domain(struct device *dev);
void iommu_device_unuse_default_domain(struct device *dev);

int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
void iommu_group_release_dma_owner(struct iommu_group *group);
bool iommu_group_dma_owner_claimed(struct iommu_group *group);

int iommu_device_claim_dma_owner(struct device *dev, void *owner);
void iommu_device_release_dma_owner(struct device *dev);

struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
					    struct mm_struct *mm);
int iommu_attach_device_pasid(struct iommu_domain *domain,
			      struct device *dev, ioasid_t pasid);
void iommu_detach_device_pasid(struct iommu_domain *domain,
			       struct device *dev, ioasid_t pasid);
struct iommu_domain *
iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
			       unsigned int type);
ioasid_t iommu_alloc_global_pasid(struct device *dev);
void iommu_free_global_pasid(ioasid_t pasid);
#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};
struct iommu_dirty_bitmap {};
struct iommu_dirty_ops {};

static inline bool iommu_present(const struct bus_type *bus)
{
	return false;
}

static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
{
	return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return 0;
}

static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
				      unsigned long iova, size_t size,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	return 0;
}

static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
				   unsigned long iova, struct scatterlist *sg,
				   unsigned int nents, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
				iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline void iommu_put_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline int iommu_get_group_resv_regions(struct iommu_group *group,
					       struct list_head *head)
{
	return -ENODEV;
}

static inline void iommu_set_default_passthrough(bool cmd_line)
{
}

static inline void iommu_set_default_translated(bool cmd_line)
{
}

static inline bool iommu_default_passthrough(void)
{
	return true;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	return -ENODEV;
}

static inline int iommu_unregister_device_fault_handler(struct device *dev)
{
	return 0;
}

static inline
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	return -ENODEV;
}

static inline int iommu_page_response(struct device *dev,
				      struct iommu_page_response *msg)
{
	return -ENODEV;
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirks)
{
	return 0;
}

static inline int iommu_device_register(struct iommu_device *iommu,
					const struct iommu_ops *ops,
					struct device *hwdev)
{
	return -ENODEV;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return NULL;
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return false;
}

static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
					   struct iova_bitmap *bitmap,
					   struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
					     unsigned long iova,
					     unsigned long length)
{
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
					 struct device *parent,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct iommu_device *iommu,
				    struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct iommu_device *iommu,
				       struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode,
				    const struct iommu_ops *ops)
{
	return -ENODEV;
}

static inline void iommu_fwspec_free(struct device *dev)
{
}

static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
				       int num_ids)
{
	return -ENODEV;
}

static inline
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	return NULL;
}

static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return NULL;
}

static inline int iommu_device_use_default_domain(struct device *dev)
{
	return 0;
}

static inline void iommu_device_unuse_default_domain(struct device *dev)
{
}

static inline int
iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
{
	return -ENODEV;
}

static inline void iommu_group_release_dma_owner(struct iommu_group *group)
{
}

static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
	return false;
}

static inline void iommu_device_release_dma_owner(struct device *dev)
{
}

static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner)
{
	return -ENODEV;
}

static inline struct iommu_domain *
iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm)
{
	return NULL;
}

static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
					    struct device *dev, ioasid_t pasid)
{
	return -ENODEV;
}

static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
					     struct device *dev, ioasid_t pasid)
{
}

static inline struct iommu_domain *
iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
			       unsigned int type)
{
	return NULL;
}

static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
{
	return IOMMU_PASID_INVALID;
}

static inline void iommu_free_global_pasid(ioasid_t pasid) {}
#endif /* CONFIG_IOMMU_API */

/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain: The IOMMU domain to perform the mapping
 * @iova: The start address to map the buffer
 * @sgt: The sg_table object describing the buffer
 * @prot: IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 */
static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain,
			unsigned long iova, struct sg_table *sgt, int prot)
{
	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
			    GFP_KERNEL);
}

#ifdef CONFIG_IOMMU_DEBUGFS
extern struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#ifdef CONFIG_IOMMU_DMA
#include <linux/msi.h>

/* Setup call for arch DMA mapping code */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);

int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg);

#else /* CONFIG_IOMMU_DMA */

struct msi_desc;
struct msi_msg;

static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
{
}

static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	return -ENODEV;
}

static inline int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	return 0;
}

static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
}

#endif	/* CONFIG_IOMMU_DMA */

/*
 * Newer generations of Tegra SoCs require devices' stream IDs to be directly
 * programmed into some registers. These are always paired with a Tegra SMMU
 * or ARM SMMU, for which the contents of the struct iommu_fwspec are known.
 * Use this helper to formalize access to these internals.
 */
#define TEGRA_STREAM_ID_BYPASS 0x7f

static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream_id)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec && fwspec->num_ids == 1) {
		*stream_id = fwspec->ids[0] & 0xffff;
		return true;
	}
#endif

	return false;
}

#ifdef CONFIG_IOMMU_SVA
static inline void mm_pasid_init(struct mm_struct *mm)
{
	mm->pasid = IOMMU_PASID_INVALID;
}
static inline bool mm_valid_pasid(struct mm_struct *mm)
{
	return mm->pasid != IOMMU_PASID_INVALID;
}
void mm_pasid_drop(struct mm_struct *mm);
struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm);
void iommu_sva_unbind_device(struct iommu_sva *handle);
u32 iommu_sva_get_pasid(struct iommu_sva *handle);
#else
static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	return NULL;
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}
static inline void mm_pasid_init(struct mm_struct *mm) {}
static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }
static inline void mm_pasid_drop(struct mm_struct *mm) {}
#endif /* CONFIG_IOMMU_SVA */

#endif /* __LINUX_IOMMU_H */