/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <[email protected]>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/ioasid.h>
#include <uapi/linux/iommu.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)
/*
 * Non-coherent masters can use this page protection flag to set cacheable
 * memory attributes for only a transparent outer level of cache, also known as
 * the last-level or system cache.
 */
#define IOMMU_SYS_CACHE_ONLY	(1 << 6)

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);
typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *,
				       void *);
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */

/*
 * These are the possible domain types
 *
 * IOMMU_DOMAIN_BLOCKED	   - All DMA is blocked, can be used to isolate
 *				devices
 * IOMMU_DOMAIN_IDENTITY   - DMA addresses are system physical addresses
 * IOMMU_DOMAIN_UNMANAGED  - DMA mappings managed by IOMMU-API user, used
 *				for VMs
 * IOMMU_DOMAIN_DMA	   - Internally used for DMA-API implementations.
 *				This flag allows IOMMU drivers to implement
 *				certain optimizations for these domains
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)

struct iommu_domain {
	unsigned type;
	const struct iommu_ops *ops;
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	iommu_fault_handler_t handler;
	void *handler_token;
	struct iommu_domain_geometry geometry;
	void *iova_cookie;
};

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU can enforce cache coherent DMA
					   transactions */
	IOMMU_CAP_INTR_REMAP,		/* IOMMU supports interrupt isolation */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
};

/*
 * The following constraints are specific to FSL_PAMUV1:
 *  -aperture must be power of 2, and naturally aligned
 *  -number of windows must be power of 2, and address space size
 *   of each window is determined by aperture size / # of windows
 *  -the actual size of the mapped region of a window must be power
 *   of 2 starting with 4KB and physical address must be naturally
 *   aligned.
 * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned constraints.
 * The caller can invoke iommu_domain_get_attr to check if the underlying
 * iommu implementation supports these constraints.
 */

enum iommu_attr {
	DOMAIN_ATTR_GEOMETRY,
	DOMAIN_ATTR_PAGING,
	DOMAIN_ATTR_WINDOWS,
	DOMAIN_ATTR_FSL_PAMU_STASH,
	DOMAIN_ATTR_FSL_PAMU_ENABLE,
	DOMAIN_ATTR_FSL_PAMUV1,
	DOMAIN_ATTR_NESTING,	/* two stages of translation */
	DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
	DOMAIN_ATTR_MAX,
};

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in device assignment use case (USB, Graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 */
struct iommu_resv_region {
	struct list_head	list;
	phys_addr_t		start;
	size_t			length;
	int			prot;
	enum iommu_resv_type	type;
};

/* Per device IOMMU features */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_AUX,	/* Aux-domain feature */
	IOMMU_DEV_FEAT_SVA,	/* Shared Virtual Addresses */
};

#define IOMMU_PASID_INVALID	(-1U)

/**
 * struct iommu_sva_ops - device driver callbacks for an SVA context
 *
 * @mm_exit: called when the mm is about to be torn down by exit_mmap. After
 *	     @mm_exit returns, the device must not issue any more transaction
 *	     with the PASID given as argument.
 *
 *	     The @mm_exit handler is allowed to sleep.
 *	     Be careful about the locks taken in @mm_exit, because they
 *	     might lead to deadlocks if they are also held when dropping
 *	     references to the mm. Consider the following call chain:
 *	     mutex_lock(A); mmput(mm) -> exit_mm() -> @mm_exit() -> mutex_lock(A)
 *	     Using mmput_async() prevents this scenario.
 *
 */
struct iommu_sva_ops {
	iommu_mm_exit_handler_t mm_exit;
};

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (exclusive)
 * @pgsize: The interval at which to perform the flush
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_ops before eventually being passed
 * into ->iotlb_sync().
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
};

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @domain_alloc: allocate iommu domain
 * @domain_free: free iommu domain
 * @attach_dev: attach device to an iommu domain
 * @detach_dev: detach device from an iommu domain
 * @map: map a physically contiguous memory region to an iommu domain
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *		queue
 * @iova_to_phys: translate iova to physical address
 * @add_device: add device to iommu grouping
 * @remove_device: remove device from iommu grouping
 * @device_group: find iommu group for a particular device
 * @domain_get_attr: Query domain attributes
 * @domain_set_attr: Change domain attributes
 * @get_resv_regions: Request list of reserved regions for a device
 * @put_resv_regions: Free list of reserved regions for a device
 * @apply_resv_region: Temporary helper call-back for iova reserved ranges
 * @domain_window_enable: Configure and enable a particular window for a domain
 * @domain_window_disable: Disable a particular window for a domain
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *			driver init to device driver init (default no)
 * @dev_has/enable/disable_feat: per device entries to check/enable/disable
 *				 iommu specific features.
 * @dev_feat_enabled: check enabled feature
 * @aux_attach/detach_dev: aux-domain specific attach/detach entries.
 * @aux_get_pasid: get the pasid given an aux-domain
 * @sva_bind: Bind process address space to device
 * @sva_unbind: Unbind process address space from device
 * @sva_get_pasid: Get PASID associated to a SVA handle
 * @page_response: handle page request response
 * @cache_invalidate: invalidate translation caches
 * @sva_bind_gpasid: bind guest pasid and mm
 * @sva_unbind_gpasid: unbind guest pasid and mm
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 */
struct iommu_ops {
	bool (*capable)(enum iommu_cap);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
	void (*domain_free)(struct iommu_domain *);

	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*map)(struct iommu_domain *domain, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
			size_t size, struct iommu_iotlb_gather *iotlb_gather);
	void (*flush_iotlb_all)(struct iommu_domain *domain);
	void (*iotlb_sync_map)(struct iommu_domain *domain);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);
	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
	int (*add_device)(struct device *dev);
	void (*remove_device)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);
	int (*domain_get_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);
	int (*domain_set_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);
	void (*put_resv_regions)(struct device *dev, struct list_head *list);
	void (*apply_resv_region)(struct device *dev,
				  struct iommu_domain *domain,
				  struct iommu_resv_region *region);

	/* Window handling functions */
	int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
				    phys_addr_t paddr, u64 size, int prot);
	void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);

	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);

	/* Per device IOMMU features */
	bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
	bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	/* Aux-domain specific attach/detach entries */
	int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);

	struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
				      void *drvdata);
	void (*sva_unbind)(struct iommu_sva *handle);
	int (*sva_get_pasid)(struct iommu_sva *handle);

	int (*page_response)(struct device *dev,
			     struct iommu_fault_event *evt,
			     struct iommu_page_response *msg);
	int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev,
				struct iommu_cache_invalidate_info *inv_info);
	int (*sva_bind_gpasid)(struct iommu_domain *domain,
			       struct device *dev,
			       struct iommu_gpasid_bind_data *data);

	int (*sva_unbind_gpasid)(struct device *dev, int pasid);

	unsigned long pgsize_bitmap;
	struct module *owner;
};
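
/*
 * Illustrative sketch (not part of this header): one minimal way an IOMMU
 * driver might populate the ops above. The my_*() callbacks are hypothetical
 * placeholders for driver-specific implementations; only the named fields
 * exist in struct iommu_ops, and SZ_* come from <linux/sizes.h>.
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		.capable	= my_capable,
 *		.domain_alloc	= my_domain_alloc,
 *		.domain_free	= my_domain_free,
 *		.attach_dev	= my_attach_dev,
 *		.detach_dev	= my_detach_dev,
 *		.map		= my_map,
 *		.unmap		= my_unmap,
 *		.iova_to_phys	= my_iova_to_phys,
 *		.add_device	= my_add_device,
 *		.remove_device	= my_remove_device,
 *		.device_group	= generic_device_group,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *	};
 */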

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware handle for this iommu, used for firmware-based lookup
 * @dev: struct device for sysfs handling
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
};

/**
 * struct iommu_fault_event - Generic fault event
 *
 * Can represent recoverable faults such as page requests or
 * unrecoverable faults such as DMA or IRQ remapping faults.
 *
 * @fault: fault descriptor
 * @list: pending fault event list, used for tracking responses
 */
struct iommu_fault_event {
	struct iommu_fault fault;
	struct list_head list;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @handler: Callback function to handle IOMMU faults at device level
 * @data: handler private data
 * @faults: holds the pending faults which need a response
 * @lock: protect pending faults list
 */
struct iommu_fault_param {
	iommu_dev_fault_handler_t handler;
	void *data;
	struct list_head faults;
	struct mutex lock;
};

/**
 * struct iommu_param - collection of per-device IOMMU data
 *
 * @fault_param: IOMMU detected device fault reporting data
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 *	struct iommu_fwspec	*iommu_fwspec;
 */
struct iommu_param {
	struct mutex lock;
	struct iommu_fault_param *fault_param;
};

int iommu_device_register(struct iommu_device *iommu);
void iommu_device_unregister(struct iommu_device *iommu);
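
/*
 * Illustrative sketch (not part of this header): how a driver's probe path
 * might register its IOMMU instance with the core. "struct my_iommu" (which
 * embeds a struct iommu_device), my_iommu_probe() and my_iommu_ops are
 * hypothetical driver-side names; the iommu_device_* calls are the interfaces
 * declared in this file.
 *
 *	static int my_iommu_probe(struct platform_device *pdev)
 *	{
 *		struct my_iommu *smmu;
 *		int ret;
 *
 *		smmu = devm_kzalloc(&pdev->dev, sizeof(*smmu), GFP_KERNEL);
 *		if (!smmu)
 *			return -ENOMEM;
 *
 *		ret = iommu_device_sysfs_add(&smmu->iommu, &pdev->dev, NULL,
 *					     "my-iommu.%s", dev_name(&pdev->dev));
 *		if (ret)
 *			return ret;
 *
 *		iommu_device_set_ops(&smmu->iommu, &my_iommu_ops);
 *		iommu_device_set_fwnode(&smmu->iommu, pdev->dev.fwnode);
 *
 *		return iommu_device_register(&smmu->iommu);
 *	}
 */
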
int iommu_device_sysfs_add(struct iommu_device *iommu,
			   struct device *parent,
			   const struct attribute_group **groups,
			   const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);

static inline void __iommu_device_set_ops(struct iommu_device *iommu,
					  const struct iommu_ops *ops)
{
	iommu->ops = ops;
}

#define iommu_device_set_ops(iommu, ops)				\
do {									\
	struct iommu_ops *__ops = (struct iommu_ops *)(ops);		\
	__ops->owner = THIS_MODULE;					\
	__iommu_device_set_ops(iommu, __ops);				\
} while (0)

static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
					   struct fwnode_handle *fwnode)
{
	iommu->fwnode = fwnode;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
	};
}

#define IOMMU_GROUP_NOTIFY_ADD_DEVICE		1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE		2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER		3 /* Pre Driver bind */
#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER		4 /* Post Driver bind */
#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER	5 /* Pre Driver unbind */
#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER	6 /* Post Driver unbind */

extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
extern bool iommu_present(struct bus_type *bus);
extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern int iommu_cache_invalidate(struct iommu_domain *domain,
				  struct device *dev,
				  struct iommu_cache_invalidate_info *inv_info);
extern int iommu_sva_bind_gpasid(struct iommu_domain *domain,
		struct device *dev, struct iommu_gpasid_bind_data *data);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot);
extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			   struct scatterlist *sg, unsigned int nents, int prot);
extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
				  unsigned long iova, struct scatterlist *sg,
				  unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
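
/*
 * Illustrative sketch (not part of this header): the basic flow an IOMMU-API
 * user might follow with the interfaces above on an unmanaged domain.
 * "dev", "iova", "paddr" and the error handling are placeholders supplied by
 * the caller; SZ_2M comes from <linux/sizes.h>.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	if (!domain)
 *		return -ENODEV;
 *	if (iommu_attach_device(domain, dev))
 *		goto out_free;
 *
 *	ret = iommu_map(domain, iova, paddr, SZ_2M, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(domain, iova, SZ_2M);
 *	iommu_detach_device(domain, dev);
 * out_free:
 *	iommu_domain_free(domain);
 */
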
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void generic_iommu_put_resv_regions(struct device *dev,
					   struct list_head *list);
extern int iommu_request_dm_for_dev(struct device *dev);
extern int iommu_request_dma_domain_for_dev(struct device *dev);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_register_notifier(struct iommu_group *group,
					 struct notifier_block *nb);
extern int iommu_group_unregister_notifier(struct iommu_group *group,
					   struct notifier_block *nb);
extern int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data);

extern int iommu_unregister_device_fault_handler(struct device *dev);

extern int iommu_report_device_fault(struct device *dev,
				     struct iommu_fault_event *evt);
extern int iommu_page_response(struct device *dev,
			       struct iommu_page_response *msg);

extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);
extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);

/* Window handling function prototypes */
extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
				      phys_addr_t offset, u64 size,
				      int prot);
extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}
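
/*
 * Illustrative sketch (not part of this header): how a caller might batch
 * TLB invalidations with iommu_unmap_fast() and the gather helpers, paying
 * for a single flush at the end instead of one per iommu_unmap() call.
 * "domain", "iova", "unmap_size" and the loop condition are placeholders.
 *
 *	struct iommu_iotlb_gather gather;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	while (more_to_unmap) {
 *		iommu_unmap_fast(domain, iova, unmap_size, &gather);
 *		iova += unmap_size;
 *	}
 *	iommu_tlb_sync(domain, &gather);
 */
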
static inline void iommu_tlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size;

	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if (gather->pgsize != size ||
	    end < gather->start || start > gather->end) {
		if (gather->pgsize)
			iommu_tlb_sync(domain, gather);
		gather->pgsize = size;
	}

	if (gather->end < end)
		gather->end = end;

	if (gather->start > start)
		gather->start = start;
}

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @iommu_priv: IOMMU driver private data for this device
 * @flags: IOMMU_FWSPEC_* flags
 * @num_pasid_bits: number of PASID bits supported by this device
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 */
struct iommu_fwspec {
	const struct iommu_ops	*ops;
	struct fwnode_handle	*iommu_fwnode;
	void			*iommu_priv;
	u32			flags;
	u32			num_pasid_bits;
	unsigned int		num_ids;
	u32			ids[1];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)

/**
 * struct iommu_sva - handle to a device-mm bond
 */
struct iommu_sva {
	struct device			*dev;
	const struct iommu_sva_ops	*ops;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return dev->iommu_fwspec;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu_fwspec = fwspec;
}

int iommu_probe_device(struct device *dev);
void iommu_release_device(struct device *dev);

bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev);
void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev);
int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev);

struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm,
					void *drvdata);
void iommu_sva_unbind_device(struct iommu_sva *handle);
int iommu_sva_set_ops(struct iommu_sva *handle,
		      const struct iommu_sva_ops *ops);
int iommu_sva_get_pasid(struct iommu_sva *handle);
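
/*
 * Illustrative sketch (not part of this header): how a device driver might
 * bind a process address space to its device and retrieve the PASID to
 * program into the hardware. "dev", my_mm_exit() and the use of current->mm
 * are placeholders; per the iommu_sva_ops description above, the driver must
 * stop issuing transactions with the PASID once its mm_exit callback runs.
 *
 *	static const struct iommu_sva_ops my_sva_ops = {
 *		.mm_exit = my_mm_exit,
 *	};
 *
 *	struct iommu_sva *handle;
 *	int pasid;
 *
 *	handle = iommu_sva_bind_device(dev, current->mm, NULL);
 *	if (IS_ERR_OR_NULL(handle))
 *		return -ENODEV;
 *	if (iommu_sva_set_ops(handle, &my_sva_ops))
 *		goto out_unbind;
 *	pasid = iommu_sva_get_pasid(handle);
 *	...
 * out_unbind:
 *	iommu_sva_unbind_device(handle);
 */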

#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};

static inline bool iommu_present(struct bus_type *bus)
{
	return false;
}

static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return NULL;
}

static inline struct iommu_group *iommu_group_get_by_id(int id)
{
	return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot)
{
	return -ENODEV;
}

static inline int iommu_map_atomic(struct iommu_domain *domain,
				   unsigned long iova, phys_addr_t paddr,
				   size_t size, int prot)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return 0;
}

static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
				      unsigned long iova, size_t size,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	return 0;
}

static inline size_t iommu_map_sg(struct iommu_domain *domain,
				  unsigned long iova, struct scatterlist *sg,
				  unsigned int nents, int prot)
{
	return 0;
}

static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
					 unsigned long iova, struct scatterlist *sg,
					 unsigned int nents, int prot)
{
	return 0;
}

static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
{
}

static inline void iommu_tlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *iotlb_gather)
{
}

static inline int iommu_domain_window_enable(struct iommu_domain *domain,
					     u32 wnd_nr, phys_addr_t paddr,
					     u64 size, int prot)
{
	return -ENODEV;
}

static inline void iommu_domain_window_disable(struct iommu_domain *domain,
					       u32 wnd_nr)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
				iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline void iommu_put_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline int iommu_get_group_resv_regions(struct iommu_group *group,
					       struct list_head *head)
{
	return -ENODEV;
}

static inline int iommu_request_dm_for_dev(struct device *dev)
{
	return -ENODEV;
}

static inline int iommu_request_dma_domain_for_dev(struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_set_default_passthrough(bool cmd_line)
{
}

static inline void iommu_set_default_translated(bool cmd_line)
{
}

static inline bool iommu_default_passthrough(void)
{
	return true;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline int iommu_group_register_notifier(struct iommu_group *group,
						struct notifier_block *nb)
{
	return -ENODEV;
}

static inline int iommu_group_unregister_notifier(struct iommu_group *group,
						  struct notifier_block *nb)
{
	return 0;
}

static inline
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	return -ENODEV;
}

static inline int iommu_unregister_device_fault_handler(struct device *dev)
{
	return 0;
}

static inline
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	return -ENODEV;
}

static inline int iommu_page_response(struct device *dev,
				      struct iommu_page_response *msg)
{
	return -ENODEV;
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_domain_get_attr(struct iommu_domain *domain,
					enum iommu_attr attr, void *data)
{
	return -EINVAL;
}

static inline int iommu_domain_set_attr(struct iommu_domain *domain,
					enum iommu_attr attr, void *data)
{
	return -EINVAL;
}

static inline int iommu_device_register(struct iommu_device *iommu)
{
	return -ENODEV;
}

static inline void iommu_device_set_ops(struct iommu_device *iommu,
					const struct iommu_ops *ops)
{
}

static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
					   struct fwnode_handle *fwnode)
{
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return NULL;
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
					 struct device *parent,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct iommu_device *iommu,
				    struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct iommu_device *iommu,
				       struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode,
				    const struct iommu_ops *ops)
{
	return -ENODEV;
}

static inline void iommu_fwspec_free(struct device *dev)
{
}

static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
				       int num_ids)
{
	return -ENODEV;
}

static inline
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	return NULL;
}

static inline bool
iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
{
	return false;
}

static inline bool
iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
	return false;
}

static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
{
	return -ENODEV;
}

static inline void
iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
{
}

static inline int
iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
	return -ENODEV;
}

static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	return NULL;
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline int iommu_sva_set_ops(struct iommu_sva *handle,
				    const struct iommu_sva_ops *ops)
{
	return -EINVAL;
}

static inline int iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}

static inline int
iommu_cache_invalidate(struct iommu_domain *domain,
		       struct device *dev,
		       struct iommu_cache_invalidate_info *inv_info)
{
	return -ENODEV;
}

static inline int iommu_sva_bind_gpasid(struct iommu_domain *domain,
					struct device *dev,
					struct iommu_gpasid_bind_data *data)
{
	return -ENODEV;
}

static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
					  struct device *dev, int pasid)
{
	return -ENODEV;
}

#endif /* CONFIG_IOMMU_API */

#ifdef CONFIG_IOMMU_DEBUGFS
extern struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#endif /* __LINUX_IOMMU_H */