xref: /linux-6.15/include/linux/iommu.h (revision bfca85fa)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <[email protected]>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/iova_bitmap.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct iommu_domain_ops;
struct iommu_dirty_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_dma_cookie;
struct iommu_fault_param;
struct iommufd_ctx;
struct iommufd_viommu;

#define IOMMU_FAULT_PERM_READ	(1 << 0) /* read */
#define IOMMU_FAULT_PERM_WRITE	(1 << 1) /* write */
#define IOMMU_FAULT_PERM_EXEC	(1 << 2) /* exec */
#define IOMMU_FAULT_PERM_PRIV	(1 << 3) /* privileged */

/* Generic fault types; can be expanded for IRQ remapping faults */
enum iommu_fault_type {
	IOMMU_FAULT_PAGE_REQ = 1,	/* page request fault */
};

/**
 * struct iommu_fault_page_request - Page Request data
 * @flags: encodes whether the corresponding fields are valid and whether this
 *         is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values).
 *         When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response
 *         must have the same PASID value as the page request. When it is clear,
 *         the page response should not have a PASID.
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
 * @addr: page address
 * @private_data: device-specific private information
 */
struct iommu_fault_page_request {
#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID	(1 << 0)
#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE	(1 << 1)
#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID	(1 << 2)
	u32	flags;
	u32	pasid;
	u32	grpid;
	u32	perm;
	u64	addr;
	u64	private_data[2];
};

/**
 * struct iommu_fault - Generic fault data
 * @type: fault type from &enum iommu_fault_type
 * @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ
 */
struct iommu_fault {
	u32 type;
	struct iommu_fault_page_request prm;
};

/**
 * enum iommu_page_response_code - Return status of fault handlers
 * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
 *	populated, retry the access. This is "Success" in PCI PRI.
 * @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from
 *	this device if possible. This is "Response Failure" in PCI PRI.
 * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
 *	access. This is "Invalid Request" in PCI PRI.
 */
enum iommu_page_response_code {
	IOMMU_PAGE_RESP_SUCCESS = 0,
	IOMMU_PAGE_RESP_INVALID,
	IOMMU_PAGE_RESP_FAILURE,
};

/**
 * struct iommu_page_response - Generic page response information
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @code: response code from &enum iommu_page_response_code
 */
struct iommu_page_response {
	u32	pasid;
	u32	grpid;
	u32	code;
};

struct iopf_fault {
	struct iommu_fault fault;
	/* node for pending lists */
	struct list_head list;
};

struct iopf_group {
	struct iopf_fault last_fault;
	struct list_head faults;
	size_t fault_count;
	/* list node for iommu_fault_param::faults */
	struct list_head pending_node;
	struct work_struct work;
	struct iommu_attach_handle *attach_handle;
	/* The device's fault data parameter. */
	struct iommu_fault_param *fault_param;
	/* Used by handler provider to hook the group on its own lists. */
	struct list_head node;
	u32 cookie;
};

/**
 * struct iopf_queue - IO Page Fault queue
 * @wq: the fault workqueue
 * @devices: devices attached to this queue
 * @lock: protects the device list
 */
struct iopf_queue {
	struct workqueue_struct *wq;
	struct list_head devices;
	struct mutex lock;
};

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */
#define __IOMMU_DOMAIN_DMA_FQ	(1U << 3)  /* DMA-API uses flush queue    */

#define __IOMMU_DOMAIN_SVA	(1U << 4)  /* Shared process address space */
#define __IOMMU_DOMAIN_PLATFORM	(1U << 5)

#define __IOMMU_DOMAIN_NESTED	(1U << 6)  /* User-managed address space nested
					      on a stage-2 translation        */

#define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ
/*
 * These are the possible domain types:
 *
 *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *				  devices
 *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 *	IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
 *				  This flag allows IOMMU drivers to implement
 *				  certain optimizations for these domains
 *	IOMMU_DOMAIN_DMA_FQ	- As above, but definitely using batched TLB
 *				  invalidation.
 *	IOMMU_DOMAIN_SVA	- DMA addresses are shared process addresses
 *				  represented by mm_struct's.
 *	IOMMU_DOMAIN_PLATFORM	- Legacy domain for drivers that do their own
 *				  dma_api stuff. Do not use in new drivers.
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)
#define IOMMU_DOMAIN_DMA_FQ	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API |	\
				 __IOMMU_DOMAIN_DMA_FQ)
#define IOMMU_DOMAIN_SVA	(__IOMMU_DOMAIN_SVA)
#define IOMMU_DOMAIN_PLATFORM	(__IOMMU_DOMAIN_PLATFORM)
#define IOMMU_DOMAIN_NESTED	(__IOMMU_DOMAIN_NESTED)

struct iommu_domain {
	unsigned type;
	const struct iommu_domain_ops *ops;
	const struct iommu_dirty_ops *dirty_ops;
	const struct iommu_ops *owner; /* Whose domain_alloc we came from */
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	struct iommu_domain_geometry geometry;
	struct iommu_dma_cookie *iova_cookie;
	int (*iopf_handler)(struct iopf_group *group);
	void *fault_data;
	union {
		struct {
			iommu_fault_handler_t handler;
			void *handler_token;
		};
		struct {	/* IOMMU_DOMAIN_SVA */
			struct mm_struct *mm;
			int users;
			/*
			 * Next iommu_domain in mm->iommu_mm->sva-domains list
			 * protected by iommu_sva_lock.
			 */
			struct list_head next;
		};
	};
};

static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
{
	return domain->type & __IOMMU_DOMAIN_DMA_API;
}

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU_CACHE is supported */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
	IOMMU_CAP_PRE_BOOT_PROTECTION,	/* Firmware says it used the IOMMU for
					   DMA protection and we should too */
	/*
	 * Per-device flag indicating if enforce_cache_coherency() will work on
	 * this device.
	 */
	IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
	/*
	 * IOMMU driver does not issue TLB maintenance during .unmap, so can
	 * usefully support the non-strict DMA flush queue.
	 */
	IOMMU_CAP_DEFERRED_FLUSH,
	IOMMU_CAP_DIRTY_TRACKING,	/* IOMMU supports dirty tracking */
};

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in device assignment use case (USB, Graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 * @free: Callback to free associated memory allocations
 */
struct iommu_resv_region {
	struct list_head	list;
	phys_addr_t		start;
	size_t			length;
	int			prot;
	enum iommu_resv_type	type;
	void (*free)(struct device *dev, struct iommu_resv_region *region);
};

struct iommu_iort_rmr_data {
	struct iommu_resv_region rr;

	/* Stream IDs associated with IORT RMR entry */
	const u32 *sids;
	u32 num_sids;
};

/**
 * enum iommu_dev_features - Per device IOMMU features
 * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
 * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
 *			 enabling %IOMMU_DEV_FEAT_SVA requires
 *			 %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page
 *			 Faults themselves instead of relying on the IOMMU. When
 *			 supported, this feature must be enabled before and
 *			 disabled after %IOMMU_DEV_FEAT_SVA.
 *
 * Device drivers enable a feature using iommu_dev_enable_feature().
 */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_SVA,
	IOMMU_DEV_FEAT_IOPF,
};

#define IOMMU_NO_PASID	(0U) /* Reserved for DMA w/o PASID */
#define IOMMU_FIRST_GLOBAL_PASID	(1U) /* starting range for allocation */
#define IOMMU_PASID_INVALID	(-1U)
typedef unsigned int ioasid_t;

/* Read but do not clear any dirty bits */
#define IOMMU_DIRTY_NO_CLEAR (1 << 0)

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (inclusive)
 * @pgsize: The interval at which to perform the flush
 * @freelist: Removed pages to free after sync
 * @queued: Indicates that the flush will be queued
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_ops before eventually being passed
 * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
 * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
 * them. @queued is set to indicate when ->iotlb_flush_all() will be called
 * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
	struct list_head	freelist;
	bool			queued;
};

/**
 * struct iommu_dirty_bitmap - Dirty IOVA bitmap state
 * @bitmap: IOVA bitmap
 * @gather: Range information for a pending IOTLB flush
 */
struct iommu_dirty_bitmap {
	struct iova_bitmap *bitmap;
	struct iommu_iotlb_gather *gather;
};

/**
 * struct iommu_dirty_ops - domain specific dirty tracking operations
 * @set_dirty_tracking: Enable or Disable dirty tracking on the iommu domain
 * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled
 *                        into a bitmap, with a bit represented as a page.
 *                        Reads the dirty PTE bits and clears them from the IO
 *                        page tables.
 */
struct iommu_dirty_ops {
	int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled);
	int (*read_and_clear_dirty)(struct iommu_domain *domain,
				    unsigned long iova, size_t size,
				    unsigned long flags,
				    struct iommu_dirty_bitmap *dirty);
};

/**
 * struct iommu_user_data - iommu driver specific user space data info
 * @type: The data type of the user buffer
 * @uptr: Pointer to the user buffer for copy_from_user()
 * @len: The length of the user buffer in bytes
 *
 * User space data is passed through a uAPI defined in
 * include/uapi/linux/iommufd.h; @type, @uptr and @len should be copied
 * directly from an iommufd core uAPI structure.
 */
struct iommu_user_data {
	unsigned int type;
	void __user *uptr;
	size_t len;
};

/**
 * struct iommu_user_data_array - iommu driver specific user space data array
 * @type: The data type of all the entries in the user buffer array
 * @uptr: Pointer to the user buffer array
 * @entry_len: The fixed-width length of an entry in the array, in bytes
 * @entry_num: The number of total entries in the array
 *
 * The user buffer includes an array of requests with format defined in
 * include/uapi/linux/iommufd.h
 */
struct iommu_user_data_array {
	unsigned int type;
	void __user *uptr;
	size_t entry_len;
	u32 entry_num;
};

/**
 * __iommu_copy_struct_from_user - Copy iommu driver specific user space data
 * @dst_data: Pointer to an iommu driver specific user data that is defined in
 *            include/uapi/linux/iommufd.h
 * @src_data: Pointer to a struct iommu_user_data for user space data info
 * @data_type: The data type of the @dst_data. Must match with @src_data.type
 * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
 * @min_len: Initial length of user data structure for backward compatibility.
 *           This should be offsetofend using the last member in the user data
 *           struct that was initially added to include/uapi/linux/iommufd.h
 */
static inline int __iommu_copy_struct_from_user(
	void *dst_data, const struct iommu_user_data *src_data,
	unsigned int data_type, size_t data_len, size_t min_len)
{
	if (WARN_ON(!dst_data || !src_data))
		return -EINVAL;
	if (src_data->type != data_type)
		return -EINVAL;
	if (src_data->len < min_len || data_len < src_data->len)
		return -EINVAL;
	return copy_struct_from_user(dst_data, data_len, src_data->uptr,
				     src_data->len);
}

/**
 * iommu_copy_struct_from_user - Copy iommu driver specific user space data
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @user_data: Pointer to a struct iommu_user_data for user space data info
 * @data_type: The data type of the @kdst. Must match with @user_data->type
 * @min_last: The last member of the data structure @kdst points to in the
 *            initial version.
 *
 * Return: 0 for success, otherwise -error.
 */
#define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
	__iommu_copy_struct_from_user(kdst, user_data, data_type,         \
				      sizeof(*kdst),                      \
				      offsetofend(typeof(*kdst), min_last))
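
/*
 * Editor's example (hedged sketch, not part of the kernel API): how a driver
 * might use iommu_copy_struct_from_user(). All "mydrv" names and the data
 * type value below are hypothetical stand-ins for a driver's real uAPI
 * definitions in include/uapi/linux/iommufd.h.
 */
struct mydrv_alloc_data {
	u32 flags;	/* last member of the hypothetical initial version */
	u32 s1_cfg;	/* member added by a later uAPI revision */
};
#define MYDRV_DATA_TYPE 1	/* hypothetical data type tag */

static inline int mydrv_copy_alloc_data(struct mydrv_alloc_data *data,
					const struct iommu_user_data *user_data)
{
	/*
	 * Fails with -EINVAL on a type mismatch, or if the user buffer is
	 * shorter than the initial version (offsetofend(..., flags)); a
	 * newer kernel still accepts an older, shorter user struct.
	 */
	return iommu_copy_struct_from_user(data, user_data, MYDRV_DATA_TYPE,
					   flags);
}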

/**
 * __iommu_copy_struct_from_user_array - Copy iommu driver specific user space
 *                                       data from an iommu_user_data_array
 * @dst_data: Pointer to an iommu driver specific user data that is defined in
 *            include/uapi/linux/iommufd.h
 * @src_array: Pointer to a struct iommu_user_data_array for a user space array
 * @data_type: The data type of the @dst_data. Must match with @src_array.type
 * @index: Index to the location in the array to copy user data from
 * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
 * @min_len: Initial length of user data structure for backward compatibility.
 *           This should be offsetofend using the last member in the user data
 *           struct that was initially added to include/uapi/linux/iommufd.h
 */
static inline int __iommu_copy_struct_from_user_array(
	void *dst_data, const struct iommu_user_data_array *src_array,
	unsigned int data_type, unsigned int index, size_t data_len,
	size_t min_len)
{
	struct iommu_user_data src_data;

	if (WARN_ON(!src_array || index >= src_array->entry_num))
		return -EINVAL;
	if (!src_array->entry_num)
		return -EINVAL;
	src_data.uptr = src_array->uptr + src_array->entry_len * index;
	src_data.len = src_array->entry_len;
	src_data.type = src_array->type;

	return __iommu_copy_struct_from_user(dst_data, &src_data, data_type,
					     data_len, min_len);
}

/**
 * iommu_copy_struct_from_user_array - Copy iommu driver specific user space
 *                                     data from an iommu_user_data_array
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @user_array: Pointer to a struct iommu_user_data_array for a user space
 *              array
 * @data_type: The data type of the @kdst. Must match with @user_array->type
 * @index: Index to the location in the array to copy user data from
 * @min_last: The last member of the data structure @kdst points to in the
 *            initial version.
 *
 * Copy a single entry from a user array. Return 0 for success, otherwise
 * -error.
 */
#define iommu_copy_struct_from_user_array(kdst, user_array, data_type, index, \
					  min_last)                           \
	__iommu_copy_struct_from_user_array(                                  \
		kdst, user_array, data_type, index, sizeof(*(kdst)),          \
		offsetofend(typeof(*(kdst)), min_last))

/**
 * iommu_copy_struct_from_full_user_array - Copy iommu driver specific user
 *         space data from an iommu_user_data_array
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @kdst_entry_size: sizeof(*kdst)
 * @user_array: Pointer to a struct iommu_user_data_array for a user space
 *              array
 * @data_type: The data type of the @kdst. Must match with @user_array->type
 *
 * Copy the entire user array. kdst must have room for kdst_entry_size *
 * user_array->entry_num bytes. Return 0 for success, otherwise -error.
 */
static inline int
iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size,
				       struct iommu_user_data_array *user_array,
				       unsigned int data_type)
{
	unsigned int i;
	int ret;

	if (user_array->type != data_type)
		return -EINVAL;
	if (!user_array->entry_num)
		return -EINVAL;
	if (likely(user_array->entry_len == kdst_entry_size)) {
		if (copy_from_user(kdst, user_array->uptr,
				   user_array->entry_num *
					   user_array->entry_len))
			return -EFAULT;
		return 0;
	}

	/* Copy item by item */
	for (i = 0; i != user_array->entry_num; i++) {
		ret = copy_struct_from_user(
			kdst + kdst_entry_size * i, kdst_entry_size,
			user_array->uptr + user_array->entry_len * i,
			user_array->entry_len);
		if (ret)
			return ret;
	}
	return 0;
}
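
/*
 * Editor's example (hedged sketch): copying a whole user-provided array in
 * one call, e.g. for a batched invalidation request. "mydrv_cmd" and the
 * type value are hypothetical; the caller must have sized @cmds for
 * array->entry_num entries.
 */
struct mydrv_cmd {
	u64 addr;
	u32 flags;
	u32 pasid;
};

static inline int mydrv_copy_cmds(struct mydrv_cmd *cmds,
				  struct iommu_user_data_array *array,
				  unsigned int type)
{
	/* One bulk copy_from_user() when entry sizes match, else per entry. */
	return iommu_copy_struct_from_full_user_array(cmds, sizeof(*cmds),
						      array, type);
}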

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @hw_info: report iommu hardware information. The data buffer returned by this
 *           op is allocated in the iommu driver and freed by the caller after
 *           use. The information type is one of enum iommu_hw_info_type defined
 *           in include/uapi/linux/iommufd.h.
 * @domain_alloc: allocate and return an iommu domain on success, otherwise
 *                return NULL. The domain is not fully initialized until the
 *                call to iommu_domain_alloc() returns.
 * @domain_alloc_paging_flags: Allocate an iommu domain corresponding to the
 *                     input parameters as defined in
 *                     include/uapi/linux/iommufd.h. The @user_data can be
 *                     optionally provided, the new domain must support
 *                     __IOMMU_DOMAIN_PAGING. Upon failure, ERR_PTR must be
 *                     returned.
 * @domain_alloc_paging: Allocate an iommu_domain that can be used for
 *                       UNMANAGED, DMA, and DMA_FQ domain types. This is the
 *                       same as invoking domain_alloc_paging_flags() with
 *                       @flags=0, @user_data=NULL. A driver should implement
 *                       only one of the two ops.
 * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing.
 * @domain_alloc_nested: Allocate an iommu_domain for nested translation.
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the group's domain
 * @device_group: find iommu group for a particular device
 * @get_resv_regions: Request list of reserved regions for a device
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_enable/disable_feat: per device entries to enable/disable
 *                               iommu specific features.
 * @page_response: handle page request response
 * @def_domain_type: device default domain type, return value:
 *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *		- IOMMU_DOMAIN_DMA: must use a dma domain
 *		- 0: use the default setting
 * @default_domain_ops: the default ops for domains
 * @remove_dev_pasid: Remove any translation configurations of a specific
 *                    pasid, so that any DMA transactions with this pasid
 *                    will be blocked by the hardware.
 * @viommu_alloc: Allocate an iommufd_viommu on a physical IOMMU instance behind
 *                @dev, as the set of virtualization resources shared/passed to
 *                a user space IOMMU instance, and associate it with a nesting
 *                @parent_domain. The @viommu_type must be defined in
 *                include/uapi/linux/iommufd.h. The iommufd_viommu_alloc()
 *                helper must be called for a bundled allocation of the core
 *                and the driver structures, using the given @ictx pointer.
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 * @identity_domain: An always available, always attachable identity
 *                   translation.
 * @blocked_domain: An always available, always attachable blocking
 *                  translation.
 * @default_domain: If not NULL this will always be set as the default domain.
 *                  This should be an IDENTITY/BLOCKED/PLATFORM domain.
 *                  Do not use in new drivers.
 * @user_pasid_table: IOMMU driver supports user-managed PASID table. There is
 *                    no user domain for each PASID and the I/O page faults are
 *                    forwarded through the user domain attached to the device
 *                    RID.
 */
struct iommu_ops {
	bool (*capable)(struct device *dev, enum iommu_cap);
	void *(*hw_info)(struct device *dev, u32 *length, u32 *type);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
	struct iommu_domain *(*domain_alloc_paging_flags)(
		struct device *dev, u32 flags,
		const struct iommu_user_data *user_data);
	struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
	struct iommu_domain *(*domain_alloc_sva)(struct device *dev,
						 struct mm_struct *mm);
	struct iommu_domain *(*domain_alloc_nested)(
		struct device *dev, struct iommu_domain *parent, u32 flags,
		const struct iommu_user_data *user_data);

	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
	void (*probe_finalize)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);

	int (*of_xlate)(struct device *dev, const struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct device *dev);

	/* Per device IOMMU features */
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	void (*page_response)(struct device *dev, struct iopf_fault *evt,
			      struct iommu_page_response *msg);

	int (*def_domain_type)(struct device *dev);
	void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid,
				 struct iommu_domain *domain);

	struct iommufd_viommu *(*viommu_alloc)(
		struct device *dev, struct iommu_domain *parent_domain,
		struct iommufd_ctx *ictx, unsigned int viommu_type);

	const struct iommu_domain_ops *default_domain_ops;
	unsigned long pgsize_bitmap;
	struct module *owner;
	struct iommu_domain *identity_domain;
	struct iommu_domain *blocked_domain;
	struct iommu_domain *release_domain;
	struct iommu_domain *default_domain;
	u8 user_pasid_table:1;
};
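
/*
 * Editor's example (hedged sketch of the shape only, not a working driver):
 * a minimal iommu_ops wiring for a driver that supports paging domains. All
 * mydrv_* callbacks are hypothetical stubs standing in for real hardware
 * programming.
 */
static inline struct iommu_device *mydrv_probe_device(struct device *dev)
{
	/* Stub: look up the IOMMU instance translating @dev. */
	return ERR_PTR(-ENODEV);
}

static inline void mydrv_release_device(struct device *dev)
{
	/* Stub: undo per-device state set up in ->probe_device(). */
}

static inline struct iommu_domain *mydrv_domain_alloc_paging(struct device *dev)
{
	/* Stub: allocate page tables and a wrapping iommu_domain. */
	return NULL;
}

/*
 * A real driver would also set .device_group (e.g. pci_device_group),
 * .default_domain_ops, .owner = THIS_MODULE, and usually an
 * .identity_domain and/or .blocked_domain.
 */
static const struct iommu_ops mydrv_iommu_ops = {
	.domain_alloc_paging	= mydrv_domain_alloc_paging,
	.probe_device		= mydrv_probe_device,
	.release_device		= mydrv_release_device,
	.pgsize_bitmap		= (1UL << 12),	/* 4KiB pages only */
};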

/**
 * struct iommu_domain_ops - domain specific operations
 * @attach_dev: attach an iommu domain to a device
 *  Return:
 * * 0		- success
 * * EINVAL	- can indicate that device and domain are incompatible due to
 *		  some previous configuration of the domain, in which case the
 *		  driver shouldn't log an error, since it is legitimate for a
 *		  caller to test reuse of existing domains. Otherwise, it may
 *		  still represent some other fundamental problem
 * * ENOMEM	- out of memory
 * * ENOSPC	- non-ENOMEM type of resource allocation failures
 * * EBUSY	- device is attached to a domain and cannot be changed
 * * ENODEV	- device specific errors, not able to be attached
 * * <others>	- treated as ENODEV by the caller. Use is discouraged
 * @set_dev_pasid: set or replace an iommu domain for a pasid of a device. On
 *                 error, the pasid of the device must be left in the old config.
 * @map_pages: map a physically contiguous set of pages of the same size to
 *             an iommu domain.
 * @unmap_pages: unmap a number of pages of the same size from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *            queue
 * @cache_invalidate_user: Flush hardware cache for user space IO page table.
 *                         The @domain must be IOMMU_DOMAIN_NESTED. The @array
 *                         passes in the cache invalidation requests, in form
 *                         of a driver data structure. The driver must update
 *                         array->entry_num to report the number of handled
 *                         invalidation requests. The driver data structure
 *                         must be defined in include/uapi/linux/iommufd.h
 * @iova_to_phys: translate iova to physical address
 * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
 *                           including no-snoop TLPs on PCIe or other platform
 *                           specific mechanisms.
 * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
 * @free: Release the domain after use.
 */
struct iommu_domain_ops {
	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
			     ioasid_t pasid, struct iommu_domain *old);

	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);

	void (*flush_iotlb_all)(struct iommu_domain *domain);
	int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
			      size_t size);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);
	int (*cache_invalidate_user)(struct iommu_domain *domain,
				     struct iommu_user_data_array *array);

	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
				    dma_addr_t iova);

	bool (*enforce_cache_coherency)(struct iommu_domain *domain);
	int (*set_pgtable_quirks)(struct iommu_domain *domain,
				  unsigned long quirks);

	void (*free)(struct iommu_domain *domain);
};

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware handle for this IOMMU instance
 * @dev: struct device for sysfs handling
 * @singleton_group: Used internally for drivers that have only one group
 * @max_pasids: number of supported PASIDs
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
	struct iommu_group *singleton_group;
	u32 max_pasids;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @lock: protect pending faults list
 * @users: user counter to manage the lifetime of the data
 * @rcu: rcu head for kfree_rcu()
 * @dev: the device that owns this param
 * @queue: IOPF queue
 * @queue_list: index into queue->devices
 * @partial: faults that are part of a Page Request Group for which the last
 *           request hasn't been submitted yet.
 * @faults: holds the pending faults which need response
 */
struct iommu_fault_param {
	struct mutex lock;
	refcount_t users;
	struct rcu_head rcu;

	struct device *dev;
	struct iopf_queue *queue;
	struct list_head queue_list;

	struct list_head partial;
	struct list_head faults;
};

/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @fault_param: IOMMU detected device fault reporting data
 * @fwspec:	 IOMMU fwspec data
 * @iommu_dev:	 IOMMU device this device is linked to
 * @priv:	 IOMMU Driver private data
 * @max_pasids:  number of PASIDs this device can consume
 * @attach_deferred: the dma domain attachment is deferred
 * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
 * @require_direct: device requires IOMMU_RESV_DIRECT regions
 * @shadow_on_flush: IOTLB flushes are used to sync shadow tables
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 */
struct dev_iommu {
	struct mutex lock;
	struct iommu_fault_param __rcu	*fault_param;
	struct iommu_fwspec		*fwspec;
	struct iommu_device		*iommu_dev;
	void				*priv;
	u32				max_pasids;
	u32				attach_deferred:1;
	u32				pci_32bit_workaround:1;
	u32				require_direct:1;
	u32				shadow_on_flush:1;
};

int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops,
			  struct device *hwdev);
void iommu_device_unregister(struct iommu_device *iommu);
int  iommu_device_sysfs_add(struct iommu_device *iommu,
			    struct device *parent,
			    const struct attribute_group **groups,
			    const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int  iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);
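
/*
 * Editor's example (hedged sketch): the usual registration flow in an IOMMU
 * driver's probe path. "struct mydrv_iommu" is hypothetical and reuses the
 * mydrv_iommu_ops sketch above; real drivers unwind more state on error.
 */
struct mydrv_iommu {
	struct iommu_device iommu;	/* embedded core representation */
};

static inline int mydrv_register(struct mydrv_iommu *m, struct device *hwdev)
{
	int ret;

	/* Expose the instance under /sys/class/iommu/. */
	ret = iommu_device_sysfs_add(&m->iommu, hwdev, NULL, "mydrv.%s",
				     dev_name(hwdev));
	if (ret)
		return ret;

	/* Hand the instance to the IOMMU core; device probing follows. */
	ret = iommu_device_register(&m->iommu, &mydrv_iommu_ops, hwdev);
	if (ret)
		iommu_device_sysfs_remove(&m->iommu);
	return ret;
}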

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

/**
 * iommu_get_iommu_dev - Get iommu_device for a device
 * @dev: an end-point device
 *
 * Note that this function must be called from within an iommu_ops callback
 * to retrieve the iommu_device for a device; the core code guarantees that
 * it will not invoke an op on a device without an attached iommu.
 */
static inline struct iommu_device *__iommu_get_iommu_dev(struct device *dev)
{
	return dev->iommu->iommu_dev;
}

#define iommu_get_iommu_dev(dev, type, member) \
	container_of(__iommu_get_iommu_dev(dev), type, member)
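
/*
 * Editor's example (hedged sketch): recovering the driver instance from a
 * device inside an iommu_ops callback, using the container_of() helper
 * above with the hypothetical struct mydrv_iommu from the registration
 * example.
 */
static inline struct mydrv_iommu *mydrv_from_dev(struct device *dev)
{
	/* Only valid from iommu_ops callbacks, per the note above. */
	return iommu_get_iommu_dev(dev, struct mydrv_iommu, iommu);
}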

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
		.freelist = LIST_HEAD_INIT(gather->freelist),
	};
}

extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, unsigned int flags);
static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
{
	return iommu_paging_domain_alloc_flags(dev, 0);
}
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents,
			    int prot, gfp_t gfp);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);
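
/*
 * Editor's example (hedged sketch): the basic IOMMU-API consumer flow for
 * kernel code that manages its own I/O address space. The IOVA and size are
 * illustrative values.
 */
static inline int mydrv_map_one_page(struct device *dev, phys_addr_t paddr)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_paging_domain_alloc(dev);
	if (IS_ERR(domain))
		return PTR_ERR(domain);

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	/* Map one 4KiB page at IOVA 0x10000, read/write, cache-coherent. */
	ret = iommu_map(domain, 0x10000, paddr, 0x1000,
			IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL);
	if (ret)
		goto out_detach;
	return 0;

out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}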

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type, gfp_t gfp);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);

extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

int iommu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirks);

void iommu_set_dma_strict(void);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

/**
 * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
 *
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to check whether a new range and the gathered range
 * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
 * than merging the two, which might lead to unnecessary invalidations.
 */
static inline
bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size - 1;

	return gather->end != 0 &&
		(end + 1 < gather->start || start > gather->end + 1);
}

/**
 * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
 * where only the address range matters, and simply minimising intermediate
 * syncs is preferred.
 */
static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
						unsigned long iova, size_t size)
{
	unsigned long end = iova + size - 1;

	if (gather->start > iova)
		gather->start = iova;
	if (gather->end < end)
		gather->end = end;
}

/**
 * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
 * @domain: IOMMU domain to be invalidated
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build invalidation commands based on individual
 * pages, or with page size/table level hints which cannot be gathered if they
 * differ.
 */
static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if ((gather->pgsize && gather->pgsize != size) ||
	    iommu_iotlb_gather_is_disjoint(gather, iova, size))
		iommu_iotlb_sync(domain, gather);

	gather->pgsize = size;
	iommu_iotlb_gather_add_range(gather, iova, size);
}
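
/*
 * Editor's example (hedged sketch): how a driver's ->unmap_pages()
 * implementation typically feeds the gather helpers above; the page-table
 * update itself is elided.
 */
static inline size_t mydrv_unmap_pages(struct iommu_domain *domain,
				       unsigned long iova, size_t pgsize,
				       size_t pgcount,
				       struct iommu_iotlb_gather *gather)
{
	size_t unmapped = 0;

	while (pgcount--) {
		/* ... clear the PTE(s) backing [iova, iova + pgsize) ... */

		/*
		 * Accumulate the range; this syncs eagerly if a disjoint
		 * range or a different page size would make a single flush
		 * imprecise.
		 */
		iommu_iotlb_gather_add_page(domain, gather, iova, pgsize);
		iova += pgsize;
		unmapped += pgsize;
	}
	return unmapped;
}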

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return gather && gather->queued;
}

static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
					   struct iova_bitmap *bitmap,
					   struct iommu_iotlb_gather *gather)
{
	if (gather)
		iommu_iotlb_gather_init(gather);

	dirty->bitmap = bitmap;
	dirty->gather = gather;
}

static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
					     unsigned long iova,
					     unsigned long length)
{
	if (dirty->bitmap)
		iova_bitmap_set(dirty->bitmap, iova, length);

	if (dirty->gather)
		iommu_iotlb_gather_add_range(dirty->gather, iova, length);
}
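
/*
 * Editor's example (hedged sketch): the common pattern for a driver's
 * ->read_and_clear_dirty() walker. The per-PTE test-and-clear is elided;
 * IOMMU_DIRTY_NO_CLEAR selects a read-only walk.
 */
static inline int mydrv_read_and_clear_dirty(struct iommu_domain *domain,
					     unsigned long iova, size_t size,
					     unsigned long flags,
					     struct iommu_dirty_bitmap *dirty)
{
	unsigned long end = iova + size;

	for (; iova < end; iova += PAGE_SIZE) {
		bool dirtied = false;

		/*
		 * ... test (and, unless IOMMU_DIRTY_NO_CLEAR is set in
		 * @flags, clear) the dirty bit of the PTE mapping @iova,
		 * setting @dirtied accordingly ...
		 */

		if (dirtied)
			iommu_dirty_bitmap_record(dirty, iova, PAGE_SIZE);
	}
	return 0;
}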

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);
extern struct iommu_group *generic_single_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 *
 * Note that the IDs (and any other information, really) stored in this structure should be
 * considered private to the IOMMU device driver and are not to be used directly by IOMMU
 * consumers.
 */
struct iommu_fwspec {
	struct fwnode_handle	*iommu_fwnode;
	u32			flags;
	unsigned int		num_ids;
	u32			ids[];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)
/* CANWBS is supported */
#define IOMMU_FWSPEC_PCI_RC_CANWBS		(1 << 1)

/*
 * An iommu attach handle represents a relationship between an iommu domain
 * and a PASID or RID of a device. It is allocated and managed by the component
 * that manages the domain and is stored in the iommu group during the time the
 * domain is attached.
 */
struct iommu_attach_handle {
	struct iommu_domain		*domain;
};

/**
 * struct iommu_sva - handle to a device-mm bond
 */
struct iommu_sva {
	struct iommu_attach_handle	handle;
	struct device			*dev;
	refcount_t			users;
};

struct iommu_mm_data {
	u32			pasid;
	struct list_head	sva_domains;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids);
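
/*
 * Editor's example (hedged sketch): a typical ->of_xlate() implementation
 * for a one-cell binding, recording the firmware-provided master ID so the
 * core can match the device to this IOMMU instance.
 */
static inline int mydrv_of_xlate(struct device *dev,
				 const struct of_phandle_args *args)
{
	/* args->args[0] is the master ID in this hypothetical binding. */
	return iommu_fwspec_add_ids(dev, args->args, 1);
}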

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->fwspec;
	else
		return NULL;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu->fwspec = fwspec;
}

static inline void *dev_iommu_priv_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->priv;
	else
		return NULL;
}

void dev_iommu_priv_set(struct device *dev, void *priv);

extern struct mutex iommu_probe_device_lock;
int iommu_probe_device(struct device *dev);

int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);

int iommu_device_use_default_domain(struct device *dev);
void iommu_device_unuse_default_domain(struct device *dev);

int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
void iommu_group_release_dma_owner(struct iommu_group *group);
bool iommu_group_dma_owner_claimed(struct iommu_group *group);

int iommu_device_claim_dma_owner(struct device *dev, void *owner);
void iommu_device_release_dma_owner(struct device *dev);

int iommu_attach_device_pasid(struct iommu_domain *domain,
			      struct device *dev, ioasid_t pasid,
			      struct iommu_attach_handle *handle);
void iommu_detach_device_pasid(struct iommu_domain *domain,
			       struct device *dev, ioasid_t pasid);
ioasid_t iommu_alloc_global_pasid(struct device *dev);
void iommu_free_global_pasid(ioasid_t pasid);
#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};
struct iommu_dirty_bitmap {};
struct iommu_dirty_ops {};

static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev,
						     unsigned int flags)
{
	return ERR_PTR(-ENODEV);
}

static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev)
{
	return ERR_PTR(-ENODEV);
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return 0;
}

static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
				      unsigned long iova, size_t size,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	return 0;
}

static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
				   unsigned long iova, struct scatterlist *sg,
				   unsigned int nents, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *iotlb_gather)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
				iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_resv_regions(struct device *dev,
					struct list_head *list)
{
}

static inline void iommu_put_resv_regions(struct device *dev,
					struct list_head *list)
{
}

static inline int iommu_get_group_resv_regions(struct iommu_group *group,
					       struct list_head *head)
{
	return -ENODEV;
}

static inline void iommu_set_default_passthrough(bool cmd_line)
{
}

static inline void iommu_set_default_translated(bool cmd_line)
{
}

static inline bool iommu_default_passthrough(void)
{
	return true;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirks)
{
	return 0;
}

static inline int iommu_device_register(struct iommu_device *iommu,
					const struct iommu_ops *ops,
					struct device *hwdev)
{
	return -ENODEV;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return NULL;
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return false;
}

static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
					   struct iova_bitmap *bitmap,
					   struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
					     unsigned long iova,
					     unsigned long length)
{
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int  iommu_device_sysfs_add(struct iommu_device *iommu,
					  struct device *parent,
					  const struct attribute_group **groups,
					  const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct iommu_device *iommu,
				    struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct iommu_device *iommu,
				       struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode)
{
	return -ENODEV;
}

static inline void iommu_fwspec_free(struct device *dev)
{
}

static inline int iommu_fwspec_add_ids(struct device *dev, const u32 *ids,
				       int num_ids)
{
	return -ENODEV;
}

static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return NULL;
}

static inline int iommu_device_use_default_domain(struct device *dev)
{
	return 0;
}

static inline void iommu_device_unuse_default_domain(struct device *dev)
{
}

static inline int
iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
{
	return -ENODEV;
}

static inline void iommu_group_release_dma_owner(struct iommu_group *group)
{
}

static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
	return false;
}

static inline void iommu_device_release_dma_owner(struct device *dev)
{
}

static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner)
{
	return -ENODEV;
}

static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
					    struct device *dev, ioasid_t pasid,
					    struct iommu_attach_handle *handle)
{
	return -ENODEV;
}

static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
					     struct device *dev, ioasid_t pasid)
{
}

static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
{
	return IOMMU_PASID_INVALID;
}

static inline void iommu_free_global_pasid(ioasid_t pasid) {}
#endif /* CONFIG_IOMMU_API */

#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API)
void iommu_group_mutex_assert(struct device *dev);
#else
static inline void iommu_group_mutex_assert(struct device *dev)
{
}
#endif

/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain:	The IOMMU domain to perform the mapping
 * @iova:	The start address to map the buffer
 * @sgt:	The sg_table object describing the buffer
 * @prot:	IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 */
static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain,
			unsigned long iova, struct sg_table *sgt, int prot)
{
	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
			    GFP_KERNEL);
}
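
/*
 * Editor's example (hedged sketch): mapping a previously built sg_table into
 * a domain. A negative return is an errno; on success the whole buffer is
 * live at @iova.
 */
static inline int mydrv_map_buffer(struct iommu_domain *domain,
				   unsigned long iova, struct sg_table *sgt)
{
	ssize_t mapped = iommu_map_sgtable(domain, iova, sgt,
					   IOMMU_READ | IOMMU_WRITE);

	return mapped < 0 ? mapped : 0;
}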

#ifdef CONFIG_IOMMU_DEBUGFS
extern	struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#ifdef CONFIG_IOMMU_DMA
#include <linux/msi.h>

int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg);

#else /* CONFIG_IOMMU_DMA */

struct msi_desc;
struct msi_msg;

static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	return -ENODEV;
}

static inline int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	return 0;
}

static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
}

#endif	/* CONFIG_IOMMU_DMA */

/*
 * Newer generations of Tegra SoCs require devices' stream IDs to be directly programmed into
 * some registers. These are always paired with a Tegra SMMU or ARM SMMU, for which the contents
 * of the struct iommu_fwspec are known. Use this helper to formalize access to these internals.
 */
#define TEGRA_STREAM_ID_BYPASS 0x7f

static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream_id)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec && fwspec->num_ids == 1) {
		*stream_id = fwspec->ids[0] & 0xffff;
		return true;
	}
#endif

	return false;
}

#ifdef CONFIG_IOMMU_MM_DATA
static inline void mm_pasid_init(struct mm_struct *mm)
{
	/*
	 * During dup_mm(), a new mm will be memcpy'd from an old one and that makes
	 * the new mm and the old one point to the same iommu_mm instance. When either
	 * one of the two mms gets released, the iommu_mm instance is freed, leaving
	 * the other mm running into a use-after-free/double-free problem. To avoid
	 * the problem, zeroing the iommu_mm pointer of a new mm is needed here.
	 */
	mm->iommu_mm = NULL;
}

static inline bool mm_valid_pasid(struct mm_struct *mm)
{
	return READ_ONCE(mm->iommu_mm);
}

static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm = READ_ONCE(mm->iommu_mm);

	if (!iommu_mm)
		return IOMMU_PASID_INVALID;
	return iommu_mm->pasid;
}

void mm_pasid_drop(struct mm_struct *mm);
struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm);
void iommu_sva_unbind_device(struct iommu_sva *handle);
u32 iommu_sva_get_pasid(struct iommu_sva *handle);
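
/*
 * Editor's example (hedged sketch): binding a device to a process address
 * space and retrieving the PASID to program into the device (e.g. for
 * ENQCMD-style work submission). The caller keeps @handle and tears the
 * bond down with iommu_sva_unbind_device().
 */
static inline int mydrv_enable_sva(struct device *dev, struct mm_struct *mm,
				   u32 *pasid, struct iommu_sva **handle)
{
	struct iommu_sva *h = iommu_sva_bind_device(dev, mm);

	if (IS_ERR(h))
		return PTR_ERR(h);
	*pasid = iommu_sva_get_pasid(h);
	*handle = h;
	return 0;
}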
#else
static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	return ERR_PTR(-ENODEV);
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}
static inline void mm_pasid_init(struct mm_struct *mm) {}
static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }

static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
{
	return IOMMU_PASID_INVALID;
}

static inline void mm_pasid_drop(struct mm_struct *mm) {}
#endif /* CONFIG_IOMMU_MM_DATA */

#ifdef CONFIG_IOMMU_IOPF
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev);
int iopf_queue_flush_dev(struct device *dev);
struct iopf_queue *iopf_queue_alloc(const char *name);
void iopf_queue_free(struct iopf_queue *queue);
int iopf_queue_discard_partial(struct iopf_queue *queue);
void iopf_free_group(struct iopf_group *group);
int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
void iopf_group_response(struct iopf_group *group,
			 enum iommu_page_response_code status);
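
/*
 * Editor's example (hedged sketch): completing an I/O page fault group once
 * its faults have been serviced (or have failed), mirroring how an iopf
 * handler is expected to finish.
 */
static inline void mydrv_complete_group(struct iopf_group *group, bool handled)
{
	/* Reply to the hardware, then release the group's memory. */
	iopf_group_response(group, handled ? IOMMU_PAGE_RESP_SUCCESS :
					     IOMMU_PAGE_RESP_INVALID);
	iopf_free_group(group);
}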
#else
static inline int
iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
{
	return -ENODEV;
}

static inline void
iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
{
}

static inline int iopf_queue_flush_dev(struct device *dev)
{
	return -ENODEV;
}

static inline struct iopf_queue *iopf_queue_alloc(const char *name)
{
	return NULL;
}

static inline void iopf_queue_free(struct iopf_queue *queue)
{
}

static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
{
	return -ENODEV;
}

static inline void iopf_free_group(struct iopf_group *group)
{
}

static inline int
iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
{
	return -ENODEV;
}

static inline void iopf_group_response(struct iopf_group *group,
				       enum iommu_page_response_code status)
{
}
#endif /* CONFIG_IOMMU_IOPF */
#endif /* __LINUX_IOMMU_H */