/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <[email protected]>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/iova_bitmap.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)
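/*
 * For example (illustrative sketch only), a driver mapping a buffer that only
 * privileged "supervisor" transactions may touch would compose these flags
 * for iommu_map(), which is declared further below:
 *
 *	prot = IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV;
 *	ret = iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
 */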

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct iommu_domain_ops;
struct iommu_dirty_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_dma_cookie;
struct iommu_fault_param;

#define IOMMU_FAULT_PERM_READ	(1 << 0) /* read */
#define IOMMU_FAULT_PERM_WRITE	(1 << 1) /* write */
#define IOMMU_FAULT_PERM_EXEC	(1 << 2) /* exec */
#define IOMMU_FAULT_PERM_PRIV	(1 << 3) /* privileged */

/* Generic fault types; can be expanded, e.g. for IRQ remapping faults */
enum iommu_fault_type {
	IOMMU_FAULT_PAGE_REQ = 1,	/* page request fault */
};

/**
 * struct iommu_fault_page_request - Page Request data
 * @flags: encodes whether the corresponding fields are valid and whether this
 *         is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values).
 *         When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response
 *         must have the same PASID value as the page request. When it is clear,
 *         the page response should not have a PASID.
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
 * @addr: page address
 * @private_data: device-specific private information
 */
struct iommu_fault_page_request {
#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID	(1 << 0)
#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE	(1 << 1)
#define IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA	(1 << 2)
#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID	(1 << 3)
	u32	flags;
	u32	pasid;
	u32	grpid;
	u32	perm;
	u64	addr;
	u64	private_data[2];
};

/**
 * struct iommu_fault - Generic fault data
 * @type: fault type from &enum iommu_fault_type
 * @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ
 */
struct iommu_fault {
	u32 type;
	struct iommu_fault_page_request prm;
};

/**
 * enum iommu_page_response_code - Return status of fault handlers
 * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
 *	populated, retry the access. This is "Success" in PCI PRI.
 * @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from
 *	this device if possible. This is "Response Failure" in PCI PRI.
 * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
 *	access. This is "Invalid Request" in PCI PRI.
 */
enum iommu_page_response_code {
	IOMMU_PAGE_RESP_SUCCESS = 0,
	IOMMU_PAGE_RESP_INVALID,
	IOMMU_PAGE_RESP_FAILURE,
};

/**
 * struct iommu_page_response - Generic page response information
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @code: response code from &enum iommu_page_response_code
 */
struct iommu_page_response {
	u32	pasid;
	u32	grpid;
	u32	code;
};

struct iopf_fault {
	struct iommu_fault fault;
	/* node for pending lists */
	struct list_head list;
};

struct iopf_group {
	struct iopf_fault last_fault;
	struct list_head faults;
	/* list node for iommu_fault_param::faults */
	struct list_head pending_node;
	struct work_struct work;
	struct iommu_domain *domain;
	/* The device's fault data parameter. */
	struct iommu_fault_param *fault_param;
};

/**
 * struct iopf_queue - IO Page Fault queue
 * @wq: the fault workqueue
 * @devices: devices attached to this queue
 * @lock: protects the device list
 */
struct iopf_queue {
	struct workqueue_struct *wq;
	struct list_head devices;
	struct mutex lock;
};

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */
#define __IOMMU_DOMAIN_DMA_FQ	(1U << 3)  /* DMA-API uses flush queue    */

#define __IOMMU_DOMAIN_SVA	(1U << 4)  /* Shared process address space */
#define __IOMMU_DOMAIN_PLATFORM	(1U << 5)

#define __IOMMU_DOMAIN_NESTED	(1U << 6)  /* User-managed address space nested
					      on a stage-2 translation        */
#define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ
/*
 * These are the possible domain types
 *
 *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *				  devices
 *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 *	IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
 *				  This flag allows IOMMU drivers to implement
 *				  certain optimizations for these domains
 *	IOMMU_DOMAIN_DMA_FQ	- As above, but definitely using batched TLB
 *				  invalidation.
 *	IOMMU_DOMAIN_SVA	- DMA addresses are shared process addresses
 *				  represented by mm_struct's.
 *	IOMMU_DOMAIN_PLATFORM	- Legacy domain for drivers that do their own
 *				  DMA-API handling. Do not use in new drivers.
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)
#define IOMMU_DOMAIN_DMA_FQ	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API |	\
				 __IOMMU_DOMAIN_DMA_FQ)
#define IOMMU_DOMAIN_SVA	(__IOMMU_DOMAIN_SVA)
#define IOMMU_DOMAIN_PLATFORM	(__IOMMU_DOMAIN_PLATFORM)
#define IOMMU_DOMAIN_NESTED	(__IOMMU_DOMAIN_NESTED)

struct iommu_domain {
	unsigned type;
	const struct iommu_domain_ops *ops;
	const struct iommu_dirty_ops *dirty_ops;
	const struct iommu_ops *owner; /* Whose domain_alloc we came from */
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	struct iommu_domain_geometry geometry;
	struct iommu_dma_cookie *iova_cookie;
	int (*iopf_handler)(struct iopf_group *group);
	void *fault_data;
	union {
		struct {
			iommu_fault_handler_t handler;
			void *handler_token;
		};
		struct {	/* IOMMU_DOMAIN_SVA */
			struct mm_struct *mm;
			int users;
			/*
			 * Next iommu_domain in mm->iommu_mm->sva-domains list
			 * protected by iommu_sva_lock.
			 */
			struct list_head next;
		};
	};
};

static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
{
	return domain->type & __IOMMU_DOMAIN_DMA_API;
}

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU_CACHE is supported */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
	IOMMU_CAP_PRE_BOOT_PROTECTION,	/* Firmware says it used the IOMMU for
					   DMA protection and we should too */
	/*
	 * Per-device flag indicating if enforce_cache_coherency() will work on
	 * this device.
	 */
	IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
	/*
	 * IOMMU driver does not issue TLB maintenance during .unmap, so can
	 * usefully support the non-strict DMA flush queue.
	 */
	IOMMU_CAP_DEFERRED_FLUSH,
	IOMMU_CAP_DIRTY_TRACKING,	/* IOMMU supports dirty tracking */
};

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in device assignment use case (USB, Graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 * @free: Callback to free associated memory allocations
 */
struct iommu_resv_region {
	struct list_head	list;
	phys_addr_t		start;
	size_t			length;
	int			prot;
	enum iommu_resv_type	type;
	void (*free)(struct device *dev, struct iommu_resv_region *region);
};

struct iommu_iort_rmr_data {
	struct iommu_resv_region rr;

	/* Stream IDs associated with IORT RMR entry */
	const u32 *sids;
	u32 num_sids;
};

/**
 * enum iommu_dev_features - Per device IOMMU features
 * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
 * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
 *			 enabling %IOMMU_DEV_FEAT_SVA requires
 *			 %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page
 *			 Faults themselves instead of relying on the IOMMU. When
 *			 supported, this feature must be enabled before and
 *			 disabled after %IOMMU_DEV_FEAT_SVA.
 *
 * Device drivers enable a feature using iommu_dev_enable_feature().
 */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_SVA,
	IOMMU_DEV_FEAT_IOPF,
};
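
/*
 * Sketch of the enable/disable ordering described above (illustrative only,
 * using iommu_dev_enable_feature()/iommu_dev_disable_feature() declared
 * later in this header):
 *
 *	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF))
 *		return -ENODEV;
 *	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
 *		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);
 *		return -ENODEV;
 *	}
 *	...
 *	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);
 */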

#define IOMMU_NO_PASID	(0U) /* Reserved for DMA w/o PASID */
#define IOMMU_FIRST_GLOBAL_PASID	(1U) /* starting range for allocation */
#define IOMMU_PASID_INVALID	(-1U)
typedef unsigned int ioasid_t;

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (inclusive)
 * @pgsize: The interval at which to perform the flush
 * @freelist: Removed pages to free after sync
 * @queued: Indicates that the flush will be queued
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_ops before eventually being passed
 * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
 * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
 * them. @queued is set to indicate when ->iotlb_flush_all() will be called
 * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
	struct list_head	freelist;
	bool			queued;
};

/**
 * struct iommu_dirty_bitmap - Dirty IOVA bitmap state
 * @bitmap: IOVA bitmap
 * @gather: Range information for a pending IOTLB flush
 */
struct iommu_dirty_bitmap {
	struct iova_bitmap *bitmap;
	struct iommu_iotlb_gather *gather;
};

/* Read but do not clear any dirty bits */
#define IOMMU_DIRTY_NO_CLEAR (1 << 0)

/**
 * struct iommu_dirty_ops - domain specific dirty tracking operations
 * @set_dirty_tracking: Enable or Disable dirty tracking on the iommu domain
 * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled
 *                        into a bitmap, with a bit represented as a page.
 *                        Reads the dirty PTE bits and clears them from the
 *                        IO page tables.
 */
struct iommu_dirty_ops {
	int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled);
	int (*read_and_clear_dirty)(struct iommu_domain *domain,
				    unsigned long iova, size_t size,
				    unsigned long flags,
				    struct iommu_dirty_bitmap *dirty);
};

/**
 * struct iommu_user_data - iommu driver specific user space data info
 * @type: The data type of the user buffer
 * @uptr: Pointer to the user buffer for copy_from_user()
 * @len: The length of the user buffer in bytes
 *
 * A user buffer of this kind carries an uAPI structure defined in
 * include/uapi/linux/iommufd.h; @type, @uptr and @len should be copied
 * directly from the corresponding iommufd core uAPI struct.
 */
struct iommu_user_data {
	unsigned int type;
	void __user *uptr;
	size_t len;
};

/**
 * struct iommu_user_data_array - iommu driver specific user space data array
 * @type: The data type of all the entries in the user buffer array
 * @uptr: Pointer to the user buffer array
 * @entry_len: The fixed-width length of an entry in the array, in bytes
 * @entry_num: The number of total entries in the array
 *
 * The user buffer includes an array of requests with format defined in
 * include/uapi/linux/iommufd.h
 */
struct iommu_user_data_array {
	unsigned int type;
	void __user *uptr;
	size_t entry_len;
	u32 entry_num;
};

/**
 * __iommu_copy_struct_from_user - Copy iommu driver specific user space data
 * @dst_data: Pointer to an iommu driver specific user data that is defined in
 *            include/uapi/linux/iommufd.h
 * @src_data: Pointer to a struct iommu_user_data for user space data info
 * @data_type: The data type of the @dst_data. Must match with @src_data.type
 * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
 * @min_len: Initial length of user data structure for backward compatibility.
 *           This should be offsetofend using the last member in the user data
 *           struct that was initially added to include/uapi/linux/iommufd.h
 */
static inline int __iommu_copy_struct_from_user(
	void *dst_data, const struct iommu_user_data *src_data,
	unsigned int data_type, size_t data_len, size_t min_len)
{
	if (WARN_ON(!dst_data || !src_data))
		return -EINVAL;
	/* Only dereference src_data after the NULL check above */
	if (src_data->type != data_type)
		return -EINVAL;
	if (src_data->len < min_len || data_len < src_data->len)
		return -EINVAL;
	return copy_struct_from_user(dst_data, data_len, src_data->uptr,
				     src_data->len);
}

/**
 * iommu_copy_struct_from_user - Copy iommu driver specific user space data
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @user_data: Pointer to a struct iommu_user_data for user space data info
 * @data_type: The data type of the @kdst. Must match with @user_data->type
 * @min_last: The last member of the data structure @kdst points to in the
 *            initial version.
 * Return 0 for success, otherwise -error.
 */
#define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
	__iommu_copy_struct_from_user(kdst, user_data, data_type,         \
				      sizeof(*kdst),                      \
				      offsetofend(typeof(*kdst), min_last))
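
/*
 * A minimal usage sketch, assuming a hypothetical driver uAPI struct
 * my_alloc_data whose initial version ended at member 'flags', matched
 * by a hypothetical IOMMU_HWPT_DATA_MY_TYPE type code:
 *
 *	struct my_alloc_data data;
 *	int rc;
 *
 *	rc = iommu_copy_struct_from_user(&data, user_data,
 *					 IOMMU_HWPT_DATA_MY_TYPE, flags);
 *	if (rc)
 *		return ERR_PTR(rc);
 *
 * Members appended to my_alloc_data after 'flags' are zero-filled when an
 * older user space passes a shorter struct, courtesy of
 * copy_struct_from_user().
 */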

/**
 * __iommu_copy_struct_from_user_array - Copy iommu driver specific user space
 *                                       data from an iommu_user_data_array
 * @dst_data: Pointer to an iommu driver specific user data that is defined in
 *            include/uapi/linux/iommufd.h
 * @src_array: Pointer to a struct iommu_user_data_array for a user space array
 * @data_type: The data type of the @dst_data. Must match with @src_array.type
 * @index: Index to the location in the array to copy user data from
 * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
 * @min_len: Initial length of user data structure for backward compatibility.
 *           This should be offsetofend using the last member in the user data
 *           struct that was initially added to include/uapi/linux/iommufd.h
 */
static inline int __iommu_copy_struct_from_user_array(
	void *dst_data, const struct iommu_user_data_array *src_array,
	unsigned int data_type, unsigned int index, size_t data_len,
	size_t min_len)
{
	struct iommu_user_data src_data;

	if (WARN_ON(!src_array || index >= src_array->entry_num))
		return -EINVAL;
	if (!src_array->entry_num)
		return -EINVAL;
	src_data.uptr = src_array->uptr + src_array->entry_len * index;
	src_data.len = src_array->entry_len;
	src_data.type = src_array->type;

	return __iommu_copy_struct_from_user(dst_data, &src_data, data_type,
					     data_len, min_len);
}

/**
 * iommu_copy_struct_from_user_array - Copy iommu driver specific user space
 *                                     data from an iommu_user_data_array
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @user_array: Pointer to a struct iommu_user_data_array for a user space
 *              array
 * @data_type: The data type of the @kdst. Must match with @user_array->type
 * @index: Index to the location in the array to copy user data from
 * @min_last: The last member of the data structure @kdst points to in the
 *            initial version.
 * Return 0 for success, otherwise -error.
 */
#define iommu_copy_struct_from_user_array(kdst, user_array, data_type, index, \
					  min_last)                           \
	__iommu_copy_struct_from_user_array(                                  \
		kdst, user_array, data_type, index, sizeof(*(kdst)),          \
		offsetofend(typeof(*(kdst)), min_last))
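
/*
 * Usage mirrors iommu_copy_struct_from_user() but pulls entry @index out of
 * a user-provided array, e.g. inside a ->cache_invalidate_user()
 * implementation. Sketch, with a hypothetical struct my_inv_data whose
 * initial version ended at member 'padding':
 *
 *	struct my_inv_data inv;
 *	int rc = iommu_copy_struct_from_user_array(&inv, array,
 *						   IOMMU_INV_DATA_MY_TYPE,
 *						   i, padding);
 */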

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @hw_info: report iommu hardware information. The data buffer returned by this
 *           op is allocated in the iommu driver and freed by the caller after
 *           use. The information type is one of enum iommu_hw_info_type defined
 *           in include/uapi/linux/iommufd.h.
 * @domain_alloc: allocate and return an iommu domain on success, otherwise
 *                return NULL. The domain is not fully initialized until
 *                iommu_domain_alloc() returns to its caller.
 * @domain_alloc_user: Allocate an iommu domain corresponding to the input
 *                     parameters as defined in include/uapi/linux/iommufd.h.
 *                     Unlike @domain_alloc, it is called only by IOMMUFD and
 *                     must fully initialize the new domain before return.
 *                     Upon success, if the @user_data is valid and the @parent
 *                     points to a kernel-managed domain, the new domain must be
 *                     IOMMU_DOMAIN_NESTED type; otherwise, the @parent must be
 *                     NULL, the @user_data may optionally be provided, and the
 *                     new domain must support __IOMMU_DOMAIN_PAGING.
 *                     Upon failure, ERR_PTR must be returned.
 * @domain_alloc_paging: Allocate an iommu_domain that can be used for
 *                       UNMANAGED, DMA, and DMA_FQ domain types.
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the group's domain
 * @device_group: find iommu group for a particular device
 * @get_resv_regions: Request list of reserved regions for a device
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_enable/disable_feat: per device entries to enable/disable
 *                               iommu specific features.
 * @page_response: handle page request response
 * @def_domain_type: device default domain type, return value:
 *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *		- IOMMU_DOMAIN_DMA: must use a dma domain
 *		- 0: use the default setting
 * @default_domain_ops: the default ops for domains
 * @remove_dev_pasid: Remove any translation configurations of a specific
 *                    pasid, so that any DMA transactions with this pasid
 *                    will be blocked by the hardware.
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 * @identity_domain: An always available, always attachable identity
 *                   translation.
 * @blocked_domain: An always available, always attachable blocking
 *                  translation.
 * @release_domain: Domain that the iommu core attaches to a device on its
 *                  release path, typically the blocked or identity domain.
 * @default_domain: If not NULL this will always be set as the default domain.
 *                  This should be an IDENTITY/BLOCKED/PLATFORM domain.
 *                  Do not use in new drivers.
 */
struct iommu_ops {
	bool (*capable)(struct device *dev, enum iommu_cap);
	void *(*hw_info)(struct device *dev, u32 *length, u32 *type);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
	struct iommu_domain *(*domain_alloc_user)(
		struct device *dev, u32 flags, struct iommu_domain *parent,
		const struct iommu_user_data *user_data);
	struct iommu_domain *(*domain_alloc_paging)(struct device *dev);

	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
	void (*probe_finalize)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);

	int (*of_xlate)(struct device *dev, const struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct device *dev);

	/* Per device IOMMU features */
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	void (*page_response)(struct device *dev, struct iopf_fault *evt,
			      struct iommu_page_response *msg);

	int (*def_domain_type)(struct device *dev);
	void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid);

	const struct iommu_domain_ops *default_domain_ops;
	unsigned long pgsize_bitmap;
	struct module *owner;
	struct iommu_domain *identity_domain;
	struct iommu_domain *blocked_domain;
	struct iommu_domain *release_domain;
	struct iommu_domain *default_domain;
};

/**
 * struct iommu_domain_ops - domain specific operations
 * @attach_dev: attach an iommu domain to a device
 *  Return:
 * * 0		- success
 * * EINVAL	- can indicate that device and domain are incompatible due to
 *		  some previous configuration of the domain, in which case the
 *		  driver shouldn't log an error, since it is legitimate for a
 *		  caller to test reuse of existing domains. Otherwise, it may
 *		  still represent some other fundamental problem
 * * ENOMEM	- out of memory
 * * ENOSPC	- non-ENOMEM type of resource allocation failures
 * * EBUSY	- device is attached to a domain and cannot be changed
 * * ENODEV	- device specific errors, not able to be attached
 * * <others>	- treated as ENODEV by the caller. Use is discouraged
 * @set_dev_pasid: set an iommu domain to a pasid of device
 * @map_pages: map a physically contiguous set of pages of the same size to
 *             an iommu domain.
 * @unmap_pages: unmap a number of pages of the same size from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map_pages to the
 *                  hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *            queue
 * @cache_invalidate_user: Flush hardware cache for user space IO page table.
 *                         The @domain must be IOMMU_DOMAIN_NESTED. The @array
 *                         passes in the cache invalidation requests, in form
 *                         of a driver data structure. The driver must update
 *                         array->entry_num to report the number of handled
 *                         invalidation requests. The driver data structure
 *                         must be defined in include/uapi/linux/iommufd.h
 * @iova_to_phys: translate iova to physical address
 * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
 *                           including no-snoop TLPs on PCIe or other platform
 *                           specific mechanisms.
 * @enable_nesting: Enable nesting
 * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
 * @free: Release the domain after use.
 */
struct iommu_domain_ops {
	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
			     ioasid_t pasid);

	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);

	void (*flush_iotlb_all)(struct iommu_domain *domain);
	int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
			      size_t size);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);
	int (*cache_invalidate_user)(struct iommu_domain *domain,
				     struct iommu_user_data_array *array);

	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
				    dma_addr_t iova);

	bool (*enforce_cache_coherency)(struct iommu_domain *domain);
	int (*enable_nesting)(struct iommu_domain *domain);
	int (*set_pgtable_quirks)(struct iommu_domain *domain,
				  unsigned long quirks);

	void (*free)(struct iommu_domain *domain);
};

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware handle for this IOMMU instance
 * @dev: struct device for sysfs handling
 * @singleton_group: Used internally for drivers that have only one group
 * @max_pasids: number of supported PASIDs
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
	struct iommu_group *singleton_group;
	u32 max_pasids;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @lock: protect pending faults list
 * @users: user counter to manage the lifetime of the data
 * @rcu: rcu head for kfree_rcu()
 * @dev: the device that owns this param
 * @queue: IOPF queue
 * @queue_list: list entry in queue->devices
 * @partial: faults that are part of a Page Request Group for which the last
 *           request hasn't been submitted yet.
 * @faults: holds the pending faults which need response
 */
struct iommu_fault_param {
	struct mutex lock;
	refcount_t users;
	struct rcu_head rcu;

	struct device *dev;
	struct iopf_queue *queue;
	struct list_head queue_list;

	struct list_head partial;
	struct list_head faults;
};

/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @fault_param: IOMMU detected device fault reporting data
 * @fwspec:	 IOMMU fwspec data
 * @iommu_dev:	 IOMMU device this device is linked to
 * @priv:	 IOMMU Driver private data
 * @max_pasids:  number of PASIDs this device can consume
 * @attach_deferred: the dma domain attachment is deferred
 * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
 * @require_direct: device requires IOMMU_RESV_DIRECT regions
 * @shadow_on_flush: IOTLB flushes are used to sync shadow tables
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 */
struct dev_iommu {
	struct mutex lock;
	struct iommu_fault_param __rcu	*fault_param;
	struct iommu_fwspec		*fwspec;
	struct iommu_device		*iommu_dev;
	void				*priv;
	u32				max_pasids;
	u32				attach_deferred:1;
	u32				pci_32bit_workaround:1;
	u32				require_direct:1;
	u32				shadow_on_flush:1;
};

int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops,
			  struct device *hwdev);
void iommu_device_unregister(struct iommu_device *iommu);
int  iommu_device_sysfs_add(struct iommu_device *iommu,
			    struct device *parent,
			    const struct attribute_group **groups,
			    const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int  iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

/**
 * iommu_get_iommu_dev - Get iommu_device for a device
 * @dev: an end-point device
 *
 * Note that this function must be called from within the iommu_ops to
 * retrieve the iommu_device for a device; the core code guarantees that
 * it will not invoke the op without an attached iommu.
 */
static inline struct iommu_device *__iommu_get_iommu_dev(struct device *dev)
{
	return dev->iommu->iommu_dev;
}

#define iommu_get_iommu_dev(dev, type, member) \
	container_of(__iommu_get_iommu_dev(dev), type, member)

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
		.freelist = LIST_HEAD_INIT(gather->freelist),
	};
}

extern int bus_iommu_probe(const struct bus_type *bus);
extern bool iommu_present(const struct bus_type *bus);
extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
extern struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents,
			    int prot, gfp_t gfp);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);
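
/*
 * Typical IOMMU-API usage by a kernel consumer of an UNMANAGED domain is,
 * in sketch form (illustrative only; error handling elided):
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	iommu_attach_device(domain, dev);
 *	iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_WRITE,
 *		  GFP_KERNEL);
 *	...
 *	iommu_unmap(domain, iova, size);
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */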

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type, gfp_t gfp);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);

extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

int iommu_enable_nesting(struct iommu_domain *domain);
int iommu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirks);

void iommu_set_dma_strict(void);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

/**
 * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
 *
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to check whether a new range and the gathered range
 * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
 * than merging the two, which might lead to unnecessary invalidations.
 */
static inline
bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size - 1;

	return gather->end != 0 &&
		(end + 1 < gather->start || start > gather->end + 1);
}

/**
 * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
 * where only the address range matters, and simply minimising intermediate
 * syncs is preferred.
 */
static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
						unsigned long iova, size_t size)
{
	unsigned long end = iova + size - 1;

	if (gather->start > iova)
		gather->start = iova;
	if (gather->end < end)
		gather->end = end;
}

/**
 * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
 * @domain: IOMMU domain to be invalidated
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build invalidation commands based on individual
 * pages, or with page size/table level hints which cannot be gathered if they
 * differ.
 */
static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if ((gather->pgsize && gather->pgsize != size) ||
	    iommu_iotlb_gather_is_disjoint(gather, iova, size))
		iommu_iotlb_sync(domain, gather);

	gather->pgsize = size;
	iommu_iotlb_gather_add_range(gather, iova, size);
}
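
/*
 * Illustrative sketch of how an IOMMU driver's ->unmap_pages() might use the
 * gather helpers above; my_clear_ptes() is a hypothetical page-table routine:
 *
 *	static size_t my_unmap_pages(struct iommu_domain *domain,
 *				     unsigned long iova, size_t pgsize,
 *				     size_t pgcount,
 *				     struct iommu_iotlb_gather *gather)
 *	{
 *		size_t i, unmapped = my_clear_ptes(domain, iova, pgsize, pgcount);
 *
 *		for (i = 0; i < unmapped; i += pgsize)
 *			iommu_iotlb_gather_add_page(domain, gather,
 *						    iova + i, pgsize);
 *		return unmapped;
 *	}
 *
 * The core later flushes and reinitialises the gathered range via
 * iommu_iotlb_sync().
 */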

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return gather && gather->queued;
}

static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
					   struct iova_bitmap *bitmap,
					   struct iommu_iotlb_gather *gather)
{
	if (gather)
		iommu_iotlb_gather_init(gather);

	dirty->bitmap = bitmap;
	dirty->gather = gather;
}

static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
					     unsigned long iova,
					     unsigned long length)
{
	if (dirty->bitmap)
		iova_bitmap_set(dirty->bitmap, iova, length);

	if (dirty->gather)
		iommu_iotlb_gather_add_range(dirty->gather, iova, length);
}
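
/*
 * A driver's ->read_and_clear_dirty() would typically walk its page tables
 * and report dirty PTEs via the helper above, honouring IOMMU_DIRTY_NO_CLEAR.
 * Sketch, where my_pte_test_and_clear_dirty() and pgsize are hypothetical:
 *
 *	unsigned long end = iova + size, addr;
 *
 *	for (addr = iova; addr < end; addr += pgsize) {
 *		if (my_pte_test_and_clear_dirty(domain, addr, flags))
 *			iommu_dirty_bitmap_record(dirty, addr, pgsize);
 *	}
 *
 * When a gather was supplied, the recorded ranges double as the IOTLB flush
 * ranges needed after clearing the dirty bits.
 */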

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);
extern struct iommu_group *generic_single_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 *
 * Note that the IDs (and any other information, really) stored in this structure should be
 * considered private to the IOMMU device driver and are not to be used directly by IOMMU
 * consumers.
 */
struct iommu_fwspec {
	const struct iommu_ops	*ops;
	struct fwnode_handle	*iommu_fwnode;
	u32			flags;
	unsigned int		num_ids;
	u32			ids[];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)

/**
 * struct iommu_sva - handle to a device-mm bond
 */
struct iommu_sva {
	struct device			*dev;
	struct iommu_domain		*domain;
	struct list_head		handle_item;
	refcount_t			users;
};

struct iommu_mm_data {
	u32			pasid;
	struct list_head	sva_domains;
	struct list_head	sva_handles;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids);
const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode);

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->fwspec;
	else
		return NULL;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu->fwspec = fwspec;
}

static inline void *dev_iommu_priv_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->priv;
	else
		return NULL;
}

void dev_iommu_priv_set(struct device *dev, void *priv);

extern struct mutex iommu_probe_device_lock;
int iommu_probe_device(struct device *dev);

int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);

int iommu_device_use_default_domain(struct device *dev);
void iommu_device_unuse_default_domain(struct device *dev);

int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
void iommu_group_release_dma_owner(struct iommu_group *group);
bool iommu_group_dma_owner_claimed(struct iommu_group *group);

int iommu_device_claim_dma_owner(struct device *dev, void *owner);
void iommu_device_release_dma_owner(struct device *dev);

int iommu_attach_device_pasid(struct iommu_domain *domain,
			      struct device *dev, ioasid_t pasid);
void iommu_detach_device_pasid(struct iommu_domain *domain,
			       struct device *dev, ioasid_t pasid);
struct iommu_domain *
iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
			       unsigned int type);
ioasid_t iommu_alloc_global_pasid(struct device *dev);
void iommu_free_global_pasid(ioasid_t pasid);
#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};
struct iommu_dirty_bitmap {};
struct iommu_dirty_ops {};

static inline bool iommu_present(const struct bus_type *bus)
{
	return false;
}

static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
{
	return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return 0;
}
static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
				      unsigned long iova, size_t size,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	return 0;
}

static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
				   unsigned long iova, struct scatterlist *sg,
				   unsigned int nents, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *iotlb_gather)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
				iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_resv_regions(struct device *dev,
					struct list_head *list)
{
}

static inline void iommu_put_resv_regions(struct device *dev,
					struct list_head *list)
{
}

static inline int iommu_get_group_resv_regions(struct iommu_group *group,
					       struct list_head *head)
{
	return -ENODEV;
}

static inline void iommu_set_default_passthrough(bool cmd_line)
{
}

static inline void iommu_set_default_translated(bool cmd_line)
{
}

static inline bool iommu_default_passthrough(void)
{
	return true;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirks)
{
	return 0;
}

static inline int iommu_device_register(struct iommu_device *iommu,
					const struct iommu_ops *ops,
					struct device *hwdev)
{
	return -ENODEV;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return NULL;
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return false;
}

static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
					   struct iova_bitmap *bitmap,
					   struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
					     unsigned long iova,
					     unsigned long length)
{
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int  iommu_device_sysfs_add(struct iommu_device *iommu,
					  struct device *parent,
					  const struct attribute_group **groups,
					  const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct iommu_device *iommu,
				    struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct iommu_device *iommu,
				       struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode,
				    const struct iommu_ops *ops)
{
	return -ENODEV;
}

static inline void iommu_fwspec_free(struct device *dev)
{
}

static inline int iommu_fwspec_add_ids(struct device *dev, const u32 *ids,
				       int num_ids)
{
	return -ENODEV;
}

static inline
const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode)
{
	return NULL;
}

static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return NULL;
}

static inline int iommu_device_use_default_domain(struct device *dev)
{
	return 0;
}

static inline void iommu_device_unuse_default_domain(struct device *dev)
{
}

static inline int
iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
{
	return -ENODEV;
}

static inline void iommu_group_release_dma_owner(struct iommu_group *group)
{
}

static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
	return false;
}

static inline void iommu_device_release_dma_owner(struct device *dev)
{
}

static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner)
{
	return -ENODEV;
}

static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
					    struct device *dev, ioasid_t pasid)
{
	return -ENODEV;
}

static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
					     struct device *dev, ioasid_t pasid)
{
}

static inline struct iommu_domain *
iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
			       unsigned int type)
{
	return NULL;
}

static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
{
	return IOMMU_PASID_INVALID;
}

static inline void iommu_free_global_pasid(ioasid_t pasid) {}
#endif /* CONFIG_IOMMU_API */

#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API)
void iommu_group_mutex_assert(struct device *dev);
#else
static inline void iommu_group_mutex_assert(struct device *dev)
{
}
#endif

/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain:	The IOMMU domain to perform the mapping
 * @iova:	The start address to map the buffer
 * @sgt:	The sg_table object describing the buffer
 * @prot:	IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 */
static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain,
			unsigned long iova, struct sg_table *sgt, int prot)
{
	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
			    GFP_KERNEL);
}
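
/*
 * For example (sketch; error handling elided), mapping a previously built
 * sg_table at a driver-chosen IOVA:
 *
 *	ssize_t mapped = iommu_map_sgtable(domain, iova, sgt,
 *					   IOMMU_READ | IOMMU_WRITE);
 *	if (mapped < 0)
 *		return mapped;
 *
 * On success the return value is the total number of bytes mapped, which can
 * later be passed to iommu_unmap(domain, iova, mapped).
 */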

#ifdef CONFIG_IOMMU_DEBUGFS
extern	struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#ifdef CONFIG_IOMMU_DMA
#include <linux/msi.h>

/* Setup call for arch DMA mapping code */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);

int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg);

#else /* CONFIG_IOMMU_DMA */

struct msi_desc;
struct msi_msg;

static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
{
}

static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	return -ENODEV;
}

static inline int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	return 0;
}

static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
}

#endif	/* CONFIG_IOMMU_DMA */

/*
 * Newer generations of Tegra SoCs require devices' stream IDs to be directly programmed into
 * some registers. These are always paired with a Tegra SMMU or ARM SMMU, for which the contents
 * of the struct iommu_fwspec are known. Use this helper to formalize access to these internals.
 */
#define TEGRA_STREAM_ID_BYPASS 0x7f

static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream_id)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec && fwspec->num_ids == 1) {
		*stream_id = fwspec->ids[0] & 0xffff;
		return true;
	}
#endif

	return false;
}
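
/*
 * Callers typically fall back to the bypass ID when no stream ID is
 * available, e.g. (sketch):
 *
 *	u32 sid;
 *
 *	if (!tegra_dev_iommu_get_stream_id(dev, &sid))
 *		sid = TEGRA_STREAM_ID_BYPASS;
 */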

#ifdef CONFIG_IOMMU_MM_DATA
static inline void mm_pasid_init(struct mm_struct *mm)
{
	/*
	 * During dup_mm(), a new mm will be memcpy'd from an old one and that
	 * makes the new mm and the old one point to the same iommu_mm
	 * instance. When either of the two mms is released, the iommu_mm
	 * instance is freed, leaving the other mm running into a
	 * use-after-free/double-free problem. To avoid the problem, zeroing
	 * the iommu_mm pointer of a new mm is needed here.
	 */
	mm->iommu_mm = NULL;
}

static inline bool mm_valid_pasid(struct mm_struct *mm)
{
	return READ_ONCE(mm->iommu_mm);
}

static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm = READ_ONCE(mm->iommu_mm);

	if (!iommu_mm)
		return IOMMU_PASID_INVALID;
	return iommu_mm->pasid;
}

void mm_pasid_drop(struct mm_struct *mm);
struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm);
void iommu_sva_unbind_device(struct iommu_sva *handle);
u32 iommu_sva_get_pasid(struct iommu_sva *handle);
struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
					    struct mm_struct *mm);
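
/*
 * SVA usage sketch: bind the device to the current process's address space,
 * hand the PASID to the hardware, and unbind when done (illustrative only):
 *
 *	struct iommu_sva *handle = iommu_sva_bind_device(dev, current->mm);
 *	u32 pasid;
 *
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	pasid = iommu_sva_get_pasid(handle);
 *	... program pasid into the device ...
 *	iommu_sva_unbind_device(handle);
 */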
#else
static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	return NULL;
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}
static inline void mm_pasid_init(struct mm_struct *mm) {}
static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }

static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
{
	return IOMMU_PASID_INVALID;
}

static inline void mm_pasid_drop(struct mm_struct *mm) {}

static inline struct iommu_domain *
iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm)
{
	return NULL;
}
#endif /* CONFIG_IOMMU_MM_DATA */

#ifdef CONFIG_IOMMU_IOPF
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev);
int iopf_queue_flush_dev(struct device *dev);
struct iopf_queue *iopf_queue_alloc(const char *name);
void iopf_queue_free(struct iopf_queue *queue);
int iopf_queue_discard_partial(struct iopf_queue *queue);
void iopf_free_group(struct iopf_group *group);
void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
void iopf_group_response(struct iopf_group *group,
			 enum iommu_page_response_code status);
#else
static inline int
iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
{
	return -ENODEV;
}

static inline void
iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
{
}

static inline int iopf_queue_flush_dev(struct device *dev)
{
	return -ENODEV;
}

static inline struct iopf_queue *iopf_queue_alloc(const char *name)
{
	return NULL;
}

static inline void iopf_queue_free(struct iopf_queue *queue)
{
}

static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
{
	return -ENODEV;
}

static inline void iopf_free_group(struct iopf_group *group)
{
}

static inline void
iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
{
}

static inline void iopf_group_response(struct iopf_group *group,
				       enum iommu_page_response_code status)
{
}
#endif /* CONFIG_IOMMU_IOPF */
#endif /* __LINUX_IOMMU_H */