/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IO_PGTABLE_H
#define __IO_PGTABLE_H

#include <linux/bitops.h>
#include <linux/iommu.h>

/*
 * Public API for use by IOMMU drivers
 */

/* Page table formats implemented by the io-pgtable library. */
enum io_pgtable_fmt {
	ARM_32_LPAE_S1,
	ARM_32_LPAE_S2,
	ARM_64_LPAE_S1,
	ARM_64_LPAE_S2,
	ARM_V7S,
	ARM_MALI_LPAE,
	AMD_IOMMU_V1,
	APPLE_DART,
	APPLE_DART2,
	IO_PGTABLE_NUM_FMTS,	/* keep last: equals the number of formats above */
};

/**
 * struct iommu_flush_ops - IOMMU callbacks for TLB and page table management.
 *
 * @tlb_flush_all: Synchronously invalidate the entire TLB context.
 * @tlb_flush_walk: Synchronously invalidate all intermediate TLB state
 *                  (sometimes referred to as the "walk cache") for a virtual
 *                  address range.
 * @tlb_add_page: Optional callback to queue up leaf TLB invalidation for a
 *                single page.  IOMMUs that cannot batch TLB invalidation
 *                operations efficiently will typically issue them here, but
 *                others may decide to update the iommu_iotlb_gather structure
 *                and defer the invalidation until iommu_iotlb_sync() instead.
 *
 * Note that these can all be called in atomic context and must therefore
 * not block.
 */
struct iommu_flush_ops {
	void (*tlb_flush_all)(void *cookie);
	void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
			       void *cookie);
	void (*tlb_add_page)(struct iommu_iotlb_gather *gather,
			     unsigned long iova, size_t granule, void *cookie);
};

/**
 * struct io_pgtable_cfg - Configuration data for a set of page tables.
 *
 * @quirks: A bitmap of hardware quirks that require some special
 *          action by the low-level page table allocator.
 * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
 *                 tables.
 * @ias: Input address (iova) size, in bits.
 * @oas: Output address (paddr) size, in bits.
 * @coherent_walk: A flag to indicate whether or not page table walks made
 *                 by the IOMMU are coherent with the CPU caches.
 * @tlb: TLB management callbacks for this set of tables.
 * @iommu_dev: The device representing the DMA configuration for the
 *             page table walker.
 */
struct io_pgtable_cfg {
	/*
	 * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in
	 *	stage 1 PTEs, for hardware which insists on validating them
	 *	even in non-secure state where they should normally be ignored.
	 *
	 * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and
	 *	IOMMU_NOEXEC flags and map everything with full access, for
	 *	hardware which does not implement the permissions of a given
	 *	format, and/or requires some format-specific default value.
	 *
	 * IO_PGTABLE_QUIRK_ARM_MTK_EXT: (ARM v7s format) MediaTek IOMMUs extend
	 *	to support up to 35 bits PA where the bit32, bit33 and bit34 are
	 *	encoded in the bit9, bit4 and bit5 of the PTE respectively.
	 *
	 * IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT: (ARM v7s format) MediaTek IOMMUs
	 *	extend the translation table base support up to 35 bits PA, the
	 *	encoding format is same with IO_PGTABLE_QUIRK_ARM_MTK_EXT.
	 *
	 * IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table
	 *	for use in the upper half of a split address space.
	 *
	 * IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability
	 *	attributes set in the TCR for a non-coherent page-table walker.
	 */
	#define IO_PGTABLE_QUIRK_ARM_NS			BIT(0)
	#define IO_PGTABLE_QUIRK_NO_PERMS		BIT(1)
	/*
	 * NOTE(review): BIT(2) is deliberately skipped — presumably a retired
	 * quirk; keep the remaining values stable and do not reuse the gap
	 * without checking the git history.
	 */
	#define IO_PGTABLE_QUIRK_ARM_MTK_EXT		BIT(3)
	#define IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT	BIT(4)
	#define IO_PGTABLE_QUIRK_ARM_TTBR1		BIT(5)
	#define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA		BIT(6)
	unsigned long quirks;
	unsigned long pgsize_bitmap;
	unsigned int ias;
	unsigned int oas;
	bool coherent_walk;
	const struct iommu_flush_ops *tlb;
	struct device *iommu_dev;

	/* Low-level data specific to the table format */
	union {
		struct {
			u64 ttbr;
			struct {
				u32 ips:3;
				u32 tg:2;
				u32 sh:2;
				u32 orgn:2;
				u32 irgn:2;
				u32 tsz:6;
			} tcr;
			u64 mair;
		} arm_lpae_s1_cfg;

		struct {
			u64 vttbr;
			struct {
				u32 ps:3;
				u32 tg:2;
				u32 sh:2;
				u32 orgn:2;
				u32 irgn:2;
				u32 sl:2;
				u32 tsz:6;
			} vtcr;
		} arm_lpae_s2_cfg;

		struct {
			u32 ttbr;
			u32 tcr;
			u32 nmrr;
			u32 prrr;
		} arm_v7s_cfg;

		struct {
			u64 transtab;
			u64 memattr;
		} arm_mali_lpae_cfg;

		struct {
			u64 ttbr[4];
			u32 n_ttbrs;
		} apple_dart_cfg;
	};
};

/**
 * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
 *
 * @map: Map a physically contiguous memory region.
 * @map_pages: Map a physically contiguous range of pages of the same size.
 * @unmap: Unmap a physically contiguous memory region.
 * @unmap_pages: Unmap a range of virtually contiguous pages of the same size.
 * @iova_to_phys: Translate iova to physical address.
 *
 * These functions map directly onto the iommu_ops member functions with
 * the same names.
 */
struct io_pgtable_ops {
	int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
	int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
			size_t size, struct iommu_iotlb_gather *gather);
	size_t (*unmap_pages)(struct io_pgtable_ops *ops, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *gather);
	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
				    unsigned long iova);
};

/**
 * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
 *
 * @fmt: The page table format.
 * @cfg: The page table configuration. This will be modified to represent
 *       the configuration actually provided by the allocator (e.g. the
 *       pgsize_bitmap may be restricted).
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          the callback routines in cfg->tlb.
 */
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
					    struct io_pgtable_cfg *cfg,
					    void *cookie);

/**
 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
 *                         *must* ensure that the page table is no longer
 *                         live, but the TLB can be dirty.
 *
 * @ops: The ops returned from alloc_io_pgtable_ops.
 */
void free_io_pgtable_ops(struct io_pgtable_ops *ops);


/*
 * Internal structures for page table allocator implementations.
 */

/**
 * struct io_pgtable - Internal structure describing a set of page tables.
 *
 * @fmt: The page table format.
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          any callback routines.
 * @cfg: A copy of the page table configuration.
211 * @ops: The page table operations in use for this set of page tables. 212 */ 213 struct io_pgtable { 214 enum io_pgtable_fmt fmt; 215 void *cookie; 216 struct io_pgtable_cfg cfg; 217 struct io_pgtable_ops ops; 218 }; 219 220 #define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops) 221 222 static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop) 223 { 224 if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_all) 225 iop->cfg.tlb->tlb_flush_all(iop->cookie); 226 } 227 228 static inline void 229 io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova, 230 size_t size, size_t granule) 231 { 232 if (iop->cfg.tlb && iop->cfg.tlb->tlb_flush_walk) 233 iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie); 234 } 235 236 static inline void 237 io_pgtable_tlb_add_page(struct io_pgtable *iop, 238 struct iommu_iotlb_gather * gather, unsigned long iova, 239 size_t granule) 240 { 241 if (iop->cfg.tlb && iop->cfg.tlb->tlb_add_page) 242 iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie); 243 } 244 245 /** 246 * struct io_pgtable_init_fns - Alloc/free a set of page tables for a 247 * particular format. 248 * 249 * @alloc: Allocate a set of page tables described by cfg. 250 * @free: Free the page tables associated with iop. 
 */
struct io_pgtable_init_fns {
	struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
	void (*free)(struct io_pgtable *iop);
};

/*
 * Per-format backend implementations; names correspond to the entries of
 * enum io_pgtable_fmt. NOTE(review): there is no separate init_fns for
 * APPLE_DART2 — presumably io_pgtable_apple_dart_init_fns serves both DART
 * formats; confirm against the allocator's format dispatch table.
 */
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns;
extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns;

#endif /* __IO_PGTABLE_H */