/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This file is released under the GPLv2.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Anil S Keshavamurthy <[email protected]>
 */

#ifndef _IOVA_H_
#define _IOVA_H_

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/dma-mapping.h>

/* iova structure */
struct iova {
	struct rb_node	node;
	unsigned long	pfn_hi; /* Highest allocated pfn */
	unsigned long	pfn_lo; /* Lowest allocated pfn */
};

struct iova_magazine;
struct iova_cpu_rcache;

#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */
#define MAX_GLOBAL_MAGS 32	/* magazines per bin */

struct iova_rcache {
	spinlock_t lock;
	unsigned long depot_size;
	struct iova_magazine *depot[MAX_GLOBAL_MAGS];
	struct iova_cpu_rcache __percpu *cpu_rcaches;
};

struct iova_domain;

/* Call-Back from IOVA code into IOMMU drivers */
typedef void (*iova_flush_cb)(struct iova_domain *domain);

/* Destructor for per-entry data */
typedef void (*iova_entry_dtor)(unsigned long data);

/* Number of entries per Flush Queue */
#define IOVA_FQ_SIZE	256

/* Timeout (in ms) after which entries are flushed from the Flush-Queue */
#define IOVA_FQ_TIMEOUT	10

/* Flush Queue entry for deferred flushing */
struct iova_fq_entry {
	unsigned long iova_pfn;
	unsigned long pages;
	unsigned long data;
	u64 counter; /* Flush counter when this entry was added */
};

/* Per-CPU Flush Queue structure */
struct iova_fq {
	struct iova_fq_entry entries[IOVA_FQ_SIZE];
	unsigned head, tail;
	spinlock_t lock;
};

/* holds all the iova translations for a domain */
struct iova_domain {
	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
	struct rb_root	rbroot;		/* iova domain rbtree root */
	struct rb_node	*cached_node;	/* Save last allocated node */
	struct rb_node	*cached32_node; /* Save last 32-bit allocated node */
	unsigned long	granule;	/* pfn granularity for this domain */
	unsigned long	start_pfn;	/* Lower limit for this domain */
	unsigned long	dma_32bit_pfn;
	unsigned long	max32_alloc_size; /* Size of last failed allocation */
	struct iova_fq __percpu *fq;	/* Flush Queue */

	atomic64_t	fq_flush_start_cnt;	/* Number of TLB flushes that
						   have been started */

	atomic64_t	fq_flush_finish_cnt;	/* Number of TLB flushes that
						   have been finished */

	struct iova	anchor;		/* rbtree lookup anchor */
	struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */

	iova_flush_cb	flush_cb;	/* Call-Back function to flush IOMMU
					   TLBs */

	iova_entry_dtor entry_dtor;	/* IOMMU driver specific destructor for
					   iova entry */

	struct timer_list fq_timer;	/* Timer to regularly empty the
					   flush-queues */
	atomic_t fq_timer_on;		/* 1 when timer is active, 0
					   when not */
};

static inline unsigned long iova_size(struct iova *iova)
{
	return iova->pfn_hi - iova->pfn_lo + 1;
}

static inline unsigned long iova_shift(struct iova_domain *iovad)
{
	return __ffs(iovad->granule);
}

static inline unsigned long iova_mask(struct iova_domain *iovad)
{
	return iovad->granule - 1;
}

static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova & iova_mask(iovad);
}
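/*
 * Worked example (illustrative only, not part of the original header): how
 * the granule arithmetic above behaves for a domain with a 4 KiB granule.
 * The concrete values are assumptions chosen for this sketch; iova_align()
 * and iova_pfn() are defined just below.
 *
 *	iovad->granule                 == 0x1000
 *	iova_shift(iovad)              == 12      (__ffs of 0x1000)
 *	iova_mask(iovad)               == 0xfff
 *	iova_offset(iovad, 0x12345678) == 0x678   (offset within one granule)
 *	iova_align(iovad, 5000)        == 8192    (rounded up to two granules)
 *	iova_pfn(iovad, 0x12345678)    == 0x12345 (address to page frame number)
 */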
static inline size_t iova_align(struct iova_domain *iovad, size_t size)
{
	return ALIGN(size, iovad->granule);
}

static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
{
	return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
}

static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova >> iova_shift(iovad);
}

#if IS_ENABLED(CONFIG_IOMMU_IOVA)
int iova_cache_get(void);
void iova_cache_put(void);

struct iova *alloc_iova_mem(void);
void free_iova_mem(struct iova *iova);
void free_iova(struct iova_domain *iovad, unsigned long pfn);
void __free_iova(struct iova_domain *iovad, struct iova *iova);
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
			unsigned long limit_pfn,
			bool size_aligned);
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
		    unsigned long size);
void queue_iova(struct iova_domain *iovad,
		unsigned long pfn, unsigned long pages,
		unsigned long data);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
			      unsigned long limit_pfn, bool flush_rcache);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
			  unsigned long pfn_hi);
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
		      unsigned long start_pfn);
int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
				   struct iova *iova, unsigned long pfn_lo,
				   unsigned long pfn_hi);
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
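/*
 * Illustrative usage sketch (not part of the original header): a driver-side
 * allocation round trip through the fast path declared above. The PAGE_SIZE
 * granule, the 32-bit limit, and the example_* names are assumptions made
 * for this sketch only.
 *
 *	static struct iova_domain iovad;
 *
 *	static int example_setup(void)
 *	{
 *		int ret = iova_cache_get();
 *		if (ret)
 *			return ret;
 *		// one granule per page, allocations start at pfn 1
 *		init_iova_domain(&iovad, PAGE_SIZE, 1);
 *		return 0;
 *	}
 *
 *	static dma_addr_t example_alloc(size_t size)
 *	{
 *		// size in bytes -> number of granules
 *		unsigned long nr = iova_align(&iovad, size) >> iova_shift(&iovad);
 *		// true: flush per-CPU rcaches and retry if the first try fails
 *		unsigned long pfn = alloc_iova_fast(&iovad, nr,
 *				DMA_BIT_MASK(32) >> iova_shift(&iovad), true);
 *
 *		return (dma_addr_t)pfn << iova_shift(&iovad); // 0 on failure
 *	}
 *
 *	static void example_free(dma_addr_t addr, size_t size)
 *	{
 *		free_iova_fast(&iovad, iova_pfn(&iovad, addr),
 *			       iova_align(&iovad, size) >> iova_shift(&iovad));
 *	}
 */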
#else
static inline int iova_cache_get(void)
{
	return -ENOTSUPP;
}

static inline void iova_cache_put(void)
{
}

static inline struct iova *alloc_iova_mem(void)
{
	return NULL;
}

static inline void free_iova_mem(struct iova *iova)
{
}

static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
{
}

static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)
{
}

static inline struct iova *alloc_iova(struct iova_domain *iovad,
				      unsigned long size,
				      unsigned long limit_pfn,
				      bool size_aligned)
{
	return NULL;
}

static inline void free_iova_fast(struct iova_domain *iovad,
				  unsigned long pfn,
				  unsigned long size)
{
}

static inline void queue_iova(struct iova_domain *iovad,
			      unsigned long pfn, unsigned long pages,
			      unsigned long data)
{
}

static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
					    unsigned long size,
					    unsigned long limit_pfn,
					    bool flush_rcache)
{
	return 0;
}

static inline struct iova *reserve_iova(struct iova_domain *iovad,
					unsigned long pfn_lo,
					unsigned long pfn_hi)
{
	return NULL;
}

static inline void copy_reserved_iova(struct iova_domain *from,
				      struct iova_domain *to)
{
}

static inline void init_iova_domain(struct iova_domain *iovad,
				    unsigned long granule,
				    unsigned long start_pfn)
{
}

static inline int init_iova_flush_queue(struct iova_domain *iovad,
					iova_flush_cb flush_cb,
					iova_entry_dtor entry_dtor)
{
	return -ENODEV;
}

static inline struct iova *find_iova(struct iova_domain *iovad,
				     unsigned long pfn)
{
	return NULL;
}

static inline void put_iova_domain(struct iova_domain *iovad)
{
}

static inline struct iova *split_and_remove_iova(struct iova_domain *iovad,
						 struct iova *iova,
						 unsigned long pfn_lo,
						 unsigned long pfn_hi)
{
	return NULL;
}

static inline void free_cpu_cached_iovas(unsigned int cpu,
					 struct iova_domain *iovad)
{
}
#endif

#endif
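/*
 * Illustrative note on the deferred-flush path (an example sketch, not part
 * of the original header): instead of freeing an IOVA range immediately, a
 * driver can register a flush queue and defer the free until the IOMMU TLBs
 * have been invalidated. The example_flush_all() name is hypothetical.
 *
 *	static void example_flush_all(struct iova_domain *iovad)
 *	{
 *		// tell the IOMMU hardware to invalidate its TLBs
 *	}
 *
 *	// once at init time, after init_iova_domain():
 *	init_iova_flush_queue(&iovad, example_flush_all, NULL);
 *
 *	// on unmap, instead of free_iova_fast():
 *	queue_iova(&iovad, pfn, nr_pages, 0);
 *
 * Queued entries sit in a per-CPU ring of IOVA_FQ_SIZE slots and are
 * released once fq_flush_finish_cnt shows that a TLB flush started after
 * they were queued has completed; fq_timer forces a flush after
 * IOVA_FQ_TIMEOUT milliseconds so entries do not linger on idle CPUs.
 */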