/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Ruslan Bukin <[email protected]>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory (Department of Computer Science and
 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
 * DARPA SSITH research programme.
 *
 * Portions of this work were supported by Innovate UK project 105694,
 * "Digital Security by Design (DSbD) Technology Platform Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/tree.h>
#include <sys/taskqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <vm/vm.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <dev/iommu/busdma_iommu.h>
#include <machine/vmparam.h>

#include "iommu.h"
#include "iommu_if.h"

static MALLOC_DEFINE(M_IOMMU, "IOMMU", "IOMMU framework");

#define	IOMMU_LIST_LOCK()		mtx_lock(&iommu_mtx)
#define	IOMMU_LIST_UNLOCK()		mtx_unlock(&iommu_mtx)
#define	IOMMU_LIST_ASSERT_LOCKED()	mtx_assert(&iommu_mtx, MA_OWNED)

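/* Debug printing is compiled out; give dprintf() a body to enable it. */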
#define	dprintf(fmt, ...)

static struct mtx iommu_mtx;

struct iommu_entry {
	struct iommu_unit *iommu;
	LIST_ENTRY(iommu_entry) next;
};
static LIST_HEAD(, iommu_entry) iommu_list = LIST_HEAD_INITIALIZER(iommu_list);

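/*
 * Unmap a range of the domain's I/O virtual address space by calling into
 * the IOMMU hardware driver.
 */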
static int
iommu_domain_unmap_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, int flags)
{
	struct iommu_unit *iommu;
	int error;

	iommu = iodom->iommu;

	error = IOMMU_UNMAP(iommu->dev, iodom, base, size);

	return (error);
}

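/*
 * Map the pages in "ma" at the given I/O virtual address, translating the
 * IOMMU_MAP_ENTRY_* flags into a VM protection mask for the driver.
 */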
static int
iommu_domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, vm_page_t *ma, uint64_t eflags, int flags)
{
	struct iommu_unit *iommu;
	vm_prot_t prot;
	vm_offset_t va;
	int error;

	dprintf("%s: base %lx, size %lx\n", __func__, base, size);

	prot = 0;
	if (eflags & IOMMU_MAP_ENTRY_READ)
		prot |= VM_PROT_READ;
	if (eflags & IOMMU_MAP_ENTRY_WRITE)
		prot |= VM_PROT_WRITE;

	va = base;

	iommu = iodom->iommu;

	error = IOMMU_MAP(iommu->dev, iodom, va, ma, size, prot);

	return (error);
}

static const struct iommu_domain_map_ops domain_map_ops = {
	.map = iommu_domain_map_buf,
	.unmap = iommu_domain_unmap_buf,
};

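/*
 * Allocate a new domain from the IOMMU driver and initialize its guest
 * address space (GAS), bounded above by VM_MAXUSER_ADDRESS.
 */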
static struct iommu_domain *
iommu_domain_alloc(struct iommu_unit *iommu)
{
	struct iommu_domain *iodom;

	iodom = IOMMU_DOMAIN_ALLOC(iommu->dev, iommu);
	if (iodom == NULL)
		return (NULL);

	iommu_domain_init(iommu, iodom, &domain_map_ops);
	iodom->end = VM_MAXUSER_ADDRESS;
	iodom->iommu = iommu;
	iommu_gas_init_domain(iodom);

	return (iodom);
}

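/*
 * Tear down a domain: release its GAS state if it was initialized, then
 * hand the domain back to the IOMMU driver.
 */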
static int
iommu_domain_free(struct iommu_domain *iodom)
{
	struct iommu_unit *iommu;

	iommu = iodom->iommu;

	IOMMU_LOCK(iommu);

	if ((iodom->flags & IOMMU_DOMAIN_GAS_INITED) != 0) {
		IOMMU_DOMAIN_LOCK(iodom);
		iommu_gas_fini_domain(iodom);
		IOMMU_DOMAIN_UNLOCK(iodom);
	}

	iommu_domain_fini(iodom);

	IOMMU_DOMAIN_FREE(iommu->dev, iodom);
	IOMMU_UNLOCK(iommu);

	return (0);
}

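/*
 * Initialize the busdma tag embedded in a context with permissive defaults:
 * no alignment, boundary, address or segment restrictions.
 */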
static void
iommu_tag_init(struct bus_dma_tag_iommu *t)
{
	bus_addr_t maxaddr;

	maxaddr = BUS_SPACE_MAXADDR;

	t->common.ref_count = 0;
	t->common.impl = &bus_dma_iommu_impl;
	t->common.alignment = 1;
	t->common.boundary = 0;
	t->common.lowaddr = maxaddr;
	t->common.highaddr = maxaddr;
	t->common.maxsize = maxaddr;
	t->common.nsegments = BUS_SPACE_UNRESTRICTED;
	t->common.maxsegsz = maxaddr;
}

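/*
 * Allocate a device context within the given domain and record the
 * device's requester ID (RID).
 */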
static struct iommu_ctx *
iommu_ctx_alloc(device_t dev, struct iommu_domain *iodom, bool disabled)
{
	struct iommu_unit *iommu;
	struct iommu_ctx *ioctx;

	iommu = iodom->iommu;

	ioctx = IOMMU_CTX_ALLOC(iommu->dev, iodom, dev, disabled);
	if (ioctx == NULL)
		return (NULL);

	/*
	 * The IOMMU can also be used for non-PCI devices, so this should
	 * be reimplemented as a new newbus method, with pci_get_rid() as
	 * the default for the PCI device class.
	 */
	ioctx->rid = pci_get_rid(dev);

	return (ioctx);
}

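/*
 * Look up the context for a device, creating it (along with a dedicated
 * domain and a busdma tag) if the device has none yet.
 */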
struct iommu_ctx *
iommu_get_ctx(struct iommu_unit *iommu, device_t requester,
    uint16_t rid, bool disabled, bool rmrr)
{
	struct iommu_ctx *ioctx;
	struct iommu_domain *iodom;
	struct bus_dma_tag_iommu *tag;

	IOMMU_LOCK(iommu);
	ioctx = IOMMU_CTX_LOOKUP(iommu->dev, requester);
	if (ioctx != NULL) {
		IOMMU_UNLOCK(iommu);
		return (ioctx);
	}
	IOMMU_UNLOCK(iommu);

	/*
	 * In our current configuration we have one domain per ctx,
	 * so allocate a domain first.
	 */
	iodom = iommu_domain_alloc(iommu);
	if (iodom == NULL)
		return (NULL);

	ioctx = iommu_ctx_alloc(requester, iodom, disabled);
	if (ioctx == NULL) {
		iommu_domain_free(iodom);
		return (NULL);
	}

	tag = ioctx->tag = malloc(sizeof(struct bus_dma_tag_iommu),
	    M_IOMMU, M_WAITOK | M_ZERO);
	tag->owner = requester;
	tag->ctx = ioctx;
	tag->ctx->domain = iodom;

	iommu_tag_init(tag);

	ioctx->domain = iodom;

	return (ioctx);
}

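/*
 * Release a context and its busdma tag.  The IOMMU lock must be held by
 * the caller.
 */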
void
iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ioctx)
{
	struct bus_dma_tag_iommu *tag;

	IOMMU_ASSERT_LOCKED(iommu);

	tag = ioctx->tag;

	IOMMU_CTX_FREE(iommu->dev, ioctx);

	free(tag, M_IOMMU);
}

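/*
 * Release a context, taking the IOMMU lock, and then free the per-ctx
 * domain as well.
 */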
void
iommu_free_ctx(struct iommu_ctx *ioctx)
{
	struct iommu_unit *iommu;
	struct iommu_domain *iodom;
	int error;

	iodom = ioctx->domain;
	iommu = iodom->iommu;

	IOMMU_LOCK(iommu);
	iommu_free_ctx_locked(iommu, ioctx);
	IOMMU_UNLOCK(iommu);

	/* Since we have a domain per ctx, remove the domain too. */
	error = iommu_domain_free(iodom);
	if (error)
		device_printf(iommu->dev, "Could not free a domain\n");
}

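/*
 * Return a map entry's space to the domain's GAS, and free the entry
 * itself as well if requested.
 */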
static void
iommu_domain_free_entry(struct iommu_map_entry *entry, bool free)
{
	struct iommu_domain *iodom;

	iodom = entry->domain;

	IOMMU_DOMAIN_LOCK(iodom);
	iommu_gas_free_space(iodom, entry);
	IOMMU_DOMAIN_UNLOCK(iodom);

	if (free)
		iommu_gas_free_entry(iodom, entry);
	else
		entry->flags = 0;
}

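/*
 * Unmap and free every map entry on the list.  The list must be empty on
 * return; anything left over indicates a framework bug.
 */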
void
iommu_domain_unload(struct iommu_domain *iodom,
    struct iommu_map_entries_tailq *entries, bool cansleep)
{
	struct iommu_map_entry *entry, *entry1;
	int error;

	TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
		KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
		    ("not mapped entry %p %p", iodom, entry));
		error = iodom->ops->unmap(iodom, entry->start, entry->end -
		    entry->start, cansleep ? IOMMU_PGF_WAITOK : 0);
		KASSERT(error == 0, ("unmap %p error %d", iodom, error));
		TAILQ_REMOVE(entries, entry, dmamap_link);
		iommu_domain_free_entry(entry, true);
	}

	if (TAILQ_EMPTY(entries))
		return;

	panic("entries map is not empty");
}

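/*
 * Register an IOMMU hardware unit with the framework and set up its busdma
 * backend.  Called by the hardware driver, typically from its attach method.
 */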
int
iommu_register(struct iommu_unit *iommu)
{
	struct iommu_entry *entry;

	mtx_init(&iommu->lock, "IOMMU", NULL, MTX_DEF);

	entry = malloc(sizeof(struct iommu_entry), M_IOMMU, M_WAITOK | M_ZERO);
	entry->iommu = iommu;

	IOMMU_LIST_LOCK();
	LIST_INSERT_HEAD(&iommu_list, entry, next);
	IOMMU_LIST_UNLOCK();

	iommu_init_busdma(iommu);

	return (0);
}

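/*
 * Remove an IOMMU unit from the global list and tear down its busdma
 * backend and lock.
 */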
int
iommu_unregister(struct iommu_unit *iommu)
{
	struct iommu_entry *entry, *tmp;

	IOMMU_LIST_LOCK();
	LIST_FOREACH_SAFE(entry, &iommu_list, next, tmp) {
		if (entry->iommu == iommu) {
			LIST_REMOVE(entry, next);
			free(entry, M_IOMMU);
		}
	}
	IOMMU_LIST_UNLOCK();

	iommu_fini_busdma(iommu);

	mtx_destroy(&iommu->lock);

	return (0);
}

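/*
 * Find the IOMMU unit that handles the given device by asking each
 * registered unit in turn.  Returns NULL if none claims the device.
 */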
struct iommu_unit *
iommu_find(device_t dev, bool verbose)
{
	struct iommu_entry *entry;
	struct iommu_unit *iommu;
	int error;

	IOMMU_LIST_LOCK();
	LIST_FOREACH(entry, &iommu_list, next) {
		iommu = entry->iommu;
		error = IOMMU_FIND(iommu->dev, dev);
		if (error == 0) {
			IOMMU_LIST_UNLOCK();
			return (entry->iommu);
		}
	}
	IOMMU_LIST_UNLOCK();

	return (NULL);
}

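/* Unload a single map entry, optionally freeing it. */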
void
iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free)
{

	dprintf("%s\n", __func__);

	iommu_domain_free_entry(entry, free);
}

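/* Framework initialization, run once at boot via SYSINIT. */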
static void
iommu_init(void)
{

	mtx_init(&iommu_mtx, "IOMMU", NULL, MTX_DEF);
}

SYSINIT(iommu, SI_SUB_DRIVERS, SI_ORDER_FIRST, iommu_init, NULL);
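
/*
 * Usage sketch (hypothetical, for illustration only): an IOMMU hardware
 * driver registers itself with this framework from its attach routine,
 * roughly as follows.  "foo_softc" and "foo_attach" are assumed names and
 * are not part of this file.
 *
 *	static int
 *	foo_attach(device_t dev)
 *	{
 *		struct foo_softc *sc;
 *
 *		sc = device_get_softc(dev);
 *		sc->unit.dev = dev;
 *		...
 *		return (iommu_register(&sc->unit));
 *	}
 */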