/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <[email protected]>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/rman.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/uio.h>
#include <sys/vmem.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <dev/pci/pcireg.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <dev/iommu/busdma_iommu.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/intel_dmar.h>

static int domain_unmap_buf_locked(struct dmar_domain *domain,
    iommu_gaddr_t base, iommu_gaddr_t size, int flags);

/*
 * The cache of the identity mapping page tables for the DMARs.  Using
 * the cache saves a significant amount of memory for page tables by
 * reusing the page tables, since usually DMARs are identical and have
 * the same capabilities.  Still, the cache records the information
 * needed to match DMAR capabilities and page table format, to
 * correctly handle different DMARs.
 */

struct idpgtbl {
	iommu_gaddr_t maxaddr;	/* Page table covers the guest address
				   range [0..maxaddr) */
	int pglvl;		/* Total page table levels ignoring
				   superpages */
	int leaf;		/* The last materialized page table
				   level; it is non-zero if superpages
				   are supported */
	vm_object_t pgtbl_obj;	/* The page table pages */
	LIST_ENTRY(idpgtbl) link;
};

static struct sx idpgtbl_lock;
SX_SYSINIT(idpgtbl, &idpgtbl_lock, "idpgtbl");
static LIST_HEAD(, idpgtbl) idpgtbls = LIST_HEAD_INITIALIZER(idpgtbls);
static MALLOC_DEFINE(M_DMAR_IDPGTBL, "dmar_idpgtbl",
    "Intel DMAR Identity mappings cache elements");

/*
 * Build the next level of the page tables for the identity mapping.
 * - lvl is the level to build;
 * - idx is the index of the page table page in the pgtbl_obj, which is
 *   being allocated and filled now;
 * - addr is the starting address in the bus address space which is
 *   mapped by the page table page.
 */
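/*
 * Layout sketch (illustrative): the child page table pages of the
 * page at object index idx start at index idx * DMAR_NPTEPG + 1, see
 * the computation of base below.  Assuming the usual 4KB pages and
 * 8-byte PTEs (DMAR_NPTEPG == 512), the root page sits at index 0,
 * its children at indexes 1..512, the children of index 1 at indexes
 * 513..1024, and so on.
 */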
static void
domain_idmap_nextlvl(struct idpgtbl *tbl, int lvl, vm_pindex_t idx,
    iommu_gaddr_t addr)
{
	vm_page_t m1;
	dmar_pte_t *pte;
	struct sf_buf *sf;
	iommu_gaddr_t f, pg_sz;
	vm_pindex_t base;
	int i;

	VM_OBJECT_ASSERT_LOCKED(tbl->pgtbl_obj);
	if (addr >= tbl->maxaddr)
		return;
	(void)dmar_pgalloc(tbl->pgtbl_obj, idx, IOMMU_PGF_OBJL |
	    IOMMU_PGF_WAITOK | IOMMU_PGF_ZERO);
	base = idx * DMAR_NPTEPG + 1; /* Index of the first child page of idx */
	pg_sz = pglvl_page_size(tbl->pglvl, lvl);
	if (lvl != tbl->leaf) {
		for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz)
			domain_idmap_nextlvl(tbl, lvl + 1, base + i, f);
	}
	VM_OBJECT_WUNLOCK(tbl->pgtbl_obj);
	pte = dmar_map_pgtbl(tbl->pgtbl_obj, idx, IOMMU_PGF_WAITOK, &sf);
	if (lvl == tbl->leaf) {
		for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz) {
			if (f >= tbl->maxaddr)
				break;
			pte[i].pte = (DMAR_PTE_ADDR_MASK & f) |
			    DMAR_PTE_R | DMAR_PTE_W;
		}
	} else {
		for (i = 0, f = addr; i < DMAR_NPTEPG; i++, f += pg_sz) {
			if (f >= tbl->maxaddr)
				break;
			m1 = dmar_pgalloc(tbl->pgtbl_obj, base + i,
			    IOMMU_PGF_NOALLOC);
			KASSERT(m1 != NULL, ("lost page table page"));
			pte[i].pte = (DMAR_PTE_ADDR_MASK &
			    VM_PAGE_TO_PHYS(m1)) | DMAR_PTE_R | DMAR_PTE_W;
		}
	}
	/* domain_get_idmap_pgtbl flushes CPU cache if needed. */
	dmar_unmap_pgtbl(sf);
	VM_OBJECT_WLOCK(tbl->pgtbl_obj);
}

/*
 * Find a ready and compatible identity-mapping page table in the
 * cache.  If not found, populate the identity-mapping page table for
 * the context, up to the maxaddr.  The maxaddr byte itself is allowed
 * to be unmapped, which is consistent with the definition of Maxmem
 * as the highest usable physical address + 1.  If superpages are
 * used, the maxaddr is typically mapped.
 */
vm_object_t
domain_get_idmap_pgtbl(struct dmar_domain *domain, iommu_gaddr_t maxaddr)
{
	struct dmar_unit *unit;
	struct idpgtbl *tbl;
	vm_object_t res;
	vm_page_t m;
	int leaf, i;

	leaf = 0; /* silence gcc */

	/*
	 * First, determine where to stop the paging structures.
	 */
	for (i = 0; i < domain->pglvl; i++) {
		if (i == domain->pglvl - 1 || domain_is_sp_lvl(domain, i)) {
			leaf = i;
			break;
		}
	}
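	/*
	 * Illustrative example: with superpage support the loop above
	 * stops at the topmost level that may be a leaf, so the
	 * identity mapping uses the largest page size the DMAR
	 * supports and the lower table levels are never materialized.
	 */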

	/*
	 * Search the cache for a compatible page table.  A qualifying
	 * page table must map up to maxaddr, its level must be
	 * supported by the DMAR, and its leaf must equal the
	 * calculated value.  The latter restriction could be lifted,
	 * but I believe it is currently impossible to have any
	 * deviations for existing hardware.
	 */
	sx_slock(&idpgtbl_lock);
	LIST_FOREACH(tbl, &idpgtbls, link) {
		if (tbl->maxaddr >= maxaddr &&
		    dmar_pglvl_supported(domain->dmar, tbl->pglvl) &&
		    tbl->leaf == leaf) {
			res = tbl->pgtbl_obj;
			vm_object_reference(res);
			sx_sunlock(&idpgtbl_lock);
			domain->pglvl = tbl->pglvl; /* XXXKIB ? */
			goto end;
		}
	}

	/*
	 * Not found in the cache, so relock the cache in exclusive
	 * mode to be able to add an element, and recheck the cache
	 * after the relock.
	 */
	sx_sunlock(&idpgtbl_lock);
	sx_xlock(&idpgtbl_lock);
	LIST_FOREACH(tbl, &idpgtbls, link) {
		if (tbl->maxaddr >= maxaddr &&
		    dmar_pglvl_supported(domain->dmar, tbl->pglvl) &&
		    tbl->leaf == leaf) {
			res = tbl->pgtbl_obj;
			vm_object_reference(res);
			sx_xunlock(&idpgtbl_lock);
			domain->pglvl = tbl->pglvl; /* XXXKIB ? */
			return (res);
		}
	}

	/*
	 * Still not found; create a new page table.
	 */
	tbl = malloc(sizeof(*tbl), M_DMAR_IDPGTBL, M_WAITOK);
	tbl->pglvl = domain->pglvl;
	tbl->leaf = leaf;
	tbl->maxaddr = maxaddr;
	tbl->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
	    IDX_TO_OFF(pglvl_max_pages(tbl->pglvl)), 0, 0, NULL);
	VM_OBJECT_WLOCK(tbl->pgtbl_obj);
	domain_idmap_nextlvl(tbl, 0, 0, 0);
	VM_OBJECT_WUNLOCK(tbl->pgtbl_obj);
	LIST_INSERT_HEAD(&idpgtbls, tbl, link);
	res = tbl->pgtbl_obj;
	vm_object_reference(res);
	sx_xunlock(&idpgtbl_lock);

end:
	/*
	 * The table was found or created.
	 *
	 * If the DMAR does not snoop paging structure accesses, flush
	 * the CPU cache to memory.  Note that the coherent argument
	 * passed to dmar_unmap_pgtbl() at the time of the identity
	 * page table creation may be wrong for the current DMAR,
	 * since the DMAR for which the table was created could be
	 * coherent while the current DMAR is not.
	 *
	 * If the DMAR cannot look into the chipset write buffer,
	 * flush it as well.
	 */
	unit = domain->dmar;
	if (!DMAR_IS_COHERENT(unit)) {
		VM_OBJECT_WLOCK(res);
		for (m = vm_page_lookup(res, 0); m != NULL;
		    m = vm_page_next(m))
			pmap_invalidate_cache_pages(&m, 1);
		VM_OBJECT_WUNLOCK(res);
	}
	if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
		DMAR_LOCK(unit);
		dmar_flush_write_bufs(unit);
		DMAR_UNLOCK(unit);
	}

	return (res);
}

/*
 * Return a reference to the identity mapping page table to the cache.
 */
void
put_idmap_pgtbl(vm_object_t obj)
{
	struct idpgtbl *tbl, *tbl1;
	vm_object_t rmobj;

	sx_slock(&idpgtbl_lock);
	KASSERT(obj->ref_count >= 2, ("lost cache reference"));
	vm_object_deallocate(obj);

	/*
	 * The cache always owns the last reference on the page table
	 * object.  If there is an additional reference, the object
	 * must stay.
	 */
	if (obj->ref_count > 1) {
		sx_sunlock(&idpgtbl_lock);
		return;
	}

	/*
	 * The cache reference is the last one; remove the cache
	 * element and free the page table object, returning the page
	 * table pages to the system.
	 */
	sx_sunlock(&idpgtbl_lock);
	sx_xlock(&idpgtbl_lock);
	LIST_FOREACH_SAFE(tbl, &idpgtbls, link, tbl1) {
		rmobj = tbl->pgtbl_obj;
		if (rmobj->ref_count == 1) {
			LIST_REMOVE(tbl, link);
			atomic_subtract_int(&dmar_tbl_pagecnt,
			    rmobj->resident_page_count);
			vm_object_deallocate(rmobj);
			free(tbl, M_DMAR_IDPGTBL);
		}
	}
	sx_xunlock(&idpgtbl_lock);
}

/*
 * The core routines to map and unmap host pages at the given guest
 * address.  Superpages are supported.
 */

/*
 * Index of the pte for the guest address base in the page table at
 * the level lvl.
 */
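/*
 * A worked example, assuming 4KB pages (DMAR_PAGE_SHIFT == 12,
 * DMAR_NPTEPGSHIFT == 9) and a 4-level table: at the last level
 * (lvl == 3) the shift is 12, so bits 12..20 of base select the pte;
 * at the root (lvl == 0) the shift is 12 + 3 * 9 == 39, so bits
 * 39..47 select the pte.
 */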
static int
domain_pgtbl_pte_off(struct dmar_domain *domain, iommu_gaddr_t base, int lvl)
{

	base >>= DMAR_PAGE_SHIFT + (domain->pglvl - lvl - 1) *
	    DMAR_NPTEPGSHIFT;
	return (base & DMAR_PTEMASK);
}

/*
 * Returns the page index of the page table page in the page table
 * object, which maps the given address base at the page table level
 * lvl.
 */
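/*
 * Sketch of the walk performed below, which uses the same object
 * layout as the identity tables: starting from the root at pindex 0,
 * each step descends to the child page at
 * domain_pgtbl_pte_off(domain, base, i) + parent_pindex * DMAR_NPTEPG + 1,
 * so for lvl == 0 the result is always 0 (the root page itself).
 */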
static vm_pindex_t
domain_pgtbl_get_pindex(struct dmar_domain *domain, iommu_gaddr_t base, int lvl)
{
	vm_pindex_t idx, pidx;
	int i;

	KASSERT(lvl >= 0 && lvl < domain->pglvl,
	    ("wrong lvl %p %d", domain, lvl));

	for (pidx = idx = 0, i = 0; i < lvl; i++, pidx = idx) {
		idx = domain_pgtbl_pte_off(domain, base, i) +
		    pidx * DMAR_NPTEPG + 1;
	}
	return (idx);
}

static dmar_pte_t *
domain_pgtbl_map_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
    int flags, vm_pindex_t *idxp, struct sf_buf **sf)
{
	vm_page_t m;
	struct sf_buf *sfp;
	dmar_pte_t *pte, *ptep;
	vm_pindex_t idx, idx1;

	DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
	KASSERT((flags & IOMMU_PGF_OBJL) != 0, ("lost PGF_OBJL"));

	idx = domain_pgtbl_get_pindex(domain, base, lvl);
	if (*sf != NULL && idx == *idxp) {
		pte = (dmar_pte_t *)sf_buf_kva(*sf);
	} else {
		if (*sf != NULL)
			dmar_unmap_pgtbl(*sf);
		*idxp = idx;
retry:
		pte = dmar_map_pgtbl(domain->pgtbl_obj, idx, flags, sf);
		if (pte == NULL) {
			KASSERT(lvl > 0,
			    ("lost root page table page %p", domain));
			/*
			 * The page table page does not exist;
			 * allocate it and create a pte in the
			 * preceding page level to reference the
			 * allocated page table page.
			 */
			m = dmar_pgalloc(domain->pgtbl_obj, idx, flags |
			    IOMMU_PGF_ZERO);
			if (m == NULL)
				return (NULL);

			/*
			 * Prevent a potential free while pgtbl_obj is
			 * unlocked in the recursive call to
			 * domain_pgtbl_map_pte(), if another thread
			 * did a pte write and clear while the lock
			 * was dropped.
			 */
			m->ref_count++;

			sfp = NULL;
			ptep = domain_pgtbl_map_pte(domain, base, lvl - 1,
			    flags, &idx1, &sfp);
			if (ptep == NULL) {
				KASSERT(m->pindex != 0,
				    ("losing root page %p", domain));
				m->ref_count--;
				dmar_pgfree(domain->pgtbl_obj, m->pindex,
				    flags);
				return (NULL);
			}
			dmar_pte_store(&ptep->pte, DMAR_PTE_R | DMAR_PTE_W |
			    VM_PAGE_TO_PHYS(m));
			dmar_flush_pte_to_ram(domain->dmar, ptep);
			sf_buf_page(sfp)->ref_count += 1;
			m->ref_count--;
			dmar_unmap_pgtbl(sfp);
			/* Only executed once. */
			goto retry;
		}
	}
	pte += domain_pgtbl_pte_off(domain, base, lvl);
	return (pte);
}

static int
domain_map_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags)
{
	dmar_pte_t *pte;
	struct sf_buf *sf;
	iommu_gaddr_t pg_sz, base1, size1;
	vm_pindex_t pi, c, idx, run_sz;
	int lvl;
	bool superpage;

	DMAR_DOMAIN_ASSERT_PGLOCKED(domain);

	base1 = base;
	size1 = size;
	flags |= IOMMU_PGF_OBJL;
	TD_PREP_PINNED_ASSERT;

	for (sf = NULL, pi = 0; size > 0; base += pg_sz, size -= pg_sz,
	    pi += run_sz) {
		for (lvl = 0, c = 0, superpage = false;; lvl++) {
			pg_sz = domain_page_size(domain, lvl);
			run_sz = pg_sz >> DMAR_PAGE_SHIFT;
			if (lvl == domain->pglvl - 1)
				break;
			/*
			 * Check whether the current base is suitable
			 * for a superpage mapping.  First, verify the
			 * level.
			 */
			if (!domain_is_sp_lvl(domain, lvl))
				continue;
			/*
			 * Next, look at the size of the mapping and
			 * the alignment of both the guest and host
			 * addresses.
			 */
			if (size < pg_sz || (base & (pg_sz - 1)) != 0 ||
			    (VM_PAGE_TO_PHYS(ma[pi]) & (pg_sz - 1)) != 0)
				continue;
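			/*
			 * E.g. for a 2MB superpage level pg_sz is 2MB,
			 * so both the guest address and the physical
			 * address of the first page must be
			 * 2MB-aligned, and the following run_sz (512)
			 * host pages must be physically contiguous,
			 * which is verified next.
			 */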
			/* All passed; check host page contiguity. */
			if (c == 0) {
				for (c = 1; c < run_sz; c++) {
					if (VM_PAGE_TO_PHYS(ma[pi + c]) !=
					    VM_PAGE_TO_PHYS(ma[pi + c - 1]) +
					    PAGE_SIZE)
						break;
				}
			}
			if (c >= run_sz) {
				superpage = true;
				break;
			}
		}
		KASSERT(size >= pg_sz,
		    ("mapping loop overflow %p %jx %jx %jx", domain,
		    (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz));
		KASSERT(pg_sz > 0, ("pg_sz 0 lvl %d", lvl));
		pte = domain_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf);
		if (pte == NULL) {
			KASSERT((flags & IOMMU_PGF_WAITOK) == 0,
			    ("failed waitable pte alloc %p", domain));
			if (sf != NULL)
				dmar_unmap_pgtbl(sf);
			domain_unmap_buf_locked(domain, base1, base - base1,
			    flags);
			TD_PINNED_ASSERT;
			return (ENOMEM);
		}
		dmar_pte_store(&pte->pte, VM_PAGE_TO_PHYS(ma[pi]) | pflags |
		    (superpage ? DMAR_PTE_SP : 0));
		dmar_flush_pte_to_ram(domain->dmar, pte);
		sf_buf_page(sf)->ref_count += 1;
	}
	if (sf != NULL)
		dmar_unmap_pgtbl(sf);
	TD_PINNED_ASSERT;
	return (0);
}

static int
domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, vm_page_t *ma, uint64_t eflags, int flags)
{
	struct dmar_domain *domain;
	struct dmar_unit *unit;
	uint64_t pflags;
	int error;

	pflags = ((eflags & IOMMU_MAP_ENTRY_READ) != 0 ? DMAR_PTE_R : 0) |
	    ((eflags & IOMMU_MAP_ENTRY_WRITE) != 0 ? DMAR_PTE_W : 0) |
	    ((eflags & IOMMU_MAP_ENTRY_SNOOP) != 0 ? DMAR_PTE_SNP : 0) |
	    ((eflags & IOMMU_MAP_ENTRY_TM) != 0 ? DMAR_PTE_TM : 0);

	domain = IODOM2DOM(iodom);
	unit = domain->dmar;

	KASSERT((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) == 0,
	    ("modifying idmap pagetable domain %p", domain));
	KASSERT((base & DMAR_PAGE_MASK) == 0,
	    ("non-aligned base %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT((size & DMAR_PAGE_MASK) == 0,
	    ("non-aligned size %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT(size > 0, ("zero size %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT(base < (1ULL << domain->agaw),
	    ("base too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
	    (uintmax_t)size, domain->agaw));
	KASSERT(base + size < (1ULL << domain->agaw),
	    ("end too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
	    (uintmax_t)size, domain->agaw));
	KASSERT(base + size > base,
	    ("size overflow %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT((pflags & (DMAR_PTE_R | DMAR_PTE_W)) != 0,
	    ("neither read nor write %jx", (uintmax_t)pflags));
	KASSERT((pflags & ~(DMAR_PTE_R | DMAR_PTE_W | DMAR_PTE_SNP |
	    DMAR_PTE_TM)) == 0,
	    ("invalid pte flags %jx", (uintmax_t)pflags));
	KASSERT((pflags & DMAR_PTE_SNP) == 0 ||
	    (unit->hw_ecap & DMAR_ECAP_SC) != 0,
	    ("PTE_SNP for dmar without snoop control %p %jx",
	    domain, (uintmax_t)pflags));
	KASSERT((pflags & DMAR_PTE_TM) == 0 ||
	    (unit->hw_ecap & DMAR_ECAP_DI) != 0,
	    ("PTE_TM for dmar without DIOTLB %p %jx",
	    domain, (uintmax_t)pflags));
	KASSERT((flags & ~IOMMU_PGF_WAITOK) == 0, ("invalid flags %x", flags));

	DMAR_DOMAIN_PGLOCK(domain);
	error = domain_map_buf_locked(domain, base, size, ma, pflags, flags);
	DMAR_DOMAIN_PGUNLOCK(domain);
	if (error != 0)
		return (error);

	if ((unit->hw_cap & DMAR_CAP_CM) != 0)
		domain_flush_iotlb_sync(domain, base, size);
	else if ((unit->hw_cap & DMAR_CAP_RWBF) != 0) {
		/* See 11.1 Write Buffer Flushing. */
		DMAR_LOCK(unit);
		dmar_flush_write_bufs(unit);
		DMAR_UNLOCK(unit);
	}
	return (0);
}

static void domain_unmap_clear_pte(struct dmar_domain *domain,
    iommu_gaddr_t base, int lvl, int flags, dmar_pte_t *pte,
    struct sf_buf **sf, bool free_sf);

static void
domain_free_pgtbl_pde(struct dmar_domain *domain, iommu_gaddr_t base,
    int lvl, int flags)
{
	struct sf_buf *sf;
	dmar_pte_t *pde;
	vm_pindex_t idx;

	sf = NULL;
	pde = domain_pgtbl_map_pte(domain, base, lvl, flags, &idx, &sf);
	domain_unmap_clear_pte(domain, base, lvl, flags, pde, &sf, true);
}

static void
domain_unmap_clear_pte(struct dmar_domain *domain, iommu_gaddr_t base, int lvl,
    int flags, dmar_pte_t *pte, struct sf_buf **sf, bool free_sf)
{
	vm_page_t m;

	dmar_pte_clear(&pte->pte);
	dmar_flush_pte_to_ram(domain->dmar, pte);
	m = sf_buf_page(*sf);
	if (free_sf) {
		dmar_unmap_pgtbl(*sf);
		*sf = NULL;
	}
	m->ref_count--;
	if (m->ref_count != 0)
		return;
	KASSERT(lvl != 0,
	    ("lost reference (lvl) on root pg domain %p base %jx lvl %d",
	    domain, (uintmax_t)base, lvl));
	KASSERT(m->pindex != 0,
	    ("lost reference (idx) on root pg domain %p base %jx lvl %d",
	    domain, (uintmax_t)base, lvl));
	dmar_pgfree(domain->pgtbl_obj, m->pindex, flags);
	domain_free_pgtbl_pde(domain, base, lvl - 1, flags);
}

/*
 * Assumes that the unmap is never partial.
 */
static int
domain_unmap_buf_locked(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size, int flags)
{
	dmar_pte_t *pte;
	struct sf_buf *sf;
	vm_pindex_t idx;
	iommu_gaddr_t pg_sz;
	int lvl;

	DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
	if (size == 0)
		return (0);

	KASSERT((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) == 0,
	    ("modifying idmap pagetable domain %p", domain));
	KASSERT((base & DMAR_PAGE_MASK) == 0,
	    ("non-aligned base %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT((size & DMAR_PAGE_MASK) == 0,
	    ("non-aligned size %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT(base < (1ULL << domain->agaw),
	    ("base too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
	    (uintmax_t)size, domain->agaw));
	KASSERT(base + size < (1ULL << domain->agaw),
	    ("end too high %p %jx %jx agaw %d", domain, (uintmax_t)base,
	    (uintmax_t)size, domain->agaw));
	KASSERT(base + size > base,
	    ("size overflow %p %jx %jx", domain, (uintmax_t)base,
	    (uintmax_t)size));
	KASSERT((flags & ~IOMMU_PGF_WAITOK) == 0, ("invalid flags %x", flags));

	pg_sz = 0; /* silence gcc */
	flags |= IOMMU_PGF_OBJL;
	TD_PREP_PINNED_ASSERT;

	for (sf = NULL; size > 0; base += pg_sz, size -= pg_sz) {
		for (lvl = 0; lvl < domain->pglvl; lvl++) {
			if (lvl != domain->pglvl - 1 &&
			    !domain_is_sp_lvl(domain, lvl))
				continue;
			pg_sz = domain_page_size(domain, lvl);
			if (pg_sz > size)
				continue;
			pte = domain_pgtbl_map_pte(domain, base, lvl, flags,
			    &idx, &sf);
			KASSERT(pte != NULL,
			    ("sleeping or page missed %p %jx %d 0x%x",
			    domain, (uintmax_t)base, lvl, flags));
			if ((pte->pte & DMAR_PTE_SP) != 0 ||
			    lvl == domain->pglvl - 1) {
				domain_unmap_clear_pte(domain, base, lvl,
				    flags, pte, &sf, false);
				break;
			}
		}
		KASSERT(size >= pg_sz,
		    ("unmapping loop overflow %p %jx %jx %jx", domain,
		    (uintmax_t)base, (uintmax_t)size, (uintmax_t)pg_sz));
	}
	if (sf != NULL)
		dmar_unmap_pgtbl(sf);
	/*
	 * See 11.1 Write Buffer Flushing for an explanation why RWBF
	 * can be ignored here.
	 */

	TD_PINNED_ASSERT;
	return (0);
}

static int
domain_unmap_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, int flags)
{
	struct dmar_domain *domain;
	int error;

	domain = IODOM2DOM(iodom);

	DMAR_DOMAIN_PGLOCK(domain);
	error = domain_unmap_buf_locked(domain, base, size, flags);
	DMAR_DOMAIN_PGUNLOCK(domain);
	return (error);
}

int
domain_alloc_pgtbl(struct dmar_domain *domain)
{
	vm_page_t m;

	KASSERT(domain->pgtbl_obj == NULL,
	    ("already initialized %p", domain));

	domain->pgtbl_obj = vm_pager_allocate(OBJT_PHYS, NULL,
	    IDX_TO_OFF(pglvl_max_pages(domain->pglvl)), 0, 0, NULL);
	DMAR_DOMAIN_PGLOCK(domain);
	m = dmar_pgalloc(domain->pgtbl_obj, 0, IOMMU_PGF_WAITOK |
	    IOMMU_PGF_ZERO | IOMMU_PGF_OBJL);
	/* No implicit free of the top level page table page. */
	m->ref_count = 1;
	DMAR_DOMAIN_PGUNLOCK(domain);
	DMAR_LOCK(domain->dmar);
	domain->iodom.flags |= IOMMU_DOMAIN_PGTBL_INITED;
	DMAR_UNLOCK(domain->dmar);
	return (0);
}

void
domain_free_pgtbl(struct dmar_domain *domain)
{
	vm_object_t obj;
	vm_page_t m;

	obj = domain->pgtbl_obj;
	if (obj == NULL) {
		KASSERT((domain->dmar->hw_ecap & DMAR_ECAP_PT) != 0 &&
		    (domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0,
		    ("lost pagetable object domain %p", domain));
		return;
	}
	DMAR_DOMAIN_ASSERT_PGLOCKED(domain);
	domain->pgtbl_obj = NULL;

	if ((domain->iodom.flags & IOMMU_DOMAIN_IDMAP) != 0) {
		put_idmap_pgtbl(obj);
		domain->iodom.flags &= ~IOMMU_DOMAIN_IDMAP;
		return;
	}

	/* Obliterate ref_counts */
	VM_OBJECT_ASSERT_WLOCKED(obj);
	for (m = vm_page_lookup(obj, 0); m != NULL; m = vm_page_next(m))
		m->ref_count = 0;
	VM_OBJECT_WUNLOCK(obj);
	vm_object_deallocate(obj);
}

static inline uint64_t
domain_wait_iotlb_flush(struct dmar_unit *unit, uint64_t wt, int iro)
{
	uint64_t iotlbr;

	dmar_write8(unit, iro + DMAR_IOTLB_REG_OFF, DMAR_IOTLB_IVT |
	    DMAR_IOTLB_DR | DMAR_IOTLB_DW | wt);
	for (;;) {
		iotlbr = dmar_read8(unit, iro + DMAR_IOTLB_REG_OFF);
		if ((iotlbr & DMAR_IOTLB_IVT) == 0)
			break;
		cpu_spinwait();
	}
	return (iotlbr);
}

void
domain_flush_iotlb_sync(struct dmar_domain *domain, iommu_gaddr_t base,
    iommu_gaddr_t size)
{
	struct dmar_unit *unit;
	iommu_gaddr_t isize;
	uint64_t iotlbr;
	int am, iro;

	unit = domain->dmar;
	KASSERT(!unit->qi_enabled, ("dmar%d: sync iotlb flush call",
	    unit->iommu.unit));
	iro = DMAR_ECAP_IRO(unit->hw_ecap) * 16;
	DMAR_LOCK(unit);
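	/*
	 * A single domain-wide invalidation is used when PSI is not
	 * supported or the range is large (more than 2MB here);
	 * otherwise page-granular requests are issued below.
	 */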
	if ((unit->hw_cap & DMAR_CAP_PSI) == 0 || size > 2 * 1024 * 1024) {
		iotlbr = domain_wait_iotlb_flush(unit, DMAR_IOTLB_IIRG_DOM |
		    DMAR_IOTLB_DID(domain->domain), iro);
		KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
		    DMAR_IOTLB_IAIG_INVLD,
		    ("dmar%d: invalidation failed %jx", unit->iommu.unit,
		    (uintmax_t)iotlbr));
	} else {
		for (; size > 0; base += isize, size -= isize) {
			am = calc_am(unit, base, size, &isize);
			dmar_write8(unit, iro, base | am);
			iotlbr = domain_wait_iotlb_flush(unit,
			    DMAR_IOTLB_IIRG_PAGE |
			    DMAR_IOTLB_DID(domain->domain), iro);
			KASSERT((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
			    DMAR_IOTLB_IAIG_INVLD,
			    ("dmar%d: PSI invalidation failed "
			    "iotlbr 0x%jx base 0x%jx size 0x%jx am %d",
			    unit->iommu.unit, (uintmax_t)iotlbr,
			    (uintmax_t)base, (uintmax_t)size, am));
			/*
			 * Any non-page granularity covers the whole
			 * guest address space for the domain.
			 */
			if ((iotlbr & DMAR_IOTLB_IAIG_MASK) !=
			    DMAR_IOTLB_IAIG_PAGE)
				break;
		}
	}
	DMAR_UNLOCK(unit);
}

const struct iommu_domain_map_ops dmar_domain_map_ops = {
	.map = domain_map_buf,
	.unmap = domain_unmap_buf,
};