1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (C) 2018 Marvell International Ltd.
5 *
6 * Author: Jayachandran C Nair <[email protected]>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include "opt_acpi.h"
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34
35 #include <sys/param.h>
36 #include <sys/bus.h>
37 #include <sys/kernel.h>
38 #include <sys/malloc.h>
39
40 #include <machine/intr.h>
41
42 #include <contrib/dev/acpica/include/acpi.h>
43 #include <contrib/dev/acpica/include/accommon.h>
44 #include <contrib/dev/acpica/include/actables.h>
45
46 #include <dev/acpica/acpivar.h>
47
/*
 * Track the next XREF available for ITS groups.  XREFs are handed out
 * sequentially, starting at ACPI_MSI_XREF, one per ITS block found in
 * the MADT (see madt_resolve_its_xref()).
 */
static u_int acpi_its_xref = ACPI_MSI_XREF;
52
/*
 * Some types of IORT nodes have a set of mappings.  Each of them maps
 * a range of device IDs [base..end] from the current node to another
 * node.  The corresponding device IDs on the destination node start at
 * outbase.
 */
struct iort_map_entry {
	u_int			base;		/* first input ID of range */
	u_int			end;		/* last input ID (inclusive) */
	u_int			outbase;	/* first output ID on target */
	u_int			flags;		/* ACPI_IORT_ID_* flags */
	u_int			out_node_offset; /* target node offset in IORT */
	struct iort_node	*out_node;	/* resolved target node, set
						   by iort_resolve_node() */
};
67
/*
 * The ITS group node does not have any outgoing mappings.  Instead it
 * has a list of GIC ITS blocks which can handle the device ID.  We
 * will store the PIC XREF used by the block and the block's proximity
 * data here, so that it can be retrieved together.
 */
struct iort_its_entry {
	u_int		its_id;		/* ITS ID from the IORT/MADT */
	u_int		xref;		/* PIC XREF, 0 until assigned */
	int		pxm;		/* VM domain ID, -1 if unknown */
};
79
/*
 * IORT node.  Each node has some device specific data depending on the
 * type of the node.  The node can also have a set of mappings, OR in
 * case of ITS group nodes a set of ITS entries.
 * The nodes are kept in a TAILQ by type (see the lists below).
 */
struct iort_node {
	TAILQ_ENTRY(iort_node)	next;		/* next entry with same type */
	enum AcpiIortNodeType	type;		/* ACPI type */
	u_int			node_offset;	/* offset in IORT - node ID */
	u_int			nentries;	/* items in array below */
	u_int			usecount;	/* for bookkeeping */
	u_int			revision;	/* node revision */
	union {					/* type-specific data, selected
						   by 'type' */
		ACPI_IORT_ROOT_COMPLEX	pci_rc;	/* PCI root complex */
		ACPI_IORT_SMMU		smmu;
		ACPI_IORT_SMMU_V3	smmu_v3;
	} data;
	union {					/* mappings for RC/SMMU nodes,
						   ITS entries for ITS groups */
		struct iort_map_entry	*mappings;	/* node mappings */
		struct iort_its_entry	*its;		/* ITS IDs array */
	} entries;
};
103
/*
 * Lists for each of the node types.  SMMU and SMMUv3 nodes share the
 * smmu_nodes list; they are distinguished by iort_node.type.
 */
static TAILQ_HEAD(, iort_node) pci_nodes = TAILQ_HEAD_INITIALIZER(pci_nodes);
static TAILQ_HEAD(, iort_node) smmu_nodes = TAILQ_HEAD_INITIALIZER(smmu_nodes);
static TAILQ_HEAD(, iort_node) its_groups = TAILQ_HEAD_INITIALIZER(its_groups);
108
109 static int
iort_entry_get_id_mapping_index(struct iort_node * node)110 iort_entry_get_id_mapping_index(struct iort_node *node)
111 {
112
113 switch(node->type) {
114 case ACPI_IORT_NODE_SMMU_V3:
115 /* The ID mapping field was added in version 1 */
116 if (node->revision < 1)
117 return (-1);
118
119 /*
120 * If all the control interrupts are GISCV based the ID
121 * mapping field is ignored.
122 */
123 if (node->data.smmu_v3.EventGsiv != 0 &&
124 node->data.smmu_v3.PriGsiv != 0 &&
125 node->data.smmu_v3.GerrGsiv != 0 &&
126 node->data.smmu_v3.SyncGsiv != 0)
127 return (-1);
128
129 if (node->data.smmu_v3.IdMappingIndex >= node->nentries)
130 return (-1);
131
132 return (node->data.smmu_v3.IdMappingIndex);
133 case ACPI_IORT_NODE_PMCG:
134 return (0);
135 default:
136 break;
137 }
138
139 return (-1);
140 }
141
142 /*
143 * Lookup an ID in the mappings array. If successful, map the input ID
144 * to the output ID and return the output node found.
145 */
146 static struct iort_node *
iort_entry_lookup(struct iort_node * node,u_int id,u_int * outid)147 iort_entry_lookup(struct iort_node *node, u_int id, u_int *outid)
148 {
149 struct iort_map_entry *entry;
150 int i, id_map;
151
152 id_map = iort_entry_get_id_mapping_index(node);
153 entry = node->entries.mappings;
154 for (i = 0; i < node->nentries; i++, entry++) {
155 if (i == id_map)
156 continue;
157 if (entry->base <= id && id <= entry->end)
158 break;
159 }
160 if (i == node->nentries)
161 return (NULL);
162 if ((entry->flags & ACPI_IORT_ID_SINGLE_MAPPING) == 0)
163 *outid = entry->outbase + (id - entry->base);
164 else
165 *outid = entry->outbase;
166 return (entry->out_node);
167 }
168
169 /*
170 * Map a PCI RID to a SMMU node or an ITS node, based on outtype.
171 */
172 static struct iort_node *
iort_pci_rc_map(u_int seg,u_int rid,u_int outtype,u_int * outid)173 iort_pci_rc_map(u_int seg, u_int rid, u_int outtype, u_int *outid)
174 {
175 struct iort_node *node, *out_node;
176 u_int nxtid;
177
178 out_node = NULL;
179 TAILQ_FOREACH(node, &pci_nodes, next) {
180 if (node->data.pci_rc.PciSegmentNumber != seg)
181 continue;
182 out_node = iort_entry_lookup(node, rid, &nxtid);
183 if (out_node != NULL)
184 break;
185 }
186
187 /* Could not find a PCI RC node with segment and device ID. */
188 if (out_node == NULL)
189 return (NULL);
190
191 /* Node can be SMMU or ITS. If SMMU, we need another lookup. */
192 if (outtype == ACPI_IORT_NODE_ITS_GROUP &&
193 (out_node->type == ACPI_IORT_NODE_SMMU_V3 ||
194 out_node->type == ACPI_IORT_NODE_SMMU)) {
195 out_node = iort_entry_lookup(out_node, nxtid, &nxtid);
196 if (out_node == NULL)
197 return (NULL);
198 }
199
200 KASSERT(out_node->type == outtype, ("mapping fail"));
201 *outid = nxtid;
202 return (out_node);
203 }
204
#ifdef notyet
/*
 * Not implemented, map a PCIe device to the SMMU it is associated with.
 * Always fails with ENXIO; kept under 'notyet' as a placeholder for
 * the future SMMU driver hookup.
 */
int
acpi_iort_map_smmu(u_int seg, u_int devid, void **smmu, u_int *sid)
{
	/* XXX: convert oref to SMMU device */
	return (ENXIO);
}
#endif
216
217 /*
218 * Allocate memory for a node, initialize and copy mappings. 'start'
219 * argument provides the table start used to calculate the node offset.
220 */
221 static void
iort_copy_data(struct iort_node * node,ACPI_IORT_NODE * node_entry)222 iort_copy_data(struct iort_node *node, ACPI_IORT_NODE *node_entry)
223 {
224 ACPI_IORT_ID_MAPPING *map_entry;
225 struct iort_map_entry *mapping;
226 int i;
227
228 map_entry = ACPI_ADD_PTR(ACPI_IORT_ID_MAPPING, node_entry,
229 node_entry->MappingOffset);
230 node->nentries = node_entry->MappingCount;
231 node->usecount = 0;
232 mapping = malloc(sizeof(*mapping) * node->nentries, M_DEVBUF,
233 M_WAITOK | M_ZERO);
234 node->entries.mappings = mapping;
235 for (i = 0; i < node->nentries; i++, mapping++, map_entry++) {
236 mapping->base = map_entry->InputBase;
237 /*
238 * IdCount means "The number of IDs in the range minus one" (ARM DEN 0049D).
239 * We use <= for comparison against this field, so don't add one here.
240 */
241 mapping->end = map_entry->InputBase + map_entry->IdCount;
242 mapping->outbase = map_entry->OutputBase;
243 mapping->out_node_offset = map_entry->OutputReference;
244 mapping->flags = map_entry->Flags;
245 mapping->out_node = NULL;
246 }
247 }
248
249 /*
250 * Allocate and copy an ITS group.
251 */
252 static void
iort_copy_its(struct iort_node * node,ACPI_IORT_NODE * node_entry)253 iort_copy_its(struct iort_node *node, ACPI_IORT_NODE *node_entry)
254 {
255 struct iort_its_entry *its;
256 ACPI_IORT_ITS_GROUP *itsg_entry;
257 UINT32 *id;
258 int i;
259
260 itsg_entry = (ACPI_IORT_ITS_GROUP *)node_entry->NodeData;
261 node->nentries = itsg_entry->ItsCount;
262 node->usecount = 0;
263 its = malloc(sizeof(*its) * node->nentries, M_DEVBUF, M_WAITOK | M_ZERO);
264 node->entries.its = its;
265 id = &itsg_entry->Identifiers[0];
266 for (i = 0; i < node->nentries; i++, its++, id++) {
267 its->its_id = *id;
268 its->pxm = -1;
269 its->xref = 0;
270 }
271 }
272
273 /*
274 * Walk the IORT table and add nodes to corresponding list.
275 */
276 static void
iort_add_nodes(ACPI_IORT_NODE * node_entry,u_int node_offset)277 iort_add_nodes(ACPI_IORT_NODE *node_entry, u_int node_offset)
278 {
279 ACPI_IORT_ROOT_COMPLEX *pci_rc;
280 ACPI_IORT_SMMU *smmu;
281 ACPI_IORT_SMMU_V3 *smmu_v3;
282 struct iort_node *node;
283
284 node = malloc(sizeof(*node), M_DEVBUF, M_WAITOK | M_ZERO);
285 node->type = node_entry->Type;
286 node->node_offset = node_offset;
287 node->revision = node_entry->Revision;
288
289 /* copy nodes depending on type */
290 switch(node_entry->Type) {
291 case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
292 pci_rc = (ACPI_IORT_ROOT_COMPLEX *)node_entry->NodeData;
293 memcpy(&node->data.pci_rc, pci_rc, sizeof(*pci_rc));
294 iort_copy_data(node, node_entry);
295 TAILQ_INSERT_TAIL(&pci_nodes, node, next);
296 break;
297 case ACPI_IORT_NODE_SMMU:
298 smmu = (ACPI_IORT_SMMU *)node_entry->NodeData;
299 memcpy(&node->data.smmu, smmu, sizeof(*smmu));
300 iort_copy_data(node, node_entry);
301 TAILQ_INSERT_TAIL(&smmu_nodes, node, next);
302 break;
303 case ACPI_IORT_NODE_SMMU_V3:
304 smmu_v3 = (ACPI_IORT_SMMU_V3 *)node_entry->NodeData;
305 memcpy(&node->data.smmu_v3, smmu_v3, sizeof(*smmu_v3));
306 iort_copy_data(node, node_entry);
307 TAILQ_INSERT_TAIL(&smmu_nodes, node, next);
308 break;
309 case ACPI_IORT_NODE_ITS_GROUP:
310 iort_copy_its(node, node_entry);
311 TAILQ_INSERT_TAIL(&its_groups, node, next);
312 break;
313 default:
314 printf("ACPI: IORT: Dropping unhandled type %u\n",
315 node_entry->Type);
316 free(node, M_DEVBUF);
317 break;
318 }
319 }
320
321 /*
322 * For the mapping entry given, walk thru all the possible destination
323 * nodes and resolve the output reference.
324 */
325 static void
iort_resolve_node(struct iort_map_entry * entry,int check_smmu)326 iort_resolve_node(struct iort_map_entry *entry, int check_smmu)
327 {
328 struct iort_node *node, *np;
329
330 node = NULL;
331 if (check_smmu) {
332 TAILQ_FOREACH(np, &smmu_nodes, next) {
333 if (entry->out_node_offset == np->node_offset) {
334 node = np;
335 break;
336 }
337 }
338 }
339 if (node == NULL) {
340 TAILQ_FOREACH(np, &its_groups, next) {
341 if (entry->out_node_offset == np->node_offset) {
342 node = np;
343 break;
344 }
345 }
346 }
347 if (node != NULL) {
348 node->usecount++;
349 entry->out_node = node;
350 } else {
351 printf("ACPI: IORT: Firmware Bug: no mapping for node %u\n",
352 entry->out_node_offset);
353 }
354 }
355
356 /*
357 * Resolve all output node references to node pointers.
358 */
359 static void
iort_post_process_mappings(void)360 iort_post_process_mappings(void)
361 {
362 struct iort_node *node;
363 int i;
364
365 TAILQ_FOREACH(node, &pci_nodes, next)
366 for (i = 0; i < node->nentries; i++)
367 iort_resolve_node(&node->entries.mappings[i], TRUE);
368 TAILQ_FOREACH(node, &smmu_nodes, next)
369 for (i = 0; i < node->nentries; i++)
370 iort_resolve_node(&node->entries.mappings[i], FALSE);
371 /* TODO: named nodes */
372 }
373
374 /*
375 * Walk MADT table, assign PIC xrefs to all ITS entries.
376 */
377 static void
madt_resolve_its_xref(ACPI_SUBTABLE_HEADER * entry,void * arg)378 madt_resolve_its_xref(ACPI_SUBTABLE_HEADER *entry, void *arg)
379 {
380 ACPI_MADT_GENERIC_TRANSLATOR *gict;
381 struct iort_node *its_node;
382 struct iort_its_entry *its_entry;
383 u_int xref;
384 int i, matches;
385
386 if (entry->Type != ACPI_MADT_TYPE_GENERIC_TRANSLATOR)
387 return;
388
389 gict = (ACPI_MADT_GENERIC_TRANSLATOR *)entry;
390 matches = 0;
391 xref = acpi_its_xref++;
392 TAILQ_FOREACH(its_node, &its_groups, next) {
393 its_entry = its_node->entries.its;
394 for (i = 0; i < its_node->nentries; i++, its_entry++) {
395 if (its_entry->its_id == gict->TranslationId) {
396 its_entry->xref = xref;
397 matches++;
398 }
399 }
400 }
401 if (matches == 0)
402 printf("ACPI: IORT: Unused ITS block, ID %u\n",
403 gict->TranslationId);
404 }
405
/*
 * SRAT walk callback: assign the VM domain ID derived from this GIC
 * ITS affinity entry's proximity domain to all matching IORT ITS
 * entries.  'arg' is the map_counts array from iort_post_process_its()
 * used to detect many-to-one proximity-domain mappings.
 */
static void
srat_resolve_its_pxm(ACPI_SUBTABLE_HEADER *entry, void *arg)
{
	ACPI_SRAT_GIC_ITS_AFFINITY *gicits;
	struct iort_node *its_node;
	struct iort_its_entry *its_entry;
	int *map_counts;
	int i, matches, dom;

	if (entry->Type != ACPI_SRAT_TYPE_GIC_ITS_AFFINITY)
		return;

	matches = 0;
	map_counts = arg;
	gicits = (ACPI_SRAT_GIC_ITS_AFFINITY *)entry;
	/* dom is -1 when the proximity domain cannot be mapped. */
	dom = acpi_map_pxm_to_vm_domainid(gicits->ProximityDomain);

	/*
	 * Catch firmware and config errors. map_counts keeps a
	 * count of ProximityDomain values mapping to a domain ID
	 */
#if MAXMEMDOM > 1
	if (dom == -1)
		printf("Firmware Error: Proximity Domain %d could not be"
		    " mapped for GIC ITS ID %d!\n",
		    gicits->ProximityDomain, gicits->ItsId);
#endif
	/* use dom + 1 as index to handle the case where dom == -1 */
	i = ++map_counts[dom + 1];
	if (i > 1) {
#ifdef NUMA
		if (dom != -1)
			printf("ERROR: Multiple Proximity Domains map to the"
			    " same NUMA domain %d!\n", dom);
#else
		printf("WARNING: multiple Proximity Domains in SRAT but NUMA"
		    " NOT enabled!\n");
#endif
	}
	/* 'i' is reused as a plain loop index from here on. */
	TAILQ_FOREACH(its_node, &its_groups, next) {
		its_entry = its_node->entries.its;
		for (i = 0; i < its_node->nentries; i++, its_entry++) {
			if (its_entry->its_id == gicits->ItsId) {
				its_entry->pxm = dom;
				matches++;
			}
		}
	}
	if (matches == 0)
		printf("ACPI: IORT: ITS block %u in SRAT not found in IORT!\n",
		    gicits->ItsId);
}
461
462 /*
463 * Cross check the ITS Id with MADT and (if available) SRAT.
464 */
465 static int
iort_post_process_its(void)466 iort_post_process_its(void)
467 {
468 ACPI_TABLE_MADT *madt;
469 ACPI_TABLE_SRAT *srat;
470 vm_paddr_t madt_pa, srat_pa;
471 int map_counts[MAXMEMDOM + 1] = { 0 };
472
473 /* Check ITS block in MADT */
474 madt_pa = acpi_find_table(ACPI_SIG_MADT);
475 KASSERT(madt_pa != 0, ("no MADT!"));
476 madt = acpi_map_table(madt_pa, ACPI_SIG_MADT);
477 KASSERT(madt != NULL, ("can't map MADT!"));
478 acpi_walk_subtables(madt + 1, (char *)madt + madt->Header.Length,
479 madt_resolve_its_xref, NULL);
480 acpi_unmap_table(madt);
481
482 /* Get proximtiy if available */
483 srat_pa = acpi_find_table(ACPI_SIG_SRAT);
484 if (srat_pa != 0) {
485 srat = acpi_map_table(srat_pa, ACPI_SIG_SRAT);
486 KASSERT(srat != NULL, ("can't map SRAT!"));
487 acpi_walk_subtables(srat + 1, (char *)srat + srat->Header.Length,
488 srat_resolve_its_pxm, map_counts);
489 acpi_unmap_table(srat);
490 }
491 return (0);
492 }
493
494 /*
495 * Find, parse, and save IO Remapping Table ("IORT").
496 */
497 static int
acpi_parse_iort(void * dummy __unused)498 acpi_parse_iort(void *dummy __unused)
499 {
500 ACPI_TABLE_IORT *iort;
501 ACPI_IORT_NODE *node_entry;
502 vm_paddr_t iort_pa;
503 u_int node_offset;
504
505 iort_pa = acpi_find_table(ACPI_SIG_IORT);
506 if (iort_pa == 0)
507 return (ENXIO);
508
509 iort = acpi_map_table(iort_pa, ACPI_SIG_IORT);
510 if (iort == NULL) {
511 printf("ACPI: Unable to map the IORT table!\n");
512 return (ENXIO);
513 }
514 for (node_offset = iort->NodeOffset;
515 node_offset < iort->Header.Length;
516 node_offset += node_entry->Length) {
517 node_entry = ACPI_ADD_PTR(ACPI_IORT_NODE, iort, node_offset);
518 iort_add_nodes(node_entry, node_offset);
519 }
520 acpi_unmap_table(iort);
521 iort_post_process_mappings();
522 iort_post_process_its();
523 return (0);
524 }
525 SYSINIT(acpi_parse_iort, SI_SUB_DRIVERS, SI_ORDER_FIRST, acpi_parse_iort, NULL);
526
527 /*
528 * Provide ITS ID to PIC xref mapping.
529 */
530 int
acpi_iort_its_lookup(u_int its_id,u_int * xref,int * pxm)531 acpi_iort_its_lookup(u_int its_id, u_int *xref, int *pxm)
532 {
533 struct iort_node *its_node;
534 struct iort_its_entry *its_entry;
535 int i;
536
537 TAILQ_FOREACH(its_node, &its_groups, next) {
538 its_entry = its_node->entries.its;
539 for (i = 0; i < its_node->nentries; i++, its_entry++) {
540 if (its_entry->its_id == its_id) {
541 *xref = its_entry->xref;
542 *pxm = its_entry->pxm;
543 return (0);
544 }
545 }
546 }
547 return (ENOENT);
548 }
549
550 /*
551 * Find mapping for a PCIe device given segment and device ID
552 * returns the XREF for MSI interrupt setup and the device ID to
553 * use for the interrupt setup
554 */
555 int
acpi_iort_map_pci_msi(u_int seg,u_int rid,u_int * xref,u_int * devid)556 acpi_iort_map_pci_msi(u_int seg, u_int rid, u_int *xref, u_int *devid)
557 {
558 struct iort_node *node;
559
560 node = iort_pci_rc_map(seg, rid, ACPI_IORT_NODE_ITS_GROUP, devid);
561 if (node == NULL)
562 return (ENOENT);
563
564 /* This should be an ITS node */
565 KASSERT(node->type == ACPI_IORT_NODE_ITS_GROUP, ("bad group"));
566
567 /* return first node, we don't handle more than that now. */
568 *xref = node->entries.its[0].xref;
569 return (0);
570 }
571
572 int
acpi_iort_map_pci_smmuv3(u_int seg,u_int rid,u_int * xref,u_int * sid)573 acpi_iort_map_pci_smmuv3(u_int seg, u_int rid, u_int *xref, u_int *sid)
574 {
575 ACPI_IORT_SMMU_V3 *smmu;
576 struct iort_node *node;
577
578 node = iort_pci_rc_map(seg, rid, ACPI_IORT_NODE_SMMU_V3, sid);
579 if (node == NULL)
580 return (ENOENT);
581
582 /* This should be an SMMU node. */
583 KASSERT(node->type == ACPI_IORT_NODE_SMMU_V3, ("bad node"));
584
585 smmu = (ACPI_IORT_SMMU_V3 *)&node->data.smmu_v3;
586 *xref = smmu->BaseAddress;
587
588 return (0);
589 }
590