1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2003-2008 Joseph Koshy
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include <sys/types.h>
33 #include <sys/param.h>
34 #include <sys/module.h>
35 #include <sys/pmc.h>
36 #include <sys/syscall.h>
37
38 #include <ctype.h>
39 #include <errno.h>
40 #include <err.h>
41 #include <fcntl.h>
42 #include <pmc.h>
43 #include <stdio.h>
44 #include <stdlib.h>
45 #include <string.h>
46 #include <strings.h>
47 #include <sysexits.h>
48 #include <unistd.h>
49
50 #include "libpmcinternal.h"
51
52 /* Function prototypes */
53 #if defined(__amd64__) || defined(__i386__)
54 static int k8_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
55 struct pmc_op_pmcallocate *_pmc_config);
56 #endif
57 #if defined(__amd64__) || defined(__i386__)
58 static int tsc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
59 struct pmc_op_pmcallocate *_pmc_config);
60 #endif
61 #if defined(__arm__)
62 static int armv7_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
63 struct pmc_op_pmcallocate *_pmc_config);
64 #endif
65 #if defined(__aarch64__)
66 static int arm64_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
67 struct pmc_op_pmcallocate *_pmc_config);
68 #endif
69 #if defined(__mips__)
70 static int mips_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
71 struct pmc_op_pmcallocate *_pmc_config);
72 #endif /* __mips__ */
73 static int soft_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
74 struct pmc_op_pmcallocate *_pmc_config);
75
76 #if defined(__powerpc__)
77 static int powerpc_allocate_pmc(enum pmc_event _pe, char *_ctrspec,
78 struct pmc_op_pmcallocate *_pmc_config);
79 #endif /* __powerpc__ */
80
81 #define PMC_CALL(cmd, params) \
82 syscall(pmc_syscall, PMC_OP_##cmd, (params))
83
84 /*
85 * Event aliases provide a way for the user to ask for generic events
86 * like "cache-misses", or "instructions-retired". These aliases are
87 * mapped to the appropriate canonical event descriptions using a
88 * lookup table.
89 */
90 struct pmc_event_alias {
91 const char *pm_alias;
92 const char *pm_spec;
93 };
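/*
 * For illustration only, an alias table for a hypothetical CPU might
 * look like the sketch below (the canonical specifications shown are
 * made up; see k8_aliases further down for a real table):
 *
 *	static const struct pmc_event_alias example_aliases[] = {
 *		{ "instructions", "example-instructions-retired" },
 *		{ "cycles",       "example-cpu-cycles" },
 *		{ NULL, NULL }		(terminating entry)
 *	};
 */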
94
95 static const struct pmc_event_alias *pmc_mdep_event_aliases;
96
97 /*
98 * The pmc_event_descr structure maps symbolic names known to the user
99 * to integer codes used by the PMC KLD.
100 */
101 struct pmc_event_descr {
102 const char *pm_ev_name;
103 enum pmc_event pm_ev_code;
104 };
105
106 /*
107 * The pmc_class_descr structure maps class name prefixes for
108 * event names to event tables and other PMC class data.
109 */
110 struct pmc_class_descr {
111 const char *pm_evc_name;
112 size_t pm_evc_name_size;
113 enum pmc_class pm_evc_class;
114 const struct pmc_event_descr *pm_evc_event_table;
115 size_t pm_evc_event_table_size;
116 int (*pm_evc_allocate_pmc)(enum pmc_event _pe,
117 char *_ctrspec, struct pmc_op_pmcallocate *_pa);
118 };
119
120 #define PMC_TABLE_SIZE(N) (sizeof(N)/sizeof(N[0]))
121 #define PMC_EVENT_TABLE_SIZE(N) PMC_TABLE_SIZE(N##_event_table)
122
123 #undef __PMC_EV
124 #define __PMC_EV(C,N) { #N, PMC_EV_ ## C ## _ ## N },
125
126 /*
127 * PMC_CLASSDEP_TABLE(NAME, CLASS)
128 *
129 * Define a table mapping event names and aliases to HWPMC event IDs.
130 */
131 #define PMC_CLASSDEP_TABLE(N, C) \
132 static const struct pmc_event_descr N##_event_table[] = \
133 { \
134 __PMC_EV_##C() \
135 }
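/*
 * For example, PMC_CLASSDEP_TABLE(k8, K8) below expands (roughly) to
 *
 *	static const struct pmc_event_descr k8_event_table[] = {
 *		{ "FP_DISPATCHED_FPU_OPS", PMC_EV_K8_FP_DISPATCHED_FPU_OPS },
 *		...
 *	};
 *
 * with one entry for each event named in the __PMC_EV_K8() list pulled
 * in via <sys/pmc.h>.
 */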
136
137 PMC_CLASSDEP_TABLE(iaf, IAF);
138 PMC_CLASSDEP_TABLE(k8, K8);
139 PMC_CLASSDEP_TABLE(armv7, ARMV7);
140 PMC_CLASSDEP_TABLE(armv8, ARMV8);
141 PMC_CLASSDEP_TABLE(beri, BERI);
142 PMC_CLASSDEP_TABLE(mips24k, MIPS24K);
143 PMC_CLASSDEP_TABLE(mips74k, MIPS74K);
144 PMC_CLASSDEP_TABLE(octeon, OCTEON);
145 PMC_CLASSDEP_TABLE(ppc7450, PPC7450);
146 PMC_CLASSDEP_TABLE(ppc970, PPC970);
147 PMC_CLASSDEP_TABLE(power8, POWER8);
148 PMC_CLASSDEP_TABLE(e500, E500);
149
150 static struct pmc_event_descr soft_event_table[PMC_EV_DYN_COUNT];
151
152 #undef __PMC_EV_ALIAS
153 #define __PMC_EV_ALIAS(N,CODE) { N, PMC_EV_##CODE },
154
155 /*
156 * TODO: Factor out the __PMC_EV_ARMV7/8 list into a single separate table
157 * rather than duplicating for each core.
158 */
159
160 static const struct pmc_event_descr cortex_a8_event_table[] =
161 {
162 __PMC_EV_ALIAS_ARMV7_CORTEX_A8()
163 __PMC_EV_ARMV7()
164 };
165
166 static const struct pmc_event_descr cortex_a9_event_table[] =
167 {
168 __PMC_EV_ALIAS_ARMV7_CORTEX_A9()
169 __PMC_EV_ARMV7()
170 };
171
172 static const struct pmc_event_descr cortex_a53_event_table[] =
173 {
174 __PMC_EV_ALIAS_ARMV8_CORTEX_A53()
175 __PMC_EV_ARMV8()
176 };
177
178 static const struct pmc_event_descr cortex_a57_event_table[] =
179 {
180 __PMC_EV_ALIAS_ARMV8_CORTEX_A57()
181 __PMC_EV_ARMV8()
182 };
183
184 static const struct pmc_event_descr cortex_a76_event_table[] =
185 {
186 __PMC_EV_ALIAS_ARMV8_CORTEX_A76()
187 __PMC_EV_ARMV8()
188 };
189
190 static const struct pmc_event_descr tsc_event_table[] =
191 {
192 __PMC_EV_ALIAS_TSC()
193 };
194
195 #undef PMC_CLASS_TABLE_DESC
196 #define PMC_CLASS_TABLE_DESC(NAME, CLASS, EVENTS, ALLOCATOR) \
197 static const struct pmc_class_descr NAME##_class_table_descr = \
198 { \
199 .pm_evc_name = #CLASS "-", \
200 .pm_evc_name_size = sizeof(#CLASS "-") - 1, \
201 .pm_evc_class = PMC_CLASS_##CLASS , \
202 .pm_evc_event_table = EVENTS##_event_table , \
203 .pm_evc_event_table_size = \
204 PMC_EVENT_TABLE_SIZE(EVENTS), \
205 .pm_evc_allocate_pmc = ALLOCATOR##_allocate_pmc \
206 }
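/*
 * As a sketch, PMC_CLASS_TABLE_DESC(k8, K8, k8, k8) below is roughly
 * equivalent to
 *
 *	static const struct pmc_class_descr k8_class_table_descr = {
 *		.pm_evc_name = "K8-",
 *		.pm_evc_name_size = sizeof("K8-") - 1,
 *		.pm_evc_class = PMC_CLASS_K8,
 *		.pm_evc_event_table = k8_event_table,
 *		.pm_evc_event_table_size = PMC_EVENT_TABLE_SIZE(k8),
 *		.pm_evc_allocate_pmc = k8_allocate_pmc
 *	};
 */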
207
208 #if defined(__i386__) || defined(__amd64__)
209 PMC_CLASS_TABLE_DESC(k8, K8, k8, k8);
210 #endif
211 #if defined(__i386__) || defined(__amd64__)
212 PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc);
213 #endif
214 #if defined(__arm__)
215 PMC_CLASS_TABLE_DESC(cortex_a8, ARMV7, cortex_a8, armv7);
216 PMC_CLASS_TABLE_DESC(cortex_a9, ARMV7, cortex_a9, armv7);
217 #endif
218 #if defined(__aarch64__)
219 PMC_CLASS_TABLE_DESC(cortex_a53, ARMV8, cortex_a53, arm64);
220 PMC_CLASS_TABLE_DESC(cortex_a57, ARMV8, cortex_a57, arm64);
221 PMC_CLASS_TABLE_DESC(cortex_a76, ARMV8, cortex_a76, arm64);
222 #endif
223 #if defined(__mips__)
224 PMC_CLASS_TABLE_DESC(beri, BERI, beri, mips);
225 PMC_CLASS_TABLE_DESC(mips24k, MIPS24K, mips24k, mips);
226 PMC_CLASS_TABLE_DESC(mips74k, MIPS74K, mips74k, mips);
227 PMC_CLASS_TABLE_DESC(octeon, OCTEON, octeon, mips);
228 #endif /* __mips__ */
229 #if defined(__powerpc__)
230 PMC_CLASS_TABLE_DESC(ppc7450, PPC7450, ppc7450, powerpc);
231 PMC_CLASS_TABLE_DESC(ppc970, PPC970, ppc970, powerpc);
232 PMC_CLASS_TABLE_DESC(power8, POWER8, power8, powerpc);
233 PMC_CLASS_TABLE_DESC(e500, E500, e500, powerpc);
234 #endif
235
236 static struct pmc_class_descr soft_class_table_descr =
237 {
238 .pm_evc_name = "SOFT-",
239 .pm_evc_name_size = sizeof("SOFT-") - 1,
240 .pm_evc_class = PMC_CLASS_SOFT,
241 .pm_evc_event_table = NULL,
242 .pm_evc_event_table_size = 0,
243 .pm_evc_allocate_pmc = soft_allocate_pmc
244 };
245
246 #undef PMC_CLASS_TABLE_DESC
247
248 static const struct pmc_class_descr **pmc_class_table;
249 #define PMC_CLASS_TABLE_SIZE cpu_info.pm_nclass
250
251 /*
252 * Mapping tables that translate enumeration values to human-readable
253 * strings.
254 */
255
256 static const char * pmc_capability_names[] = {
257 #undef __PMC_CAP
258 #define __PMC_CAP(N,V,D) #N ,
259 __PMC_CAPS()
260 };
261
262 struct pmc_class_map {
263 enum pmc_class pm_class;
264 const char *pm_name;
265 };
266
267 static const struct pmc_class_map pmc_class_names[] = {
268 #undef __PMC_CLASS
269 #define __PMC_CLASS(S,V,D) { .pm_class = PMC_CLASS_##S, .pm_name = #S } ,
270 __PMC_CLASSES()
271 };
272
273 struct pmc_cputype_map {
274 enum pmc_cputype pm_cputype;
275 const char *pm_name;
276 };
277
278 static const struct pmc_cputype_map pmc_cputype_names[] = {
279 #undef __PMC_CPU
280 #define __PMC_CPU(S, V, D) { .pm_cputype = PMC_CPU_##S, .pm_name = #S } ,
281 __PMC_CPUS()
282 };
283
284 static const char * pmc_disposition_names[] = {
285 #undef __PMC_DISP
286 #define __PMC_DISP(D) #D ,
287 __PMC_DISPOSITIONS()
288 };
289
290 static const char * pmc_mode_names[] = {
291 #undef __PMC_MODE
292 #define __PMC_MODE(M,N) #M ,
293 __PMC_MODES()
294 };
295
296 static const char * pmc_state_names[] = {
297 #undef __PMC_STATE
298 #define __PMC_STATE(S) #S ,
299 __PMC_STATES()
300 };
301
302 /*
303 * Filled in by pmc_init().
304 */
305 static int pmc_syscall = -1;
306 static struct pmc_cpuinfo cpu_info;
307 static struct pmc_op_getdyneventinfo soft_event_info;
308
309 /* Event masks for events */
310 struct pmc_masks {
311 const char *pm_name;
312 const uint64_t pm_value;
313 };
314 #define PMCMASK(N,V) { .pm_name = #N, .pm_value = (V) }
315 #define NULLMASK { .pm_name = NULL }
316
317 #if defined(__amd64__) || defined(__i386__)
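/*
 * Parse a "mask=KEYWORD1+KEYWORD2+..." qualifier against the mask
 * table 'pmask', OR-ing the value of each recognized keyword into
 * '*evmask'.  Returns the number of keywords matched, or -1 on error.
 */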
318 static int
319 pmc_parse_mask(const struct pmc_masks *pmask, char *p, uint64_t *evmask)
320 {
321 const struct pmc_masks *pm;
322 char *q, *r;
323 int c;
324
325 if (pmask == NULL) /* no mask keywords */
326 return (-1);
327 q = strchr(p, '='); /* skip '=' */
328 if (*++q == '\0') /* no more data */
329 return (-1);
330 c = 0; /* count of mask keywords seen */
331 while ((r = strsep(&q, "+")) != NULL) {
332 for (pm = pmask; pm->pm_name && strcasecmp(r, pm->pm_name);
333 pm++)
334 ;
335 if (pm->pm_name == NULL) /* not found */
336 return (-1);
337 *evmask |= pm->pm_value;
338 c++;
339 }
340 return (c);
341 }
342 #endif
343
344 #define KWMATCH(p,kw) (strcasecmp((p), (kw)) == 0)
345 #define KWPREFIXMATCH(p,kw) (strncasecmp((p), (kw), sizeof((kw)) - 1) == 0)
346 #define EV_ALIAS(N,S) { .pm_alias = N, .pm_spec = S }
347
348 #if defined(__amd64__) || defined(__i386__)
349 /*
350 * AMD K8 PMCs.
351 *
352 */
353
354 static struct pmc_event_alias k8_aliases[] = {
355 EV_ALIAS("branches", "k8-fr-retired-taken-branches"),
356 EV_ALIAS("branch-mispredicts",
357 "k8-fr-retired-taken-branches-mispredicted"),
358 EV_ALIAS("cycles", "tsc"),
359 EV_ALIAS("dc-misses", "k8-dc-miss"),
360 EV_ALIAS("ic-misses", "k8-ic-miss"),
361 EV_ALIAS("instructions", "k8-fr-retired-x86-instructions"),
362 EV_ALIAS("interrupts", "k8-fr-taken-hardware-interrupts"),
363 EV_ALIAS("unhalted-cycles", "k8-bu-cpu-clk-unhalted"),
364 EV_ALIAS(NULL, NULL)
365 };
366
367 #define __K8MASK(N,V) PMCMASK(N,(1 << (V)))
368
369 /*
370 * Parsing tables
371 */
372
373 /* fp dispatched fpu ops */
374 static const struct pmc_masks k8_mask_fdfo[] = {
375 __K8MASK(add-pipe-excluding-junk-ops, 0),
376 __K8MASK(multiply-pipe-excluding-junk-ops, 1),
377 __K8MASK(store-pipe-excluding-junk-ops, 2),
378 __K8MASK(add-pipe-junk-ops, 3),
379 __K8MASK(multiply-pipe-junk-ops, 4),
380 __K8MASK(store-pipe-junk-ops, 5),
381 NULLMASK
382 };
383
384 /* ls segment register loads */
385 static const struct pmc_masks k8_mask_lsrl[] = {
386 __K8MASK(es, 0),
387 __K8MASK(cs, 1),
388 __K8MASK(ss, 2),
389 __K8MASK(ds, 3),
390 __K8MASK(fs, 4),
391 __K8MASK(gs, 5),
392 __K8MASK(hs, 6),
393 NULLMASK
394 };
395
396 /* ls locked operation */
397 static const struct pmc_masks k8_mask_llo[] = {
398 __K8MASK(locked-instructions, 0),
399 __K8MASK(cycles-in-request, 1),
400 __K8MASK(cycles-to-complete, 2),
401 NULLMASK
402 };
403
404 /* dc refill from {l2,system} and dc copyback */
405 static const struct pmc_masks k8_mask_dc[] = {
406 __K8MASK(invalid, 0),
407 __K8MASK(shared, 1),
408 __K8MASK(exclusive, 2),
409 __K8MASK(owner, 3),
410 __K8MASK(modified, 4),
411 NULLMASK
412 };
413
414 /* dc one bit ecc error */
415 static const struct pmc_masks k8_mask_dobee[] = {
416 __K8MASK(scrubber, 0),
417 __K8MASK(piggyback, 1),
418 NULLMASK
419 };
420
421 /* dc dispatched prefetch instructions */
422 static const struct pmc_masks k8_mask_ddpi[] = {
423 __K8MASK(load, 0),
424 __K8MASK(store, 1),
425 __K8MASK(nta, 2),
426 NULLMASK
427 };
428
429 /* dc dcache accesses by locks */
430 static const struct pmc_masks k8_mask_dabl[] = {
431 __K8MASK(accesses, 0),
432 __K8MASK(misses, 1),
433 NULLMASK
434 };
435
436 /* bu internal l2 request */
437 static const struct pmc_masks k8_mask_bilr[] = {
438 __K8MASK(ic-fill, 0),
439 __K8MASK(dc-fill, 1),
440 __K8MASK(tlb-reload, 2),
441 __K8MASK(tag-snoop, 3),
442 __K8MASK(cancelled, 4),
443 NULLMASK
444 };
445
446 /* bu fill request l2 miss */
447 static const struct pmc_masks k8_mask_bfrlm[] = {
448 __K8MASK(ic-fill, 0),
449 __K8MASK(dc-fill, 1),
450 __K8MASK(tlb-reload, 2),
451 NULLMASK
452 };
453
454 /* bu fill into l2 */
455 static const struct pmc_masks k8_mask_bfil[] = {
456 __K8MASK(dirty-l2-victim, 0),
457 __K8MASK(victim-from-l2, 1),
458 NULLMASK
459 };
460
461 /* fr retired fpu instructions */
462 static const struct pmc_masks k8_mask_frfi[] = {
463 __K8MASK(x87, 0),
464 __K8MASK(mmx-3dnow, 1),
465 __K8MASK(packed-sse-sse2, 2),
466 __K8MASK(scalar-sse-sse2, 3),
467 NULLMASK
468 };
469
470 /* fr retired fastpath double op instructions */
471 static const struct pmc_masks k8_mask_frfdoi[] = {
472 __K8MASK(low-op-pos-0, 0),
473 __K8MASK(low-op-pos-1, 1),
474 __K8MASK(low-op-pos-2, 2),
475 NULLMASK
476 };
477
478 /* fr fpu exceptions */
479 static const struct pmc_masks k8_mask_ffe[] = {
480 __K8MASK(x87-reclass-microfaults, 0),
481 __K8MASK(sse-retype-microfaults, 1),
482 __K8MASK(sse-reclass-microfaults, 2),
483 __K8MASK(sse-and-x87-microtraps, 3),
484 NULLMASK
485 };
486
487 /* nb memory controller page access event */
488 static const struct pmc_masks k8_mask_nmcpae[] = {
489 __K8MASK(page-hit, 0),
490 __K8MASK(page-miss, 1),
491 __K8MASK(page-conflict, 2),
492 NULLMASK
493 };
494
495 /* nb memory controller turnaround */
496 static const struct pmc_masks k8_mask_nmct[] = {
497 __K8MASK(dimm-turnaround, 0),
498 __K8MASK(read-to-write-turnaround, 1),
499 __K8MASK(write-to-read-turnaround, 2),
500 NULLMASK
501 };
502
503 /* nb memory controller bypass saturation */
504 static const struct pmc_masks k8_mask_nmcbs[] = {
505 __K8MASK(memory-controller-hi-pri-bypass, 0),
506 __K8MASK(memory-controller-lo-pri-bypass, 1),
507 __K8MASK(dram-controller-interface-bypass, 2),
508 __K8MASK(dram-controller-queue-bypass, 3),
509 NULLMASK
510 };
511
512 /* nb sized commands */
513 static const struct pmc_masks k8_mask_nsc[] = {
514 __K8MASK(nonpostwrszbyte, 0),
515 __K8MASK(nonpostwrszdword, 1),
516 __K8MASK(postwrszbyte, 2),
517 __K8MASK(postwrszdword, 3),
518 __K8MASK(rdszbyte, 4),
519 __K8MASK(rdszdword, 5),
520 __K8MASK(rdmodwr, 6),
521 NULLMASK
522 };
523
524 /* nb probe result */
525 static const struct pmc_masks k8_mask_npr[] = {
526 __K8MASK(probe-miss, 0),
527 __K8MASK(probe-hit, 1),
528 __K8MASK(probe-hit-dirty-no-memory-cancel, 2),
529 __K8MASK(probe-hit-dirty-with-memory-cancel, 3),
530 NULLMASK
531 };
532
533 /* nb hypertransport bus bandwidth */
534 static const struct pmc_masks k8_mask_nhbb[] = { /* HT bus bandwidth */
535 __K8MASK(command, 0),
536 __K8MASK(data, 1),
537 __K8MASK(buffer-release, 2),
538 __K8MASK(nop, 3),
539 NULLMASK
540 };
541
542 #undef __K8MASK
543
544 #define K8_KW_COUNT "count"
545 #define K8_KW_EDGE "edge"
546 #define K8_KW_INV "inv"
547 #define K8_KW_MASK "mask"
548 #define K8_KW_OS "os"
549 #define K8_KW_USR "usr"
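/*
 * A K8 counter specification is an event name optionally followed by
 * comma-separated qualifiers built from the keywords above, e.g.
 * (illustrative only):
 *
 *	"k8-dc-refill-from-l2,mask=shared+exclusive,usr"
 *
 * which counts L2-sourced data cache refills in the shared or
 * exclusive state, for user mode only.
 */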
550
551 static int
552 k8_allocate_pmc(enum pmc_event pe, char *ctrspec,
553 struct pmc_op_pmcallocate *pmc_config)
554 {
555 char *e, *p, *q;
556 int n;
557 uint32_t count;
558 uint64_t evmask;
559 const struct pmc_masks *pm, *pmask;
560
561 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
562 pmc_config->pm_md.pm_amd.pm_amd_config = 0;
563
564 pmask = NULL;
565 evmask = 0;
566
567 #define __K8SETMASK(M) pmask = k8_mask_##M
568
569 /* setup parsing tables */
570 switch (pe) {
571 case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
572 __K8SETMASK(fdfo);
573 break;
574 case PMC_EV_K8_LS_SEGMENT_REGISTER_LOAD:
575 __K8SETMASK(lsrl);
576 break;
577 case PMC_EV_K8_LS_LOCKED_OPERATION:
578 __K8SETMASK(llo);
579 break;
580 case PMC_EV_K8_DC_REFILL_FROM_L2:
581 case PMC_EV_K8_DC_REFILL_FROM_SYSTEM:
582 case PMC_EV_K8_DC_COPYBACK:
583 __K8SETMASK(dc);
584 break;
585 case PMC_EV_K8_DC_ONE_BIT_ECC_ERROR:
586 __K8SETMASK(dobee);
587 break;
588 case PMC_EV_K8_DC_DISPATCHED_PREFETCH_INSTRUCTIONS:
589 __K8SETMASK(ddpi);
590 break;
591 case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
592 __K8SETMASK(dabl);
593 break;
594 case PMC_EV_K8_BU_INTERNAL_L2_REQUEST:
595 __K8SETMASK(bilr);
596 break;
597 case PMC_EV_K8_BU_FILL_REQUEST_L2_MISS:
598 __K8SETMASK(bfrlm);
599 break;
600 case PMC_EV_K8_BU_FILL_INTO_L2:
601 __K8SETMASK(bfil);
602 break;
603 case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
604 __K8SETMASK(frfi);
605 break;
606 case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
607 __K8SETMASK(frfdoi);
608 break;
609 case PMC_EV_K8_FR_FPU_EXCEPTIONS:
610 __K8SETMASK(ffe);
611 break;
612 case PMC_EV_K8_NB_MEMORY_CONTROLLER_PAGE_ACCESS_EVENT:
613 __K8SETMASK(nmcpae);
614 break;
615 case PMC_EV_K8_NB_MEMORY_CONTROLLER_TURNAROUND:
616 __K8SETMASK(nmct);
617 break;
618 case PMC_EV_K8_NB_MEMORY_CONTROLLER_BYPASS_SATURATION:
619 __K8SETMASK(nmcbs);
620 break;
621 case PMC_EV_K8_NB_SIZED_COMMANDS:
622 __K8SETMASK(nsc);
623 break;
624 case PMC_EV_K8_NB_PROBE_RESULT:
625 __K8SETMASK(npr);
626 break;
627 case PMC_EV_K8_NB_HT_BUS0_BANDWIDTH:
628 case PMC_EV_K8_NB_HT_BUS1_BANDWIDTH:
629 case PMC_EV_K8_NB_HT_BUS2_BANDWIDTH:
630 __K8SETMASK(nhbb);
631 break;
632
633 default:
634 break; /* no options defined */
635 }
636
637 while ((p = strsep(&ctrspec, ",")) != NULL) {
638 if (KWPREFIXMATCH(p, K8_KW_COUNT "=")) {
639 q = strchr(p, '=');
640 if (*++q == '\0') /* skip '=' */
641 return (-1);
642
643 count = strtol(q, &e, 0);
644 if (e == q || *e != '\0')
645 return (-1);
646
647 pmc_config->pm_caps |= PMC_CAP_THRESHOLD;
648 pmc_config->pm_md.pm_amd.pm_amd_config |=
649 AMD_PMC_TO_COUNTER(count);
650
651 } else if (KWMATCH(p, K8_KW_EDGE)) {
652 pmc_config->pm_caps |= PMC_CAP_EDGE;
653 } else if (KWMATCH(p, K8_KW_INV)) {
654 pmc_config->pm_caps |= PMC_CAP_INVERT;
655 } else if (KWPREFIXMATCH(p, K8_KW_MASK "=")) {
656 if ((n = pmc_parse_mask(pmask, p, &evmask)) < 0)
657 return (-1);
658 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
659 } else if (KWMATCH(p, K8_KW_OS)) {
660 pmc_config->pm_caps |= PMC_CAP_SYSTEM;
661 } else if (KWMATCH(p, K8_KW_USR)) {
662 pmc_config->pm_caps |= PMC_CAP_USER;
663 } else
664 return (-1);
665 }
666
667 /* other post processing */
668 switch (pe) {
669 case PMC_EV_K8_FP_DISPATCHED_FPU_OPS:
670 case PMC_EV_K8_FP_CYCLES_WITH_NO_FPU_OPS_RETIRED:
671 case PMC_EV_K8_FP_DISPATCHED_FPU_FAST_FLAG_OPS:
672 case PMC_EV_K8_FR_RETIRED_FASTPATH_DOUBLE_OP_INSTRUCTIONS:
673 case PMC_EV_K8_FR_RETIRED_FPU_INSTRUCTIONS:
674 case PMC_EV_K8_FR_FPU_EXCEPTIONS:
675 /* XXX only available in rev B and later */
676 break;
677 case PMC_EV_K8_DC_DCACHE_ACCESSES_BY_LOCKS:
678 /* XXX only available in rev C and later */
679 break;
680 case PMC_EV_K8_LS_LOCKED_OPERATION:
681 /* XXX On CPU revisions A and B, the event mask must be zero. */
682 if (evmask & (evmask - 1)) /* > 1 bit set */
683 return (-1);
684 if (evmask == 0) {
685 evmask = 0x01; /* Rev C and later: #instrs */
686 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
687 }
688 break;
689 default:
690 if (evmask == 0 && pmask != NULL) {
691 for (pm = pmask; pm->pm_name; pm++)
692 evmask |= pm->pm_value;
693 pmc_config->pm_caps |= PMC_CAP_QUALIFIER;
694 }
695 }
696
697 if (pmc_config->pm_caps & PMC_CAP_QUALIFIER)
698 pmc_config->pm_md.pm_amd.pm_amd_config =
699 AMD_PMC_TO_UNITMASK(evmask);
700
701 return (0);
702 }
703
704 #endif
705
706 #if defined(__i386__) || defined(__amd64__)
707 static int
708 tsc_allocate_pmc(enum pmc_event pe, char *ctrspec,
709 struct pmc_op_pmcallocate *pmc_config)
710 {
711 if (pe != PMC_EV_TSC_TSC)
712 return (-1);
713
714 /* TSC events must be unqualified. */
715 if (ctrspec && *ctrspec != '\0')
716 return (-1);
717
718 pmc_config->pm_md.pm_amd.pm_amd_config = 0;
719 pmc_config->pm_caps |= PMC_CAP_READ;
720
721 return (0);
722 }
723 #endif
724
725 static struct pmc_event_alias generic_aliases[] = {
726 EV_ALIAS("instructions", "SOFT-CLOCK.HARD"),
727 EV_ALIAS(NULL, NULL)
728 };
729
730 static int
731 soft_allocate_pmc(enum pmc_event pe, char *ctrspec,
732 struct pmc_op_pmcallocate *pmc_config)
733 {
734 (void)ctrspec;
735 (void)pmc_config;
736
737 if ((int)pe < PMC_EV_SOFT_FIRST || (int)pe > PMC_EV_SOFT_LAST)
738 return (-1);
739
740 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
741 return (0);
742 }
743
744 #if defined(__arm__)
745 static struct pmc_event_alias cortex_a8_aliases[] = {
746 EV_ALIAS("dc-misses", "L1_DCACHE_REFILL"),
747 EV_ALIAS("ic-misses", "L1_ICACHE_REFILL"),
748 EV_ALIAS("instructions", "INSTR_EXECUTED"),
749 EV_ALIAS(NULL, NULL)
750 };
751
752 static struct pmc_event_alias cortex_a9_aliases[] = {
753 EV_ALIAS("dc-misses", "L1_DCACHE_REFILL"),
754 EV_ALIAS("ic-misses", "L1_ICACHE_REFILL"),
755 EV_ALIAS("instructions", "INSTR_EXECUTED"),
756 EV_ALIAS(NULL, NULL)
757 };
758
759 static int
760 armv7_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
761 struct pmc_op_pmcallocate *pmc_config __unused)
762 {
763 switch (pe) {
764 default:
765 break;
766 }
767
768 return (0);
769 }
770 #endif
771
772 #if defined(__aarch64__)
773 static struct pmc_event_alias cortex_a53_aliases[] = {
774 EV_ALIAS(NULL, NULL)
775 };
776 static struct pmc_event_alias cortex_a57_aliases[] = {
777 EV_ALIAS(NULL, NULL)
778 };
779 static struct pmc_event_alias cortex_a76_aliases[] = {
780 EV_ALIAS(NULL, NULL)
781 };
782 static int
783 arm64_allocate_pmc(enum pmc_event pe, char *ctrspec __unused,
784 struct pmc_op_pmcallocate *pmc_config __unused)
785 {
786 switch (pe) {
787 default:
788 break;
789 }
790
791 return (0);
792 }
793 #endif
794
795 #if defined(__mips__)
796
797 static struct pmc_event_alias beri_aliases[] = {
798 EV_ALIAS("instructions", "INST"),
799 EV_ALIAS(NULL, NULL)
800 };
801
802 static struct pmc_event_alias mips24k_aliases[] = {
803 EV_ALIAS("instructions", "INSTR_EXECUTED"),
804 EV_ALIAS("branches", "BRANCH_COMPLETED"),
805 EV_ALIAS("branch-mispredicts", "BRANCH_MISPRED"),
806 EV_ALIAS(NULL, NULL)
807 };
808
809 static struct pmc_event_alias mips74k_aliases[] = {
810 EV_ALIAS("instructions", "INSTR_EXECUTED"),
811 EV_ALIAS("branches", "BRANCH_INSNS"),
812 EV_ALIAS("branch-mispredicts", "MISPREDICTED_BRANCH_INSNS"),
813 EV_ALIAS(NULL, NULL)
814 };
815
816 static struct pmc_event_alias octeon_aliases[] = {
817 EV_ALIAS("instructions", "RET"),
818 EV_ALIAS("branches", "BR"),
819 EV_ALIAS("branch-mispredicts", "BRMIS"),
820 EV_ALIAS(NULL, NULL)
821 };
822
823 #define MIPS_KW_OS "os"
824 #define MIPS_KW_USR "usr"
825 #define MIPS_KW_ANYTHREAD "anythread"
826
827 static int
828 mips_allocate_pmc(enum pmc_event pe, char *ctrspec,
829 struct pmc_op_pmcallocate *pmc_config)
830 {
831 char *p;
832
833 (void) pe;
834
835 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
836
837 while ((p = strsep(&ctrspec, ",")) != NULL) {
838 if (KWMATCH(p, MIPS_KW_OS))
839 pmc_config->pm_caps |= PMC_CAP_SYSTEM;
840 else if (KWMATCH(p, MIPS_KW_USR))
841 pmc_config->pm_caps |= PMC_CAP_USER;
842 else if (KWMATCH(p, MIPS_KW_ANYTHREAD))
843 pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
844 else
845 return (-1);
846 }
847
848 return (0);
849 }
850
851 #endif /* __mips__ */
852
853 #if defined(__powerpc__)
854
855 static struct pmc_event_alias ppc7450_aliases[] = {
856 EV_ALIAS("instructions", "INSTR_COMPLETED"),
857 EV_ALIAS("branches", "BRANCHES_COMPLETED"),
858 EV_ALIAS("branch-mispredicts", "MISPREDICTED_BRANCHES"),
859 EV_ALIAS(NULL, NULL)
860 };
861
862 static struct pmc_event_alias ppc970_aliases[] = {
863 EV_ALIAS("instructions", "INSTR_COMPLETED"),
864 EV_ALIAS("cycles", "CYCLES"),
865 EV_ALIAS(NULL, NULL)
866 };
867
868 static struct pmc_event_alias power8_aliases[] = {
869 EV_ALIAS("instructions", "INSTR_COMPLETED"),
870 EV_ALIAS("cycles", "CYCLES"),
871 EV_ALIAS(NULL, NULL)
872 };
873
874 static struct pmc_event_alias e500_aliases[] = {
875 EV_ALIAS("instructions", "INSTR_COMPLETED"),
876 EV_ALIAS("cycles", "CYCLES"),
877 EV_ALIAS(NULL, NULL)
878 };
879
880 #define POWERPC_KW_OS "os"
881 #define POWERPC_KW_USR "usr"
882 #define POWERPC_KW_ANYTHREAD "anythread"
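/*
 * A PowerPC counter specification is an event name optionally followed
 * by the keywords above, e.g. "INSTR_COMPLETED,usr" (illustrative) to
 * count completed instructions in user mode only.
 */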
883
884 static int
885 powerpc_allocate_pmc(enum pmc_event pe, char *ctrspec,
886 struct pmc_op_pmcallocate *pmc_config)
887 {
888 char *p;
889
890 (void) pe;
891
892 pmc_config->pm_caps |= (PMC_CAP_READ | PMC_CAP_WRITE);
893
894 while ((p = strsep(&ctrspec, ",")) != NULL) {
895 if (KWMATCH(p, POWERPC_KW_OS))
896 pmc_config->pm_caps |= PMC_CAP_SYSTEM;
897 else if (KWMATCH(p, POWERPC_KW_USR))
898 pmc_config->pm_caps |= PMC_CAP_USER;
899 else if (KWMATCH(p, POWERPC_KW_ANYTHREAD))
900 pmc_config->pm_caps |= (PMC_CAP_USER | PMC_CAP_SYSTEM);
901 else
902 return (-1);
903 }
904
905 return (0);
906 }
907
908 #endif /* __powerpc__ */
909
910
911 /*
912 * Match an event name `name' with its canonical form.
913 *
914 * Matches are case-insensitive; space, period, underscore and hyphen
915 * characters are treated as equivalent.
916 *
917 * Returns 1 for a match, 0 otherwise.
918 */
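/*
 * For example, "dc-refill.from_l2" matches the canonical name
 * "DC_REFILL_FROM_L2" under these rules.
 */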
919
920 static int
921 pmc_match_event_name(const char *name, const char *canonicalname)
922 {
923 int cc, nc;
924 const unsigned char *c, *n;
925
926 c = (const unsigned char *) canonicalname;
927 n = (const unsigned char *) name;
928
929 for (; (nc = *n) && (cc = *c); n++, c++) {
930
931 if ((nc == ' ' || nc == '_' || nc == '-' || nc == '.') &&
932 (cc == ' ' || cc == '_' || cc == '-' || cc == '.'))
933 continue;
934
935 if (toupper(nc) == toupper(cc))
936 continue;
937
938
939 return (0);
940 }
941
942 if (*n == '\0' && *c == '\0')
943 return (1);
944
945 return (0);
946 }
947
948 /*
949 * Match an event name against all the event names supported by a
950 * PMC class.
951 *
952 * Returns an event descriptor pointer on match or NULL otherwise.
953 */
954 static const struct pmc_event_descr *
955 pmc_match_event_class(const char *name,
956 const struct pmc_class_descr *pcd)
957 {
958 size_t n;
959 const struct pmc_event_descr *ev;
960
961 ev = pcd->pm_evc_event_table;
962 for (n = 0; n < pcd->pm_evc_event_table_size; n++, ev++)
963 if (pmc_match_event_name(name, ev->pm_ev_name))
964 return (ev);
965
966 return (NULL);
967 }
968
969 /*
970 * API entry points
971 */
972
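/*
 * A minimal usage sketch (error handling omitted; the counter spec,
 * mode and target pid below are only examples):
 *
 *	pmc_id_t pmcid;
 *	pmc_value_t v;
 *
 *	pmc_init();
 *	pmc_allocate("instructions", PMC_MODE_TC, 0, PMC_CPU_ANY,
 *	    &pmcid, 0);
 *	pmc_attach(pmcid, 0);		(pid 0 denotes the current process)
 *	pmc_start(pmcid);
 *	  ... run the code being measured ...
 *	pmc_stop(pmcid);
 *	pmc_read(pmcid, &v);
 *	pmc_release(pmcid);
 */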
973 int
974 pmc_allocate(const char *ctrspec, enum pmc_mode mode,
975 uint32_t flags, int cpu, pmc_id_t *pmcid,
976 uint64_t count)
977 {
978 size_t n;
979 int retval;
980 char *r, *spec_copy;
981 const char *ctrname;
982 const struct pmc_event_descr *ev;
983 const struct pmc_event_alias *alias;
984 struct pmc_op_pmcallocate pmc_config;
985 const struct pmc_class_descr *pcd;
986
987 spec_copy = NULL;
988 retval = -1;
989
990 if (mode != PMC_MODE_SS && mode != PMC_MODE_TS &&
991 mode != PMC_MODE_SC && mode != PMC_MODE_TC) {
992 errno = EINVAL;
993 goto out;
994 }
995 bzero(&pmc_config, sizeof(pmc_config));
996 pmc_config.pm_cpu = cpu;
997 pmc_config.pm_mode = mode;
998 pmc_config.pm_flags = flags;
999 pmc_config.pm_count = count;
1000 if (PMC_IS_SAMPLING_MODE(mode))
1001 pmc_config.pm_caps |= PMC_CAP_INTERRUPT;
1002
1003 /*
1004 * Try to pull the raw event ID directly from the pmu-events table. If
1005 * this is unsupported on the platform, or the event is not found,
1006 * continue with searching the regular event tables.
1007 */
1008 r = spec_copy = strdup(ctrspec);
1009 ctrname = strsep(&r, ",");
1010 if (pmc_pmu_enabled()) {
1011 if (pmc_pmu_pmcallocate(ctrname, &pmc_config) == 0)
1012 goto found;
1013
1014 /* Otherwise, reset any changes */
1015 pmc_config.pm_ev = 0;
1016 pmc_config.pm_caps = 0;
1017 pmc_config.pm_class = 0;
1018 }
1019 free(spec_copy);
1020 spec_copy = NULL;
1021
1022 /* replace an event alias with the canonical event specifier */
1023 if (pmc_mdep_event_aliases)
1024 for (alias = pmc_mdep_event_aliases; alias->pm_alias; alias++)
1025 if (!strcasecmp(ctrspec, alias->pm_alias)) {
1026 spec_copy = strdup(alias->pm_spec);
1027 break;
1028 }
1029
1030 if (spec_copy == NULL)
1031 spec_copy = strdup(ctrspec);
1032
1033 r = spec_copy;
1034 ctrname = strsep(&r, ",");
1035
1036 /*
1037 * If an explicit class prefix was given by the user, restrict the
1038 * search for the event to the specified PMC class.
1039 */
1040 ev = NULL;
1041 for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++) {
1042 pcd = pmc_class_table[n];
1043 if (pcd != NULL && strncasecmp(ctrname, pcd->pm_evc_name,
1044 pcd->pm_evc_name_size) == 0) {
1045 if ((ev = pmc_match_event_class(ctrname +
1046 pcd->pm_evc_name_size, pcd)) == NULL) {
1047 errno = EINVAL;
1048 goto out;
1049 }
1050 break;
1051 }
1052 }
1053
1054 /*
1055 * Otherwise, search for this event in all compatible PMC
1056 * classes.
1057 */
1058 for (n = 0; ev == NULL && n < PMC_CLASS_TABLE_SIZE; n++) {
1059 pcd = pmc_class_table[n];
1060 if (pcd != NULL)
1061 ev = pmc_match_event_class(ctrname, pcd);
1062 }
1063
1064 if (ev == NULL) {
1065 errno = EINVAL;
1066 goto out;
1067 }
1068
1069 pmc_config.pm_ev = ev->pm_ev_code;
1070 pmc_config.pm_class = pcd->pm_evc_class;
1071
1072 if (pcd->pm_evc_allocate_pmc(ev->pm_ev_code, r, &pmc_config) < 0) {
1073 errno = EINVAL;
1074 goto out;
1075 }
1076
1077 found:
1078 if (PMC_CALL(PMCALLOCATE, &pmc_config) == 0) {
1079 *pmcid = pmc_config.pm_pmcid;
1080 retval = 0;
1081 }
1082 out:
1083 if (spec_copy)
1084 free(spec_copy);
1085
1086 return (retval);
1087 }
1088
1089 int
1090 pmc_attach(pmc_id_t pmc, pid_t pid)
1091 {
1092 struct pmc_op_pmcattach pmc_attach_args;
1093
1094 pmc_attach_args.pm_pmc = pmc;
1095 pmc_attach_args.pm_pid = pid;
1096
1097 return (PMC_CALL(PMCATTACH, &pmc_attach_args));
1098 }
1099
1100 int
1101 pmc_capabilities(pmc_id_t pmcid, uint32_t *caps)
1102 {
1103 unsigned int i;
1104 enum pmc_class cl;
1105
1106 cl = PMC_ID_TO_CLASS(pmcid);
1107 for (i = 0; i < cpu_info.pm_nclass; i++)
1108 if (cpu_info.pm_classes[i].pm_class == cl) {
1109 *caps = cpu_info.pm_classes[i].pm_caps;
1110 return (0);
1111 }
1112 errno = EINVAL;
1113 return (-1);
1114 }
1115
1116 int
1117 pmc_configure_logfile(int fd)
1118 {
1119 struct pmc_op_configurelog cla;
1120
1121 cla.pm_logfd = fd;
1122 if (PMC_CALL(CONFIGURELOG, &cla) < 0)
1123 return (-1);
1124 return (0);
1125 }
1126
1127 int
1128 pmc_cpuinfo(const struct pmc_cpuinfo **pci)
1129 {
1130 if (pmc_syscall == -1) {
1131 errno = ENXIO;
1132 return (-1);
1133 }
1134
1135 *pci = &cpu_info;
1136 return (0);
1137 }
1138
1139 int
1140 pmc_detach(pmc_id_t pmc, pid_t pid)
1141 {
1142 struct pmc_op_pmcattach pmc_detach_args;
1143
1144 pmc_detach_args.pm_pmc = pmc;
1145 pmc_detach_args.pm_pid = pid;
1146 return (PMC_CALL(PMCDETACH, &pmc_detach_args));
1147 }
1148
1149 int
1150 pmc_disable(int cpu, int pmc)
1151 {
1152 struct pmc_op_pmcadmin ssa;
1153
1154 ssa.pm_cpu = cpu;
1155 ssa.pm_pmc = pmc;
1156 ssa.pm_state = PMC_STATE_DISABLED;
1157 return (PMC_CALL(PMCADMIN, &ssa));
1158 }
1159
1160 int
1161 pmc_enable(int cpu, int pmc)
1162 {
1163 struct pmc_op_pmcadmin ssa;
1164
1165 ssa.pm_cpu = cpu;
1166 ssa.pm_pmc = pmc;
1167 ssa.pm_state = PMC_STATE_FREE;
1168 return (PMC_CALL(PMCADMIN, &ssa));
1169 }
1170
1171 /*
1172 * Return a list of events known to a given PMC class. 'cl' is the
1173 * PMC class identifier, 'eventnames' is the returned array of 'const
1174 * char *' pointers to the event names, and 'nevents' is the number
1175 * of event names returned.
1176 *
1177 * The space for 'eventnames' is allocated using malloc(3). The caller
1178 * is responsible for freeing this space when done.
1179 */
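/*
 * For example (sketch, error checks omitted):
 *
 *	const char **names;
 *	int i, nevents;
 *
 *	pmc_event_names_of_class(PMC_CLASS_SOFT, &names, &nevents);
 *	for (i = 0; i < nevents; i++)
 *		printf("%s\n", names[i]);
 *	free(names);
 */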
1180 int
1181 pmc_event_names_of_class(enum pmc_class cl, const char ***eventnames,
1182 int *nevents)
1183 {
1184 int count;
1185 const char **names;
1186 const struct pmc_event_descr *ev;
1187
1188 switch (cl)
1189 {
1190 case PMC_CLASS_IAF:
1191 ev = iaf_event_table;
1192 count = PMC_EVENT_TABLE_SIZE(iaf);
1193 break;
1194 case PMC_CLASS_TSC:
1195 ev = tsc_event_table;
1196 count = PMC_EVENT_TABLE_SIZE(tsc);
1197 break;
1198 case PMC_CLASS_K8:
1199 ev = k8_event_table;
1200 count = PMC_EVENT_TABLE_SIZE(k8);
1201 break;
1202 case PMC_CLASS_ARMV7:
1203 switch (cpu_info.pm_cputype) {
1204 default:
1205 case PMC_CPU_ARMV7_CORTEX_A8:
1206 ev = cortex_a8_event_table;
1207 count = PMC_EVENT_TABLE_SIZE(cortex_a8);
1208 break;
1209 case PMC_CPU_ARMV7_CORTEX_A9:
1210 ev = cortex_a9_event_table;
1211 count = PMC_EVENT_TABLE_SIZE(cortex_a9);
1212 break;
1213 }
1214 break;
1215 case PMC_CLASS_ARMV8:
1216 switch (cpu_info.pm_cputype) {
1217 default:
1218 case PMC_CPU_ARMV8_CORTEX_A53:
1219 ev = cortex_a53_event_table;
1220 count = PMC_EVENT_TABLE_SIZE(cortex_a53);
1221 break;
1222 case PMC_CPU_ARMV8_CORTEX_A57:
1223 ev = cortex_a57_event_table;
1224 count = PMC_EVENT_TABLE_SIZE(cortex_a57);
1225 break;
1226 case PMC_CPU_ARMV8_CORTEX_A76:
1227 ev = cortex_a76_event_table;
1228 count = PMC_EVENT_TABLE_SIZE(cortex_a76);
1229 break;
1230 }
1231 break;
1232 case PMC_CLASS_BERI:
1233 ev = beri_event_table;
1234 count = PMC_EVENT_TABLE_SIZE(beri);
1235 break;
1236 case PMC_CLASS_MIPS24K:
1237 ev = mips24k_event_table;
1238 count = PMC_EVENT_TABLE_SIZE(mips24k);
1239 break;
1240 case PMC_CLASS_MIPS74K:
1241 ev = mips74k_event_table;
1242 count = PMC_EVENT_TABLE_SIZE(mips74k);
1243 break;
1244 case PMC_CLASS_OCTEON:
1245 ev = octeon_event_table;
1246 count = PMC_EVENT_TABLE_SIZE(octeon);
1247 break;
1248 case PMC_CLASS_PPC7450:
1249 ev = ppc7450_event_table;
1250 count = PMC_EVENT_TABLE_SIZE(ppc7450);
1251 break;
1252 case PMC_CLASS_PPC970:
1253 ev = ppc970_event_table;
1254 count = PMC_EVENT_TABLE_SIZE(ppc970);
1255 break;
1256 case PMC_CLASS_POWER8:
1257 ev = power8_event_table;
1258 count = PMC_EVENT_TABLE_SIZE(power8);
1259 break;
1260 case PMC_CLASS_E500:
1261 ev = e500_event_table;
1262 count = PMC_EVENT_TABLE_SIZE(e500);
1263 break;
1264 case PMC_CLASS_SOFT:
1265 ev = soft_event_table;
1266 count = soft_event_info.pm_nevent;
1267 break;
1268 default:
1269 errno = EINVAL;
1270 return (-1);
1271 }
1272
1273 if ((names = malloc(count * sizeof(const char *))) == NULL)
1274 return (-1);
1275
1276 *eventnames = names;
1277 *nevents = count;
1278
1279 for (;count--; ev++, names++)
1280 *names = ev->pm_ev_name;
1281
1282 return (0);
1283 }
1284
1285 int
1286 pmc_flush_logfile(void)
1287 {
1288 return (PMC_CALL(FLUSHLOG,0));
1289 }
1290
1291 int
1292 pmc_close_logfile(void)
1293 {
1294 return (PMC_CALL(CLOSELOG,0));
1295 }
1296
1297 int
1298 pmc_get_driver_stats(struct pmc_driverstats *ds)
1299 {
1300 struct pmc_op_getdriverstats gms;
1301
1302 if (PMC_CALL(GETDRIVERSTATS, &gms) < 0)
1303 return (-1);
1304
1305 /* copy out fields in the current userland<->library interface */
1306 ds->pm_intr_ignored = gms.pm_intr_ignored;
1307 ds->pm_intr_processed = gms.pm_intr_processed;
1308 ds->pm_intr_bufferfull = gms.pm_intr_bufferfull;
1309 ds->pm_syscalls = gms.pm_syscalls;
1310 ds->pm_syscall_errors = gms.pm_syscall_errors;
1311 ds->pm_buffer_requests = gms.pm_buffer_requests;
1312 ds->pm_buffer_requests_failed = gms.pm_buffer_requests_failed;
1313 ds->pm_log_sweeps = gms.pm_log_sweeps;
1314 return (0);
1315 }
1316
1317 int
1318 pmc_get_msr(pmc_id_t pmc, uint32_t *msr)
1319 {
1320 struct pmc_op_getmsr gm;
1321
1322 gm.pm_pmcid = pmc;
1323 if (PMC_CALL(PMCGETMSR, &gm) < 0)
1324 return (-1);
1325 *msr = gm.pm_msr;
1326 return (0);
1327 }
1328
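/*
 * Initialize the library: locate the hwpmc(4) kernel module, look up
 * its system call number, check the kernel's ABI version against the
 * library's, and cache the CPU and PMC class information.  This must
 * be called successfully before any other library routine is used.
 */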
1329 int
1330 pmc_init(void)
1331 {
1332 int error, pmc_mod_id;
1333 unsigned int n;
1334 uint32_t abi_version;
1335 struct module_stat pmc_modstat;
1336 struct pmc_op_getcpuinfo op_cpu_info;
1337
1338 if (pmc_syscall != -1) /* already inited */
1339 return (0);
1340
1341 /* retrieve the system call number from the KLD */
1342 if ((pmc_mod_id = modfind(PMC_MODULE_NAME)) < 0)
1343 return (-1);
1344
1345 pmc_modstat.version = sizeof(struct module_stat);
1346 if ((error = modstat(pmc_mod_id, &pmc_modstat)) < 0)
1347 return (-1);
1348
1349 pmc_syscall = pmc_modstat.data.intval;
1350
1351 /* check the kernel module's ABI against our compiled-in version */
1352 abi_version = PMC_VERSION;
1353 if (PMC_CALL(GETMODULEVERSION, &abi_version) < 0)
1354 return (pmc_syscall = -1);
1355
1356 /* ignore patch & minor numbers for the comparison */
1357 if ((abi_version & 0xFF000000) != (PMC_VERSION & 0xFF000000)) {
1358 errno = EPROGMISMATCH;
1359 return (pmc_syscall = -1);
1360 }
1361
1362 bzero(&op_cpu_info, sizeof(op_cpu_info));
1363 if (PMC_CALL(GETCPUINFO, &op_cpu_info) < 0)
1364 return (pmc_syscall = -1);
1365
1366 cpu_info.pm_cputype = op_cpu_info.pm_cputype;
1367 cpu_info.pm_ncpu = op_cpu_info.pm_ncpu;
1368 cpu_info.pm_npmc = op_cpu_info.pm_npmc;
1369 cpu_info.pm_nclass = op_cpu_info.pm_nclass;
1370 for (n = 0; n < op_cpu_info.pm_nclass; n++)
1371 memcpy(&cpu_info.pm_classes[n], &op_cpu_info.pm_classes[n],
1372 sizeof(cpu_info.pm_classes[n]));
1373
1374 pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE *
1375 sizeof(struct pmc_class_descr *));
1376
1377 if (pmc_class_table == NULL)
1378 return (-1);
1379
1380 for (n = 0; n < PMC_CLASS_TABLE_SIZE; n++)
1381 pmc_class_table[n] = NULL;
1382
1383 /*
1384 * Get soft events list.
1385 */
1386 soft_event_info.pm_class = PMC_CLASS_SOFT;
1387 if (PMC_CALL(GETDYNEVENTINFO, &soft_event_info) < 0)
1388 return (pmc_syscall = -1);
1389
1390 /* Map soft events to static list. */
1391 for (n = 0; n < soft_event_info.pm_nevent; n++) {
1392 soft_event_table[n].pm_ev_name =
1393 soft_event_info.pm_events[n].pm_ev_name;
1394 soft_event_table[n].pm_ev_code =
1395 soft_event_info.pm_events[n].pm_ev_code;
1396 }
1397 soft_class_table_descr.pm_evc_event_table_size =
1398 soft_event_info.pm_nevent;
1399 soft_class_table_descr.pm_evc_event_table =
1400 soft_event_table;
1401
1402 /*
1403 * Fill in the class table.
1404 */
1405 n = 0;
1406
1407 /* Fill in the soft event information. */
1408 pmc_class_table[n++] = &soft_class_table_descr;
1409 #if defined(__amd64__) || defined(__i386__)
1410 if (cpu_info.pm_cputype != PMC_CPU_GENERIC)
1411 pmc_class_table[n++] = &tsc_class_table_descr;
1412 #endif
1413
1414 #define PMC_MDEP_INIT(C) pmc_mdep_event_aliases = C##_aliases
1415
1416 /* Configure the event name parser. */
1417 switch (cpu_info.pm_cputype) {
1418 #if defined(__amd64__) || defined(__i386__)
1419 case PMC_CPU_AMD_K8:
1420 PMC_MDEP_INIT(k8);
1421 pmc_class_table[n] = &k8_class_table_descr;
1422 break;
1423 #endif
1424 case PMC_CPU_GENERIC:
1425 PMC_MDEP_INIT(generic);
1426 break;
1427 #if defined(__arm__)
1428 case PMC_CPU_ARMV7_CORTEX_A8:
1429 PMC_MDEP_INIT(cortex_a8);
1430 pmc_class_table[n] = &cortex_a8_class_table_descr;
1431 break;
1432 case PMC_CPU_ARMV7_CORTEX_A9:
1433 PMC_MDEP_INIT(cortex_a9);
1434 pmc_class_table[n] = &cortex_a9_class_table_descr;
1435 break;
1436 #endif
1437 #if defined(__aarch64__)
1438 case PMC_CPU_ARMV8_CORTEX_A53:
1439 PMC_MDEP_INIT(cortex_a53);
1440 pmc_class_table[n] = &cortex_a53_class_table_descr;
1441 break;
1442 case PMC_CPU_ARMV8_CORTEX_A57:
1443 PMC_MDEP_INIT(cortex_a57);
1444 pmc_class_table[n] = &cortex_a57_class_table_descr;
1445 break;
1446 case PMC_CPU_ARMV8_CORTEX_A76:
1447 PMC_MDEP_INIT(cortex_a76);
1448 pmc_class_table[n] = &cortex_a76_class_table_descr;
1449 break;
1450 #endif
1451 #if defined(__mips__)
1452 case PMC_CPU_MIPS_BERI:
1453 PMC_MDEP_INIT(beri);
1454 pmc_class_table[n] = &beri_class_table_descr;
1455 break;
1456 case PMC_CPU_MIPS_24K:
1457 PMC_MDEP_INIT(mips24k);
1458 pmc_class_table[n] = &mips24k_class_table_descr;
1459 break;
1460 case PMC_CPU_MIPS_74K:
1461 PMC_MDEP_INIT(mips74k);
1462 pmc_class_table[n] = &mips74k_class_table_descr;
1463 break;
1464 case PMC_CPU_MIPS_OCTEON:
1465 PMC_MDEP_INIT(octeon);
1466 pmc_class_table[n] = &octeon_class_table_descr;
1467 break;
1468 #endif /* __mips__ */
1469 #if defined(__powerpc__)
1470 case PMC_CPU_PPC_7450:
1471 PMC_MDEP_INIT(ppc7450);
1472 pmc_class_table[n] = &ppc7450_class_table_descr;
1473 break;
1474 case PMC_CPU_PPC_970:
1475 PMC_MDEP_INIT(ppc970);
1476 pmc_class_table[n] = &ppc970_class_table_descr;
1477 break;
1478 case PMC_CPU_PPC_POWER8:
1479 PMC_MDEP_INIT(power8);
1480 pmc_class_table[n] = &power8_class_table_descr;
1481 break;
1482 case PMC_CPU_PPC_E500:
1483 PMC_MDEP_INIT(e500);
1484 pmc_class_table[n] = &e500_class_table_descr;
1485 break;
1486 #endif
1487 default:
1488 /*
1489 * A CPU type that this version of the library knows nothing
1490 * about. This should not happen, since the ABI version check
1491 * above should have caught the mismatch.
1492 */
1493 #if defined(__amd64__) || defined(__i386__)
1494 break;
1495 #endif
1496 errno = ENXIO;
1497 return (pmc_syscall = -1);
1498 }
1499
1500 return (0);
1501 }
1502
1503 const char *
1504 pmc_name_of_capability(enum pmc_caps cap)
1505 {
1506 int i;
1507
1508 /*
1509 * 'cap' should have a single bit set and should be in
1510 * range.
1511 */
1512 if ((cap & (cap - 1)) || cap < PMC_CAP_FIRST ||
1513 cap > PMC_CAP_LAST) {
1514 errno = EINVAL;
1515 return (NULL);
1516 }
1517
1518 i = ffs(cap);
1519 return (pmc_capability_names[i - 1]);
1520 }
1521
1522 const char *
1523 pmc_name_of_class(enum pmc_class pc)
1524 {
1525 size_t n;
1526
1527 for (n = 0; n < PMC_TABLE_SIZE(pmc_class_names); n++)
1528 if (pc == pmc_class_names[n].pm_class)
1529 return (pmc_class_names[n].pm_name);
1530
1531 errno = EINVAL;
1532 return (NULL);
1533 }
1534
1535 const char *
1536 pmc_name_of_cputype(enum pmc_cputype cp)
1537 {
1538 size_t n;
1539
1540 for (n = 0; n < PMC_TABLE_SIZE(pmc_cputype_names); n++)
1541 if (cp == pmc_cputype_names[n].pm_cputype)
1542 return (pmc_cputype_names[n].pm_name);
1543
1544 errno = EINVAL;
1545 return (NULL);
1546 }
1547
1548 const char *
1549 pmc_name_of_disposition(enum pmc_disp pd)
1550 {
1551 if ((int) pd >= PMC_DISP_FIRST &&
1552 pd <= PMC_DISP_LAST)
1553 return (pmc_disposition_names[pd]);
1554
1555 errno = EINVAL;
1556 return (NULL);
1557 }
1558
1559 const char *
1560 _pmc_name_of_event(enum pmc_event pe, enum pmc_cputype cpu)
1561 {
1562 const struct pmc_event_descr *ev, *evfence;
1563
1564 ev = evfence = NULL;
1565 if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) {
1566 ev = k8_event_table;
1567 evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8);
1568
1569 } else if (pe >= PMC_EV_ARMV7_FIRST && pe <= PMC_EV_ARMV7_LAST) {
1570 switch (cpu) {
1571 case PMC_CPU_ARMV7_CORTEX_A8:
1572 ev = cortex_a8_event_table;
1573 evfence = cortex_a8_event_table + PMC_EVENT_TABLE_SIZE(cortex_a8);
1574 break;
1575 case PMC_CPU_ARMV7_CORTEX_A9:
1576 ev = cortex_a9_event_table;
1577 evfence = cortex_a9_event_table + PMC_EVENT_TABLE_SIZE(cortex_a9);
1578 break;
1579 default: /* Unknown CPU type. */
1580 break;
1581 }
1582 } else if (pe >= PMC_EV_ARMV8_FIRST && pe <= PMC_EV_ARMV8_LAST) {
1583 switch (cpu) {
1584 case PMC_CPU_ARMV8_CORTEX_A53:
1585 ev = cortex_a53_event_table;
1586 evfence = cortex_a53_event_table + PMC_EVENT_TABLE_SIZE(cortex_a53);
1587 break;
1588 case PMC_CPU_ARMV8_CORTEX_A57:
1589 ev = cortex_a57_event_table;
1590 evfence = cortex_a57_event_table + PMC_EVENT_TABLE_SIZE(cortex_a57);
1591 break;
1592 case PMC_CPU_ARMV8_CORTEX_A76:
1593 ev = cortex_a76_event_table;
1594 evfence = cortex_a76_event_table + PMC_EVENT_TABLE_SIZE(cortex_a76);
1595 break;
1596 default: /* Unknown CPU type. */
1597 break;
1598 }
1599 } else if (pe >= PMC_EV_BERI_FIRST && pe <= PMC_EV_BERI_LAST) {
1600 ev = beri_event_table;
1601 evfence = beri_event_table + PMC_EVENT_TABLE_SIZE(beri);
1602 } else if (pe >= PMC_EV_MIPS24K_FIRST && pe <= PMC_EV_MIPS24K_LAST) {
1603 ev = mips24k_event_table;
1604 evfence = mips24k_event_table + PMC_EVENT_TABLE_SIZE(mips24k);
1605 } else if (pe >= PMC_EV_MIPS74K_FIRST && pe <= PMC_EV_MIPS74K_LAST) {
1606 ev = mips74k_event_table;
1607 evfence = mips74k_event_table + PMC_EVENT_TABLE_SIZE(mips74k);
1608 } else if (pe >= PMC_EV_OCTEON_FIRST && pe <= PMC_EV_OCTEON_LAST) {
1609 ev = octeon_event_table;
1610 evfence = octeon_event_table + PMC_EVENT_TABLE_SIZE(octeon);
1611 } else if (pe >= PMC_EV_PPC7450_FIRST && pe <= PMC_EV_PPC7450_LAST) {
1612 ev = ppc7450_event_table;
1613 evfence = ppc7450_event_table + PMC_EVENT_TABLE_SIZE(ppc7450);
1614 } else if (pe >= PMC_EV_PPC970_FIRST && pe <= PMC_EV_PPC970_LAST) {
1615 ev = ppc970_event_table;
1616 evfence = ppc970_event_table + PMC_EVENT_TABLE_SIZE(ppc970);
1617 } else if (pe >= PMC_EV_POWER8_FIRST && pe <= PMC_EV_POWER8_LAST) {
1618 ev = power8_event_table;
1619 evfence = power8_event_table + PMC_EVENT_TABLE_SIZE(power8);
1620 } else if (pe >= PMC_EV_E500_FIRST && pe <= PMC_EV_E500_LAST) {
1621 ev = e500_event_table;
1622 evfence = e500_event_table + PMC_EVENT_TABLE_SIZE(e500);
1623 } else if (pe == PMC_EV_TSC_TSC) {
1624 ev = tsc_event_table;
1625 evfence = tsc_event_table + PMC_EVENT_TABLE_SIZE(tsc);
1626 } else if ((int)pe >= PMC_EV_SOFT_FIRST && (int)pe <= PMC_EV_SOFT_LAST) {
1627 ev = soft_event_table;
1628 evfence = soft_event_table + soft_event_info.pm_nevent;
1629 }
1630
1631 for (; ev != evfence; ev++)
1632 if (pe == ev->pm_ev_code)
1633 return (ev->pm_ev_name);
1634
1635 return (NULL);
1636 }
1637
1638 const char *
1639 pmc_name_of_event(enum pmc_event pe)
1640 {
1641 const char *n;
1642
1643 if ((n = _pmc_name_of_event(pe, cpu_info.pm_cputype)) != NULL)
1644 return (n);
1645
1646 errno = EINVAL;
1647 return (NULL);
1648 }
1649
1650 const char *
1651 pmc_name_of_mode(enum pmc_mode pm)
1652 {
1653 if ((int) pm >= PMC_MODE_FIRST &&
1654 pm <= PMC_MODE_LAST)
1655 return (pmc_mode_names[pm]);
1656
1657 errno = EINVAL;
1658 return (NULL);
1659 }
1660
1661 const char *
1662 pmc_name_of_state(enum pmc_state ps)
1663 {
1664 if ((int) ps >= PMC_STATE_FIRST &&
1665 ps <= PMC_STATE_LAST)
1666 return (pmc_state_names[ps]);
1667
1668 errno = EINVAL;
1669 return (NULL);
1670 }
1671
1672 int
1673 pmc_ncpu(void)
1674 {
1675 if (pmc_syscall == -1) {
1676 errno = ENXIO;
1677 return (-1);
1678 }
1679
1680 return (cpu_info.pm_ncpu);
1681 }
1682
1683 int
1684 pmc_npmc(int cpu)
1685 {
1686 if (pmc_syscall == -1) {
1687 errno = ENXIO;
1688 return (-1);
1689 }
1690
1691 if (cpu < 0 || cpu >= (int) cpu_info.pm_ncpu) {
1692 errno = EINVAL;
1693 return (-1);
1694 }
1695
1696 return (cpu_info.pm_npmc);
1697 }
1698
1699 int
1700 pmc_pmcinfo(int cpu, struct pmc_pmcinfo **ppmci)
1701 {
1702 int nbytes, npmc;
1703 struct pmc_op_getpmcinfo *pmci;
1704
1705 if ((npmc = pmc_npmc(cpu)) < 0)
1706 return (-1);
1707
1708 nbytes = sizeof(struct pmc_op_getpmcinfo) +
1709 npmc * sizeof(struct pmc_info);
1710
1711 if ((pmci = calloc(1, nbytes)) == NULL)
1712 return (-1);
1713
1714 pmci->pm_cpu = cpu;
1715
1716 if (PMC_CALL(GETPMCINFO, pmci) < 0) {
1717 free(pmci);
1718 return (-1);
1719 }
1720
1721 /* kernel<->library, library<->userland interfaces are identical */
1722 *ppmci = (struct pmc_pmcinfo *) pmci;
1723 return (0);
1724 }
1725
1726 int
1727 pmc_read(pmc_id_t pmc, pmc_value_t *value)
1728 {
1729 struct pmc_op_pmcrw pmc_read_op;
1730
1731 pmc_read_op.pm_pmcid = pmc;
1732 pmc_read_op.pm_flags = PMC_F_OLDVALUE;
1733 pmc_read_op.pm_value = -1;
1734
1735 if (PMC_CALL(PMCRW, &pmc_read_op) < 0)
1736 return (-1);
1737
1738 *value = pmc_read_op.pm_value;
1739 return (0);
1740 }
1741
1742 int
1743 pmc_release(pmc_id_t pmc)
1744 {
1745 struct pmc_op_simple pmc_release_args;
1746
1747 pmc_release_args.pm_pmcid = pmc;
1748 return (PMC_CALL(PMCRELEASE, &pmc_release_args));
1749 }
1750
1751 int
1752 pmc_rw(pmc_id_t pmc, pmc_value_t newvalue, pmc_value_t *oldvaluep)
1753 {
1754 struct pmc_op_pmcrw pmc_rw_op;
1755
1756 pmc_rw_op.pm_pmcid = pmc;
1757 pmc_rw_op.pm_flags = PMC_F_NEWVALUE | PMC_F_OLDVALUE;
1758 pmc_rw_op.pm_value = newvalue;
1759
1760 if (PMC_CALL(PMCRW, &pmc_rw_op) < 0)
1761 return (-1);
1762
1763 *oldvaluep = pmc_rw_op.pm_value;
1764 return (0);
1765 }
1766
1767 int
1768 pmc_set(pmc_id_t pmc, pmc_value_t value)
1769 {
1770 struct pmc_op_pmcsetcount sc;
1771
1772 sc.pm_pmcid = pmc;
1773 sc.pm_count = value;
1774
1775 if (PMC_CALL(PMCSETCOUNT, &sc) < 0)
1776 return (-1);
1777 return (0);
1778 }
1779
1780 int
1781 pmc_start(pmc_id_t pmc)
1782 {
1783 struct pmc_op_simple pmc_start_args;
1784
1785 pmc_start_args.pm_pmcid = pmc;
1786 return (PMC_CALL(PMCSTART, &pmc_start_args));
1787 }
1788
1789 int
1790 pmc_stop(pmc_id_t pmc)
1791 {
1792 struct pmc_op_simple pmc_stop_args;
1793
1794 pmc_stop_args.pm_pmcid = pmc;
1795 return (PMC_CALL(PMCSTOP, &pmc_stop_args));
1796 }
1797
1798 int
1799 pmc_width(pmc_id_t pmcid, uint32_t *width)
1800 {
1801 unsigned int i;
1802 enum pmc_class cl;
1803
1804 cl = PMC_ID_TO_CLASS(pmcid);
1805 for (i = 0; i < cpu_info.pm_nclass; i++)
1806 if (cpu_info.pm_classes[i].pm_class == cl) {
1807 *width = cpu_info.pm_classes[i].pm_width;
1808 return (0);
1809 }
1810 errno = EINVAL;
1811 return (-1);
1812 }
1813
1814 int
1815 pmc_write(pmc_id_t pmc, pmc_value_t value)
1816 {
1817 struct pmc_op_pmcrw pmc_write_op;
1818
1819 pmc_write_op.pm_pmcid = pmc;
1820 pmc_write_op.pm_flags = PMC_F_NEWVALUE;
1821 pmc_write_op.pm_value = value;
1822 return (PMC_CALL(PMCRW, &pmc_write_op));
1823 }
1824
1825 int
1826 pmc_writelog(uint32_t userdata)
1827 {
1828 struct pmc_op_writelog wl;
1829
1830 wl.pm_userdata = userdata;
1831 return (PMC_CALL(WRITELOG, &wl));
1832 }
1833