xref: /linux-6.15/tools/perf/util/python.c (revision e7e9943c)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <Python.h>
3 #include <structmember.h>
4 #include <inttypes.h>
5 #include <poll.h>
6 #include <linux/err.h>
7 #include <perf/cpumap.h>
8 #ifdef HAVE_LIBTRACEEVENT
9 #include <event-parse.h>
10 #endif
11 #include <perf/mmap.h>
12 #include "evlist.h"
13 #include "evsel.h"
14 #include "event.h"
15 #include "print_binary.h"
16 #include "thread_map.h"
17 #include "trace-event.h"
18 #include "mmap.h"
19 #include "util/bpf-filter.h"
20 #include "util/env.h"
21 #include "util/kvm-stat.h"
22 #include "util/stat.h"
23 #include "util/kwork.h"
24 #include "util/sample.h"
25 #include "util/lock-contention.h"
26 #include <internal/lib.h>
27 #include "../builtin.h"
28 
/*
 * Python 3 spellings of the string/number constructors, kept behind
 * wrappers so the rest of the file uses a single name for each.
 */
#define _PyUnicode_FromString(arg) \
  PyUnicode_FromString(arg)
#define _PyUnicode_FromFormat(...) \
  PyUnicode_FromFormat(__VA_ARGS__)
#define _PyLong_FromLong(arg) \
  PyLong_FromLong(arg)

/* Module entry point invoked by the CPython import machinery. */
PyMODINIT_FUNC PyInit_perf(void);

/*
 * Build a PyMemberDef entry exposing a field of the raw perf record
 * (union perf_event) embedded in struct pyrf_event; the offset is
 * computed relative to the start of the Python object.
 */
#define member_def(type, member, ptype, help) \
	{ #member, ptype, \
	  offsetof(struct pyrf_event, event) + offsetof(struct type, member), \
	  0, help }

/* Same, but for a field of the parsed struct perf_sample. */
#define sample_member_def(name, member, ptype, help) \
	{ #name, ptype, \
	  offsetof(struct pyrf_event, sample) + offsetof(struct perf_sample, member), \
	  0, help }
47 
/*
 * Base Python object wrapping one perf event record: the raw record
 * copied out of the ring buffer plus its parsed sample.  Every
 * perf.*_event type shares this layout.
 */
struct pyrf_event {
	PyObject_HEAD
	struct evsel *evsel;		/* selector the record was read from */
	struct perf_sample sample;	/* parsed sample fields */
	union perf_event   event;	/* raw record (fixed-size copy) */
};

/*
 * Sample fields exposed on every event type.
 * NOTE(review): the help text for sample_ip says "event type" -- looks
 * like a copy-paste slip for "event ip"; confirm before changing.
 */
#define sample_members \
	sample_member_def(sample_ip, ip, T_ULONGLONG, "event type"),			 \
	sample_member_def(sample_pid, pid, T_INT, "event pid"),			 \
	sample_member_def(sample_tid, tid, T_INT, "event tid"),			 \
	sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"),		 \
	sample_member_def(sample_addr, addr, T_ULONGLONG, "event addr"),		 \
	sample_member_def(sample_id, id, T_ULONGLONG, "event id"),			 \
	sample_member_def(sample_stream_id, stream_id, T_ULONGLONG, "event stream id"), \
	sample_member_def(sample_period, period, T_ULONGLONG, "event period"),		 \
	sample_member_def(sample_cpu, cpu, T_UINT, "event cpu"),
65 
static char pyrf_mmap_event__doc[] = PyDoc_STR("perf mmap event object.");

/* Attributes of perf.mmap_event: sample fields plus the raw mmap record. */
static PyMemberDef pyrf_mmap_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_event_header, misc, T_UINT, "event misc"),
	member_def(perf_record_mmap, pid, T_UINT, "event pid"),
	member_def(perf_record_mmap, tid, T_UINT, "event tid"),
	member_def(perf_record_mmap, start, T_ULONGLONG, "start of the map"),
	member_def(perf_record_mmap, len, T_ULONGLONG, "map length"),
	member_def(perf_record_mmap, pgoff, T_ULONGLONG, "page offset"),
	member_def(perf_record_mmap, filename, T_STRING_INPLACE, "backing store"),
	{ .name = NULL, },	/* sentinel */
};
80 
81 static PyObject *pyrf_mmap_event__repr(struct pyrf_event *pevent)
82 {
83 	PyObject *ret;
84 	char *s;
85 
86 	if (asprintf(&s, "{ type: mmap, pid: %u, tid: %u, start: %#" PRI_lx64 ", "
87 			 "length: %#" PRI_lx64 ", offset: %#" PRI_lx64 ", "
88 			 "filename: %s }",
89 		     pevent->event.mmap.pid, pevent->event.mmap.tid,
90 		     pevent->event.mmap.start, pevent->event.mmap.len,
91 		     pevent->event.mmap.pgoff, pevent->event.mmap.filename) < 0) {
92 		ret = PyErr_NoMemory();
93 	} else {
94 		ret = PyUnicode_FromString(s);
95 		free(s);
96 	}
97 	return ret;
98 }
99 
/* Python type object for perf.mmap_event. */
static PyTypeObject pyrf_mmap_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.mmap_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_mmap_event__doc,
	.tp_members	= pyrf_mmap_event__members,
	.tp_repr	= (reprfunc)pyrf_mmap_event__repr,
};

static char pyrf_task_event__doc[] = PyDoc_STR("perf task (fork/exit) event object.");

/* Attributes of perf.task_event (covers both FORK and EXIT records). */
static PyMemberDef pyrf_task_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_fork, pid, T_UINT, "event pid"),
	member_def(perf_record_fork, ppid, T_UINT, "event ppid"),
	member_def(perf_record_fork, tid, T_UINT, "event tid"),
	member_def(perf_record_fork, ptid, T_UINT, "event ptid"),
	member_def(perf_record_fork, time, T_ULONGLONG, "timestamp"),
	{ .name = NULL, },	/* sentinel */
};

/* tp_repr: label the record "fork" or "exit" from the header type. */
static PyObject *pyrf_task_event__repr(struct pyrf_event *pevent)
{
	return PyUnicode_FromFormat("{ type: %s, pid: %u, ppid: %u, tid: %u, "
				   "ptid: %u, time: %" PRI_lu64 "}",
				   pevent->event.header.type == PERF_RECORD_FORK ? "fork" : "exit",
				   pevent->event.fork.pid,
				   pevent->event.fork.ppid,
				   pevent->event.fork.tid,
				   pevent->event.fork.ptid,
				   pevent->event.fork.time);
}

/* Python type object for perf.task_event. */
static PyTypeObject pyrf_task_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.task_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_task_event__doc,
	.tp_members	= pyrf_task_event__members,
	.tp_repr	= (reprfunc)pyrf_task_event__repr,
};
144 
static char pyrf_comm_event__doc[] = PyDoc_STR("perf comm event object.");

/* Attributes of perf.comm_event. */
static PyMemberDef pyrf_comm_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_comm, pid, T_UINT, "event pid"),
	member_def(perf_record_comm, tid, T_UINT, "event tid"),
	member_def(perf_record_comm, comm, T_STRING_INPLACE, "process name"),
	{ .name = NULL, },	/* sentinel */
};

/* tp_repr for perf.comm_event. */
static PyObject *pyrf_comm_event__repr(struct pyrf_event *pevent)
{
	return PyUnicode_FromFormat("{ type: comm, pid: %u, tid: %u, comm: %s }",
				   pevent->event.comm.pid,
				   pevent->event.comm.tid,
				   pevent->event.comm.comm);
}

/* Python type object for perf.comm_event. */
static PyTypeObject pyrf_comm_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.comm_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_comm_event__doc,
	.tp_members	= pyrf_comm_event__members,
	.tp_repr	= (reprfunc)pyrf_comm_event__repr,
};
173 
static char pyrf_throttle_event__doc[] = PyDoc_STR("perf throttle event object.");

/* Attributes of perf.throttle_event (covers THROTTLE and UNTHROTTLE). */
static PyMemberDef pyrf_throttle_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_throttle, time, T_ULONGLONG, "timestamp"),
	member_def(perf_record_throttle, id, T_ULONGLONG, "event id"),
	member_def(perf_record_throttle, stream_id, T_ULONGLONG, "event stream id"),
	{ .name = NULL, },	/* sentinel */
};

/* tp_repr: labels the record "throttle" or "unthrottle" from header type. */
static PyObject *pyrf_throttle_event__repr(struct pyrf_event *pevent)
{
	/* The throttle payload sits immediately after the event header. */
	struct perf_record_throttle *te = (struct perf_record_throttle *)(&pevent->event.header + 1);

	return PyUnicode_FromFormat("{ type: %sthrottle, time: %" PRI_lu64 ", id: %" PRI_lu64
				   ", stream_id: %" PRI_lu64 " }",
				   pevent->event.header.type == PERF_RECORD_THROTTLE ? "" : "un",
				   te->time, te->id, te->stream_id);
}

/* Python type object for perf.throttle_event. */
static PyTypeObject pyrf_throttle_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.throttle_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_throttle_event__doc,
	.tp_members	= pyrf_throttle_event__members,
	.tp_repr	= (reprfunc)pyrf_throttle_event__repr,
};

static char pyrf_lost_event__doc[] = PyDoc_STR("perf lost event object.");

/* Attributes of perf.lost_event. */
static PyMemberDef pyrf_lost_event__members[] = {
	sample_members
	member_def(perf_record_lost, id, T_ULONGLONG, "event id"),
	member_def(perf_record_lost, lost, T_ULONGLONG, "number of lost events"),
	{ .name = NULL, },	/* sentinel */
};
213 
214 static PyObject *pyrf_lost_event__repr(struct pyrf_event *pevent)
215 {
216 	PyObject *ret;
217 	char *s;
218 
219 	if (asprintf(&s, "{ type: lost, id: %#" PRI_lx64 ", "
220 			 "lost: %#" PRI_lx64 " }",
221 		     pevent->event.lost.id, pevent->event.lost.lost) < 0) {
222 		ret = PyErr_NoMemory();
223 	} else {
224 		ret = PyUnicode_FromString(s);
225 		free(s);
226 	}
227 	return ret;
228 }
229 
/* Python type object for perf.lost_event. */
static PyTypeObject pyrf_lost_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.lost_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_lost_event__doc,
	.tp_members	= pyrf_lost_event__members,
	.tp_repr	= (reprfunc)pyrf_lost_event__repr,
};

static char pyrf_read_event__doc[] = PyDoc_STR("perf read event object.");

/* Attributes of perf.read_event. */
static PyMemberDef pyrf_read_event__members[] = {
	sample_members
	member_def(perf_record_read, pid, T_UINT, "event pid"),
	member_def(perf_record_read, tid, T_UINT, "event tid"),
	{ .name = NULL, },	/* sentinel */
};

/* tp_repr for perf.read_event: pid/tid only, the counter values are not shown. */
static PyObject *pyrf_read_event__repr(struct pyrf_event *pevent)
{
	return PyUnicode_FromFormat("{ type: read, pid: %u, tid: %u }",
				   pevent->event.read.pid,
				   pevent->event.read.tid);
	/*
 	 * FIXME: return the array of read values,
 	 * making this method useful ;-)
 	 */
}

/* Python type object for perf.read_event. */
static PyTypeObject pyrf_read_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.read_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_read_event__doc,
	.tp_members	= pyrf_read_event__members,
	.tp_repr	= (reprfunc)pyrf_read_event__repr,
};

static char pyrf_sample_event__doc[] = PyDoc_STR("perf sample event object.");

/* Attributes of perf.sample_event; tracepoint fields resolve via tp_getattro. */
static PyMemberDef pyrf_sample_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	{ .name = NULL, },	/* sentinel */
};
277 
278 static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
279 {
280 	PyObject *ret;
281 	char *s;
282 
283 	if (asprintf(&s, "{ type: sample }") < 0) {
284 		ret = PyErr_NoMemory();
285 	} else {
286 		ret = PyUnicode_FromString(s);
287 		free(s);
288 	}
289 	return ret;
290 }
291 
292 #ifdef HAVE_LIBTRACEEVENT
/* True when the event came from a tracepoint-type selector. */
static bool is_tracepoint(struct pyrf_event *pevent)
{
	return pevent->evsel->core.attr.type == PERF_TYPE_TRACEPOINT;
}
297 
/*
 * Convert one tracepoint format field from the event's raw_data into a
 * Python object: str or bytearray for array fields, int for scalars.
 */
static PyObject*
tracepoint_field(struct pyrf_event *pe, struct tep_format_field *field)
{
	struct tep_handle *pevent = field->event->tep;
	void *data = pe->sample.raw_data;
	PyObject *ret = NULL;
	unsigned long long val;
	unsigned int offset, len;

	if (field->flags & TEP_FIELD_IS_ARRAY) {
		offset = field->offset;
		len    = field->size;
		if (field->flags & TEP_FIELD_IS_DYNAMIC) {
			/*
			 * Dynamic arrays store a descriptor at the field slot:
			 * low 16 bits = data offset, high 16 bits = length.
			 */
			val     = tep_read_number(pevent, data + offset, len);
			offset  = val;
			len     = offset >> 16;
			offset &= 0xffff;
			/* "Relative" dynamic fields are offset from the field end. */
			if (tep_field_is_relative(field->flags))
				offset += field->offset + field->size;
		}
		if (field->flags & TEP_FIELD_IS_STRING &&
		    is_printable_array(data + offset, len)) {
			ret = PyUnicode_FromString((char *)data + offset);
		} else {
			ret = PyByteArray_FromStringAndSize((const char *) data + offset, len);
			/*
			 * Not printable after all: drop the STRING flag on the
			 * cached format so later events skip the str attempt.
			 */
			field->flags &= ~TEP_FIELD_IS_STRING;
		}
	} else {
		val = tep_read_number(pevent, data + field->offset,
				      field->size);
		/*
		 * NOTE(review): the (unsigned long) cast truncates 64-bit
		 * pointer/unsigned values on 32-bit builds -- confirm intended.
		 */
		if (field->flags & TEP_FIELD_IS_POINTER)
			ret = PyLong_FromUnsignedLong((unsigned long) val);
		else if (field->flags & TEP_FIELD_IS_SIGNED)
			ret = PyLong_FromLong((long) val);
		else
			ret = PyLong_FromUnsignedLong((unsigned long) val);
	}

	return ret;
}
338 
339 static PyObject*
340 get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
341 {
342 	const char *str = _PyUnicode_AsString(PyObject_Str(attr_name));
343 	struct evsel *evsel = pevent->evsel;
344 	struct tep_format_field *field;
345 
346 	if (!evsel->tp_format) {
347 		struct tep_event *tp_format;
348 
349 		tp_format = trace_event__tp_format_id(evsel->core.attr.config);
350 		if (IS_ERR_OR_NULL(tp_format))
351 			return NULL;
352 
353 		evsel->tp_format = tp_format;
354 	}
355 
356 	field = tep_find_any_field(evsel->tp_format, str);
357 	if (!field)
358 		return NULL;
359 
360 	return tracepoint_field(pevent, field);
361 }
362 #endif /* HAVE_LIBTRACEEVENT */
363 
364 static PyObject*
365 pyrf_sample_event__getattro(struct pyrf_event *pevent, PyObject *attr_name)
366 {
367 	PyObject *obj = NULL;
368 
369 #ifdef HAVE_LIBTRACEEVENT
370 	if (is_tracepoint(pevent))
371 		obj = get_tracepoint_field(pevent, attr_name);
372 #endif
373 
374 	return obj ?: PyObject_GenericGetAttr((PyObject *) pevent, attr_name);
375 }
376 
/* Python type object for perf.sample_event. */
static PyTypeObject pyrf_sample_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.sample_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_sample_event__doc,
	.tp_members	= pyrf_sample_event__members,
	.tp_repr	= (reprfunc)pyrf_sample_event__repr,
	.tp_getattro	= (getattrofunc) pyrf_sample_event__getattro,
};

static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");

/* Attributes of perf.context_switch_event (SWITCH / SWITCH_CPU_WIDE). */
static PyMemberDef pyrf_context_switch_event__members[] = {
	sample_members
	member_def(perf_event_header, type, T_UINT, "event type"),
	member_def(perf_record_switch, next_prev_pid, T_UINT, "next/prev pid"),
	member_def(perf_record_switch, next_prev_tid, T_UINT, "next/prev tid"),
	{ .name = NULL, },	/* sentinel */
};
397 
398 static PyObject *pyrf_context_switch_event__repr(struct pyrf_event *pevent)
399 {
400 	PyObject *ret;
401 	char *s;
402 
403 	if (asprintf(&s, "{ type: context_switch, next_prev_pid: %u, next_prev_tid: %u, switch_out: %u }",
404 		     pevent->event.context_switch.next_prev_pid,
405 		     pevent->event.context_switch.next_prev_tid,
406 		     !!(pevent->event.header.misc & PERF_RECORD_MISC_SWITCH_OUT)) < 0) {
407 		ret = PyErr_NoMemory();
408 	} else {
409 		ret = PyUnicode_FromString(s);
410 		free(s);
411 	}
412 	return ret;
413 }
414 
/* Python type object for perf.context_switch_event. */
static PyTypeObject pyrf_context_switch_event__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.context_switch_event",
	.tp_basicsize	= sizeof(struct pyrf_event),
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_context_switch_event__doc,
	.tp_members	= pyrf_context_switch_event__members,
	.tp_repr	= (reprfunc)pyrf_context_switch_event__repr,
};
424 
425 static int pyrf_event__setup_types(void)
426 {
427 	int err;
428 	pyrf_mmap_event__type.tp_new =
429 	pyrf_task_event__type.tp_new =
430 	pyrf_comm_event__type.tp_new =
431 	pyrf_lost_event__type.tp_new =
432 	pyrf_read_event__type.tp_new =
433 	pyrf_sample_event__type.tp_new =
434 	pyrf_context_switch_event__type.tp_new =
435 	pyrf_throttle_event__type.tp_new = PyType_GenericNew;
436 	err = PyType_Ready(&pyrf_mmap_event__type);
437 	if (err < 0)
438 		goto out;
439 	err = PyType_Ready(&pyrf_lost_event__type);
440 	if (err < 0)
441 		goto out;
442 	err = PyType_Ready(&pyrf_task_event__type);
443 	if (err < 0)
444 		goto out;
445 	err = PyType_Ready(&pyrf_comm_event__type);
446 	if (err < 0)
447 		goto out;
448 	err = PyType_Ready(&pyrf_throttle_event__type);
449 	if (err < 0)
450 		goto out;
451 	err = PyType_Ready(&pyrf_read_event__type);
452 	if (err < 0)
453 		goto out;
454 	err = PyType_Ready(&pyrf_sample_event__type);
455 	if (err < 0)
456 		goto out;
457 	err = PyType_Ready(&pyrf_context_switch_event__type);
458 	if (err < 0)
459 		goto out;
460 out:
461 	return err;
462 }
463 
/*
 * PERF_RECORD_* type -> Python type object.  Unlisted record types are
 * NULL; pyrf_event__new() range-checks before indexing this table.
 */
static PyTypeObject *pyrf_event__type[] = {
	[PERF_RECORD_MMAP]	 = &pyrf_mmap_event__type,
	[PERF_RECORD_LOST]	 = &pyrf_lost_event__type,
	[PERF_RECORD_COMM]	 = &pyrf_comm_event__type,
	[PERF_RECORD_EXIT]	 = &pyrf_task_event__type,
	[PERF_RECORD_THROTTLE]	 = &pyrf_throttle_event__type,
	[PERF_RECORD_UNTHROTTLE] = &pyrf_throttle_event__type,
	[PERF_RECORD_FORK]	 = &pyrf_task_event__type,
	[PERF_RECORD_READ]	 = &pyrf_read_event__type,
	[PERF_RECORD_SAMPLE]	 = &pyrf_sample_event__type,
	[PERF_RECORD_SWITCH]	 = &pyrf_context_switch_event__type,
	[PERF_RECORD_SWITCH_CPU_WIDE]  = &pyrf_context_switch_event__type,
};
477 
478 static PyObject *pyrf_event__new(union perf_event *event)
479 {
480 	struct pyrf_event *pevent;
481 	PyTypeObject *ptype;
482 
483 	if ((event->header.type < PERF_RECORD_MMAP ||
484 	     event->header.type > PERF_RECORD_SAMPLE) &&
485 	    !(event->header.type == PERF_RECORD_SWITCH ||
486 	      event->header.type == PERF_RECORD_SWITCH_CPU_WIDE))
487 		return NULL;
488 
489 	ptype = pyrf_event__type[event->header.type];
490 	pevent = PyObject_New(struct pyrf_event, ptype);
491 	if (pevent != NULL)
492 		memcpy(&pevent->event, event, event->header.size);
493 	return (PyObject *)pevent;
494 }
495 
/* Python object wrapping a libperf cpu map (perf.cpu_map). */
struct pyrf_cpu_map {
	PyObject_HEAD

	struct perf_cpu_map *cpus;	/* owned reference, put in tp_dealloc */
};
501 
502 static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
503 			      PyObject *args, PyObject *kwargs)
504 {
505 	static char *kwlist[] = { "cpustr", NULL };
506 	char *cpustr = NULL;
507 
508 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
509 					 kwlist, &cpustr))
510 		return -1;
511 
512 	pcpus->cpus = perf_cpu_map__new(cpustr);
513 	if (pcpus->cpus == NULL)
514 		return -1;
515 	return 0;
516 }
517 
/* tp_dealloc: drop the libperf cpu map reference, then free the object. */
static void pyrf_cpu_map__delete(struct pyrf_cpu_map *pcpus)
{
	perf_cpu_map__put(pcpus->cpus);
	Py_TYPE(pcpus)->tp_free((PyObject*)pcpus);
}

/* sq_length: len(cpu_map) is the number of cpus in the map. */
static Py_ssize_t pyrf_cpu_map__length(PyObject *obj)
{
	struct pyrf_cpu_map *pcpus = (void *)obj;

	return perf_cpu_map__nr(pcpus->cpus);
}
530 
531 static PyObject *pyrf_cpu_map__item(PyObject *obj, Py_ssize_t i)
532 {
533 	struct pyrf_cpu_map *pcpus = (void *)obj;
534 
535 	if (i >= perf_cpu_map__nr(pcpus->cpus))
536 		return NULL;
537 
538 	return Py_BuildValue("i", perf_cpu_map__cpu(pcpus->cpus, i).cpu);
539 }
540 
/* Sequence protocol: supports len() and indexing/iteration. */
static PySequenceMethods pyrf_cpu_map__sequence_methods = {
	.sq_length = pyrf_cpu_map__length,
	.sq_item   = pyrf_cpu_map__item,
};

static char pyrf_cpu_map__doc[] = PyDoc_STR("cpu map object.");

/* Python type object for perf.cpu_map. */
static PyTypeObject pyrf_cpu_map__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.cpu_map",
	.tp_basicsize	= sizeof(struct pyrf_cpu_map),
	.tp_dealloc	= (destructor)pyrf_cpu_map__delete,
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_cpu_map__doc,
	.tp_as_sequence	= &pyrf_cpu_map__sequence_methods,
	.tp_init	= (initproc)pyrf_cpu_map__init,
};

/* Finalize the perf.cpu_map type; returns 0 or a negative error. */
static int pyrf_cpu_map__setup_types(void)
{
	pyrf_cpu_map__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_cpu_map__type);
}
564 
/* Python object wrapping a libperf thread map (perf.thread_map). */
struct pyrf_thread_map {
	PyObject_HEAD

	struct perf_thread_map *threads;	/* owned reference, put in tp_dealloc */
};
570 
571 static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
572 				 PyObject *args, PyObject *kwargs)
573 {
574 	static char *kwlist[] = { "pid", "tid", "uid", NULL };
575 	int pid = -1, tid = -1, uid = UINT_MAX;
576 
577 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii",
578 					 kwlist, &pid, &tid, &uid))
579 		return -1;
580 
581 	pthreads->threads = thread_map__new(pid, tid, uid);
582 	if (pthreads->threads == NULL)
583 		return -1;
584 	return 0;
585 }
586 
/* tp_dealloc: drop the libperf thread map reference, then free the object. */
static void pyrf_thread_map__delete(struct pyrf_thread_map *pthreads)
{
	perf_thread_map__put(pthreads->threads);
	Py_TYPE(pthreads)->tp_free((PyObject*)pthreads);
}

/* sq_length: len(thread_map) is the number of threads in the map. */
static Py_ssize_t pyrf_thread_map__length(PyObject *obj)
{
	struct pyrf_thread_map *pthreads = (void *)obj;

	return perf_thread_map__nr(pthreads->threads);
}
599 
600 static PyObject *pyrf_thread_map__item(PyObject *obj, Py_ssize_t i)
601 {
602 	struct pyrf_thread_map *pthreads = (void *)obj;
603 
604 	if (i >= perf_thread_map__nr(pthreads->threads))
605 		return NULL;
606 
607 	return Py_BuildValue("i", perf_thread_map__pid(pthreads->threads, i));
608 }
609 
/* Sequence protocol: supports len() and indexing/iteration. */
static PySequenceMethods pyrf_thread_map__sequence_methods = {
	.sq_length = pyrf_thread_map__length,
	.sq_item   = pyrf_thread_map__item,
};

static char pyrf_thread_map__doc[] = PyDoc_STR("thread map object.");

/* Python type object for perf.thread_map. */
static PyTypeObject pyrf_thread_map__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.thread_map",
	.tp_basicsize	= sizeof(struct pyrf_thread_map),
	.tp_dealloc	= (destructor)pyrf_thread_map__delete,
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_thread_map__doc,
	.tp_as_sequence	= &pyrf_thread_map__sequence_methods,
	.tp_init	= (initproc)pyrf_thread_map__init,
};

/* Finalize the perf.thread_map type; returns 0 or a negative error. */
static int pyrf_thread_map__setup_types(void)
{
	pyrf_thread_map__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_thread_map__type);
}
633 
/* Python object embedding a perf event selector (perf.evsel). */
struct pyrf_evsel {
	PyObject_HEAD

	struct evsel evsel;	/* embedded, torn down with evsel__exit() */
};
639 
/*
 * tp_init for perf.evsel: map keyword arguments onto a perf_event_attr
 * and initialize the embedded evsel.  Defaults to a hardware cpu-cycles
 * counter sampling period and tid.
 *
 * The format string below must stay in one-to-one correspondence with
 * kwlist and the trailing argument pointers ('i' = u32-sized, 'K' =
 * u64).  NOTE(review): &idx appears after the last format unit and
 * "idx" is not in kwlist, so it looks unreachable from Python -- verify
 * the format/kwlist/argument counts before relying on idx != 0.
 */
static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
			    PyObject *args, PyObject *kwargs)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
		.sample_type = PERF_SAMPLE_PERIOD | PERF_SAMPLE_TID,
	};
	static char *kwlist[] = {
		"type",
		"config",
		"sample_freq",
		"sample_period",
		"sample_type",
		"read_format",
		"disabled",
		"inherit",
		"pinned",
		"exclusive",
		"exclude_user",
		"exclude_kernel",
		"exclude_hv",
		"exclude_idle",
		"mmap",
		"context_switch",
		"comm",
		"freq",
		"inherit_stat",
		"enable_on_exec",
		"task",
		"watermark",
		"precise_ip",
		"mmap_data",
		"sample_id_all",
		"wakeup_events",
		"bp_type",
		"bp_addr",
		"bp_len",
		 NULL
	};
	/*
	 * Bitfield members of perf_event_attr cannot be passed by address
	 * to PyArg_ParseTupleAndKeywords(), so parse into full-width
	 * temporaries and copy them in afterwards.
	 */
	u64 sample_period = 0;
	u32 disabled = 0,
	    inherit = 0,
	    pinned = 0,
	    exclusive = 0,
	    exclude_user = 0,
	    exclude_kernel = 0,
	    exclude_hv = 0,
	    exclude_idle = 0,
	    mmap = 0,
	    context_switch = 0,
	    comm = 0,
	    freq = 1,
	    inherit_stat = 0,
	    enable_on_exec = 0,
	    task = 0,
	    watermark = 0,
	    precise_ip = 0,
	    mmap_data = 0,
	    sample_id_all = 1;
	int idx = 0;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs,
					 "|iKiKKiiiiiiiiiiiiiiiiiiiiiiKK", kwlist,
					 &attr.type, &attr.config, &attr.sample_freq,
					 &sample_period, &attr.sample_type,
					 &attr.read_format, &disabled, &inherit,
					 &pinned, &exclusive, &exclude_user,
					 &exclude_kernel, &exclude_hv, &exclude_idle,
					 &mmap, &context_switch, &comm, &freq, &inherit_stat,
					 &enable_on_exec, &task, &watermark,
					 &precise_ip, &mmap_data, &sample_id_all,
					 &attr.wakeup_events, &attr.bp_type,
					 &attr.bp_addr, &attr.bp_len, &idx))
		return -1;

	/* union... sample_period and sample_freq share storage in the attr. */
	if (sample_period != 0) {
		if (attr.sample_freq != 0)
			return -1; /* FIXME: throw right exception */
		attr.sample_period = sample_period;
	}

	/* Bitfields */
	attr.disabled	    = disabled;
	attr.inherit	    = inherit;
	attr.pinned	    = pinned;
	attr.exclusive	    = exclusive;
	attr.exclude_user   = exclude_user;
	attr.exclude_kernel = exclude_kernel;
	attr.exclude_hv	    = exclude_hv;
	attr.exclude_idle   = exclude_idle;
	attr.mmap	    = mmap;
	attr.context_switch = context_switch;
	attr.comm	    = comm;
	attr.freq	    = freq;
	attr.inherit_stat   = inherit_stat;
	attr.enable_on_exec = enable_on_exec;
	attr.task	    = task;
	attr.watermark	    = watermark;
	attr.precise_ip	    = precise_ip;
	attr.mmap_data	    = mmap_data;
	attr.sample_id_all  = sample_id_all;
	attr.size	    = sizeof(attr);

	evsel__init(&pevsel->evsel, &attr, idx);
	return 0;
}
748 
/* tp_dealloc: release evsel-owned resources, then free the object. */
static void pyrf_evsel__delete(struct pyrf_evsel *pevsel)
{
	evsel__exit(&pevsel->evsel);
	Py_TYPE(pevsel)->tp_free((PyObject*)pevsel);
}
754 
/*
 * evsel.open(cpus=None, threads=None, group=False, inherit=False):
 * open the event's file descriptors on the given cpu/thread maps.
 * Returns None, or raises OSError on failure.
 *
 * NOTE(review): pcpus/pthreads are cast without a type check -- passing
 * anything other than perf.cpu_map / perf.thread_map is undefined;
 * "group" is parsed but never used here.  Confirm both are intentional.
 */
static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
				  PyObject *args, PyObject *kwargs)
{
	struct evsel *evsel = &pevsel->evsel;
	struct perf_cpu_map *cpus = NULL;
	struct perf_thread_map *threads = NULL;
	PyObject *pcpus = NULL, *pthreads = NULL;
	int group = 0, inherit = 0;
	static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL };

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
					 &pcpus, &pthreads, &group, &inherit))
		return NULL;

	if (pthreads != NULL)
		threads = ((struct pyrf_thread_map *)pthreads)->threads;

	if (pcpus != NULL)
		cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;

	evsel->core.attr.inherit = inherit;
	/*
	 * This will group just the fds for this single evsel, to group
	 * multiple events, use evlist.open().
	 */
	if (evsel__open(evsel, cpus, threads) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}
788 
/* Methods exposed on perf.evsel instances. */
static PyMethodDef pyrf_evsel__methods[] = {
	{
		.ml_name  = "open",
		.ml_meth  = (PyCFunction)pyrf_evsel__open,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("open the event selector file descriptor table.")
	},
	{ .ml_name = NULL, }	/* sentinel */
};

static char pyrf_evsel__doc[] = PyDoc_STR("perf event selector list object.");

/* Python type object for perf.evsel. */
static PyTypeObject pyrf_evsel__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.evsel",
	.tp_basicsize	= sizeof(struct pyrf_evsel),
	.tp_dealloc	= (destructor)pyrf_evsel__delete,
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_doc		= pyrf_evsel__doc,
	.tp_methods	= pyrf_evsel__methods,
	.tp_init	= (initproc)pyrf_evsel__init,
};

/* Finalize the perf.evsel type; returns 0 or a negative error. */
static int pyrf_evsel__setup_types(void)
{
	pyrf_evsel__type.tp_new = PyType_GenericNew;
	return PyType_Ready(&pyrf_evsel__type);
}
817 
/* Python object embedding an event selector list (perf.evlist). */
struct pyrf_evlist {
	PyObject_HEAD

	struct evlist evlist;	/* embedded, torn down with evlist__exit() */
};
823 
/*
 * tp_init for perf.evlist: takes a cpu map and a thread map (both
 * required, positional).
 *
 * NOTE(review): pcpus/pthreads are cast without a type check -- passing
 * other objects here is undefined; confirm callers always pass
 * perf.cpu_map / perf.thread_map instances.
 */
static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
			     PyObject *args, PyObject *kwargs __maybe_unused)
{
	PyObject *pcpus = NULL, *pthreads = NULL;
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
		return -1;

	threads = ((struct pyrf_thread_map *)pthreads)->threads;
	cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
	evlist__init(&pevlist->evlist, cpus, threads);
	return 0;
}
839 
/* tp_dealloc: release evlist-owned resources, then free the object. */
static void pyrf_evlist__delete(struct pyrf_evlist *pevlist)
{
	evlist__exit(&pevlist->evlist);
	Py_TYPE(pevlist)->tp_free((PyObject*)pevlist);
}
845 
/*
 * evlist.mmap(pages=128, overwrite=False): mmap the ring buffers.
 * Returns None, or raises OSError on failure.
 *
 * NOTE(review): "overwrite" is parsed but never passed to
 * evlist__mmap() -- it is silently ignored; confirm that is intended.
 */
static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
				   PyObject *args, PyObject *kwargs)
{
	struct evlist *evlist = &pevlist->evlist;
	static char *kwlist[] = { "pages", "overwrite", NULL };
	int pages = 128, overwrite = false;

	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
					 &pages, &overwrite))
		return NULL;

	if (evlist__mmap(evlist, pages) < 0) {
		PyErr_SetFromErrno(PyExc_OSError);
		return NULL;
	}

	Py_INCREF(Py_None);
	return Py_None;
}
865 
866 static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
867 				   PyObject *args, PyObject *kwargs)
868 {
869 	struct evlist *evlist = &pevlist->evlist;
870 	static char *kwlist[] = { "timeout", NULL };
871 	int timeout = -1, n;
872 
873 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
874 		return NULL;
875 
876 	n = evlist__poll(evlist, timeout);
877 	if (n < 0) {
878 		PyErr_SetFromErrno(PyExc_OSError);
879 		return NULL;
880 	}
881 
882 	return Py_BuildValue("i", n);
883 }
884 
885 static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
886 					 PyObject *args __maybe_unused,
887 					 PyObject *kwargs __maybe_unused)
888 {
889 	struct evlist *evlist = &pevlist->evlist;
890         PyObject *list = PyList_New(0);
891 	int i;
892 
893 	for (i = 0; i < evlist->core.pollfd.nr; ++i) {
894 		PyObject *file;
895 		file = PyFile_FromFd(evlist->core.pollfd.entries[i].fd, "perf", "r", -1,
896 				     NULL, NULL, NULL, 0);
897 		if (file == NULL)
898 			goto free_list;
899 
900 		if (PyList_Append(list, file) != 0) {
901 			Py_DECREF(file);
902 			goto free_list;
903 		}
904 
905 		Py_DECREF(file);
906 	}
907 
908 	return list;
909 free_list:
910 	return PyErr_NoMemory();
911 }
912 
913 
914 static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist,
915 				  PyObject *args,
916 				  PyObject *kwargs __maybe_unused)
917 {
918 	struct evlist *evlist = &pevlist->evlist;
919 	PyObject *pevsel;
920 	struct evsel *evsel;
921 
922 	if (!PyArg_ParseTuple(args, "O", &pevsel))
923 		return NULL;
924 
925 	Py_INCREF(pevsel);
926 	evsel = &((struct pyrf_evsel *)pevsel)->evsel;
927 	evsel->core.idx = evlist->core.nr_entries;
928 	evlist__add(evlist, evsel);
929 
930 	return Py_BuildValue("i", evlist->core.nr_entries);
931 }
932 
933 static struct mmap *get_md(struct evlist *evlist, int cpu)
934 {
935 	int i;
936 
937 	for (i = 0; i < evlist->core.nr_mmaps; i++) {
938 		struct mmap *md = &evlist->mmap[i];
939 
940 		if (md->core.cpu.cpu == cpu)
941 			return md;
942 	}
943 
944 	return NULL;
945 }
946 
947 static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
948 					  PyObject *args, PyObject *kwargs)
949 {
950 	struct evlist *evlist = &pevlist->evlist;
951 	union perf_event *event;
952 	int sample_id_all = 1, cpu;
953 	static char *kwlist[] = { "cpu", "sample_id_all", NULL };
954 	struct mmap *md;
955 	int err;
956 
957 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
958 					 &cpu, &sample_id_all))
959 		return NULL;
960 
961 	md = get_md(evlist, cpu);
962 	if (!md)
963 		return NULL;
964 
965 	if (perf_mmap__read_init(&md->core) < 0)
966 		goto end;
967 
968 	event = perf_mmap__read_event(&md->core);
969 	if (event != NULL) {
970 		PyObject *pyevent = pyrf_event__new(event);
971 		struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
972 		struct evsel *evsel;
973 
974 		if (pyevent == NULL)
975 			return PyErr_NoMemory();
976 
977 		evsel = evlist__event2evsel(evlist, event);
978 		if (!evsel) {
979 			Py_INCREF(Py_None);
980 			return Py_None;
981 		}
982 
983 		pevent->evsel = evsel;
984 
985 		err = evsel__parse_sample(evsel, event, &pevent->sample);
986 
987 		/* Consume the even only after we parsed it out. */
988 		perf_mmap__consume(&md->core);
989 
990 		if (err)
991 			return PyErr_Format(PyExc_OSError,
992 					    "perf: can't parse sample, err=%d", err);
993 		return pyevent;
994 	}
995 end:
996 	Py_INCREF(Py_None);
997 	return Py_None;
998 }
999 
1000 static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
1001 				   PyObject *args, PyObject *kwargs)
1002 {
1003 	struct evlist *evlist = &pevlist->evlist;
1004 
1005 	if (evlist__open(evlist) < 0) {
1006 		PyErr_SetFromErrno(PyExc_OSError);
1007 		return NULL;
1008 	}
1009 
1010 	Py_INCREF(Py_None);
1011 	return Py_None;
1012 }
1013 
/* Methods exposed on perf.evlist instances. */
static PyMethodDef pyrf_evlist__methods[] = {
	{
		.ml_name  = "mmap",
		.ml_meth  = (PyCFunction)pyrf_evlist__mmap,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("mmap the file descriptor table.")
	},
	{
		.ml_name  = "open",
		.ml_meth  = (PyCFunction)pyrf_evlist__open,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("open the file descriptors.")
	},
	{
		.ml_name  = "poll",
		.ml_meth  = (PyCFunction)pyrf_evlist__poll,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("poll the file descriptor table.")
	},
	{
		.ml_name  = "get_pollfd",
		.ml_meth  = (PyCFunction)pyrf_evlist__get_pollfd,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("get the poll file descriptor table.")
	},
	{
		.ml_name  = "add",
		.ml_meth  = (PyCFunction)pyrf_evlist__add,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("adds an event selector to the list.")
	},
	{
		.ml_name  = "read_on_cpu",
		.ml_meth  = (PyCFunction)pyrf_evlist__read_on_cpu,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("reads an event.")
	},
	{ .ml_name = NULL, }	/* sentinel */
};
1053 
1054 static Py_ssize_t pyrf_evlist__length(PyObject *obj)
1055 {
1056 	struct pyrf_evlist *pevlist = (void *)obj;
1057 
1058 	return pevlist->evlist.core.nr_entries;
1059 }
1060 
1061 static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
1062 {
1063 	struct pyrf_evlist *pevlist = (void *)obj;
1064 	struct evsel *pos;
1065 
1066 	if (i >= pevlist->evlist.core.nr_entries)
1067 		return NULL;
1068 
1069 	evlist__for_each_entry(&pevlist->evlist, pos) {
1070 		if (i-- == 0)
1071 			break;
1072 	}
1073 
1074 	return Py_BuildValue("O", container_of(pos, struct pyrf_evsel, evsel));
1075 }
1076 
/* Sequence protocol: enables len(evlist) and evlist[i]. */
static PySequenceMethods pyrf_evlist__sequence_methods = {
	.sq_length = pyrf_evlist__length,
	.sq_item   = pyrf_evlist__item,
};

/* Docstring for the perf.evlist type. */
static char pyrf_evlist__doc[] = PyDoc_STR("perf event selector list object.");
1083 
/* Python type object backing perf.evlist; finalized by pyrf_evlist__setup_types(). */
static PyTypeObject pyrf_evlist__type = {
	PyVarObject_HEAD_INIT(NULL, 0)
	.tp_name	= "perf.evlist",
	.tp_basicsize	= sizeof(struct pyrf_evlist),
	.tp_dealloc	= (destructor)pyrf_evlist__delete,
	.tp_flags	= Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
	.tp_as_sequence	= &pyrf_evlist__sequence_methods,
	.tp_doc		= pyrf_evlist__doc,
	.tp_methods	= pyrf_evlist__methods,
	.tp_init	= (initproc)pyrf_evlist__init,
};
1095 
1096 static int pyrf_evlist__setup_types(void)
1097 {
1098 	pyrf_evlist__type.tp_new = PyType_GenericNew;
1099 	return PyType_Ready(&pyrf_evlist__type);
1100 }
1101 
/* Expand PERF_CONST(FOO) to { "FOO", PERF_FOO } for the table below. */
#define PERF_CONST(name) { #name, PERF_##name }

/*
 * perf_event_open() constants exported into the module dict at init
 * time, so Python code can use e.g. perf.TYPE_HARDWARE.  NULL name
 * terminates the table.
 */
static struct {
	const char *name;
	int	    value;
} perf__constants[] = {
	PERF_CONST(TYPE_HARDWARE),
	PERF_CONST(TYPE_SOFTWARE),
	PERF_CONST(TYPE_TRACEPOINT),
	PERF_CONST(TYPE_HW_CACHE),
	PERF_CONST(TYPE_RAW),
	PERF_CONST(TYPE_BREAKPOINT),

	PERF_CONST(COUNT_HW_CPU_CYCLES),
	PERF_CONST(COUNT_HW_INSTRUCTIONS),
	PERF_CONST(COUNT_HW_CACHE_REFERENCES),
	PERF_CONST(COUNT_HW_CACHE_MISSES),
	PERF_CONST(COUNT_HW_BRANCH_INSTRUCTIONS),
	PERF_CONST(COUNT_HW_BRANCH_MISSES),
	PERF_CONST(COUNT_HW_BUS_CYCLES),
	PERF_CONST(COUNT_HW_CACHE_L1D),
	PERF_CONST(COUNT_HW_CACHE_L1I),
	PERF_CONST(COUNT_HW_CACHE_LL),
	PERF_CONST(COUNT_HW_CACHE_DTLB),
	PERF_CONST(COUNT_HW_CACHE_ITLB),
	PERF_CONST(COUNT_HW_CACHE_BPU),
	PERF_CONST(COUNT_HW_CACHE_OP_READ),
	PERF_CONST(COUNT_HW_CACHE_OP_WRITE),
	PERF_CONST(COUNT_HW_CACHE_OP_PREFETCH),
	PERF_CONST(COUNT_HW_CACHE_RESULT_ACCESS),
	PERF_CONST(COUNT_HW_CACHE_RESULT_MISS),

	PERF_CONST(COUNT_HW_STALLED_CYCLES_FRONTEND),
	PERF_CONST(COUNT_HW_STALLED_CYCLES_BACKEND),

	PERF_CONST(COUNT_SW_CPU_CLOCK),
	PERF_CONST(COUNT_SW_TASK_CLOCK),
	PERF_CONST(COUNT_SW_PAGE_FAULTS),
	PERF_CONST(COUNT_SW_CONTEXT_SWITCHES),
	PERF_CONST(COUNT_SW_CPU_MIGRATIONS),
	PERF_CONST(COUNT_SW_PAGE_FAULTS_MIN),
	PERF_CONST(COUNT_SW_PAGE_FAULTS_MAJ),
	PERF_CONST(COUNT_SW_ALIGNMENT_FAULTS),
	PERF_CONST(COUNT_SW_EMULATION_FAULTS),
	PERF_CONST(COUNT_SW_DUMMY),

	PERF_CONST(SAMPLE_IP),
	PERF_CONST(SAMPLE_TID),
	PERF_CONST(SAMPLE_TIME),
	PERF_CONST(SAMPLE_ADDR),
	PERF_CONST(SAMPLE_READ),
	PERF_CONST(SAMPLE_CALLCHAIN),
	PERF_CONST(SAMPLE_ID),
	PERF_CONST(SAMPLE_CPU),
	PERF_CONST(SAMPLE_PERIOD),
	PERF_CONST(SAMPLE_STREAM_ID),
	PERF_CONST(SAMPLE_RAW),

	PERF_CONST(FORMAT_TOTAL_TIME_ENABLED),
	PERF_CONST(FORMAT_TOTAL_TIME_RUNNING),
	PERF_CONST(FORMAT_ID),
	PERF_CONST(FORMAT_GROUP),

	PERF_CONST(RECORD_MMAP),
	PERF_CONST(RECORD_LOST),
	PERF_CONST(RECORD_COMM),
	PERF_CONST(RECORD_EXIT),
	PERF_CONST(RECORD_THROTTLE),
	PERF_CONST(RECORD_UNTHROTTLE),
	PERF_CONST(RECORD_FORK),
	PERF_CONST(RECORD_READ),
	PERF_CONST(RECORD_SAMPLE),
	PERF_CONST(RECORD_MMAP2),
	PERF_CONST(RECORD_AUX),
	PERF_CONST(RECORD_ITRACE_START),
	PERF_CONST(RECORD_LOST_SAMPLES),
	PERF_CONST(RECORD_SWITCH),
	PERF_CONST(RECORD_SWITCH_CPU_WIDE),

	PERF_CONST(RECORD_MISC_SWITCH_OUT),
	{ .name = NULL, },
};
1184 
1185 static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
1186 				  PyObject *args, PyObject *kwargs)
1187 {
1188 #ifndef HAVE_LIBTRACEEVENT
1189 	return NULL;
1190 #else
1191 	struct tep_event *tp_format;
1192 	static char *kwlist[] = { "sys", "name", NULL };
1193 	char *sys  = NULL;
1194 	char *name = NULL;
1195 
1196 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ss", kwlist,
1197 					 &sys, &name))
1198 		return NULL;
1199 
1200 	tp_format = trace_event__tp_format(sys, name);
1201 	if (IS_ERR(tp_format))
1202 		return PyLong_FromLong(-1);
1203 
1204 	return PyLong_FromLong(tp_format->id);
1205 #endif // HAVE_LIBTRACEEVENT
1206 }
1207 
/* Module-level functions of the perf module; NULL ml_name terminates the table. */
static PyMethodDef perf__methods[] = {
	{
		.ml_name  = "tracepoint",
		.ml_meth  = (PyCFunction) pyrf__tracepoint,
		.ml_flags = METH_VARARGS | METH_KEYWORDS,
		.ml_doc	  = PyDoc_STR("Get tracepoint config.")
	},
	{ .ml_name = NULL, }
};
1217 
1218 PyMODINIT_FUNC PyInit_perf(void)
1219 {
1220 	PyObject *obj;
1221 	int i;
1222 	PyObject *dict;
1223 	static struct PyModuleDef moduledef = {
1224 		PyModuleDef_HEAD_INIT,
1225 		"perf",			/* m_name */
1226 		"",			/* m_doc */
1227 		-1,			/* m_size */
1228 		perf__methods,		/* m_methods */
1229 		NULL,			/* m_reload */
1230 		NULL,			/* m_traverse */
1231 		NULL,			/* m_clear */
1232 		NULL,			/* m_free */
1233 	};
1234 	PyObject *module = PyModule_Create(&moduledef);
1235 
1236 	if (module == NULL ||
1237 	    pyrf_event__setup_types() < 0 ||
1238 	    pyrf_evlist__setup_types() < 0 ||
1239 	    pyrf_evsel__setup_types() < 0 ||
1240 	    pyrf_thread_map__setup_types() < 0 ||
1241 	    pyrf_cpu_map__setup_types() < 0)
1242 		return module;
1243 
1244 	/* The page_size is placed in util object. */
1245 	page_size = sysconf(_SC_PAGE_SIZE);
1246 
1247 	Py_INCREF(&pyrf_evlist__type);
1248 	PyModule_AddObject(module, "evlist", (PyObject*)&pyrf_evlist__type);
1249 
1250 	Py_INCREF(&pyrf_evsel__type);
1251 	PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);
1252 
1253 	Py_INCREF(&pyrf_mmap_event__type);
1254 	PyModule_AddObject(module, "mmap_event", (PyObject *)&pyrf_mmap_event__type);
1255 
1256 	Py_INCREF(&pyrf_lost_event__type);
1257 	PyModule_AddObject(module, "lost_event", (PyObject *)&pyrf_lost_event__type);
1258 
1259 	Py_INCREF(&pyrf_comm_event__type);
1260 	PyModule_AddObject(module, "comm_event", (PyObject *)&pyrf_comm_event__type);
1261 
1262 	Py_INCREF(&pyrf_task_event__type);
1263 	PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);
1264 
1265 	Py_INCREF(&pyrf_throttle_event__type);
1266 	PyModule_AddObject(module, "throttle_event", (PyObject *)&pyrf_throttle_event__type);
1267 
1268 	Py_INCREF(&pyrf_task_event__type);
1269 	PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);
1270 
1271 	Py_INCREF(&pyrf_read_event__type);
1272 	PyModule_AddObject(module, "read_event", (PyObject *)&pyrf_read_event__type);
1273 
1274 	Py_INCREF(&pyrf_sample_event__type);
1275 	PyModule_AddObject(module, "sample_event", (PyObject *)&pyrf_sample_event__type);
1276 
1277 	Py_INCREF(&pyrf_context_switch_event__type);
1278 	PyModule_AddObject(module, "switch_event", (PyObject *)&pyrf_context_switch_event__type);
1279 
1280 	Py_INCREF(&pyrf_thread_map__type);
1281 	PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);
1282 
1283 	Py_INCREF(&pyrf_cpu_map__type);
1284 	PyModule_AddObject(module, "cpu_map", (PyObject*)&pyrf_cpu_map__type);
1285 
1286 	dict = PyModule_GetDict(module);
1287 	if (dict == NULL)
1288 		goto error;
1289 
1290 	for (i = 0; perf__constants[i].name != NULL; i++) {
1291 		obj = PyLong_FromLong(perf__constants[i].value);
1292 		if (obj == NULL)
1293 			goto error;
1294 		PyDict_SetItemString(dict, perf__constants[i].name, obj);
1295 		Py_DECREF(obj);
1296 	}
1297 
1298 error:
1299 	if (PyErr_Occurred())
1300 		PyErr_SetString(PyExc_ImportError, "perf: Init failed!");
1301 	return module;
1302 }
1303 
1304 
1305 /* The following are stubs to avoid dragging in builtin-* objects. */
1306 /* TODO: move the code out of the builtin-* file into util. */
1307 
/* Stub definition: defaults to the perf maximum stack depth. */
unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
1309 
#ifdef HAVE_KVM_STAT_SUPPORT
/*
 * kvm-stat stubs: the real implementations live in builtin code that
 * isn't linked into the python module; these keep the link happy and
 * report "no event matched"/do nothing.
 */
bool kvm_entry_event(struct evsel *evsel __maybe_unused)
{
	return false;
}

bool kvm_exit_event(struct evsel *evsel __maybe_unused)
{
	return false;
}

bool exit_event_begin(struct evsel *evsel __maybe_unused,
		      struct perf_sample *sample  __maybe_unused,
		      struct event_key *key  __maybe_unused)
{
	return false;
}

bool exit_event_end(struct evsel *evsel __maybe_unused,
		    struct perf_sample *sample __maybe_unused,
		    struct event_key *key __maybe_unused)
{
	return false;
}

/* No-op: nothing to decode without the builtin kvm-stat code. */
void exit_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
			   struct event_key *key __maybe_unused,
			   char *decode __maybe_unused)
{
}
#endif // HAVE_KVM_STAT_SUPPORT
1341 
/* Stub: script discovery is unavailable here; always reports failure. */
int find_scripts(char **scripts_array  __maybe_unused, char **scripts_path_array  __maybe_unused,
		int num  __maybe_unused, int pathlen __maybe_unused)
{
	return -1;
}

/* Stub: no-op, stat summary options have no effect in the python module. */
void perf_stat__set_no_csv_summary(int set __maybe_unused)
{
}

/* Stub: no-op, stat output options have no effect in the python module. */
void perf_stat__set_big_num(int set __maybe_unused)
{
}

/* Stub: scripting backends can't be registered; always fails. */
int script_spec_register(const char *spec __maybe_unused, struct scripting_ops *ops __maybe_unused)
{
	return -1;
}

/* Stub: no per-arch strerrno resolver is available here. */
arch_syscalls__strerrno_t *arch_syscalls__strerrno_function(const char *arch __maybe_unused)
{
	return NULL;
}

/* Stub: kwork bookkeeping lives in builtin code; nothing is added. */
struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork __maybe_unused,
				       struct kwork_class *class __maybe_unused,
				       struct kwork_work *key  __maybe_unused)
{
	return NULL;
}

/* Stub: no-op, instruction fetching for scripts is unavailable here. */
void script_fetch_insn(struct perf_sample *sample __maybe_unused,
		struct thread *thread __maybe_unused,
		struct machine *machine __maybe_unused)
{
}

/* Stub: flag formatting is unavailable; always reports failure. */
int perf_sample__sprintf_flags(u32 flags __maybe_unused, char *str __maybe_unused,
			size_t sz __maybe_unused)
{
	return -1;
}

/* Stub: callstack filtering is unavailable; nothing ever matches. */
bool match_callstack_filter(struct machine *machine __maybe_unused, u64 *callstack __maybe_unused)
{
	return false;
}

/* Stub: lock contention stats are unavailable; lookups find nothing. */
struct lock_stat *lock_stat_find(u64 addr __maybe_unused)
{
	return NULL;
}

/* Stub: lock contention stats are unavailable; nothing is created. */
struct lock_stat *lock_stat_findnew(u64 addr __maybe_unused, const char *name __maybe_unused,
				int flags __maybe_unused)
{
	return NULL;
}

/* Stub: the inject builtin isn't linked in; always fails. */
int cmd_inject(int argc __maybe_unused, const char *argv[] __maybe_unused)
{
	return -1;
}
1405