/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#include <fnmatch.h>
#include <sys/queue.h>
#include <regex.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>

#include "eal_trace.h"

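/* Per-lcore state: the accumulated event size and CTF field string used while
 * a trace point is being registered, and the calling thread's trace buffer.
 */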
RTE_DEFINE_PER_LCORE(volatile int, trace_point_sz);
RTE_DEFINE_PER_LCORE(void *, trace_mem);
static RTE_DEFINE_PER_LCORE(char *, ctf_field);

static struct trace_point_head tp_list = STAILQ_HEAD_INITIALIZER(tp_list);
static struct trace trace = { .args = STAILQ_HEAD_INITIALIZER(trace.args), };

struct trace *
trace_obj_get(void)
{
	return &trace;
}

struct trace_point_head *
trace_list_head_get(void)
{
	return &tp_list;
}

int
eal_trace_init(void)
{
	struct trace_arg *arg;

	/* Trace memory should start 8-byte aligned for natural alignment */
	RTE_BUILD_BUG_ON((offsetof(struct __rte_trace_header, mem) % 8) != 0);

	/* One of the trace point registrations failed */
	if (trace.register_errno) {
		rte_errno = trace.register_errno;
		goto fail;
	}

	if (!STAILQ_EMPTY(&trace.args))
		trace.status = true;

	if (!rte_trace_is_enabled())
		return 0;

	rte_spinlock_init(&trace.lock);

	/* Check for duplicate trace point names */
	if (trace_has_duplicate_entry())
		goto fail;

	/* Generate a version 4 UUID from the total size of the events and the
	 * number of events
	 */
	trace_uuid_generate();

	/* Apply buffer size configuration for trace output */
	trace_bufsz_args_apply();

	/* Generate CTF TDSL metadata */
	if (trace_metadata_create() < 0)
		goto fail;

	/* Create trace directory */
	if (trace_mkdir())
		goto free_meta;

	/* Save current epoch timestamp for future use */
	if (trace_epoch_time_save() < 0)
		goto free_meta;

	/* Apply global configurations */
	STAILQ_FOREACH(arg, &trace.args, next)
		trace_args_apply(arg->val);

	rte_trace_mode_set(trace.mode);

	return 0;

free_meta:
	trace_metadata_destroy();
fail:
	trace_err("failed to initialize trace [%s]", rte_strerror(rte_errno));
	return -rte_errno;
}

void
eal_trace_fini(void)
{
	if (!rte_trace_is_enabled())
		return;
	trace_mem_free();
	trace_metadata_destroy();
	eal_trace_args_free();
}

bool
rte_trace_is_enabled(void)
{
	return trace.status;
}

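/* Set or clear the discard bit of a trace point handle: overwrite mode clears
 * it so the buffer wraps when full, discard mode sets it so new events are
 * dropped instead.
 */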
static void
trace_mode_set(rte_trace_point_t *trace, enum rte_trace_mode mode)
{
	if (mode == RTE_TRACE_MODE_OVERWRITE)
		__atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_DISCARD,
			__ATOMIC_RELEASE);
	else
		__atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_DISCARD,
			__ATOMIC_RELEASE);
}

void
rte_trace_mode_set(enum rte_trace_mode mode)
{
	struct trace_point *tp;

	if (!rte_trace_is_enabled())
		return;

	STAILQ_FOREACH(tp, &tp_list, next)
		trace_mode_set(tp->handle, mode);

	trace.mode = mode;
}

enum rte_trace_mode
rte_trace_mode_get(void)
{
	return trace.mode;
}

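/* A handle is valid only if it is non-NULL and its ID falls within the number
 * of registered trace points.
 */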
static bool
trace_point_is_invalid(rte_trace_point_t *t)
{
	return (t == NULL) || (trace_id_get(t) >= trace.nb_trace_points);
}

bool
rte_trace_point_is_enabled(rte_trace_point_t *trace)
{
	uint64_t val;

	if (trace_point_is_invalid(trace))
		return false;

	val = __atomic_load_n(trace, __ATOMIC_ACQUIRE);
	return (val & __RTE_TRACE_FIELD_ENABLE_MASK) != 0;
}

int
rte_trace_point_enable(rte_trace_point_t *trace)
{
	if (trace_point_is_invalid(trace))
		return -ERANGE;

	__atomic_or_fetch(trace, __RTE_TRACE_FIELD_ENABLE_MASK,
		__ATOMIC_RELEASE);
	return 0;
}

int
rte_trace_point_disable(rte_trace_point_t *trace)
{
	if (trace_point_is_invalid(trace))
		return -ERANGE;

	__atomic_and_fetch(trace, ~__RTE_TRACE_FIELD_ENABLE_MASK,
		__ATOMIC_RELEASE);
	return 0;
}

int
rte_trace_pattern(const char *pattern, bool enable)
{
	struct trace_point *tp;
	int rc = 0, found = 0;

	STAILQ_FOREACH(tp, &tp_list, next) {
		if (fnmatch(pattern, tp->name, 0) == 0) {
			if (enable)
				rc = rte_trace_point_enable(tp->handle);
			else
				rc = rte_trace_point_disable(tp->handle);
			found = 1;
		}
		if (rc < 0)
			return rc;
	}

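	/* rc is zero here; return 1 if at least one trace point matched */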
	return rc | found;
}

int
rte_trace_regexp(const char *regex, bool enable)
{
	struct trace_point *tp;
	int rc = 0, found = 0;
	regex_t r;

	if (regcomp(&r, regex, 0) != 0)
		return -EINVAL;

	STAILQ_FOREACH(tp, &tp_list, next) {
		if (regexec(&r, tp->name, 0, NULL, 0) == 0) {
			if (enable)
				rc = rte_trace_point_enable(tp->handle);
			else
				rc = rte_trace_point_disable(tp->handle);
			found = 1;
		}
		/* Break (rather than return) so the compiled regex is freed */
		if (rc < 0) {
			found = 0;
			break;
		}
	}
	regfree(&r);

	/* rc is negative on error; otherwise report whether any name matched */
	return rc | found;
}

rte_trace_point_t *
rte_trace_point_lookup(const char *name)
{
	struct trace_point *tp;

	if (name == NULL)
		return NULL;

	STAILQ_FOREACH(tp, &tp_list, next)
		if (strncmp(tp->name, name, TRACE_POINT_NAME_SIZE) == 0)
			return tp->handle;

	return NULL;
}

static void
trace_point_dump(FILE *f, struct trace_point *tp)
{
	rte_trace_point_t *handle = tp->handle;

	fprintf(f, "\tid %d, %s, size is %d, %s\n",
		trace_id_get(handle), tp->name,
		(uint16_t)(*handle & __RTE_TRACE_FIELD_SIZE_MASK),
		rte_trace_point_is_enabled(handle) ? "enabled" : "disabled");
}

static void
trace_lcore_mem_dump(FILE *f)
{
	struct trace *trace = trace_obj_get();
	struct __rte_trace_header *header;
	uint32_t count;

	if (trace->nb_trace_mem_list == 0)
		return;

	rte_spinlock_lock(&trace->lock);
	fprintf(f, "nb_trace_mem_list = %d\n", trace->nb_trace_mem_list);
	fprintf(f, "\nTrace mem info\n--------------\n");
	for (count = 0; count < trace->nb_trace_mem_list; count++) {
		header = trace->lcore_meta[count].mem;
		fprintf(f, "\tid %d, mem=%p, area=%s, lcore_id=%d, name=%s\n",
			count, header,
			trace_area_to_string(trace->lcore_meta[count].area),
			header->stream_header.lcore_id,
			header->stream_header.thread_name);
	}
	rte_spinlock_unlock(&trace->lock);
}

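/* Dump the global trace configuration, the per-thread buffer list and every
 * registered trace point to the given stream.
 */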
void
rte_trace_dump(FILE *f)
{
	struct trace_point_head *tp_list = trace_list_head_get();
	struct trace *trace = trace_obj_get();
	struct trace_point *tp;

	fprintf(f, "\nGlobal info\n-----------\n");
	fprintf(f, "status = %s\n",
		rte_trace_is_enabled() ? "enabled" : "disabled");
	fprintf(f, "mode = %s\n",
		trace_mode_to_string(rte_trace_mode_get()));
	fprintf(f, "dir = %s\n", trace->dir);
	fprintf(f, "buffer len = %d\n", trace->buff_len);
	fprintf(f, "number of trace points = %d\n", trace->nb_trace_points);

	trace_lcore_mem_dump(f);
	fprintf(f, "\nTrace point info\n----------------\n");
	STAILQ_FOREACH(tp, tp_list, next)
		trace_point_dump(f, tp);
}

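/* Allocate the calling thread's trace buffer on first use. Hugepage memory is
 * tried first, with a fallback to the heap; the buffer and its origin are
 * recorded in the lcore_meta list so they can be dumped and freed later.
 */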
void
__rte_trace_mem_per_thread_alloc(void)
{
	struct trace *trace = trace_obj_get();
	struct __rte_trace_header *header;
	struct thread_mem_meta *meta;
	uint32_t count;

	if (!rte_trace_is_enabled())
		return;

	if (RTE_PER_LCORE(trace_mem))
		return;

	rte_spinlock_lock(&trace->lock);

	count = trace->nb_trace_mem_list;

	/* Allocate room for storing the thread trace mem meta */
	meta = realloc(trace->lcore_meta,
		sizeof(trace->lcore_meta[0]) * (count + 1));

	/* On failure, leave this thread's trace_mem NULL so the fast path
	 * skips emitting events for it.
	 */
	if (meta == NULL) {
		trace_crit("trace mem meta memory realloc failed");
		header = NULL;
		goto fail;
	}
	trace->lcore_meta = meta;

	/* First attempt from huge page */
	header = eal_malloc_no_trace(NULL, trace_mem_sz(trace->buff_len), 8);
	if (header) {
		trace->lcore_meta[count].area = TRACE_AREA_HUGEPAGE;
		goto found;
	}

	/* Second attempt from heap */
	header = malloc(trace_mem_sz(trace->buff_len));
	if (header == NULL) {
		trace_crit("trace mem malloc attempt failed");
		goto fail;
	}

	/* Second attempt from heap succeeded */
	trace->lcore_meta[count].area = TRACE_AREA_HEAP;

	/* Initialize the trace header */
found:
	header->offset = 0;
	header->len = trace->buff_len;
	header->stream_header.magic = TRACE_CTF_MAGIC;
	rte_uuid_copy(header->stream_header.uuid, trace->uuid);
	header->stream_header.lcore_id = rte_lcore_id();

	/* Store the thread name */
	char *name = header->stream_header.thread_name;
	memset(name, 0, __RTE_TRACE_EMIT_STRING_LEN_MAX);
	rte_thread_getname(pthread_self(), name,
		__RTE_TRACE_EMIT_STRING_LEN_MAX);

	trace->lcore_meta[count].mem = header;
	trace->nb_trace_mem_list++;
fail:
	RTE_PER_LCORE(trace_mem) = header;
	rte_spinlock_unlock(&trace->lock);
}

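/* Free a single per-thread trace buffer according to the area it came from */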
static void
trace_mem_per_thread_free_unlocked(struct thread_mem_meta *meta)
{
	if (meta->area == TRACE_AREA_HUGEPAGE)
		eal_free_no_trace(meta->mem);
	else if (meta->area == TRACE_AREA_HEAP)
		free(meta->mem);
}

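/* Release the calling thread's trace buffer and compact the lcore_meta list */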
void
trace_mem_per_thread_free(void)
{
	struct trace *trace = trace_obj_get();
	struct __rte_trace_header *header;
	uint32_t count;

	header = RTE_PER_LCORE(trace_mem);
	if (header == NULL)
		return;

	rte_spinlock_lock(&trace->lock);
	for (count = 0; count < trace->nb_trace_mem_list; count++) {
		if (trace->lcore_meta[count].mem == header)
			break;
	}
	if (count != trace->nb_trace_mem_list) {
		struct thread_mem_meta *meta = &trace->lcore_meta[count];

		trace_mem_per_thread_free_unlocked(meta);
		if (count != trace->nb_trace_mem_list - 1) {
			memmove(meta, meta + 1,
				sizeof(*meta) *
				(trace->nb_trace_mem_list - count - 1));
		}
		trace->nb_trace_mem_list--;
	}
	rte_spinlock_unlock(&trace->lock);
}

void
trace_mem_free(void)
{
	struct trace *trace = trace_obj_get();
	uint32_t count;

	if (!rte_trace_is_enabled())
		return;

	rte_spinlock_lock(&trace->lock);
	for (count = 0; count < trace->nb_trace_mem_list; count++) {
		trace_mem_per_thread_free_unlocked(&trace->lcore_meta[count]);
	}
	trace->nb_trace_mem_list = 0;
	rte_spinlock_unlock(&trace->lock);
}

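/* Called from a trace point registration function: append one field's CTF
 * type and name to the per-lcore ctf_field string and accumulate the event
 * size in trace_point_sz.
 */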
void
__rte_trace_point_emit_field(size_t sz, const char *in, const char *datatype)
{
	char *field;
	char *fixup;
	int rc;

	fixup = trace_metadata_fixup_field(in);
	if (fixup != NULL)
		in = fixup;
	rc = asprintf(&field, "%s %s %s;\n",
		RTE_PER_LCORE(ctf_field) != NULL ?
		RTE_PER_LCORE(ctf_field) : "",
		datatype, in);
	free(RTE_PER_LCORE(ctf_field));
	free(fixup);
	if (rc == -1) {
		RTE_PER_LCORE(trace_point_sz) = 0;
		RTE_PER_LCORE(ctf_field) = NULL;
		trace_crit("could not allocate CTF field");
		return;
	}
	RTE_PER_LCORE(trace_point_sz) += sz;
	RTE_PER_LCORE(ctf_field) = field;
}

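/* Register a trace point: the handle encodes the event size in its low bits
 * (__RTE_TRACE_FIELD_SIZE_MASK) and the trace point ID starting at
 * __RTE_TRACE_FIELD_ID_SHIFT; enable and discard bits are toggled at runtime.
 */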
int
__rte_trace_point_register(rte_trace_point_t *handle, const char *name,
		void (*register_fn)(void))
{
	struct trace_point *tp;
	uint16_t sz;

	/* Sanity check the arguments */
	if (name == NULL || register_fn == NULL || handle == NULL) {
		trace_err("invalid arguments");
		rte_errno = EINVAL;
		goto fail;
	}

	/* Check the size of the trace point object */
	RTE_PER_LCORE(trace_point_sz) = 0;
	register_fn();
	if (RTE_PER_LCORE(trace_point_sz) == 0) {
		trace_err("missing rte_trace_emit_header() in register fn");
		rte_errno = EBADF;
		goto fail;
	}

	/* Check for size overflow */
	if (RTE_PER_LCORE(trace_point_sz) > UINT16_MAX) {
		trace_err("trace point size overflowed");
		rte_errno = ENOSPC;
		goto fail;
	}

	/* Are we running out of space to store trace points? */
	if (trace.nb_trace_points > UINT16_MAX) {
		trace_err("trace point exceeds the max count");
		rte_errno = ENOSPC;
		goto fail;
	}

	/* Get the size of the trace point */
	sz = RTE_PER_LCORE(trace_point_sz);
	tp = calloc(1, sizeof(struct trace_point));
	if (tp == NULL) {
		trace_err("failed to allocate trace point memory");
		rte_errno = ENOMEM;
		goto fail;
	}

	/* Initialize the trace point */
	if (rte_strscpy(tp->name, name, TRACE_POINT_NAME_SIZE) < 0) {
		trace_err("name is too long");
		goto free;
	}

	/* Copy the accumulated fields description and clear it for the next
	 * trace point.
	 */
	tp->ctf_field = RTE_PER_LCORE(ctf_field);
	RTE_PER_LCORE(ctf_field) = NULL;

	/* Form the trace handle */
	*handle = sz;
	*handle |= trace.nb_trace_points << __RTE_TRACE_FIELD_ID_SHIFT;

	trace.nb_trace_points++;
	tp->handle = handle;

	/* Add the trace point at tail */
	STAILQ_INSERT_TAIL(&tp_list, tp, next);
	__atomic_thread_fence(__ATOMIC_RELEASE);

	/* All good */
	return 0;
free:
	free(tp);
fail:
	if (trace.register_errno == 0)
		trace.register_errno = rte_errno;

	return -rte_errno;
}