/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <inttypes.h>
#include <string.h>

#include <rte_service.h>
#include <rte_service_component.h>

#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>

#include "eal_private.h"

#define RTE_SERVICE_NUM_MAX 64

#define SERVICE_F_REGISTERED    (1 << 0)
#define SERVICE_F_STATS_ENABLED (1 << 1)
#define SERVICE_F_START_CHECK   (1 << 2)
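
/* SERVICE_F_START_CHECK: when set (the default at registration time), the
 * service reports as runnable via rte_service_runstate_get() only if at
 * least one lcore is mapped to it. rte_service_set_runstate_mapped_check()
 * toggles this flag.
 */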

/* runstates for services and lcores, denoting if they are active or not */
#define RUNSTATE_STOPPED 0
#define RUNSTATE_RUNNING 1

/* internal representation of a service */
struct rte_service_spec_impl {
	/* public part of the struct */
	struct rte_service_spec spec;

	/* spin lock that when set indicates a service core is currently
	 * running this service callback. When not set, a core may take the
	 * lock and then run the service callback.
	 */
	rte_spinlock_t execute_lock;

	/* API set/get-able variables */
	int8_t app_runstate;
	int8_t comp_runstate;
	uint8_t internal_flags;

	/* per service statistics */
	/* Indicates how many cores the service is mapped to run on.
	 * It does not indicate the number of cores the service is running
	 * on currently.
	 */
	uint32_t num_mapped_cores;
	uint64_t calls;
	uint64_t cycles_spent;
} __rte_cache_aligned;

/* the internal values of a service core */
struct core_state {
	/* map of service IDs run on this core */
	uint64_t service_mask;
	uint8_t runstate; /* running or stopped */
	uint8_t thread_active; /* indicates when thread is in service_run() */
	uint8_t is_service_core; /* set if core is currently a service core */
	uint8_t service_active_on_lcore[RTE_SERVICE_NUM_MAX];
	uint64_t loops;
	uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
} __rte_cache_aligned;

static uint32_t rte_service_count;
static struct rte_service_spec_impl *rte_services;
static struct core_state *lcore_states;
static uint32_t rte_service_library_initialized;

int32_t
rte_service_init(void)
{
	if (rte_service_library_initialized) {
		RTE_LOG(NOTICE, EAL,
			"service library init() called, init flag %d\n",
			rte_service_library_initialized);
		return -EALREADY;
	}

	rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
			sizeof(struct rte_service_spec_impl),
			RTE_CACHE_LINE_SIZE);
	if (!rte_services) {
		RTE_LOG(ERR, EAL, "error allocating rte services array\n");
		goto fail_mem;
	}

	lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
			sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
	if (!lcore_states) {
		RTE_LOG(ERR, EAL, "error allocating core states array\n");
		goto fail_mem;
	}

	int i;
	int count = 0;
	struct rte_config *cfg = rte_eal_get_configuration();
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_config[i].core_role == ROLE_SERVICE) {
			if ((unsigned int)i == cfg->main_lcore)
				continue;
			rte_service_lcore_add(i);
			count++;
		}
	}

	rte_service_library_initialized = 1;
	return 0;
fail_mem:
	rte_free(rte_services);
	rte_free(lcore_states);
	return -ENOMEM;
}

void
rte_service_finalize(void)
{
	if (!rte_service_library_initialized)
		return;

	rte_service_lcore_reset_all();
	rte_eal_mp_wait_lcore();

	rte_free(rte_services);
	rte_free(lcore_states);

	rte_service_library_initialized = 0;
}

/* returns 1 if service is registered and has not been unregistered
 * Returns 0 if service never registered, or has been unregistered
 */
static inline int
service_valid(uint32_t id)
{
	return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
}

static struct rte_service_spec_impl *
service_get(uint32_t id)
{
	return &rte_services[id];
}

/* validate ID and retrieve service pointer, or return error value */
#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do { \
		if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id)) \
			return retval; \
		service = &rte_services[id]; \
} while (0)
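
/* Illustrative use of the macro above (not part of the build): in an API
 * function taking a service id,
 *
 *	struct rte_service_spec_impl *s;
 *	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
 *
 * returns -EINVAL from the *calling* function when the id is out of range
 * or unregistered, and otherwise points 's' at the service slot.
 */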

/* returns 1 if statistics should be collected for service
 * Returns 0 if statistics should not be collected for service
 */
static inline int
service_stats_enabled(struct rte_service_spec_impl *impl)
{
	return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
}

static inline int
service_mt_safe(struct rte_service_spec_impl *s)
{
	return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
}

int32_t
rte_service_set_stats_enable(uint32_t id, int32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	if (enabled)
		s->internal_flags |= SERVICE_F_STATS_ENABLED;
	else
		s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);

	return 0;
}

int32_t
rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	if (enabled)
		s->internal_flags |= SERVICE_F_START_CHECK;
	else
		s->internal_flags &= ~(SERVICE_F_START_CHECK);

	return 0;
}

uint32_t
rte_service_get_count(void)
{
	return rte_service_count;
}

int32_t
rte_service_get_by_name(const char *name, uint32_t *service_id)
{
	if (!service_id)
		return -EINVAL;

	int i;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (service_valid(i) &&
				strcmp(name, rte_services[i].spec.name) == 0) {
			*service_id = i;
			return 0;
		}
	}

	return -ENODEV;
}
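
/* Illustrative lookup from application code (service name hypothetical):
 *
 *	uint32_t id;
 *	if (rte_service_get_by_name("my_service", &id) == 0)
 *		rte_service_runstate_set(id, 1);
 */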

const char *
rte_service_get_name(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
	return s->spec.name;
}

int32_t
rte_service_probe_capability(uint32_t id, uint32_t capability)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
	return !!(s->spec.capabilities & capability);
}

int32_t
rte_service_component_register(const struct rte_service_spec *spec,
		uint32_t *id_ptr)
{
	uint32_t i;
	int32_t free_slot = -1;

	if (spec->callback == NULL || strlen(spec->name) == 0)
		return -EINVAL;

	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i)) {
			free_slot = i;
			break;
		}
	}

	if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
		return -ENOSPC;

	struct rte_service_spec_impl *s = &rte_services[free_slot];
	s->spec = *spec;
	s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;

	rte_service_count++;

	if (id_ptr)
		*id_ptr = free_slot;

	return 0;
}
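
/* Illustrative component registration sketch (callback and names are
 * hypothetical, not part of this file):
 *
 *	static int32_t my_service_cb(void *userdata)
 *	{
 *		// one short iteration of work, then return
 *		return 0;
 *	}
 *
 *	struct rte_service_spec spec = {
 *		.name = "my_service",
 *		.callback = my_service_cb,
 *		.callback_userdata = NULL,
 *		.capabilities = 0, // or RTE_SERVICE_CAP_MT_SAFE
 *	};
 *	uint32_t id;
 *	int32_t ret = rte_service_component_register(&spec, &id);
 */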

int32_t
rte_service_component_unregister(uint32_t id)
{
	uint32_t i;
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	rte_service_count--;

	s->internal_flags &= ~(SERVICE_F_REGISTERED);

	/* clear the run-bit in all cores */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		lcore_states[i].service_mask &= ~(UINT64_C(1) << id);

	memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));

	return 0;
}

int32_t
rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* comp_runstate acts as the guard variable. Use store-release
	 * memory order. This synchronizes with load-acquire in
	 * service_run and service_runstate_get functions.
	 */
	if (runstate)
		__atomic_store_n(&s->comp_runstate, RUNSTATE_RUNNING,
			__ATOMIC_RELEASE);
	else
		__atomic_store_n(&s->comp_runstate, RUNSTATE_STOPPED,
			__ATOMIC_RELEASE);

	return 0;
}

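/* A service executes only when both gates are open: the component runstate
 * (set above, by the component that registered the service) and the
 * application runstate (set below) must both be RUNSTATE_RUNNING; see
 * service_run().
 */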
int32_t
rte_service_runstate_set(uint32_t id, uint32_t runstate)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* app_runstate acts as the guard variable. Use store-release
	 * memory order. This synchronizes with load-acquire in
	 * service_run and rte_service_runstate_get.
	 */
	if (runstate)
		__atomic_store_n(&s->app_runstate, RUNSTATE_RUNNING,
			__ATOMIC_RELEASE);
	else
		__atomic_store_n(&s->app_runstate, RUNSTATE_STOPPED,
			__ATOMIC_RELEASE);

	return 0;
}

int32_t
rte_service_runstate_get(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* comp_runstate and app_runstate act as the guard variables.
	 * Use load-acquire memory order. This synchronizes with
	 * store-release in service state set functions.
	 */
	if (__atomic_load_n(&s->comp_runstate, __ATOMIC_ACQUIRE) ==
			RUNSTATE_RUNNING &&
	    __atomic_load_n(&s->app_runstate, __ATOMIC_ACQUIRE) ==
			RUNSTATE_RUNNING) {
		int check_disabled = !(s->internal_flags &
			SERVICE_F_START_CHECK);
		int lcore_mapped = (__atomic_load_n(&s->num_mapped_cores,
			__ATOMIC_RELAXED) > 0);

		return (check_disabled | lcore_mapped);
	} else
		return 0;
}

static inline void
service_runner_do_callback(struct rte_service_spec_impl *s,
		struct core_state *cs, uint32_t service_idx)
{
	void *userdata = s->spec.callback_userdata;

	if (service_stats_enabled(s)) {
		uint64_t start = rte_rdtsc();
		s->spec.callback(userdata);
		uint64_t end = rte_rdtsc();
		s->cycles_spent += end - start;
		cs->calls_per_service[service_idx]++;
		s->calls++;
	} else
		s->spec.callback(userdata);
}
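
/* Note: for an MT-safe service mapped to multiple lcores, the statistics
 * updates above are not serialized by execute_lock, so calls and
 * cycles_spent may under-count when callbacks run concurrently.
 */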
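/* Run one iteration of service 'i' on this core if both runstates are
 * RUNNING and the service is present in 'service_mask'. Returns 0 on
 * success, -ENOEXEC if the service is not runnable on this core, and
 * -EBUSY if an MT-unsafe service is already executing on another lcore.
 */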
/* Expects the service 's' is valid. */
static int32_t
service_run(uint32_t i, struct core_state *cs, uint64_t service_mask,
		struct rte_service_spec_impl *s, uint32_t serialize_mt_unsafe)
{
	if (!s)
		return -EINVAL;

	/* comp_runstate and app_runstate act as the guard variables.
	 * Use load-acquire memory order. This synchronizes with
	 * store-release in service state set functions.
	 */
	if (__atomic_load_n(&s->comp_runstate, __ATOMIC_ACQUIRE) !=
			RUNSTATE_RUNNING ||
	    __atomic_load_n(&s->app_runstate, __ATOMIC_ACQUIRE) !=
			RUNSTATE_RUNNING ||
	    !(service_mask & (UINT64_C(1) << i))) {
		cs->service_active_on_lcore[i] = 0;
		return -ENOEXEC;
	}

	cs->service_active_on_lcore[i] = 1;

	if ((service_mt_safe(s) == 0) && (serialize_mt_unsafe == 1)) {
		if (!rte_spinlock_trylock(&s->execute_lock))
			return -EBUSY;

		service_runner_do_callback(s, cs, i);
		rte_spinlock_unlock(&s->execute_lock);
	} else
		service_runner_do_callback(s, cs, i);

	return 0;
}

int32_t
rte_service_may_be_active(uint32_t id)
{
	uint32_t ids[RTE_MAX_LCORE] = {0};
	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
	int i;

	if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))
		return -EINVAL;

	for (i = 0; i < lcore_count; i++) {
		if (lcore_states[ids[i]].service_active_on_lcore[id])
			return 1;
	}

	return 0;
}

int32_t
rte_service_run_iter_on_app_lcore(uint32_t id, uint32_t serialize_mt_unsafe)
{
	struct core_state *cs = &lcore_states[rte_lcore_id()];
	struct rte_service_spec_impl *s;

	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	/* Increment num_mapped_cores to reflect that this core is
	 * now mapped and capable of running the service.
	 */
	__atomic_add_fetch(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);

	int ret = service_run(id, cs, UINT64_MAX, s, serialize_mt_unsafe);

	__atomic_sub_fetch(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);

	return ret;
}
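
/* Illustrative application-lcore polling loop ('keep_running' is a
 * hypothetical application flag):
 *
 *	while (keep_running)
 *		rte_service_run_iter_on_app_lcore(id, 1);
 */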

static int32_t
service_runner_func(void *arg)
{
	RTE_SET_USED(arg);
	uint32_t i;
	const int lcore = rte_lcore_id();
	struct core_state *cs = &lcore_states[lcore];

	__atomic_store_n(&cs->thread_active, 1, __ATOMIC_SEQ_CST);

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	while (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) ==
			RUNSTATE_RUNNING) {
		const uint64_t service_mask = cs->service_mask;

		for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
			if (!service_valid(i))
				continue;
			/* return value ignored as no change to code flow */
			service_run(i, cs, service_mask, service_get(i), 1);
		}

		cs->loops++;
	}

	/* Use SEQ CST memory ordering to avoid any re-ordering around
	 * this store, ensuring that once this store is visible, the service
	 * lcore thread really is done in service cores code.
	 */
	__atomic_store_n(&cs->thread_active, 0, __ATOMIC_SEQ_CST);
	return 0;
}

int32_t
rte_service_lcore_may_be_active(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE || !lcore_states[lcore].is_service_core)
		return -EINVAL;

	/* Load thread_active using ACQUIRE to avoid instructions dependent on
	 * the result being re-ordered before this load completes.
	 */
	return __atomic_load_n(&lcore_states[lcore].thread_active,
			__ATOMIC_ACQUIRE);
}

int32_t
rte_service_lcore_count(void)
{
	int32_t count = 0;
	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++)
		count += lcore_states[i].is_service_core;
	return count;
}

int32_t
rte_service_lcore_list(uint32_t array[], uint32_t n)
{
	uint32_t count = rte_service_lcore_count();
	if (count > n)
		return -ENOMEM;

	if (!array)
		return -EINVAL;

	uint32_t i;
	uint32_t idx = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct core_state *cs = &lcore_states[i];
		if (cs->is_service_core) {
			array[idx] = i;
			idx++;
		}
	}

	return count;
}

int32_t
rte_service_lcore_count_services(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	return __builtin_popcountll(cs->service_mask);
}

int32_t
rte_service_start_with_defaults(void)
{
	/* create a default mapping from cores to services, then start the
	 * services to make them transparent to unaware applications.
	 */
	uint32_t i;
	int ret;
	uint32_t count = rte_service_get_count();

	int32_t lcore_iter = 0;
	uint32_t ids[RTE_MAX_LCORE] = {0};
	int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);

	if (lcore_count == 0)
		return -ENOTSUP;

	for (i = 0; (int)i < lcore_count; i++)
		rte_service_lcore_start(ids[i]);

	for (i = 0; i < count; i++) {
		/* do 1:1 core mapping here, with each service getting
		 * assigned a single core by default. Adding multiple services
		 * multiplexes them onto a single core, or maps 1:1 if there
		 * are as many service-cores as services.
		 */
		ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
		if (ret)
			return -ENODEV;

		lcore_iter++;
		if (lcore_iter >= lcore_count)
			lcore_iter = 0;

		ret = rte_service_runstate_set(i, 1);
		if (ret)
			return -ENOEXEC;
	}

	return 0;
}

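/* Apply (when 'set' is non-NULL) and/or report (when 'enabled' is non-NULL)
 * the mapping of service 'sid' to core 'lcore', keeping num_mapped_cores in
 * sync with the per-lcore service_mask.
 */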
static int32_t
service_update(uint32_t sid, uint32_t lcore, uint32_t *set, uint32_t *enabled)
{
	/* validate ID, or return error value */
	if (sid >= RTE_SERVICE_NUM_MAX || !service_valid(sid) ||
	    lcore >= RTE_MAX_LCORE || !lcore_states[lcore].is_service_core)
		return -EINVAL;

	uint64_t sid_mask = UINT64_C(1) << sid;
	if (set) {
		uint64_t lcore_mapped = lcore_states[lcore].service_mask &
			sid_mask;

		if (*set && !lcore_mapped) {
			lcore_states[lcore].service_mask |= sid_mask;
			__atomic_add_fetch(&rte_services[sid].num_mapped_cores,
				1, __ATOMIC_RELAXED);
		}
		if (!*set && lcore_mapped) {
			lcore_states[lcore].service_mask &= ~(sid_mask);
			__atomic_sub_fetch(&rte_services[sid].num_mapped_cores,
				1, __ATOMIC_RELAXED);
		}
	}

	if (enabled)
		*enabled = !!(lcore_states[lcore].service_mask & (sid_mask));

	return 0;
}

int32_t
rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
{
	uint32_t on = enabled > 0;
	return service_update(id, lcore, &on, 0);
}

int32_t
rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
{
	uint32_t enabled;
	int ret = service_update(id, lcore, 0, &enabled);
	if (ret == 0)
		return enabled;
	return ret;
}

static void
set_lcore_state(uint32_t lcore, int32_t state)
{
	/* mark core state in hugepage backed config */
	struct rte_config *cfg = rte_eal_get_configuration();
	cfg->lcore_role[lcore] = state;

	/* mark state in process local lcore_config */
	lcore_config[lcore].core_role = state;

	/* update per-lcore optimized state tracking */
	lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
}

int32_t
rte_service_lcore_reset_all(void)
{
	/* loop over cores, reset all to mask 0 */
	uint32_t i;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_states[i].is_service_core) {
			lcore_states[i].service_mask = 0;
			set_lcore_state(i, ROLE_RTE);
			/* runstate acts as the guard variable. Use
			 * store-release memory order here to synchronize
			 * with load-acquire in runstate read functions.
			 */
			__atomic_store_n(&lcore_states[i].runstate,
				RUNSTATE_STOPPED, __ATOMIC_RELEASE);
		}
	}
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
		__atomic_store_n(&rte_services[i].num_mapped_cores, 0,
			__ATOMIC_RELAXED);

	return 0;
}

int32_t
rte_service_lcore_add(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;
	if (lcore_states[lcore].is_service_core)
		return -EALREADY;

	set_lcore_state(lcore, ROLE_SERVICE);

	/* ensure that after adding a core the mask and state are defaults */
	lcore_states[lcore].service_mask = 0;
	/* Use store-release memory order here to synchronize with
	 * load-acquire in runstate read functions.
	 */
	__atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
		__ATOMIC_RELEASE);

	return rte_eal_wait_lcore(lcore);
}

int32_t
rte_service_lcore_del(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	if (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) !=
			RUNSTATE_STOPPED)
		return -EBUSY;

	set_lcore_state(lcore, ROLE_RTE);

	rte_smp_wmb();
	return 0;
}

int32_t
rte_service_lcore_start(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	struct core_state *cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -EINVAL;

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	if (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) ==
			RUNSTATE_RUNNING)
		return -EALREADY;

	/* set core to run state first, and then launch, otherwise it will
	 * return immediately as runstate keeps it in the service poll loop
	 */
	/* Use store-release memory order here to synchronize with
	 * load-acquire in runstate read functions.
	 */
	__atomic_store_n(&cs->runstate, RUNSTATE_RUNNING, __ATOMIC_RELEASE);

	int ret = rte_eal_remote_launch(service_runner_func, 0, lcore);
	/* returns -EBUSY if the core is already launched, 0 on success */
	return ret;
}
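
/* Illustrative service-core bring-up from application code (lcore id 3 and
 * 'service_id' are hypothetical):
 *
 *	rte_service_lcore_add(3);
 *	rte_service_map_lcore_set(service_id, 3, 1);
 *	rte_service_runstate_set(service_id, 1);
 *	rte_service_lcore_start(3);
 */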

int32_t
rte_service_lcore_stop(uint32_t lcore)
{
	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	/* runstate acts as the guard variable. Use load-acquire
	 * memory order here to synchronize with store-release
	 * in runstate update functions.
	 */
	if (__atomic_load_n(&lcore_states[lcore].runstate, __ATOMIC_ACQUIRE) ==
			RUNSTATE_STOPPED)
		return -EALREADY;

	uint32_t i;
	uint64_t service_mask = lcore_states[lcore].service_mask;
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		int32_t enabled = service_mask & (UINT64_C(1) << i);
		int32_t service_running = rte_service_runstate_get(i);
		int32_t only_core = (1 ==
			__atomic_load_n(&rte_services[i].num_mapped_cores,
				__ATOMIC_RELAXED));

		/* if the core is mapped, and the service is running, and this
		 * is the only core that is mapped, the service would cease to
		 * run if this core stopped, so fail instead.
		 */
		if (enabled && service_running && only_core)
			return -EBUSY;
	}

	/* Use store-release memory order here to synchronize with
	 * load-acquire in runstate read functions.
	 */
	__atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
		__ATOMIC_RELEASE);

	return 0;
}

int32_t
rte_service_attr_get(uint32_t id, uint32_t attr_id, uint64_t *attr_value)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	if (!attr_value)
		return -EINVAL;

	switch (attr_id) {
	case RTE_SERVICE_ATTR_CYCLES:
		*attr_value = s->cycles_spent;
		return 0;
	case RTE_SERVICE_ATTR_CALL_COUNT:
		*attr_value = s->calls;
		return 0;
	default:
		return -EINVAL;
	}
}
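
/* Illustrative stats read; note stats are only collected after
 * rte_service_set_stats_enable(id, 1):
 *
 *	uint64_t cycles = 0, calls = 0;
 *	rte_service_attr_get(id, RTE_SERVICE_ATTR_CYCLES, &cycles);
 *	rte_service_attr_get(id, RTE_SERVICE_ATTR_CALL_COUNT, &calls);
 */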

int32_t
rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
		uint64_t *attr_value)
{
	struct core_state *cs;

	if (lcore >= RTE_MAX_LCORE || !attr_value)
		return -EINVAL;

	cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	switch (attr_id) {
	case RTE_SERVICE_LCORE_ATTR_LOOPS:
		*attr_value = cs->loops;
		return 0;
	default:
		return -EINVAL;
	}
}

int32_t
rte_service_attr_reset_all(uint32_t id)
{
	struct rte_service_spec_impl *s;
	SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

	s->cycles_spent = 0;
	s->calls = 0;
	return 0;
}

int32_t
rte_service_lcore_attr_reset_all(uint32_t lcore)
{
	struct core_state *cs;

	if (lcore >= RTE_MAX_LCORE)
		return -EINVAL;

	cs = &lcore_states[lcore];
	if (!cs->is_service_core)
		return -ENOTSUP;

	cs->loops = 0;

	return 0;
}

static void
service_dump_one(FILE *f, struct rte_service_spec_impl *s)
{
	/* avoid divide by zero */
	uint64_t calls = 1;

	if (s->calls != 0)
		calls = s->calls;
	fprintf(f, " %s: stats %d\tcalls %"PRIu64"\tcycles %"
			PRIu64"\tavg: %"PRIu64"\n",
			s->spec.name, service_stats_enabled(s), s->calls,
			s->cycles_spent, s->cycles_spent / calls);
}

static void
service_dump_calls_per_lcore(FILE *f, uint32_t lcore)
{
	uint32_t i;
	struct core_state *cs = &lcore_states[lcore];

877 fprintf(f, "%02d\t", lcore);
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i))
			continue;
		fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
	}
	fprintf(f, "\n");
}

int32_t
rte_service_dump(FILE *f, uint32_t id)
{
	uint32_t i;
	int print_one = (id != UINT32_MAX);

	/* print only the specified service */
	if (print_one) {
		struct rte_service_spec_impl *s;
		SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
		fprintf(f, "Service %s Summary\n", s->spec.name);
		service_dump_one(f, s);
		return 0;
	}

	/* print all services, as UINT32_MAX was passed as id */
	fprintf(f, "Services Summary\n");
	for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
		if (!service_valid(i))
			continue;
		service_dump_one(f, &rte_services[i]);
	}

	fprintf(f, "Service Cores Summary\n");
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (lcore_config[i].core_role != ROLE_SERVICE)
			continue;

		service_dump_calls_per_lcore(f, i);
	}

	return 0;
}