/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef _TEST_ORDER_COMMON_
#define _TEST_ORDER_COMMON_

#include <stdio.h>
#include <stdbool.h>

#include <rte_cycles.h>
#include <rte_eventdev.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

#include "evt_common.h"
#include "evt_options.h"
#include "evt_test.h"

#define BURST_SIZE 16

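/* Flow ID and per-flow sequence number types carried in mbuf dynamic fields. */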
typedef uint32_t flow_id_t;
typedef uint32_t seqn_t;

struct test_order;

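/* Per-worker lcore context: the event device and port this worker polls. */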
struct worker_data {
	uint8_t dev_id;
	uint8_t port_id;
	struct test_order *t;
};

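/* Producer lcore context: the event port and queue it enqueues new events to. */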
struct prod_data {
	uint8_t dev_id;
	uint8_t port_id;
	uint8_t queue_id;
	struct test_order *t;
};

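/* Shared state for one ordered test run. */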
struct test_order {
	/* Don't change the offset of "err". The signal handler uses this
	 * memory to terminate all lcores' work.
	 */
	int err;
	/*
	 * atomic_* operations are expensive, but since this is a functional
	 * (not performance) test we use them anyway to keep the code simple.
	 */
	rte_atomic64_t outstand_pkts;
	enum evt_test_result result;
	uint32_t nb_flows;
	uint64_t nb_pkts;
	struct rte_mempool *pool;
	int flow_id_dynfield_offset;
	int seqn_dynfield_offset;
	struct prod_data prod;
	struct worker_data worker[EVT_MAX_PORTS];
	uint32_t *producer_flow_seq;
	uint32_t *expected_flow_seq;
	struct evt_options *opt;
} __rte_cache_aligned;

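/*
 * The flow ID and sequence number travel with each packet in mbuf dynamic
 * fields; the offsets are registered during test setup and cached in
 * struct test_order. The helpers below read and write those fields.
 */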
static inline void
order_flow_id_copy_from_mbuf(struct test_order *t, struct rte_event *event)
{
	event->flow_id = *RTE_MBUF_DYNFIELD(event->mbuf,
			t->flow_id_dynfield_offset, flow_id_t *);
}

static inline void
order_flow_id_save(struct test_order *t, flow_id_t flow_id,
		struct rte_mbuf *mbuf, struct rte_event *event)
{
	*RTE_MBUF_DYNFIELD(mbuf,
			t->flow_id_dynfield_offset, flow_id_t *) = flow_id;
	event->flow_id = flow_id;
	event->mbuf = mbuf;
}

static inline seqn_t *
order_mbuf_seqn(struct test_order *t, struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf, t->seqn_dynfield_offset, seqn_t *);
}

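/* One event port per worker lcore, plus one for the producer. */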
static inline int
order_nb_event_ports(struct evt_options *opt)
{
	return evt_nr_active_lcores(opt->wlcores) + 1 /* producer */;
}

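/*
 * Final stage: check that the event's sequence number matches the next
 * expected value for its flow, then retire the packet.
 */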
static __rte_always_inline void
order_process_stage_1(struct test_order *const t,
		struct rte_event *const ev, const uint32_t nb_flows,
		uint32_t *const expected_flow_seq,
		rte_atomic64_t *const outstand_pkts)
{
	const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
	/* Compare the seqn against the expected value. */
	if (*order_mbuf_seqn(t, ev->mbuf) != expected_flow_seq[flow]) {
		evt_err("flow=%x seqn mismatch got=%x expected=%x",
			flow, *order_mbuf_seqn(t, ev->mbuf),
			expected_flow_seq[flow]);
		t->err = true;
		rte_smp_wmb();
	}
	/*
	 * Events from an atomic flow of an event queue can be scheduled to
	 * only a single port at a time. That port is guaranteed exclusive
	 * (atomic) access to the given flow, so expected_flow_seq does not
	 * need to be updated inside a critical section.
	 */
	expected_flow_seq[flow]++;
	rte_pktmbuf_free(ev->mbuf);
	rte_atomic64_sub(outstand_pkts, 1);
}

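/* Report an event delivered on an unexpected queue and abort the test. */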
static __rte_always_inline void
order_process_stage_invalid(struct test_order *const t,
		struct rte_event *const ev)
{
	evt_err("invalid queue %d", ev->queue_id);
	t->err = true;
	rte_smp_wmb();
}

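/*
 * Common prologue for worker loop functions: it unpacks the worker context
 * and caches the hot fields in locals. A minimal single-stage worker built
 * on top of it might look like the sketch below (a hypothetical example;
 * the real workers live in the individual test files):
 *
 *	static int worker(void *arg)
 *	{
 *		ORDER_WORKER_INIT;
 *		struct rte_event ev;
 *
 *		while (t->err == false) {
 *			if (!rte_event_dequeue_burst(dev_id, port, &ev, 1, 0)) {
 *				if (rte_atomic64_read(outstand_pkts) <= 0)
 *					break;
 *				rte_pause();
 *				continue;
 *			}
 *			order_process_stage_1(t, &ev, nb_flows,
 *					expected_flow_seq, outstand_pkts);
 *		}
 *		return 0;
 *	}
 */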
#define ORDER_WORKER_INIT\
	struct worker_data *w = arg;\
	struct test_order *t = w->t;\
	struct evt_options *opt = t->opt;\
	const uint8_t dev_id = w->dev_id;\
	const uint8_t port = w->port_id;\
	const uint32_t nb_flows = t->nb_flows;\
	uint32_t *expected_flow_seq = t->expected_flow_seq;\
	rte_atomic64_t *outstand_pkts = &t->outstand_pkts;\
	if (opt->verbose_level > 1)\
		printf("%s(): lcore %d dev_id %d port=%d\n",\
			__func__, rte_lcore_id(), dev_id, port)

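/* Shared setup/teardown and launch helpers used by the order_* tests. */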
int order_test_result(struct evt_test *test, struct evt_options *opt);
int order_opt_check(struct evt_options *opt);
int order_test_setup(struct evt_test *test, struct evt_options *opt);
int order_mempool_setup(struct evt_test *test, struct evt_options *opt);
int order_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *));
int order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
		uint8_t nb_workers, uint8_t nb_queues);
void order_test_destroy(struct evt_test *test, struct evt_options *opt);
void order_opt_dump(struct evt_options *opt);
void order_mempool_destroy(struct evt_test *test, struct evt_options *opt);
void order_eventdev_destroy(struct evt_test *test, struct evt_options *opt);

#endif /* _TEST_ORDER_COMMON_ */