1d30ea906Sjfb8856606 /* SPDX-License-Identifier: BSD-3-Clause
2d30ea906Sjfb8856606  * Copyright(c) 2017 Intel Corporation
3d30ea906Sjfb8856606  */
4d30ea906Sjfb8856606 
5d30ea906Sjfb8856606 #include <stdio.h>
6d30ea906Sjfb8856606 #include <inttypes.h>
7d30ea906Sjfb8856606 #include <math.h>
8d30ea906Sjfb8856606 
9d30ea906Sjfb8856606 #include <rte_eal.h>
10d30ea906Sjfb8856606 #include <rte_common.h>
11d30ea906Sjfb8856606 #include <rte_dev.h>
12d30ea906Sjfb8856606 #include <rte_launch.h>
13d30ea906Sjfb8856606 #include <rte_bbdev.h>
14d30ea906Sjfb8856606 #include <rte_cycles.h>
15d30ea906Sjfb8856606 #include <rte_lcore.h>
16d30ea906Sjfb8856606 #include <rte_malloc.h>
17d30ea906Sjfb8856606 #include <rte_random.h>
18d30ea906Sjfb8856606 #include <rte_hexdump.h>
19d30ea906Sjfb8856606 
20d30ea906Sjfb8856606 #include "main.h"
21d30ea906Sjfb8856606 #include "test_bbdev_vector.h"
22d30ea906Sjfb8856606 
23d30ea906Sjfb8856606 #define GET_SOCKET(socket_id) (((socket_id) == SOCKET_ID_ANY) ? 0 : (socket_id))
24d30ea906Sjfb8856606 
25d30ea906Sjfb8856606 #define MAX_QUEUES RTE_MAX_LCORE
26d30ea906Sjfb8856606 
27d30ea906Sjfb8856606 #define OPS_CACHE_SIZE 256U
28d30ea906Sjfb8856606 #define OPS_POOL_SIZE_MIN 511U /* 0.5K per queue */
29d30ea906Sjfb8856606 
30d30ea906Sjfb8856606 #define SYNC_WAIT 0
31d30ea906Sjfb8856606 #define SYNC_START 1
32d30ea906Sjfb8856606 
33d30ea906Sjfb8856606 #define INVALID_QUEUE_ID -1
34d30ea906Sjfb8856606 
35d30ea906Sjfb8856606 static struct test_bbdev_vector test_vector;
36d30ea906Sjfb8856606 
37d30ea906Sjfb8856606 /* Switch between PMD and Interrupt for throughput TC */
38d30ea906Sjfb8856606 static bool intr_enabled;
39d30ea906Sjfb8856606 
40d30ea906Sjfb8856606 /* Represents tested active devices */
/* Represents tested active devices */
static struct active_device {
	const char *driver_name;	/* driver name from rte_bbdev_info */
	uint8_t dev_id;			/* bbdev device id */
	uint16_t supported_ops;		/* bitmap indexed by enum rte_bbdev_op_type */
	uint16_t queue_ids[MAX_QUEUES];	/* ids of the queues configured on this dev */
	uint16_t nb_queues;		/* number of successfully configured queues */
	struct rte_mempool *ops_mempool;	/* pool of bbdev operations */
	struct rte_mempool *in_mbuf_pool;	/* mbufs for input data */
	struct rte_mempool *hard_out_mbuf_pool;	/* mbufs for hard output data */
	struct rte_mempool *soft_out_mbuf_pool;	/* mbufs for soft output data */
} active_devs[RTE_BBDEV_MAX_DEVS];
52d30ea906Sjfb8856606 
53d30ea906Sjfb8856606 static uint8_t nb_active_devs;
54d30ea906Sjfb8856606 
55d30ea906Sjfb8856606 /* Data buffers used by BBDEV ops */
struct test_buffers {
	struct rte_bbdev_op_data *inputs;	/* per-op input buffers */
	struct rte_bbdev_op_data *hard_outputs;	/* per-op hard output buffers */
	struct rte_bbdev_op_data *soft_outputs;	/* per-op soft output buffers */
};
61d30ea906Sjfb8856606 
62d30ea906Sjfb8856606 /* Operation parameters specific for given test case */
/* Operation parameters specific for given test case */
struct test_op_params {
	struct rte_mempool *mp;			/* ops mempool used by the test */
	struct rte_bbdev_dec_op *ref_dec_op;	/* reference decode operation */
	struct rte_bbdev_enc_op *ref_enc_op;	/* reference encode operation */
	uint16_t burst_sz;			/* enqueue/dequeue burst size */
	uint16_t num_to_process;		/* total number of ops to process */
	uint16_t num_lcores;			/* number of worker lcores */
	int vector_mask;			/* mask of vector checks to apply */
	rte_atomic16_t sync;			/* SYNC_WAIT/SYNC_START worker gate */
	/* buffers per [socket][queue]; socket dim allows NUMA-local data */
	struct test_buffers q_bufs[RTE_MAX_NUMA_NODES][MAX_QUEUES];
};
74d30ea906Sjfb8856606 
75d30ea906Sjfb8856606 /* Contains per lcore params */
/* Contains per lcore params */
struct thread_params {
	uint8_t dev_id;			/* device under test for this lcore */
	uint16_t queue_id;		/* queue this lcore drives */
	uint64_t start_time;		/* measurement start timestamp */
	double mops;			/* presumably millions of ops/s — confirm against usage */
	double mbps;			/* presumably megabits/s — confirm against usage */
	rte_atomic16_t nb_dequeued;	/* ops dequeued so far (shared with ISR path) */
	rte_atomic16_t processing_status;	/* result reported by the worker */
	struct test_op_params *op_params;	/* shared test parameters */
};
86d30ea906Sjfb8856606 
87d30ea906Sjfb8856606 #ifdef RTE_BBDEV_OFFLOAD_COST
88d30ea906Sjfb8856606 /* Stores time statistics */
/* Stores time statistics.
 * NOTE(review): values are presumably in TSC cycles (rte_cycles.h is
 * included) — confirm against the code that fills this struct.
 */
struct test_time_stats {
	/* Stores software enqueue total working time */
	uint64_t enq_sw_tot_time;
	/* Stores minimum value of software enqueue working time */
	uint64_t enq_sw_min_time;
	/* Stores maximum value of software enqueue working time */
	uint64_t enq_sw_max_time;
	/* Stores turbo enqueue total working time */
	uint64_t enq_tur_tot_time;
	/* Stores minimum value of turbo enqueue working time */
	uint64_t enq_tur_min_time;
	/* Stores maximum value of turbo enqueue working time */
	uint64_t enq_tur_max_time;
	/* Stores dequeue total working time */
	uint64_t deq_tot_time;
	/* Stores minimum value of dequeue working time */
	uint64_t deq_min_time;
	/* Stores maximum value of dequeue working time */
	uint64_t deq_max_time;
};
109d30ea906Sjfb8856606 #endif
110d30ea906Sjfb8856606 
/* Signature shared by all test cases: run the test on one active device
 * with the given operation parameters; returns a TEST_* status code.
 */
typedef int (test_case_function)(struct active_device *ad,
		struct test_op_params *op_params);
113d30ea906Sjfb8856606 
114d30ea906Sjfb8856606 static inline void
115d30ea906Sjfb8856606 set_avail_op(struct active_device *ad, enum rte_bbdev_op_type op_type)
116d30ea906Sjfb8856606 {
117d30ea906Sjfb8856606 	ad->supported_ops |= (1 << op_type);
118d30ea906Sjfb8856606 }
119d30ea906Sjfb8856606 
120d30ea906Sjfb8856606 static inline bool
121d30ea906Sjfb8856606 is_avail_op(struct active_device *ad, enum rte_bbdev_op_type op_type)
122d30ea906Sjfb8856606 {
123d30ea906Sjfb8856606 	return ad->supported_ops & (1 << op_type);
124d30ea906Sjfb8856606 }
125d30ea906Sjfb8856606 
/* Return true when every requested flag is also present. */
static inline bool
flags_match(uint32_t flags_req, uint32_t flags_present)
{
	return (flags_req & ~flags_present) == 0;
}
131d30ea906Sjfb8856606 
132d30ea906Sjfb8856606 static void
133d30ea906Sjfb8856606 clear_soft_out_cap(uint32_t *op_flags)
134d30ea906Sjfb8856606 {
135d30ea906Sjfb8856606 	*op_flags &= ~RTE_BBDEV_TURBO_SOFT_OUTPUT;
136d30ea906Sjfb8856606 	*op_flags &= ~RTE_BBDEV_TURBO_POS_LLR_1_BIT_SOFT_OUT;
137d30ea906Sjfb8856606 	*op_flags &= ~RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT;
138d30ea906Sjfb8856606 }
139d30ea906Sjfb8856606 
140d30ea906Sjfb8856606 static int
141d30ea906Sjfb8856606 check_dev_cap(const struct rte_bbdev_info *dev_info)
142d30ea906Sjfb8856606 {
143d30ea906Sjfb8856606 	unsigned int i;
144d30ea906Sjfb8856606 	unsigned int nb_inputs, nb_soft_outputs, nb_hard_outputs;
145d30ea906Sjfb8856606 	const struct rte_bbdev_op_cap *op_cap = dev_info->drv.capabilities;
146d30ea906Sjfb8856606 
147d30ea906Sjfb8856606 	nb_inputs = test_vector.entries[DATA_INPUT].nb_segments;
148d30ea906Sjfb8856606 	nb_soft_outputs = test_vector.entries[DATA_SOFT_OUTPUT].nb_segments;
149d30ea906Sjfb8856606 	nb_hard_outputs = test_vector.entries[DATA_HARD_OUTPUT].nb_segments;
150d30ea906Sjfb8856606 
151d30ea906Sjfb8856606 	for (i = 0; op_cap->type != RTE_BBDEV_OP_NONE; ++i, ++op_cap) {
152d30ea906Sjfb8856606 		if (op_cap->type != test_vector.op_type)
153d30ea906Sjfb8856606 			continue;
154d30ea906Sjfb8856606 
155d30ea906Sjfb8856606 		if (op_cap->type == RTE_BBDEV_OP_TURBO_DEC) {
156d30ea906Sjfb8856606 			const struct rte_bbdev_op_cap_turbo_dec *cap =
157d30ea906Sjfb8856606 					&op_cap->cap.turbo_dec;
158d30ea906Sjfb8856606 			/* Ignore lack of soft output capability, just skip
159d30ea906Sjfb8856606 			 * checking if soft output is valid.
160d30ea906Sjfb8856606 			 */
161d30ea906Sjfb8856606 			if ((test_vector.turbo_dec.op_flags &
162d30ea906Sjfb8856606 					RTE_BBDEV_TURBO_SOFT_OUTPUT) &&
163d30ea906Sjfb8856606 					!(cap->capability_flags &
164d30ea906Sjfb8856606 					RTE_BBDEV_TURBO_SOFT_OUTPUT)) {
165d30ea906Sjfb8856606 				printf(
166d30ea906Sjfb8856606 					"WARNING: Device \"%s\" does not support soft output - soft output flags will be ignored.\n",
167d30ea906Sjfb8856606 					dev_info->dev_name);
168d30ea906Sjfb8856606 				clear_soft_out_cap(
169d30ea906Sjfb8856606 					&test_vector.turbo_dec.op_flags);
170d30ea906Sjfb8856606 			}
171d30ea906Sjfb8856606 
172d30ea906Sjfb8856606 			if (!flags_match(test_vector.turbo_dec.op_flags,
173d30ea906Sjfb8856606 					cap->capability_flags))
174d30ea906Sjfb8856606 				return TEST_FAILED;
175d30ea906Sjfb8856606 			if (nb_inputs > cap->num_buffers_src) {
176d30ea906Sjfb8856606 				printf("Too many inputs defined: %u, max: %u\n",
177d30ea906Sjfb8856606 					nb_inputs, cap->num_buffers_src);
178d30ea906Sjfb8856606 				return TEST_FAILED;
179d30ea906Sjfb8856606 			}
180d30ea906Sjfb8856606 			if (nb_soft_outputs > cap->num_buffers_soft_out &&
181d30ea906Sjfb8856606 					(test_vector.turbo_dec.op_flags &
182d30ea906Sjfb8856606 					RTE_BBDEV_TURBO_SOFT_OUTPUT)) {
183d30ea906Sjfb8856606 				printf(
184d30ea906Sjfb8856606 					"Too many soft outputs defined: %u, max: %u\n",
185d30ea906Sjfb8856606 						nb_soft_outputs,
186d30ea906Sjfb8856606 						cap->num_buffers_soft_out);
187d30ea906Sjfb8856606 				return TEST_FAILED;
188d30ea906Sjfb8856606 			}
189d30ea906Sjfb8856606 			if (nb_hard_outputs > cap->num_buffers_hard_out) {
190d30ea906Sjfb8856606 				printf(
191d30ea906Sjfb8856606 					"Too many hard outputs defined: %u, max: %u\n",
192d30ea906Sjfb8856606 						nb_hard_outputs,
193d30ea906Sjfb8856606 						cap->num_buffers_hard_out);
194d30ea906Sjfb8856606 				return TEST_FAILED;
195d30ea906Sjfb8856606 			}
196d30ea906Sjfb8856606 			if (intr_enabled && !(cap->capability_flags &
197d30ea906Sjfb8856606 					RTE_BBDEV_TURBO_DEC_INTERRUPTS)) {
198d30ea906Sjfb8856606 				printf(
199d30ea906Sjfb8856606 					"Dequeue interrupts are not supported!\n");
200d30ea906Sjfb8856606 				return TEST_FAILED;
201d30ea906Sjfb8856606 			}
202d30ea906Sjfb8856606 
203d30ea906Sjfb8856606 			return TEST_SUCCESS;
204d30ea906Sjfb8856606 		} else if (op_cap->type == RTE_BBDEV_OP_TURBO_ENC) {
205d30ea906Sjfb8856606 			const struct rte_bbdev_op_cap_turbo_enc *cap =
206d30ea906Sjfb8856606 					&op_cap->cap.turbo_enc;
207d30ea906Sjfb8856606 
208d30ea906Sjfb8856606 			if (!flags_match(test_vector.turbo_enc.op_flags,
209d30ea906Sjfb8856606 					cap->capability_flags))
210d30ea906Sjfb8856606 				return TEST_FAILED;
211d30ea906Sjfb8856606 			if (nb_inputs > cap->num_buffers_src) {
212d30ea906Sjfb8856606 				printf("Too many inputs defined: %u, max: %u\n",
213d30ea906Sjfb8856606 					nb_inputs, cap->num_buffers_src);
214d30ea906Sjfb8856606 				return TEST_FAILED;
215d30ea906Sjfb8856606 			}
216d30ea906Sjfb8856606 			if (nb_hard_outputs > cap->num_buffers_dst) {
217d30ea906Sjfb8856606 				printf(
218d30ea906Sjfb8856606 					"Too many hard outputs defined: %u, max: %u\n",
219d30ea906Sjfb8856606 					nb_hard_outputs, cap->num_buffers_src);
220d30ea906Sjfb8856606 				return TEST_FAILED;
221d30ea906Sjfb8856606 			}
222d30ea906Sjfb8856606 			if (intr_enabled && !(cap->capability_flags &
223d30ea906Sjfb8856606 					RTE_BBDEV_TURBO_ENC_INTERRUPTS)) {
224d30ea906Sjfb8856606 				printf(
225d30ea906Sjfb8856606 					"Dequeue interrupts are not supported!\n");
226d30ea906Sjfb8856606 				return TEST_FAILED;
227d30ea906Sjfb8856606 			}
228d30ea906Sjfb8856606 
229d30ea906Sjfb8856606 			return TEST_SUCCESS;
230d30ea906Sjfb8856606 		}
231d30ea906Sjfb8856606 	}
232d30ea906Sjfb8856606 
233d30ea906Sjfb8856606 	if ((i == 0) && (test_vector.op_type == RTE_BBDEV_OP_NONE))
234d30ea906Sjfb8856606 		return TEST_SUCCESS; /* Special case for NULL device */
235d30ea906Sjfb8856606 
236d30ea906Sjfb8856606 	return TEST_FAILED;
237d30ea906Sjfb8856606 }
238d30ea906Sjfb8856606 
/* Returns the smallest (2^k - 1) value that is not smaller than val;
 * 2^k - 1 element counts are the optimal sizing for rte_mempool.
 */
static unsigned int
optimal_mempool_size(unsigned int val)
{
	unsigned int next_pow2 = rte_align32pow2(val + 1);

	return next_pow2 - 1;
}
245d30ea906Sjfb8856606 
246d30ea906Sjfb8856606 /* allocates mbuf mempool for inputs and outputs */
247d30ea906Sjfb8856606 static struct rte_mempool *
248d30ea906Sjfb8856606 create_mbuf_pool(struct op_data_entries *entries, uint8_t dev_id,
249d30ea906Sjfb8856606 		int socket_id, unsigned int mbuf_pool_size,
250d30ea906Sjfb8856606 		const char *op_type_str)
251d30ea906Sjfb8856606 {
252d30ea906Sjfb8856606 	unsigned int i;
253d30ea906Sjfb8856606 	uint32_t max_seg_sz = 0;
254d30ea906Sjfb8856606 	char pool_name[RTE_MEMPOOL_NAMESIZE];
255d30ea906Sjfb8856606 
256d30ea906Sjfb8856606 	/* find max input segment size */
257d30ea906Sjfb8856606 	for (i = 0; i < entries->nb_segments; ++i)
258d30ea906Sjfb8856606 		if (entries->segments[i].length > max_seg_sz)
259d30ea906Sjfb8856606 			max_seg_sz = entries->segments[i].length;
260d30ea906Sjfb8856606 
261d30ea906Sjfb8856606 	snprintf(pool_name, sizeof(pool_name), "%s_pool_%u", op_type_str,
262d30ea906Sjfb8856606 			dev_id);
263d30ea906Sjfb8856606 	return rte_pktmbuf_pool_create(pool_name, mbuf_pool_size, 0, 0,
264d30ea906Sjfb8856606 			RTE_MAX(max_seg_sz + RTE_PKTMBUF_HEADROOM,
265d30ea906Sjfb8856606 			(unsigned int)RTE_MBUF_DEFAULT_BUF_SIZE), socket_id);
266d30ea906Sjfb8856606 }
267d30ea906Sjfb8856606 
268d30ea906Sjfb8856606 static int
269d30ea906Sjfb8856606 create_mempools(struct active_device *ad, int socket_id,
270d30ea906Sjfb8856606 		enum rte_bbdev_op_type org_op_type, uint16_t num_ops)
271d30ea906Sjfb8856606 {
272d30ea906Sjfb8856606 	struct rte_mempool *mp;
273d30ea906Sjfb8856606 	unsigned int ops_pool_size, mbuf_pool_size = 0;
274d30ea906Sjfb8856606 	char pool_name[RTE_MEMPOOL_NAMESIZE];
275d30ea906Sjfb8856606 	const char *op_type_str;
276d30ea906Sjfb8856606 	enum rte_bbdev_op_type op_type = org_op_type;
277d30ea906Sjfb8856606 
278d30ea906Sjfb8856606 	struct op_data_entries *in = &test_vector.entries[DATA_INPUT];
279d30ea906Sjfb8856606 	struct op_data_entries *hard_out =
280d30ea906Sjfb8856606 			&test_vector.entries[DATA_HARD_OUTPUT];
281d30ea906Sjfb8856606 	struct op_data_entries *soft_out =
282d30ea906Sjfb8856606 			&test_vector.entries[DATA_SOFT_OUTPUT];
283d30ea906Sjfb8856606 
284d30ea906Sjfb8856606 	/* allocate ops mempool */
285d30ea906Sjfb8856606 	ops_pool_size = optimal_mempool_size(RTE_MAX(
286d30ea906Sjfb8856606 			/* Ops used plus 1 reference op */
287d30ea906Sjfb8856606 			RTE_MAX((unsigned int)(ad->nb_queues * num_ops + 1),
288d30ea906Sjfb8856606 			/* Minimal cache size plus 1 reference op */
289d30ea906Sjfb8856606 			(unsigned int)(1.5 * rte_lcore_count() *
290d30ea906Sjfb8856606 					OPS_CACHE_SIZE + 1)),
291d30ea906Sjfb8856606 			OPS_POOL_SIZE_MIN));
292d30ea906Sjfb8856606 
293d30ea906Sjfb8856606 	if (org_op_type == RTE_BBDEV_OP_NONE)
294d30ea906Sjfb8856606 		op_type = RTE_BBDEV_OP_TURBO_ENC;
295d30ea906Sjfb8856606 
296d30ea906Sjfb8856606 	op_type_str = rte_bbdev_op_type_str(op_type);
297d30ea906Sjfb8856606 	TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
298d30ea906Sjfb8856606 
299d30ea906Sjfb8856606 	snprintf(pool_name, sizeof(pool_name), "%s_pool_%u", op_type_str,
300d30ea906Sjfb8856606 			ad->dev_id);
301d30ea906Sjfb8856606 	mp = rte_bbdev_op_pool_create(pool_name, op_type,
302d30ea906Sjfb8856606 			ops_pool_size, OPS_CACHE_SIZE, socket_id);
303d30ea906Sjfb8856606 	TEST_ASSERT_NOT_NULL(mp,
304d30ea906Sjfb8856606 			"ERROR Failed to create %u items ops pool for dev %u on socket %u.",
305d30ea906Sjfb8856606 			ops_pool_size,
306d30ea906Sjfb8856606 			ad->dev_id,
307d30ea906Sjfb8856606 			socket_id);
308d30ea906Sjfb8856606 	ad->ops_mempool = mp;
309d30ea906Sjfb8856606 
310d30ea906Sjfb8856606 	/* Do not create inputs and outputs mbufs for BaseBand Null Device */
311d30ea906Sjfb8856606 	if (org_op_type == RTE_BBDEV_OP_NONE)
312d30ea906Sjfb8856606 		return TEST_SUCCESS;
313d30ea906Sjfb8856606 
314d30ea906Sjfb8856606 	/* Inputs */
315d30ea906Sjfb8856606 	mbuf_pool_size = optimal_mempool_size(ops_pool_size * in->nb_segments);
316d30ea906Sjfb8856606 	mp = create_mbuf_pool(in, ad->dev_id, socket_id, mbuf_pool_size, "in");
317d30ea906Sjfb8856606 	TEST_ASSERT_NOT_NULL(mp,
318d30ea906Sjfb8856606 			"ERROR Failed to create %u items input pktmbuf pool for dev %u on socket %u.",
319d30ea906Sjfb8856606 			mbuf_pool_size,
320d30ea906Sjfb8856606 			ad->dev_id,
321d30ea906Sjfb8856606 			socket_id);
322d30ea906Sjfb8856606 	ad->in_mbuf_pool = mp;
323d30ea906Sjfb8856606 
324d30ea906Sjfb8856606 	/* Hard outputs */
325d30ea906Sjfb8856606 	mbuf_pool_size = optimal_mempool_size(ops_pool_size *
326d30ea906Sjfb8856606 			hard_out->nb_segments);
327d30ea906Sjfb8856606 	mp = create_mbuf_pool(hard_out, ad->dev_id, socket_id, mbuf_pool_size,
328d30ea906Sjfb8856606 			"hard_out");
329d30ea906Sjfb8856606 	TEST_ASSERT_NOT_NULL(mp,
330d30ea906Sjfb8856606 			"ERROR Failed to create %u items hard output pktmbuf pool for dev %u on socket %u.",
331d30ea906Sjfb8856606 			mbuf_pool_size,
332d30ea906Sjfb8856606 			ad->dev_id,
333d30ea906Sjfb8856606 			socket_id);
334d30ea906Sjfb8856606 	ad->hard_out_mbuf_pool = mp;
335d30ea906Sjfb8856606 
336d30ea906Sjfb8856606 	if (soft_out->nb_segments == 0)
337d30ea906Sjfb8856606 		return TEST_SUCCESS;
338d30ea906Sjfb8856606 
339d30ea906Sjfb8856606 	/* Soft outputs */
340d30ea906Sjfb8856606 	mbuf_pool_size = optimal_mempool_size(ops_pool_size *
341d30ea906Sjfb8856606 			soft_out->nb_segments);
342d30ea906Sjfb8856606 	mp = create_mbuf_pool(soft_out, ad->dev_id, socket_id, mbuf_pool_size,
343d30ea906Sjfb8856606 			"soft_out");
344d30ea906Sjfb8856606 	TEST_ASSERT_NOT_NULL(mp,
345d30ea906Sjfb8856606 			"ERROR Failed to create %uB soft output pktmbuf pool for dev %u on socket %u.",
346d30ea906Sjfb8856606 			mbuf_pool_size,
347d30ea906Sjfb8856606 			ad->dev_id,
348d30ea906Sjfb8856606 			socket_id);
349d30ea906Sjfb8856606 	ad->soft_out_mbuf_pool = mp;
350d30ea906Sjfb8856606 
351d30ea906Sjfb8856606 	return 0;
352d30ea906Sjfb8856606 }
353d30ea906Sjfb8856606 
/* Configures one bbdev device for the test: sets up as many queues as
 * there are lcores (capped by the device maximum), optionally enables
 * interrupts, then configures each queue with the vector's op type.
 * When a queue fails to configure at the current priority, the priority
 * is bumped once and the queue retried; when it fails again, queue
 * allocation stops and only the queues configured so far are used.
 *
 * Returns TEST_SUCCESS when at least one queue was configured,
 * TEST_FAILED otherwise. On success fills ad->queue_ids/nb_queues and
 * records the op type as supported.
 */
static int
add_bbdev_dev(uint8_t dev_id, struct rte_bbdev_info *info,
		struct test_bbdev_vector *vector)
{
	int ret;
	unsigned int queue_id;
	struct rte_bbdev_queue_conf qconf;
	struct active_device *ad = &active_devs[nb_active_devs];
	unsigned int nb_queues;
	enum rte_bbdev_op_type op_type = vector->op_type;

	/* one queue per lcore, but never more than the device supports */
	nb_queues = RTE_MIN(rte_lcore_count(), info->drv.max_num_queues);
	/* setup device */
	ret = rte_bbdev_setup_queues(dev_id, nb_queues, info->socket_id);
	if (ret < 0) {
		printf("rte_bbdev_setup_queues(%u, %u, %d) ret %i\n",
				dev_id, nb_queues, info->socket_id, ret);
		return TEST_FAILED;
	}

	/* configure interrupts if needed */
	if (intr_enabled) {
		ret = rte_bbdev_intr_enable(dev_id);
		if (ret < 0) {
			printf("rte_bbdev_intr_enable(%u) ret %i\n", dev_id,
					ret);
			return TEST_FAILED;
		}
	}

	/* setup device queues */
	qconf.socket = info->socket_id;
	qconf.queue_size = info->drv.default_queue_conf.queue_size;
	qconf.priority = 0;
	qconf.deferred_start = 0;
	qconf.op_type = op_type;

	for (queue_id = 0; queue_id < nb_queues; ++queue_id) {
		ret = rte_bbdev_queue_configure(dev_id, queue_id, &qconf);
		if (ret != 0) {
			/* first failure: raise priority and retry once.
			 * NOTE(review): the retry uses ad->dev_id — the
			 * caller sets it to dev_id before calling here, so
			 * the two are presumably equal; confirm.
			 */
			printf(
					"Allocated all queues (id=%u) at prio%u on dev%u\n",
					queue_id, qconf.priority, dev_id);
			qconf.priority++;
			ret = rte_bbdev_queue_configure(ad->dev_id, queue_id,
					&qconf);
		}
		if (ret != 0) {
			/* retry failed too: stop, keep queues configured so far */
			printf("All queues on dev %u allocated: %u\n",
					dev_id, queue_id);
			break;
		}
		ad->queue_ids[queue_id] = queue_id;
	}
	/* at least one queue must have been configured */
	TEST_ASSERT(queue_id != 0,
			"ERROR Failed to configure any queues on dev %u",
			dev_id);
	ad->nb_queues = queue_id;

	set_avail_op(ad, op_type);

	return TEST_SUCCESS;
}
417d30ea906Sjfb8856606 
418d30ea906Sjfb8856606 static int
419d30ea906Sjfb8856606 add_active_device(uint8_t dev_id, struct rte_bbdev_info *info,
420d30ea906Sjfb8856606 		struct test_bbdev_vector *vector)
421d30ea906Sjfb8856606 {
422d30ea906Sjfb8856606 	int ret;
423d30ea906Sjfb8856606 
424d30ea906Sjfb8856606 	active_devs[nb_active_devs].driver_name = info->drv.driver_name;
425d30ea906Sjfb8856606 	active_devs[nb_active_devs].dev_id = dev_id;
426d30ea906Sjfb8856606 
427d30ea906Sjfb8856606 	ret = add_bbdev_dev(dev_id, info, vector);
428d30ea906Sjfb8856606 	if (ret == TEST_SUCCESS)
429d30ea906Sjfb8856606 		++nb_active_devs;
430d30ea906Sjfb8856606 	return ret;
431d30ea906Sjfb8856606 }
432d30ea906Sjfb8856606 
433d30ea906Sjfb8856606 static uint8_t
434d30ea906Sjfb8856606 populate_active_devices(void)
435d30ea906Sjfb8856606 {
436d30ea906Sjfb8856606 	int ret;
437d30ea906Sjfb8856606 	uint8_t dev_id;
438d30ea906Sjfb8856606 	uint8_t nb_devs_added = 0;
439d30ea906Sjfb8856606 	struct rte_bbdev_info info;
440d30ea906Sjfb8856606 
441d30ea906Sjfb8856606 	RTE_BBDEV_FOREACH(dev_id) {
442d30ea906Sjfb8856606 		rte_bbdev_info_get(dev_id, &info);
443d30ea906Sjfb8856606 
444d30ea906Sjfb8856606 		if (check_dev_cap(&info)) {
445d30ea906Sjfb8856606 			printf(
446d30ea906Sjfb8856606 				"Device %d (%s) does not support specified capabilities\n",
447d30ea906Sjfb8856606 					dev_id, info.dev_name);
448d30ea906Sjfb8856606 			continue;
449d30ea906Sjfb8856606 		}
450d30ea906Sjfb8856606 
451d30ea906Sjfb8856606 		ret = add_active_device(dev_id, &info, &test_vector);
452d30ea906Sjfb8856606 		if (ret != 0) {
453d30ea906Sjfb8856606 			printf("Adding active bbdev %s skipped\n",
454d30ea906Sjfb8856606 					info.dev_name);
455d30ea906Sjfb8856606 			continue;
456d30ea906Sjfb8856606 		}
457d30ea906Sjfb8856606 		nb_devs_added++;
458d30ea906Sjfb8856606 	}
459d30ea906Sjfb8856606 
460d30ea906Sjfb8856606 	return nb_devs_added;
461d30ea906Sjfb8856606 }
462d30ea906Sjfb8856606 
463d30ea906Sjfb8856606 static int
464d30ea906Sjfb8856606 read_test_vector(void)
465d30ea906Sjfb8856606 {
466d30ea906Sjfb8856606 	int ret;
467d30ea906Sjfb8856606 
468d30ea906Sjfb8856606 	memset(&test_vector, 0, sizeof(test_vector));
469d30ea906Sjfb8856606 	printf("Test vector file = %s\n", get_vector_filename());
470d30ea906Sjfb8856606 	ret = test_bbdev_vector_read(get_vector_filename(), &test_vector);
471d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS(ret, "Failed to parse file %s\n",
472d30ea906Sjfb8856606 			get_vector_filename());
473d30ea906Sjfb8856606 
474d30ea906Sjfb8856606 	return TEST_SUCCESS;
475d30ea906Sjfb8856606 }
476d30ea906Sjfb8856606 
477d30ea906Sjfb8856606 static int
478d30ea906Sjfb8856606 testsuite_setup(void)
479d30ea906Sjfb8856606 {
480d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS(read_test_vector(), "Test suite setup failed\n");
481d30ea906Sjfb8856606 
482d30ea906Sjfb8856606 	if (populate_active_devices() == 0) {
483d30ea906Sjfb8856606 		printf("No suitable devices found!\n");
484d30ea906Sjfb8856606 		return TEST_SKIPPED;
485d30ea906Sjfb8856606 	}
486d30ea906Sjfb8856606 
487d30ea906Sjfb8856606 	return TEST_SUCCESS;
488d30ea906Sjfb8856606 }
489d30ea906Sjfb8856606 
490d30ea906Sjfb8856606 static int
491d30ea906Sjfb8856606 interrupt_testsuite_setup(void)
492d30ea906Sjfb8856606 {
493d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS(read_test_vector(), "Test suite setup failed\n");
494d30ea906Sjfb8856606 
495d30ea906Sjfb8856606 	/* Enable interrupts */
496d30ea906Sjfb8856606 	intr_enabled = true;
497d30ea906Sjfb8856606 
498d30ea906Sjfb8856606 	/* Special case for NULL device (RTE_BBDEV_OP_NONE) */
499d30ea906Sjfb8856606 	if (populate_active_devices() == 0 ||
500d30ea906Sjfb8856606 			test_vector.op_type == RTE_BBDEV_OP_NONE) {
501d30ea906Sjfb8856606 		intr_enabled = false;
502d30ea906Sjfb8856606 		printf("No suitable devices found!\n");
503d30ea906Sjfb8856606 		return TEST_SKIPPED;
504d30ea906Sjfb8856606 	}
505d30ea906Sjfb8856606 
506d30ea906Sjfb8856606 	return TEST_SUCCESS;
507d30ea906Sjfb8856606 }
508d30ea906Sjfb8856606 
509d30ea906Sjfb8856606 static void
510d30ea906Sjfb8856606 testsuite_teardown(void)
511d30ea906Sjfb8856606 {
512d30ea906Sjfb8856606 	uint8_t dev_id;
513d30ea906Sjfb8856606 
514d30ea906Sjfb8856606 	/* Unconfigure devices */
515d30ea906Sjfb8856606 	RTE_BBDEV_FOREACH(dev_id)
516d30ea906Sjfb8856606 		rte_bbdev_close(dev_id);
517d30ea906Sjfb8856606 
518d30ea906Sjfb8856606 	/* Clear active devices structs. */
519d30ea906Sjfb8856606 	memset(active_devs, 0, sizeof(active_devs));
520d30ea906Sjfb8856606 	nb_active_devs = 0;
521d30ea906Sjfb8856606 }
522d30ea906Sjfb8856606 
523d30ea906Sjfb8856606 static int
524d30ea906Sjfb8856606 ut_setup(void)
525d30ea906Sjfb8856606 {
526d30ea906Sjfb8856606 	uint8_t i, dev_id;
527d30ea906Sjfb8856606 
528d30ea906Sjfb8856606 	for (i = 0; i < nb_active_devs; i++) {
529d30ea906Sjfb8856606 		dev_id = active_devs[i].dev_id;
530d30ea906Sjfb8856606 		/* reset bbdev stats */
531d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(rte_bbdev_stats_reset(dev_id),
532d30ea906Sjfb8856606 				"Failed to reset stats of bbdev %u", dev_id);
533d30ea906Sjfb8856606 		/* start the device */
534d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(rte_bbdev_start(dev_id),
535d30ea906Sjfb8856606 				"Failed to start bbdev %u", dev_id);
536d30ea906Sjfb8856606 	}
537d30ea906Sjfb8856606 
538d30ea906Sjfb8856606 	return TEST_SUCCESS;
539d30ea906Sjfb8856606 }
540d30ea906Sjfb8856606 
541d30ea906Sjfb8856606 static void
542d30ea906Sjfb8856606 ut_teardown(void)
543d30ea906Sjfb8856606 {
544d30ea906Sjfb8856606 	uint8_t i, dev_id;
545d30ea906Sjfb8856606 	struct rte_bbdev_stats stats;
546d30ea906Sjfb8856606 
547d30ea906Sjfb8856606 	for (i = 0; i < nb_active_devs; i++) {
548d30ea906Sjfb8856606 		dev_id = active_devs[i].dev_id;
549d30ea906Sjfb8856606 		/* read stats and print */
550d30ea906Sjfb8856606 		rte_bbdev_stats_get(dev_id, &stats);
551d30ea906Sjfb8856606 		/* Stop the device */
552d30ea906Sjfb8856606 		rte_bbdev_stop(dev_id);
553d30ea906Sjfb8856606 	}
554d30ea906Sjfb8856606 }
555d30ea906Sjfb8856606 
/* Initializes n op_data descriptors in bufs, one per operation. Every
 * descriptor gets a head mbuf from mbuf_pool; for DATA_INPUT entries
 * the reference vector's segments are copied in — the first segment
 * into the head mbuf and each further segment into a freshly allocated
 * mbuf chained onto it. Output descriptors get only an empty head mbuf
 * (length 0); the device writes into them later.
 *
 * Input data addresses are checked against the device's minimum
 * alignment. Returns 0 on success; fails via TEST_ASSERT_* on mbuf
 * exhaustion, append failure, misalignment or chaining failure.
 */
static int
init_op_data_objs(struct rte_bbdev_op_data *bufs,
		struct op_data_entries *ref_entries,
		struct rte_mempool *mbuf_pool, const uint16_t n,
		enum op_data_type op_type, uint16_t min_alignment)
{
	int ret;
	unsigned int i, j;

	for (i = 0; i < n; ++i) {
		char *data;
		struct op_data_buf *seg = &ref_entries->segments[0];
		struct rte_mbuf *m_head = rte_pktmbuf_alloc(mbuf_pool);
		TEST_ASSERT_NOT_NULL(m_head,
				"Not enough mbufs in %d data type mbuf pool (needed %u, available %u)",
				op_type, n * ref_entries->nb_segments,
				mbuf_pool->size);

		bufs[i].data = m_head;
		bufs[i].offset = 0;
		bufs[i].length = 0;

		/* only inputs carry data; outputs stay empty for the device */
		if (op_type == DATA_INPUT) {
			/* first segment goes into the head mbuf */
			data = rte_pktmbuf_append(m_head, seg->length);
			TEST_ASSERT_NOT_NULL(data,
					"Couldn't append %u bytes to mbuf from %d data type mbuf pool",
					seg->length, op_type);

			TEST_ASSERT(data == RTE_PTR_ALIGN(data, min_alignment),
					"Data addr in mbuf (%p) is not aligned to device min alignment (%u)",
					data, min_alignment);
			rte_memcpy(data, seg->addr, seg->length);
			bufs[i].length += seg->length;


			/* remaining segments each get their own chained mbuf */
			for (j = 1; j < ref_entries->nb_segments; ++j) {
				struct rte_mbuf *m_tail =
						rte_pktmbuf_alloc(mbuf_pool);
				TEST_ASSERT_NOT_NULL(m_tail,
						"Not enough mbufs in %d data type mbuf pool (needed %u, available %u)",
						op_type,
						n * ref_entries->nb_segments,
						mbuf_pool->size);
				seg += 1;

				data = rte_pktmbuf_append(m_tail, seg->length);
				TEST_ASSERT_NOT_NULL(data,
						"Couldn't append %u bytes to mbuf from %d data type mbuf pool",
						seg->length, op_type);

				TEST_ASSERT(data == RTE_PTR_ALIGN(data,
						min_alignment),
						"Data addr in mbuf (%p) is not aligned to device min alignment (%u)",
						data, min_alignment);
				rte_memcpy(data, seg->addr, seg->length);
				bufs[i].length += seg->length;

				ret = rte_pktmbuf_chain(m_head, m_tail);
				TEST_ASSERT_SUCCESS(ret,
						"Couldn't chain mbufs from %d data type mbuf pool",
						op_type);
			}
		}
	}

	return 0;
}
623d30ea906Sjfb8856606 
624d30ea906Sjfb8856606 static int
625d30ea906Sjfb8856606 allocate_buffers_on_socket(struct rte_bbdev_op_data **buffers, const int len,
626d30ea906Sjfb8856606 		const int socket)
627d30ea906Sjfb8856606 {
628d30ea906Sjfb8856606 	int i;
629d30ea906Sjfb8856606 
630d30ea906Sjfb8856606 	*buffers = rte_zmalloc_socket(NULL, len, 0, socket);
631d30ea906Sjfb8856606 	if (*buffers == NULL) {
632d30ea906Sjfb8856606 		printf("WARNING: Failed to allocate op_data on socket %d\n",
633d30ea906Sjfb8856606 				socket);
634d30ea906Sjfb8856606 		/* try to allocate memory on other detected sockets */
635d30ea906Sjfb8856606 		for (i = 0; i < socket; i++) {
636d30ea906Sjfb8856606 			*buffers = rte_zmalloc_socket(NULL, len, 0, i);
637d30ea906Sjfb8856606 			if (*buffers != NULL)
638d30ea906Sjfb8856606 				break;
639d30ea906Sjfb8856606 		}
640d30ea906Sjfb8856606 	}
641d30ea906Sjfb8856606 
642d30ea906Sjfb8856606 	return (*buffers == NULL) ? TEST_FAILED : TEST_SUCCESS;
643d30ea906Sjfb8856606 }
644d30ea906Sjfb8856606 
645d30ea906Sjfb8856606 static void
646d30ea906Sjfb8856606 limit_input_llr_val_range(struct rte_bbdev_op_data *input_ops,
647d30ea906Sjfb8856606 		uint16_t n, int8_t max_llr_modulus)
648d30ea906Sjfb8856606 {
649d30ea906Sjfb8856606 	uint16_t i, byte_idx;
650d30ea906Sjfb8856606 
651d30ea906Sjfb8856606 	for (i = 0; i < n; ++i) {
652d30ea906Sjfb8856606 		struct rte_mbuf *m = input_ops[i].data;
653d30ea906Sjfb8856606 		while (m != NULL) {
654d30ea906Sjfb8856606 			int8_t *llr = rte_pktmbuf_mtod_offset(m, int8_t *,
655d30ea906Sjfb8856606 					input_ops[i].offset);
656d30ea906Sjfb8856606 			for (byte_idx = 0; byte_idx < input_ops[i].length;
657d30ea906Sjfb8856606 					++byte_idx)
658d30ea906Sjfb8856606 				llr[byte_idx] = round((double)max_llr_modulus *
659d30ea906Sjfb8856606 						llr[byte_idx] / INT8_MAX);
660d30ea906Sjfb8856606 
661d30ea906Sjfb8856606 			m = m->next;
662d30ea906Sjfb8856606 		}
663d30ea906Sjfb8856606 	}
664d30ea906Sjfb8856606 }
665d30ea906Sjfb8856606 
666d30ea906Sjfb8856606 static int
667d30ea906Sjfb8856606 fill_queue_buffers(struct test_op_params *op_params,
668d30ea906Sjfb8856606 		struct rte_mempool *in_mp, struct rte_mempool *hard_out_mp,
669d30ea906Sjfb8856606 		struct rte_mempool *soft_out_mp, uint16_t queue_id,
670d30ea906Sjfb8856606 		const struct rte_bbdev_op_cap *capabilities,
671d30ea906Sjfb8856606 		uint16_t min_alignment, const int socket_id)
672d30ea906Sjfb8856606 {
673d30ea906Sjfb8856606 	int ret;
674d30ea906Sjfb8856606 	enum op_data_type type;
675d30ea906Sjfb8856606 	const uint16_t n = op_params->num_to_process;
676d30ea906Sjfb8856606 
677d30ea906Sjfb8856606 	struct rte_mempool *mbuf_pools[DATA_NUM_TYPES] = {
678d30ea906Sjfb8856606 		in_mp,
679d30ea906Sjfb8856606 		soft_out_mp,
680d30ea906Sjfb8856606 		hard_out_mp,
681d30ea906Sjfb8856606 	};
682d30ea906Sjfb8856606 
683d30ea906Sjfb8856606 	struct rte_bbdev_op_data **queue_ops[DATA_NUM_TYPES] = {
684d30ea906Sjfb8856606 		&op_params->q_bufs[socket_id][queue_id].inputs,
685d30ea906Sjfb8856606 		&op_params->q_bufs[socket_id][queue_id].soft_outputs,
686d30ea906Sjfb8856606 		&op_params->q_bufs[socket_id][queue_id].hard_outputs,
687d30ea906Sjfb8856606 	};
688d30ea906Sjfb8856606 
689d30ea906Sjfb8856606 	for (type = DATA_INPUT; type < DATA_NUM_TYPES; ++type) {
690d30ea906Sjfb8856606 		struct op_data_entries *ref_entries =
691d30ea906Sjfb8856606 				&test_vector.entries[type];
692d30ea906Sjfb8856606 		if (ref_entries->nb_segments == 0)
693d30ea906Sjfb8856606 			continue;
694d30ea906Sjfb8856606 
695d30ea906Sjfb8856606 		ret = allocate_buffers_on_socket(queue_ops[type],
696d30ea906Sjfb8856606 				n * sizeof(struct rte_bbdev_op_data),
697d30ea906Sjfb8856606 				socket_id);
698d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(ret,
699d30ea906Sjfb8856606 				"Couldn't allocate memory for rte_bbdev_op_data structs");
700d30ea906Sjfb8856606 
701d30ea906Sjfb8856606 		ret = init_op_data_objs(*queue_ops[type], ref_entries,
702d30ea906Sjfb8856606 				mbuf_pools[type], n, type, min_alignment);
703d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(ret,
704d30ea906Sjfb8856606 				"Couldn't init rte_bbdev_op_data structs");
705d30ea906Sjfb8856606 	}
706d30ea906Sjfb8856606 
707d30ea906Sjfb8856606 	if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
708d30ea906Sjfb8856606 		limit_input_llr_val_range(*queue_ops[DATA_INPUT], n,
709d30ea906Sjfb8856606 			capabilities->cap.turbo_dec.max_llr_modulus);
710d30ea906Sjfb8856606 
711d30ea906Sjfb8856606 	return 0;
712d30ea906Sjfb8856606 }
713d30ea906Sjfb8856606 
714d30ea906Sjfb8856606 static void
715d30ea906Sjfb8856606 free_buffers(struct active_device *ad, struct test_op_params *op_params)
716d30ea906Sjfb8856606 {
717d30ea906Sjfb8856606 	unsigned int i, j;
718d30ea906Sjfb8856606 
719d30ea906Sjfb8856606 	rte_mempool_free(ad->ops_mempool);
720d30ea906Sjfb8856606 	rte_mempool_free(ad->in_mbuf_pool);
721d30ea906Sjfb8856606 	rte_mempool_free(ad->hard_out_mbuf_pool);
722d30ea906Sjfb8856606 	rte_mempool_free(ad->soft_out_mbuf_pool);
723d30ea906Sjfb8856606 
724d30ea906Sjfb8856606 	for (i = 0; i < rte_lcore_count(); ++i) {
725d30ea906Sjfb8856606 		for (j = 0; j < RTE_MAX_NUMA_NODES; ++j) {
726d30ea906Sjfb8856606 			rte_free(op_params->q_bufs[j][i].inputs);
727d30ea906Sjfb8856606 			rte_free(op_params->q_bufs[j][i].hard_outputs);
728d30ea906Sjfb8856606 			rte_free(op_params->q_bufs[j][i].soft_outputs);
729d30ea906Sjfb8856606 		}
730d30ea906Sjfb8856606 	}
731d30ea906Sjfb8856606 }
732d30ea906Sjfb8856606 
733d30ea906Sjfb8856606 static void
734d30ea906Sjfb8856606 copy_reference_dec_op(struct rte_bbdev_dec_op **ops, unsigned int n,
735d30ea906Sjfb8856606 		unsigned int start_idx,
736d30ea906Sjfb8856606 		struct rte_bbdev_op_data *inputs,
737d30ea906Sjfb8856606 		struct rte_bbdev_op_data *hard_outputs,
738d30ea906Sjfb8856606 		struct rte_bbdev_op_data *soft_outputs,
739d30ea906Sjfb8856606 		struct rte_bbdev_dec_op *ref_op)
740d30ea906Sjfb8856606 {
741d30ea906Sjfb8856606 	unsigned int i;
742d30ea906Sjfb8856606 	struct rte_bbdev_op_turbo_dec *turbo_dec = &ref_op->turbo_dec;
743d30ea906Sjfb8856606 
744d30ea906Sjfb8856606 	for (i = 0; i < n; ++i) {
745d30ea906Sjfb8856606 		if (turbo_dec->code_block_mode == 0) {
746d30ea906Sjfb8856606 			ops[i]->turbo_dec.tb_params.ea =
747d30ea906Sjfb8856606 					turbo_dec->tb_params.ea;
748d30ea906Sjfb8856606 			ops[i]->turbo_dec.tb_params.eb =
749d30ea906Sjfb8856606 					turbo_dec->tb_params.eb;
750d30ea906Sjfb8856606 			ops[i]->turbo_dec.tb_params.k_pos =
751d30ea906Sjfb8856606 					turbo_dec->tb_params.k_pos;
752d30ea906Sjfb8856606 			ops[i]->turbo_dec.tb_params.k_neg =
753d30ea906Sjfb8856606 					turbo_dec->tb_params.k_neg;
754d30ea906Sjfb8856606 			ops[i]->turbo_dec.tb_params.c =
755d30ea906Sjfb8856606 					turbo_dec->tb_params.c;
756d30ea906Sjfb8856606 			ops[i]->turbo_dec.tb_params.c_neg =
757d30ea906Sjfb8856606 					turbo_dec->tb_params.c_neg;
758d30ea906Sjfb8856606 			ops[i]->turbo_dec.tb_params.cab =
759d30ea906Sjfb8856606 					turbo_dec->tb_params.cab;
760d30ea906Sjfb8856606 		} else {
761d30ea906Sjfb8856606 			ops[i]->turbo_dec.cb_params.e = turbo_dec->cb_params.e;
762d30ea906Sjfb8856606 			ops[i]->turbo_dec.cb_params.k = turbo_dec->cb_params.k;
763d30ea906Sjfb8856606 		}
764d30ea906Sjfb8856606 
765d30ea906Sjfb8856606 		ops[i]->turbo_dec.ext_scale = turbo_dec->ext_scale;
766d30ea906Sjfb8856606 		ops[i]->turbo_dec.iter_max = turbo_dec->iter_max;
767d30ea906Sjfb8856606 		ops[i]->turbo_dec.iter_min = turbo_dec->iter_min;
768d30ea906Sjfb8856606 		ops[i]->turbo_dec.op_flags = turbo_dec->op_flags;
769d30ea906Sjfb8856606 		ops[i]->turbo_dec.rv_index = turbo_dec->rv_index;
770d30ea906Sjfb8856606 		ops[i]->turbo_dec.num_maps = turbo_dec->num_maps;
771d30ea906Sjfb8856606 		ops[i]->turbo_dec.code_block_mode = turbo_dec->code_block_mode;
772d30ea906Sjfb8856606 
773d30ea906Sjfb8856606 		ops[i]->turbo_dec.hard_output = hard_outputs[start_idx + i];
774d30ea906Sjfb8856606 		ops[i]->turbo_dec.input = inputs[start_idx + i];
775d30ea906Sjfb8856606 		if (soft_outputs != NULL)
776d30ea906Sjfb8856606 			ops[i]->turbo_dec.soft_output =
777d30ea906Sjfb8856606 				soft_outputs[start_idx + i];
778d30ea906Sjfb8856606 	}
779d30ea906Sjfb8856606 }
780d30ea906Sjfb8856606 
781d30ea906Sjfb8856606 static void
782d30ea906Sjfb8856606 copy_reference_enc_op(struct rte_bbdev_enc_op **ops, unsigned int n,
783d30ea906Sjfb8856606 		unsigned int start_idx,
784d30ea906Sjfb8856606 		struct rte_bbdev_op_data *inputs,
785d30ea906Sjfb8856606 		struct rte_bbdev_op_data *outputs,
786d30ea906Sjfb8856606 		struct rte_bbdev_enc_op *ref_op)
787d30ea906Sjfb8856606 {
788d30ea906Sjfb8856606 	unsigned int i;
789d30ea906Sjfb8856606 	struct rte_bbdev_op_turbo_enc *turbo_enc = &ref_op->turbo_enc;
790d30ea906Sjfb8856606 	for (i = 0; i < n; ++i) {
791d30ea906Sjfb8856606 		if (turbo_enc->code_block_mode == 0) {
792d30ea906Sjfb8856606 			ops[i]->turbo_enc.tb_params.ea =
793d30ea906Sjfb8856606 					turbo_enc->tb_params.ea;
794d30ea906Sjfb8856606 			ops[i]->turbo_enc.tb_params.eb =
795d30ea906Sjfb8856606 					turbo_enc->tb_params.eb;
796d30ea906Sjfb8856606 			ops[i]->turbo_enc.tb_params.k_pos =
797d30ea906Sjfb8856606 					turbo_enc->tb_params.k_pos;
798d30ea906Sjfb8856606 			ops[i]->turbo_enc.tb_params.k_neg =
799d30ea906Sjfb8856606 					turbo_enc->tb_params.k_neg;
800d30ea906Sjfb8856606 			ops[i]->turbo_enc.tb_params.c =
801d30ea906Sjfb8856606 					turbo_enc->tb_params.c;
802d30ea906Sjfb8856606 			ops[i]->turbo_enc.tb_params.c_neg =
803d30ea906Sjfb8856606 					turbo_enc->tb_params.c_neg;
804d30ea906Sjfb8856606 			ops[i]->turbo_enc.tb_params.cab =
805d30ea906Sjfb8856606 					turbo_enc->tb_params.cab;
806d30ea906Sjfb8856606 			ops[i]->turbo_enc.tb_params.ncb_pos =
807d30ea906Sjfb8856606 					turbo_enc->tb_params.ncb_pos;
808d30ea906Sjfb8856606 			ops[i]->turbo_enc.tb_params.ncb_neg =
809d30ea906Sjfb8856606 					turbo_enc->tb_params.ncb_neg;
810d30ea906Sjfb8856606 			ops[i]->turbo_enc.tb_params.r = turbo_enc->tb_params.r;
811d30ea906Sjfb8856606 		} else {
812d30ea906Sjfb8856606 			ops[i]->turbo_enc.cb_params.e = turbo_enc->cb_params.e;
813d30ea906Sjfb8856606 			ops[i]->turbo_enc.cb_params.k = turbo_enc->cb_params.k;
814d30ea906Sjfb8856606 			ops[i]->turbo_enc.cb_params.ncb =
815d30ea906Sjfb8856606 					turbo_enc->cb_params.ncb;
816d30ea906Sjfb8856606 		}
817d30ea906Sjfb8856606 		ops[i]->turbo_enc.rv_index = turbo_enc->rv_index;
818d30ea906Sjfb8856606 		ops[i]->turbo_enc.op_flags = turbo_enc->op_flags;
819d30ea906Sjfb8856606 		ops[i]->turbo_enc.code_block_mode = turbo_enc->code_block_mode;
820d30ea906Sjfb8856606 
821d30ea906Sjfb8856606 		ops[i]->turbo_enc.output = outputs[start_idx + i];
822d30ea906Sjfb8856606 		ops[i]->turbo_enc.input = inputs[start_idx + i];
823d30ea906Sjfb8856606 	}
824d30ea906Sjfb8856606 }
825d30ea906Sjfb8856606 
826d30ea906Sjfb8856606 static int
827d30ea906Sjfb8856606 check_dec_status_and_ordering(struct rte_bbdev_dec_op *op,
828d30ea906Sjfb8856606 		unsigned int order_idx, const int expected_status)
829d30ea906Sjfb8856606 {
830d30ea906Sjfb8856606 	TEST_ASSERT(op->status == expected_status,
831d30ea906Sjfb8856606 			"op_status (%d) != expected_status (%d)",
832d30ea906Sjfb8856606 			op->status, expected_status);
833d30ea906Sjfb8856606 
834d30ea906Sjfb8856606 	TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data,
835d30ea906Sjfb8856606 			"Ordering error, expected %p, got %p",
836d30ea906Sjfb8856606 			(void *)(uintptr_t)order_idx, op->opaque_data);
837d30ea906Sjfb8856606 
838d30ea906Sjfb8856606 	return TEST_SUCCESS;
839d30ea906Sjfb8856606 }
840d30ea906Sjfb8856606 
841d30ea906Sjfb8856606 static int
842d30ea906Sjfb8856606 check_enc_status_and_ordering(struct rte_bbdev_enc_op *op,
843d30ea906Sjfb8856606 		unsigned int order_idx, const int expected_status)
844d30ea906Sjfb8856606 {
845d30ea906Sjfb8856606 	TEST_ASSERT(op->status == expected_status,
846d30ea906Sjfb8856606 			"op_status (%d) != expected_status (%d)",
847d30ea906Sjfb8856606 			op->status, expected_status);
848d30ea906Sjfb8856606 
849d30ea906Sjfb8856606 	TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data,
850d30ea906Sjfb8856606 			"Ordering error, expected %p, got %p",
851d30ea906Sjfb8856606 			(void *)(uintptr_t)order_idx, op->opaque_data);
852d30ea906Sjfb8856606 
853d30ea906Sjfb8856606 	return TEST_SUCCESS;
854d30ea906Sjfb8856606 }
855d30ea906Sjfb8856606 
856d30ea906Sjfb8856606 static inline int
857d30ea906Sjfb8856606 validate_op_chain(struct rte_bbdev_op_data *op,
858d30ea906Sjfb8856606 		struct op_data_entries *orig_op)
859d30ea906Sjfb8856606 {
860d30ea906Sjfb8856606 	uint8_t i;
861d30ea906Sjfb8856606 	struct rte_mbuf *m = op->data;
862d30ea906Sjfb8856606 	uint8_t nb_dst_segments = orig_op->nb_segments;
863d30ea906Sjfb8856606 
864d30ea906Sjfb8856606 	TEST_ASSERT(nb_dst_segments == m->nb_segs,
865d30ea906Sjfb8856606 			"Number of segments differ in original (%u) and filled (%u) op",
866d30ea906Sjfb8856606 			nb_dst_segments, m->nb_segs);
867d30ea906Sjfb8856606 
868d30ea906Sjfb8856606 	for (i = 0; i < nb_dst_segments; ++i) {
869d30ea906Sjfb8856606 		/* Apply offset to the first mbuf segment */
870d30ea906Sjfb8856606 		uint16_t offset = (i == 0) ? op->offset : 0;
871d30ea906Sjfb8856606 		uint16_t data_len = m->data_len - offset;
872d30ea906Sjfb8856606 
873d30ea906Sjfb8856606 		TEST_ASSERT(orig_op->segments[i].length == data_len,
874d30ea906Sjfb8856606 				"Length of segment differ in original (%u) and filled (%u) op",
875d30ea906Sjfb8856606 				orig_op->segments[i].length, data_len);
876d30ea906Sjfb8856606 		TEST_ASSERT_BUFFERS_ARE_EQUAL(orig_op->segments[i].addr,
877d30ea906Sjfb8856606 				rte_pktmbuf_mtod_offset(m, uint32_t *, offset),
878d30ea906Sjfb8856606 				data_len,
879d30ea906Sjfb8856606 				"Output buffers (CB=%u) are not equal", i);
880d30ea906Sjfb8856606 		m = m->next;
881d30ea906Sjfb8856606 	}
882d30ea906Sjfb8856606 
883d30ea906Sjfb8856606 	return TEST_SUCCESS;
884d30ea906Sjfb8856606 }
885d30ea906Sjfb8856606 
886d30ea906Sjfb8856606 static int
887d30ea906Sjfb8856606 validate_dec_buffers(struct rte_bbdev_dec_op *ref_op, struct test_buffers *bufs,
888d30ea906Sjfb8856606 		const uint16_t num_to_process)
889d30ea906Sjfb8856606 {
890d30ea906Sjfb8856606 	int i;
891d30ea906Sjfb8856606 
892d30ea906Sjfb8856606 	struct op_data_entries *hard_data_orig =
893d30ea906Sjfb8856606 			&test_vector.entries[DATA_HARD_OUTPUT];
894d30ea906Sjfb8856606 	struct op_data_entries *soft_data_orig =
895d30ea906Sjfb8856606 			&test_vector.entries[DATA_SOFT_OUTPUT];
896d30ea906Sjfb8856606 
897d30ea906Sjfb8856606 	for (i = 0; i < num_to_process; i++) {
898d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(validate_op_chain(&bufs->hard_outputs[i],
899d30ea906Sjfb8856606 				hard_data_orig),
900d30ea906Sjfb8856606 				"Hard output buffers are not equal");
901d30ea906Sjfb8856606 		if (ref_op->turbo_dec.op_flags &
902d30ea906Sjfb8856606 				RTE_BBDEV_TURBO_SOFT_OUTPUT)
903d30ea906Sjfb8856606 			TEST_ASSERT_SUCCESS(validate_op_chain(
904d30ea906Sjfb8856606 					&bufs->soft_outputs[i],
905d30ea906Sjfb8856606 					soft_data_orig),
906d30ea906Sjfb8856606 					"Soft output buffers are not equal");
907d30ea906Sjfb8856606 	}
908d30ea906Sjfb8856606 
909d30ea906Sjfb8856606 	return TEST_SUCCESS;
910d30ea906Sjfb8856606 }
911d30ea906Sjfb8856606 
912d30ea906Sjfb8856606 static int
913d30ea906Sjfb8856606 validate_enc_buffers(struct test_buffers *bufs, const uint16_t num_to_process)
914d30ea906Sjfb8856606 {
915d30ea906Sjfb8856606 	int i;
916d30ea906Sjfb8856606 
917d30ea906Sjfb8856606 	struct op_data_entries *hard_data_orig =
918d30ea906Sjfb8856606 			&test_vector.entries[DATA_HARD_OUTPUT];
919d30ea906Sjfb8856606 
920d30ea906Sjfb8856606 	for (i = 0; i < num_to_process; i++)
921d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(validate_op_chain(&bufs->hard_outputs[i],
922d30ea906Sjfb8856606 				hard_data_orig), "");
923d30ea906Sjfb8856606 
924d30ea906Sjfb8856606 	return TEST_SUCCESS;
925d30ea906Sjfb8856606 }
926d30ea906Sjfb8856606 
927d30ea906Sjfb8856606 static int
928d30ea906Sjfb8856606 validate_dec_op(struct rte_bbdev_dec_op **ops, const uint16_t n,
929d30ea906Sjfb8856606 		struct rte_bbdev_dec_op *ref_op, const int vector_mask)
930d30ea906Sjfb8856606 {
931d30ea906Sjfb8856606 	unsigned int i;
932d30ea906Sjfb8856606 	int ret;
933d30ea906Sjfb8856606 	struct op_data_entries *hard_data_orig =
934d30ea906Sjfb8856606 			&test_vector.entries[DATA_HARD_OUTPUT];
935d30ea906Sjfb8856606 	struct op_data_entries *soft_data_orig =
936d30ea906Sjfb8856606 			&test_vector.entries[DATA_SOFT_OUTPUT];
937d30ea906Sjfb8856606 	struct rte_bbdev_op_turbo_dec *ops_td;
938d30ea906Sjfb8856606 	struct rte_bbdev_op_data *hard_output;
939d30ea906Sjfb8856606 	struct rte_bbdev_op_data *soft_output;
940d30ea906Sjfb8856606 	struct rte_bbdev_op_turbo_dec *ref_td = &ref_op->turbo_dec;
941d30ea906Sjfb8856606 
942d30ea906Sjfb8856606 	for (i = 0; i < n; ++i) {
943d30ea906Sjfb8856606 		ops_td = &ops[i]->turbo_dec;
944d30ea906Sjfb8856606 		hard_output = &ops_td->hard_output;
945d30ea906Sjfb8856606 		soft_output = &ops_td->soft_output;
946d30ea906Sjfb8856606 
947d30ea906Sjfb8856606 		if (vector_mask & TEST_BBDEV_VF_EXPECTED_ITER_COUNT)
948d30ea906Sjfb8856606 			TEST_ASSERT(ops_td->iter_count <= ref_td->iter_count,
949d30ea906Sjfb8856606 					"Returned iter_count (%d) > expected iter_count (%d)",
950d30ea906Sjfb8856606 					ops_td->iter_count, ref_td->iter_count);
951d30ea906Sjfb8856606 		ret = check_dec_status_and_ordering(ops[i], i, ref_op->status);
952d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(ret,
953d30ea906Sjfb8856606 				"Checking status and ordering for decoder failed");
954d30ea906Sjfb8856606 
955d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(validate_op_chain(hard_output,
956d30ea906Sjfb8856606 				hard_data_orig),
957d30ea906Sjfb8856606 				"Hard output buffers (CB=%u) are not equal",
958d30ea906Sjfb8856606 				i);
959d30ea906Sjfb8856606 
960d30ea906Sjfb8856606 		if (ref_op->turbo_dec.op_flags & RTE_BBDEV_TURBO_SOFT_OUTPUT)
961d30ea906Sjfb8856606 			TEST_ASSERT_SUCCESS(validate_op_chain(soft_output,
962d30ea906Sjfb8856606 					soft_data_orig),
963d30ea906Sjfb8856606 					"Soft output buffers (CB=%u) are not equal",
964d30ea906Sjfb8856606 					i);
965d30ea906Sjfb8856606 	}
966d30ea906Sjfb8856606 
967d30ea906Sjfb8856606 	return TEST_SUCCESS;
968d30ea906Sjfb8856606 }
969d30ea906Sjfb8856606 
970d30ea906Sjfb8856606 static int
971d30ea906Sjfb8856606 validate_enc_op(struct rte_bbdev_enc_op **ops, const uint16_t n,
972d30ea906Sjfb8856606 		struct rte_bbdev_enc_op *ref_op)
973d30ea906Sjfb8856606 {
974d30ea906Sjfb8856606 	unsigned int i;
975d30ea906Sjfb8856606 	int ret;
976d30ea906Sjfb8856606 	struct op_data_entries *hard_data_orig =
977d30ea906Sjfb8856606 			&test_vector.entries[DATA_HARD_OUTPUT];
978d30ea906Sjfb8856606 
979d30ea906Sjfb8856606 	for (i = 0; i < n; ++i) {
980d30ea906Sjfb8856606 		ret = check_enc_status_and_ordering(ops[i], i, ref_op->status);
981d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(ret,
982d30ea906Sjfb8856606 				"Checking status and ordering for encoder failed");
983d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(validate_op_chain(
984d30ea906Sjfb8856606 				&ops[i]->turbo_enc.output,
985d30ea906Sjfb8856606 				hard_data_orig),
986d30ea906Sjfb8856606 				"Output buffers (CB=%u) are not equal",
987d30ea906Sjfb8856606 				i);
988d30ea906Sjfb8856606 	}
989d30ea906Sjfb8856606 
990d30ea906Sjfb8856606 	return TEST_SUCCESS;
991d30ea906Sjfb8856606 }
992d30ea906Sjfb8856606 
993d30ea906Sjfb8856606 static void
994d30ea906Sjfb8856606 create_reference_dec_op(struct rte_bbdev_dec_op *op)
995d30ea906Sjfb8856606 {
996d30ea906Sjfb8856606 	unsigned int i;
997d30ea906Sjfb8856606 	struct op_data_entries *entry;
998d30ea906Sjfb8856606 
999d30ea906Sjfb8856606 	op->turbo_dec = test_vector.turbo_dec;
1000d30ea906Sjfb8856606 	entry = &test_vector.entries[DATA_INPUT];
1001d30ea906Sjfb8856606 	for (i = 0; i < entry->nb_segments; ++i)
1002d30ea906Sjfb8856606 		op->turbo_dec.input.length +=
1003d30ea906Sjfb8856606 				entry->segments[i].length;
1004d30ea906Sjfb8856606 }
1005d30ea906Sjfb8856606 
1006d30ea906Sjfb8856606 static void
1007d30ea906Sjfb8856606 create_reference_enc_op(struct rte_bbdev_enc_op *op)
1008d30ea906Sjfb8856606 {
1009d30ea906Sjfb8856606 	unsigned int i;
1010d30ea906Sjfb8856606 	struct op_data_entries *entry;
1011d30ea906Sjfb8856606 
1012d30ea906Sjfb8856606 	op->turbo_enc = test_vector.turbo_enc;
1013d30ea906Sjfb8856606 	entry = &test_vector.entries[DATA_INPUT];
1014d30ea906Sjfb8856606 	for (i = 0; i < entry->nb_segments; ++i)
1015d30ea906Sjfb8856606 		op->turbo_enc.input.length +=
1016d30ea906Sjfb8856606 				entry->segments[i].length;
1017d30ea906Sjfb8856606 }
1018d30ea906Sjfb8856606 
1019d30ea906Sjfb8856606 static int
1020d30ea906Sjfb8856606 init_test_op_params(struct test_op_params *op_params,
1021d30ea906Sjfb8856606 		enum rte_bbdev_op_type op_type, const int expected_status,
1022d30ea906Sjfb8856606 		const int vector_mask, struct rte_mempool *ops_mp,
1023d30ea906Sjfb8856606 		uint16_t burst_sz, uint16_t num_to_process, uint16_t num_lcores)
1024d30ea906Sjfb8856606 {
1025d30ea906Sjfb8856606 	int ret = 0;
1026d30ea906Sjfb8856606 	if (op_type == RTE_BBDEV_OP_TURBO_DEC)
1027d30ea906Sjfb8856606 		ret = rte_bbdev_dec_op_alloc_bulk(ops_mp,
1028d30ea906Sjfb8856606 				&op_params->ref_dec_op, 1);
1029d30ea906Sjfb8856606 	else
1030d30ea906Sjfb8856606 		ret = rte_bbdev_enc_op_alloc_bulk(ops_mp,
1031d30ea906Sjfb8856606 				&op_params->ref_enc_op, 1);
1032d30ea906Sjfb8856606 
1033d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS(ret, "rte_bbdev_op_alloc_bulk() failed");
1034d30ea906Sjfb8856606 
1035d30ea906Sjfb8856606 	op_params->mp = ops_mp;
1036d30ea906Sjfb8856606 	op_params->burst_sz = burst_sz;
1037d30ea906Sjfb8856606 	op_params->num_to_process = num_to_process;
1038d30ea906Sjfb8856606 	op_params->num_lcores = num_lcores;
1039d30ea906Sjfb8856606 	op_params->vector_mask = vector_mask;
1040d30ea906Sjfb8856606 	if (op_type == RTE_BBDEV_OP_TURBO_DEC)
1041d30ea906Sjfb8856606 		op_params->ref_dec_op->status = expected_status;
1042d30ea906Sjfb8856606 	else if (op_type == RTE_BBDEV_OP_TURBO_ENC)
1043d30ea906Sjfb8856606 		op_params->ref_enc_op->status = expected_status;
1044d30ea906Sjfb8856606 
1045d30ea906Sjfb8856606 	return 0;
1046d30ea906Sjfb8856606 }
1047d30ea906Sjfb8856606 
1048d30ea906Sjfb8856606 static int
1049d30ea906Sjfb8856606 run_test_case_on_device(test_case_function *test_case_func, uint8_t dev_id,
1050d30ea906Sjfb8856606 		struct test_op_params *op_params)
1051d30ea906Sjfb8856606 {
1052d30ea906Sjfb8856606 	int t_ret, f_ret, socket_id = SOCKET_ID_ANY;
1053d30ea906Sjfb8856606 	unsigned int i;
1054d30ea906Sjfb8856606 	struct active_device *ad;
1055d30ea906Sjfb8856606 	unsigned int burst_sz = get_burst_sz();
1056d30ea906Sjfb8856606 	enum rte_bbdev_op_type op_type = test_vector.op_type;
1057d30ea906Sjfb8856606 	const struct rte_bbdev_op_cap *capabilities = NULL;
1058d30ea906Sjfb8856606 
1059d30ea906Sjfb8856606 	ad = &active_devs[dev_id];
1060d30ea906Sjfb8856606 
1061d30ea906Sjfb8856606 	/* Check if device supports op_type */
1062d30ea906Sjfb8856606 	if (!is_avail_op(ad, test_vector.op_type))
1063d30ea906Sjfb8856606 		return TEST_SUCCESS;
1064d30ea906Sjfb8856606 
1065d30ea906Sjfb8856606 	struct rte_bbdev_info info;
1066d30ea906Sjfb8856606 	rte_bbdev_info_get(ad->dev_id, &info);
1067d30ea906Sjfb8856606 	socket_id = GET_SOCKET(info.socket_id);
1068d30ea906Sjfb8856606 
1069d30ea906Sjfb8856606 	f_ret = create_mempools(ad, socket_id, op_type,
1070d30ea906Sjfb8856606 			get_num_ops());
1071d30ea906Sjfb8856606 	if (f_ret != TEST_SUCCESS) {
1072d30ea906Sjfb8856606 		printf("Couldn't create mempools");
1073d30ea906Sjfb8856606 		goto fail;
1074d30ea906Sjfb8856606 	}
1075d30ea906Sjfb8856606 	if (op_type == RTE_BBDEV_OP_NONE)
1076d30ea906Sjfb8856606 		op_type = RTE_BBDEV_OP_TURBO_ENC;
1077d30ea906Sjfb8856606 
1078d30ea906Sjfb8856606 	f_ret = init_test_op_params(op_params, test_vector.op_type,
1079d30ea906Sjfb8856606 			test_vector.expected_status,
1080d30ea906Sjfb8856606 			test_vector.mask,
1081d30ea906Sjfb8856606 			ad->ops_mempool,
1082d30ea906Sjfb8856606 			burst_sz,
1083d30ea906Sjfb8856606 			get_num_ops(),
1084d30ea906Sjfb8856606 			get_num_lcores());
1085d30ea906Sjfb8856606 	if (f_ret != TEST_SUCCESS) {
1086d30ea906Sjfb8856606 		printf("Couldn't init test op params");
1087d30ea906Sjfb8856606 		goto fail;
1088d30ea906Sjfb8856606 	}
1089d30ea906Sjfb8856606 
1090d30ea906Sjfb8856606 	if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) {
1091d30ea906Sjfb8856606 		/* Find Decoder capabilities */
1092d30ea906Sjfb8856606 		const struct rte_bbdev_op_cap *cap = info.drv.capabilities;
1093d30ea906Sjfb8856606 		while (cap->type != RTE_BBDEV_OP_NONE) {
1094d30ea906Sjfb8856606 			if (cap->type == RTE_BBDEV_OP_TURBO_DEC) {
1095d30ea906Sjfb8856606 				capabilities = cap;
1096d30ea906Sjfb8856606 				break;
1097d30ea906Sjfb8856606 			}
1098d30ea906Sjfb8856606 		}
1099d30ea906Sjfb8856606 		TEST_ASSERT_NOT_NULL(capabilities,
1100d30ea906Sjfb8856606 				"Couldn't find Decoder capabilities");
1101d30ea906Sjfb8856606 
1102d30ea906Sjfb8856606 		create_reference_dec_op(op_params->ref_dec_op);
1103d30ea906Sjfb8856606 	} else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
1104d30ea906Sjfb8856606 		create_reference_enc_op(op_params->ref_enc_op);
1105d30ea906Sjfb8856606 
1106d30ea906Sjfb8856606 	for (i = 0; i < ad->nb_queues; ++i) {
1107d30ea906Sjfb8856606 		f_ret = fill_queue_buffers(op_params,
1108d30ea906Sjfb8856606 				ad->in_mbuf_pool,
1109d30ea906Sjfb8856606 				ad->hard_out_mbuf_pool,
1110d30ea906Sjfb8856606 				ad->soft_out_mbuf_pool,
1111d30ea906Sjfb8856606 				ad->queue_ids[i],
1112d30ea906Sjfb8856606 				capabilities,
1113d30ea906Sjfb8856606 				info.drv.min_alignment,
1114d30ea906Sjfb8856606 				socket_id);
1115d30ea906Sjfb8856606 		if (f_ret != TEST_SUCCESS) {
1116d30ea906Sjfb8856606 			printf("Couldn't init queue buffers");
1117d30ea906Sjfb8856606 			goto fail;
1118d30ea906Sjfb8856606 		}
1119d30ea906Sjfb8856606 	}
1120d30ea906Sjfb8856606 
1121d30ea906Sjfb8856606 	/* Run test case function */
1122d30ea906Sjfb8856606 	t_ret = test_case_func(ad, op_params);
1123d30ea906Sjfb8856606 
1124d30ea906Sjfb8856606 	/* Free active device resources and return */
1125d30ea906Sjfb8856606 	free_buffers(ad, op_params);
1126d30ea906Sjfb8856606 	return t_ret;
1127d30ea906Sjfb8856606 
1128d30ea906Sjfb8856606 fail:
1129d30ea906Sjfb8856606 	free_buffers(ad, op_params);
1130d30ea906Sjfb8856606 	return TEST_FAILED;
1131d30ea906Sjfb8856606 }
1132d30ea906Sjfb8856606 
1133d30ea906Sjfb8856606 /* Run given test function per active device per supported op type
1134d30ea906Sjfb8856606  * per burst size.
1135d30ea906Sjfb8856606  */
1136d30ea906Sjfb8856606 static int
1137d30ea906Sjfb8856606 run_test_case(test_case_function *test_case_func)
1138d30ea906Sjfb8856606 {
1139d30ea906Sjfb8856606 	int ret = 0;
1140d30ea906Sjfb8856606 	uint8_t dev;
1141d30ea906Sjfb8856606 
1142d30ea906Sjfb8856606 	/* Alloc op_params */
1143d30ea906Sjfb8856606 	struct test_op_params *op_params = rte_zmalloc(NULL,
1144d30ea906Sjfb8856606 			sizeof(struct test_op_params), RTE_CACHE_LINE_SIZE);
1145d30ea906Sjfb8856606 	TEST_ASSERT_NOT_NULL(op_params, "Failed to alloc %zuB for op_params",
1146d30ea906Sjfb8856606 			RTE_ALIGN(sizeof(struct test_op_params),
1147d30ea906Sjfb8856606 				RTE_CACHE_LINE_SIZE));
1148d30ea906Sjfb8856606 
1149d30ea906Sjfb8856606 	/* For each device run test case function */
1150d30ea906Sjfb8856606 	for (dev = 0; dev < nb_active_devs; ++dev)
1151d30ea906Sjfb8856606 		ret |= run_test_case_on_device(test_case_func, dev, op_params);
1152d30ea906Sjfb8856606 
1153d30ea906Sjfb8856606 	rte_free(op_params);
1154d30ea906Sjfb8856606 
1155d30ea906Sjfb8856606 	return ret;
1156d30ea906Sjfb8856606 }
1157d30ea906Sjfb8856606 
1158d30ea906Sjfb8856606 static void
1159d30ea906Sjfb8856606 dequeue_event_callback(uint16_t dev_id,
1160d30ea906Sjfb8856606 		enum rte_bbdev_event_type event, void *cb_arg,
1161d30ea906Sjfb8856606 		void *ret_param)
1162d30ea906Sjfb8856606 {
1163d30ea906Sjfb8856606 	int ret;
1164d30ea906Sjfb8856606 	uint16_t i;
1165d30ea906Sjfb8856606 	uint64_t total_time;
1166d30ea906Sjfb8856606 	uint16_t deq, burst_sz, num_to_process;
1167d30ea906Sjfb8856606 	uint16_t queue_id = INVALID_QUEUE_ID;
1168d30ea906Sjfb8856606 	struct rte_bbdev_dec_op *dec_ops[MAX_BURST];
1169d30ea906Sjfb8856606 	struct rte_bbdev_enc_op *enc_ops[MAX_BURST];
1170d30ea906Sjfb8856606 	struct test_buffers *bufs;
1171d30ea906Sjfb8856606 	struct rte_bbdev_info info;
1172d30ea906Sjfb8856606 
1173d30ea906Sjfb8856606 	/* Input length in bytes, million operations per second,
1174d30ea906Sjfb8856606 	 * million bits per second.
1175d30ea906Sjfb8856606 	 */
1176d30ea906Sjfb8856606 	double in_len;
1177d30ea906Sjfb8856606 
1178d30ea906Sjfb8856606 	struct thread_params *tp = cb_arg;
1179d30ea906Sjfb8856606 	RTE_SET_USED(ret_param);
1180d30ea906Sjfb8856606 	queue_id = tp->queue_id;
1181d30ea906Sjfb8856606 
1182d30ea906Sjfb8856606 	/* Find matching thread params using queue_id */
1183d30ea906Sjfb8856606 	for (i = 0; i < MAX_QUEUES; ++i, ++tp)
1184d30ea906Sjfb8856606 		if (tp->queue_id == queue_id)
1185d30ea906Sjfb8856606 			break;
1186d30ea906Sjfb8856606 
1187d30ea906Sjfb8856606 	if (i == MAX_QUEUES) {
1188d30ea906Sjfb8856606 		printf("%s: Queue_id from interrupt details was not found!\n",
1189d30ea906Sjfb8856606 				__func__);
1190d30ea906Sjfb8856606 		return;
1191d30ea906Sjfb8856606 	}
1192d30ea906Sjfb8856606 
1193d30ea906Sjfb8856606 	if (unlikely(event != RTE_BBDEV_EVENT_DEQUEUE)) {
1194d30ea906Sjfb8856606 		rte_atomic16_set(&tp->processing_status, TEST_FAILED);
1195d30ea906Sjfb8856606 		printf(
1196d30ea906Sjfb8856606 			"Dequeue interrupt handler called for incorrect event!\n");
1197d30ea906Sjfb8856606 		return;
1198d30ea906Sjfb8856606 	}
1199d30ea906Sjfb8856606 
1200d30ea906Sjfb8856606 	burst_sz = tp->op_params->burst_sz;
1201d30ea906Sjfb8856606 	num_to_process = tp->op_params->num_to_process;
1202d30ea906Sjfb8856606 
1203d30ea906Sjfb8856606 	if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
1204d30ea906Sjfb8856606 		deq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id, dec_ops,
1205d30ea906Sjfb8856606 				burst_sz);
1206d30ea906Sjfb8856606 	else
1207d30ea906Sjfb8856606 		deq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id, enc_ops,
1208d30ea906Sjfb8856606 				burst_sz);
1209d30ea906Sjfb8856606 
1210d30ea906Sjfb8856606 	if (deq < burst_sz) {
1211d30ea906Sjfb8856606 		printf(
1212d30ea906Sjfb8856606 			"After receiving the interrupt all operations should be dequeued. Expected: %u, got: %u\n",
1213d30ea906Sjfb8856606 			burst_sz, deq);
1214d30ea906Sjfb8856606 		rte_atomic16_set(&tp->processing_status, TEST_FAILED);
1215d30ea906Sjfb8856606 		return;
1216d30ea906Sjfb8856606 	}
1217d30ea906Sjfb8856606 
1218d30ea906Sjfb8856606 	if (rte_atomic16_read(&tp->nb_dequeued) + deq < num_to_process) {
1219d30ea906Sjfb8856606 		rte_atomic16_add(&tp->nb_dequeued, deq);
1220d30ea906Sjfb8856606 		return;
1221d30ea906Sjfb8856606 	}
1222d30ea906Sjfb8856606 
1223d30ea906Sjfb8856606 	total_time = rte_rdtsc_precise() - tp->start_time;
1224d30ea906Sjfb8856606 
1225d30ea906Sjfb8856606 	rte_bbdev_info_get(dev_id, &info);
1226d30ea906Sjfb8856606 
1227d30ea906Sjfb8856606 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
1228d30ea906Sjfb8856606 
1229d30ea906Sjfb8856606 	ret = TEST_SUCCESS;
1230d30ea906Sjfb8856606 	if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
1231d30ea906Sjfb8856606 		ret = validate_dec_buffers(tp->op_params->ref_dec_op, bufs,
1232d30ea906Sjfb8856606 				num_to_process);
1233d30ea906Sjfb8856606 	else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
1234d30ea906Sjfb8856606 		ret = validate_enc_buffers(bufs, num_to_process);
1235d30ea906Sjfb8856606 
1236d30ea906Sjfb8856606 	if (ret) {
1237d30ea906Sjfb8856606 		printf("Buffers validation failed\n");
1238d30ea906Sjfb8856606 		rte_atomic16_set(&tp->processing_status, TEST_FAILED);
1239d30ea906Sjfb8856606 	}
1240d30ea906Sjfb8856606 
1241d30ea906Sjfb8856606 	switch (test_vector.op_type) {
1242d30ea906Sjfb8856606 	case RTE_BBDEV_OP_TURBO_DEC:
1243d30ea906Sjfb8856606 		in_len = tp->op_params->ref_dec_op->turbo_dec.input.length;
1244d30ea906Sjfb8856606 		break;
1245d30ea906Sjfb8856606 	case RTE_BBDEV_OP_TURBO_ENC:
1246d30ea906Sjfb8856606 		in_len = tp->op_params->ref_enc_op->turbo_enc.input.length;
1247d30ea906Sjfb8856606 		break;
1248d30ea906Sjfb8856606 	case RTE_BBDEV_OP_NONE:
1249d30ea906Sjfb8856606 		in_len = 0.0;
1250d30ea906Sjfb8856606 		break;
1251d30ea906Sjfb8856606 	default:
1252d30ea906Sjfb8856606 		printf("Unknown op type: %d\n", test_vector.op_type);
1253d30ea906Sjfb8856606 		rte_atomic16_set(&tp->processing_status, TEST_FAILED);
1254d30ea906Sjfb8856606 		return;
1255d30ea906Sjfb8856606 	}
1256d30ea906Sjfb8856606 
1257d30ea906Sjfb8856606 	tp->mops = ((double)num_to_process / 1000000.0) /
1258d30ea906Sjfb8856606 			((double)total_time / (double)rte_get_tsc_hz());
1259d30ea906Sjfb8856606 	tp->mbps = ((double)num_to_process * in_len * 8 / 1000000.0) /
1260d30ea906Sjfb8856606 			((double)total_time / (double)rte_get_tsc_hz());
1261d30ea906Sjfb8856606 
1262d30ea906Sjfb8856606 	rte_atomic16_add(&tp->nb_dequeued, deq);
1263d30ea906Sjfb8856606 }
1264d30ea906Sjfb8856606 
1265d30ea906Sjfb8856606 static int
1266d30ea906Sjfb8856606 throughput_intr_lcore_dec(void *arg)
1267d30ea906Sjfb8856606 {
1268d30ea906Sjfb8856606 	struct thread_params *tp = arg;
1269d30ea906Sjfb8856606 	unsigned int enqueued;
1270d30ea906Sjfb8856606 	struct rte_bbdev_dec_op *ops[MAX_BURST];
1271d30ea906Sjfb8856606 	const uint16_t queue_id = tp->queue_id;
1272d30ea906Sjfb8856606 	const uint16_t burst_sz = tp->op_params->burst_sz;
1273d30ea906Sjfb8856606 	const uint16_t num_to_process = tp->op_params->num_to_process;
1274d30ea906Sjfb8856606 	struct test_buffers *bufs = NULL;
1275d30ea906Sjfb8856606 	unsigned int allocs_failed = 0;
1276d30ea906Sjfb8856606 	struct rte_bbdev_info info;
1277d30ea906Sjfb8856606 	int ret;
1278d30ea906Sjfb8856606 
1279d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
1280d30ea906Sjfb8856606 			"BURST_SIZE should be <= %u", MAX_BURST);
1281d30ea906Sjfb8856606 
1282d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id),
1283d30ea906Sjfb8856606 			"Failed to enable interrupts for dev: %u, queue_id: %u",
1284d30ea906Sjfb8856606 			tp->dev_id, queue_id);
1285d30ea906Sjfb8856606 
1286d30ea906Sjfb8856606 	rte_bbdev_info_get(tp->dev_id, &info);
1287d30ea906Sjfb8856606 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
1288d30ea906Sjfb8856606 
1289d30ea906Sjfb8856606 	rte_atomic16_clear(&tp->processing_status);
1290d30ea906Sjfb8856606 	rte_atomic16_clear(&tp->nb_dequeued);
1291d30ea906Sjfb8856606 
1292d30ea906Sjfb8856606 	while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
1293d30ea906Sjfb8856606 		rte_pause();
1294d30ea906Sjfb8856606 
1295d30ea906Sjfb8856606 	tp->start_time = rte_rdtsc_precise();
1296d30ea906Sjfb8856606 	for (enqueued = 0; enqueued < num_to_process;) {
1297d30ea906Sjfb8856606 
1298d30ea906Sjfb8856606 		uint16_t num_to_enq = burst_sz;
1299d30ea906Sjfb8856606 
1300d30ea906Sjfb8856606 		if (unlikely(num_to_process - enqueued < num_to_enq))
1301d30ea906Sjfb8856606 			num_to_enq = num_to_process - enqueued;
1302d30ea906Sjfb8856606 
1303d30ea906Sjfb8856606 		ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
1304d30ea906Sjfb8856606 				num_to_enq);
1305d30ea906Sjfb8856606 		if (ret != 0) {
1306d30ea906Sjfb8856606 			allocs_failed++;
1307d30ea906Sjfb8856606 			continue;
1308d30ea906Sjfb8856606 		}
1309d30ea906Sjfb8856606 
1310d30ea906Sjfb8856606 		if (test_vector.op_type != RTE_BBDEV_OP_NONE)
1311d30ea906Sjfb8856606 			copy_reference_dec_op(ops, num_to_enq, enqueued,
1312d30ea906Sjfb8856606 					bufs->inputs,
1313d30ea906Sjfb8856606 					bufs->hard_outputs,
1314d30ea906Sjfb8856606 					bufs->soft_outputs,
1315d30ea906Sjfb8856606 					tp->op_params->ref_dec_op);
1316d30ea906Sjfb8856606 
1317d30ea906Sjfb8856606 		enqueued += rte_bbdev_enqueue_dec_ops(tp->dev_id, queue_id, ops,
1318d30ea906Sjfb8856606 				num_to_enq);
1319d30ea906Sjfb8856606 
1320d30ea906Sjfb8856606 		rte_bbdev_dec_op_free_bulk(ops, num_to_enq);
1321d30ea906Sjfb8856606 	}
1322d30ea906Sjfb8856606 
1323d30ea906Sjfb8856606 	if (allocs_failed > 0)
1324d30ea906Sjfb8856606 		printf("WARNING: op allocations failed: %u times\n",
1325d30ea906Sjfb8856606 				allocs_failed);
1326d30ea906Sjfb8856606 
1327d30ea906Sjfb8856606 	return TEST_SUCCESS;
1328d30ea906Sjfb8856606 }
1329d30ea906Sjfb8856606 
1330d30ea906Sjfb8856606 static int
1331d30ea906Sjfb8856606 throughput_intr_lcore_enc(void *arg)
1332d30ea906Sjfb8856606 {
1333d30ea906Sjfb8856606 	struct thread_params *tp = arg;
1334d30ea906Sjfb8856606 	unsigned int enqueued;
1335d30ea906Sjfb8856606 	struct rte_bbdev_enc_op *ops[MAX_BURST];
1336d30ea906Sjfb8856606 	const uint16_t queue_id = tp->queue_id;
1337d30ea906Sjfb8856606 	const uint16_t burst_sz = tp->op_params->burst_sz;
1338d30ea906Sjfb8856606 	const uint16_t num_to_process = tp->op_params->num_to_process;
1339d30ea906Sjfb8856606 	struct test_buffers *bufs = NULL;
1340d30ea906Sjfb8856606 	unsigned int allocs_failed = 0;
1341d30ea906Sjfb8856606 	struct rte_bbdev_info info;
1342d30ea906Sjfb8856606 	int ret;
1343d30ea906Sjfb8856606 
1344d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
1345d30ea906Sjfb8856606 			"BURST_SIZE should be <= %u", MAX_BURST);
1346d30ea906Sjfb8856606 
1347d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id),
1348d30ea906Sjfb8856606 			"Failed to enable interrupts for dev: %u, queue_id: %u",
1349d30ea906Sjfb8856606 			tp->dev_id, queue_id);
1350d30ea906Sjfb8856606 
1351d30ea906Sjfb8856606 	rte_bbdev_info_get(tp->dev_id, &info);
1352d30ea906Sjfb8856606 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
1353d30ea906Sjfb8856606 
1354d30ea906Sjfb8856606 	rte_atomic16_clear(&tp->processing_status);
1355d30ea906Sjfb8856606 	rte_atomic16_clear(&tp->nb_dequeued);
1356d30ea906Sjfb8856606 
1357d30ea906Sjfb8856606 	while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
1358d30ea906Sjfb8856606 		rte_pause();
1359d30ea906Sjfb8856606 
1360d30ea906Sjfb8856606 	tp->start_time = rte_rdtsc_precise();
1361d30ea906Sjfb8856606 	for (enqueued = 0; enqueued < num_to_process;) {
1362d30ea906Sjfb8856606 
1363d30ea906Sjfb8856606 		uint16_t num_to_enq = burst_sz;
1364d30ea906Sjfb8856606 
1365d30ea906Sjfb8856606 		if (unlikely(num_to_process - enqueued < num_to_enq))
1366d30ea906Sjfb8856606 			num_to_enq = num_to_process - enqueued;
1367d30ea906Sjfb8856606 
1368d30ea906Sjfb8856606 		ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
1369d30ea906Sjfb8856606 				num_to_enq);
1370d30ea906Sjfb8856606 		if (ret != 0) {
1371d30ea906Sjfb8856606 			allocs_failed++;
1372d30ea906Sjfb8856606 			continue;
1373d30ea906Sjfb8856606 		}
1374d30ea906Sjfb8856606 
1375d30ea906Sjfb8856606 		if (test_vector.op_type != RTE_BBDEV_OP_NONE)
1376d30ea906Sjfb8856606 			copy_reference_enc_op(ops, num_to_enq, enqueued,
1377d30ea906Sjfb8856606 					bufs->inputs,
1378d30ea906Sjfb8856606 					bufs->hard_outputs,
1379d30ea906Sjfb8856606 					tp->op_params->ref_enc_op);
1380d30ea906Sjfb8856606 
1381d30ea906Sjfb8856606 		enqueued += rte_bbdev_enqueue_enc_ops(tp->dev_id, queue_id, ops,
1382d30ea906Sjfb8856606 				num_to_enq);
1383d30ea906Sjfb8856606 
1384d30ea906Sjfb8856606 		rte_bbdev_enc_op_free_bulk(ops, num_to_enq);
1385d30ea906Sjfb8856606 	}
1386d30ea906Sjfb8856606 
1387d30ea906Sjfb8856606 	if (allocs_failed > 0)
1388d30ea906Sjfb8856606 		printf("WARNING: op allocations failed: %u times\n",
1389d30ea906Sjfb8856606 				allocs_failed);
1390d30ea906Sjfb8856606 
1391d30ea906Sjfb8856606 	return TEST_SUCCESS;
1392d30ea906Sjfb8856606 }
1393d30ea906Sjfb8856606 
1394d30ea906Sjfb8856606 static int
1395d30ea906Sjfb8856606 throughput_pmd_lcore_dec(void *arg)
1396d30ea906Sjfb8856606 {
1397d30ea906Sjfb8856606 	struct thread_params *tp = arg;
1398d30ea906Sjfb8856606 	unsigned int enqueued, dequeued;
1399d30ea906Sjfb8856606 	struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
1400d30ea906Sjfb8856606 	uint64_t total_time, start_time;
1401d30ea906Sjfb8856606 	const uint16_t queue_id = tp->queue_id;
1402d30ea906Sjfb8856606 	const uint16_t burst_sz = tp->op_params->burst_sz;
1403d30ea906Sjfb8856606 	const uint16_t num_to_process = tp->op_params->num_to_process;
1404d30ea906Sjfb8856606 	struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
1405d30ea906Sjfb8856606 	struct test_buffers *bufs = NULL;
1406d30ea906Sjfb8856606 	unsigned int allocs_failed = 0;
1407d30ea906Sjfb8856606 	int ret;
1408d30ea906Sjfb8856606 	struct rte_bbdev_info info;
1409d30ea906Sjfb8856606 
1410d30ea906Sjfb8856606 	/* Input length in bytes, million operations per second, million bits
1411d30ea906Sjfb8856606 	 * per second.
1412d30ea906Sjfb8856606 	 */
1413d30ea906Sjfb8856606 	double in_len;
1414d30ea906Sjfb8856606 
1415d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
1416d30ea906Sjfb8856606 			"BURST_SIZE should be <= %u", MAX_BURST);
1417d30ea906Sjfb8856606 
1418d30ea906Sjfb8856606 	rte_bbdev_info_get(tp->dev_id, &info);
1419d30ea906Sjfb8856606 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
1420d30ea906Sjfb8856606 
1421d30ea906Sjfb8856606 	while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
1422d30ea906Sjfb8856606 		rte_pause();
1423d30ea906Sjfb8856606 
1424d30ea906Sjfb8856606 	start_time = rte_rdtsc_precise();
1425d30ea906Sjfb8856606 	for (enqueued = 0, dequeued = 0; dequeued < num_to_process;) {
1426d30ea906Sjfb8856606 		uint16_t deq;
1427d30ea906Sjfb8856606 
1428d30ea906Sjfb8856606 		if (likely(enqueued < num_to_process)) {
1429d30ea906Sjfb8856606 
1430d30ea906Sjfb8856606 			uint16_t num_to_enq = burst_sz;
1431d30ea906Sjfb8856606 
1432d30ea906Sjfb8856606 			if (unlikely(num_to_process - enqueued < num_to_enq))
1433d30ea906Sjfb8856606 				num_to_enq = num_to_process - enqueued;
1434d30ea906Sjfb8856606 
1435d30ea906Sjfb8856606 			ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp,
1436d30ea906Sjfb8856606 					ops_enq, num_to_enq);
1437d30ea906Sjfb8856606 			if (ret != 0) {
1438d30ea906Sjfb8856606 				allocs_failed++;
1439d30ea906Sjfb8856606 				goto do_dequeue;
1440d30ea906Sjfb8856606 			}
1441d30ea906Sjfb8856606 
1442d30ea906Sjfb8856606 			if (test_vector.op_type != RTE_BBDEV_OP_NONE)
1443d30ea906Sjfb8856606 				copy_reference_dec_op(ops_enq, num_to_enq,
1444d30ea906Sjfb8856606 						enqueued,
1445d30ea906Sjfb8856606 						bufs->inputs,
1446d30ea906Sjfb8856606 						bufs->hard_outputs,
1447d30ea906Sjfb8856606 						bufs->soft_outputs,
1448d30ea906Sjfb8856606 						ref_op);
1449d30ea906Sjfb8856606 
1450d30ea906Sjfb8856606 			enqueued += rte_bbdev_enqueue_dec_ops(tp->dev_id,
1451d30ea906Sjfb8856606 					queue_id, ops_enq, num_to_enq);
1452d30ea906Sjfb8856606 		}
1453d30ea906Sjfb8856606 do_dequeue:
1454d30ea906Sjfb8856606 		deq = rte_bbdev_dequeue_dec_ops(tp->dev_id, queue_id, ops_deq,
1455d30ea906Sjfb8856606 				burst_sz);
1456d30ea906Sjfb8856606 		dequeued += deq;
1457d30ea906Sjfb8856606 		rte_bbdev_dec_op_free_bulk(ops_enq, deq);
1458d30ea906Sjfb8856606 	}
1459d30ea906Sjfb8856606 	total_time = rte_rdtsc_precise() - start_time;
1460d30ea906Sjfb8856606 
1461d30ea906Sjfb8856606 	if (allocs_failed > 0)
1462d30ea906Sjfb8856606 		printf("WARNING: op allocations failed: %u times\n",
1463d30ea906Sjfb8856606 				allocs_failed);
1464d30ea906Sjfb8856606 
1465d30ea906Sjfb8856606 	TEST_ASSERT(enqueued == dequeued, "enqueued (%u) != dequeued (%u)",
1466d30ea906Sjfb8856606 			enqueued, dequeued);
1467d30ea906Sjfb8856606 
1468d30ea906Sjfb8856606 	if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
1469d30ea906Sjfb8856606 		ret = validate_dec_buffers(ref_op, bufs, num_to_process);
1470d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(ret, "Buffers validation failed");
1471d30ea906Sjfb8856606 	}
1472d30ea906Sjfb8856606 
1473d30ea906Sjfb8856606 	in_len = ref_op->turbo_dec.input.length;
1474d30ea906Sjfb8856606 	tp->mops = ((double)num_to_process / 1000000.0) /
1475d30ea906Sjfb8856606 			((double)total_time / (double)rte_get_tsc_hz());
1476d30ea906Sjfb8856606 	tp->mbps = ((double)num_to_process * in_len * 8 / 1000000.0) /
1477d30ea906Sjfb8856606 			((double)total_time / (double)rte_get_tsc_hz());
1478d30ea906Sjfb8856606 
1479d30ea906Sjfb8856606 	return TEST_SUCCESS;
1480d30ea906Sjfb8856606 }
1481d30ea906Sjfb8856606 
1482d30ea906Sjfb8856606 static int
1483d30ea906Sjfb8856606 throughput_pmd_lcore_enc(void *arg)
1484d30ea906Sjfb8856606 {
1485d30ea906Sjfb8856606 	struct thread_params *tp = arg;
1486d30ea906Sjfb8856606 	unsigned int enqueued, dequeued;
1487d30ea906Sjfb8856606 	struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
1488d30ea906Sjfb8856606 	uint64_t total_time, start_time;
1489d30ea906Sjfb8856606 	const uint16_t queue_id = tp->queue_id;
1490d30ea906Sjfb8856606 	const uint16_t burst_sz = tp->op_params->burst_sz;
1491d30ea906Sjfb8856606 	const uint16_t num_to_process = tp->op_params->num_to_process;
1492d30ea906Sjfb8856606 	struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
1493d30ea906Sjfb8856606 	struct test_buffers *bufs = NULL;
1494d30ea906Sjfb8856606 	unsigned int allocs_failed = 0;
1495d30ea906Sjfb8856606 	int ret;
1496d30ea906Sjfb8856606 	struct rte_bbdev_info info;
1497d30ea906Sjfb8856606 
1498d30ea906Sjfb8856606 	/* Input length in bytes, million operations per second, million bits
1499d30ea906Sjfb8856606 	 * per second.
1500d30ea906Sjfb8856606 	 */
1501d30ea906Sjfb8856606 	double in_len;
1502d30ea906Sjfb8856606 
1503d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
1504d30ea906Sjfb8856606 			"BURST_SIZE should be <= %u", MAX_BURST);
1505d30ea906Sjfb8856606 
1506d30ea906Sjfb8856606 	rte_bbdev_info_get(tp->dev_id, &info);
1507d30ea906Sjfb8856606 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
1508d30ea906Sjfb8856606 
1509d30ea906Sjfb8856606 	while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
1510d30ea906Sjfb8856606 		rte_pause();
1511d30ea906Sjfb8856606 
1512d30ea906Sjfb8856606 	start_time = rte_rdtsc_precise();
1513d30ea906Sjfb8856606 	for (enqueued = 0, dequeued = 0; dequeued < num_to_process;) {
1514d30ea906Sjfb8856606 		uint16_t deq;
1515d30ea906Sjfb8856606 
1516d30ea906Sjfb8856606 		if (likely(enqueued < num_to_process)) {
1517d30ea906Sjfb8856606 
1518d30ea906Sjfb8856606 			uint16_t num_to_enq = burst_sz;
1519d30ea906Sjfb8856606 
1520d30ea906Sjfb8856606 			if (unlikely(num_to_process - enqueued < num_to_enq))
1521d30ea906Sjfb8856606 				num_to_enq = num_to_process - enqueued;
1522d30ea906Sjfb8856606 
1523d30ea906Sjfb8856606 			ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp,
1524d30ea906Sjfb8856606 					ops_enq, num_to_enq);
1525d30ea906Sjfb8856606 			if (ret != 0) {
1526d30ea906Sjfb8856606 				allocs_failed++;
1527d30ea906Sjfb8856606 				goto do_dequeue;
1528d30ea906Sjfb8856606 			}
1529d30ea906Sjfb8856606 
1530d30ea906Sjfb8856606 			if (test_vector.op_type != RTE_BBDEV_OP_NONE)
1531d30ea906Sjfb8856606 				copy_reference_enc_op(ops_enq, num_to_enq,
1532d30ea906Sjfb8856606 						enqueued,
1533d30ea906Sjfb8856606 						bufs->inputs,
1534d30ea906Sjfb8856606 						bufs->hard_outputs,
1535d30ea906Sjfb8856606 						ref_op);
1536d30ea906Sjfb8856606 
1537d30ea906Sjfb8856606 			enqueued += rte_bbdev_enqueue_enc_ops(tp->dev_id,
1538d30ea906Sjfb8856606 					queue_id, ops_enq, num_to_enq);
1539d30ea906Sjfb8856606 		}
1540d30ea906Sjfb8856606 do_dequeue:
1541d30ea906Sjfb8856606 		deq = rte_bbdev_dequeue_enc_ops(tp->dev_id, queue_id, ops_deq,
1542d30ea906Sjfb8856606 				burst_sz);
1543d30ea906Sjfb8856606 		dequeued += deq;
1544d30ea906Sjfb8856606 		rte_bbdev_enc_op_free_bulk(ops_enq, deq);
1545d30ea906Sjfb8856606 	}
1546d30ea906Sjfb8856606 	total_time = rte_rdtsc_precise() - start_time;
1547d30ea906Sjfb8856606 
1548d30ea906Sjfb8856606 	if (allocs_failed > 0)
1549d30ea906Sjfb8856606 		printf("WARNING: op allocations failed: %u times\n",
1550d30ea906Sjfb8856606 				allocs_failed);
1551d30ea906Sjfb8856606 
1552d30ea906Sjfb8856606 	TEST_ASSERT(enqueued == dequeued, "enqueued (%u) != dequeued (%u)",
1553d30ea906Sjfb8856606 			enqueued, dequeued);
1554d30ea906Sjfb8856606 
1555d30ea906Sjfb8856606 	if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
1556d30ea906Sjfb8856606 		ret = validate_enc_buffers(bufs, num_to_process);
1557d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(ret, "Buffers validation failed");
1558d30ea906Sjfb8856606 	}
1559d30ea906Sjfb8856606 
1560d30ea906Sjfb8856606 	in_len = ref_op->turbo_enc.input.length;
1561d30ea906Sjfb8856606 
1562d30ea906Sjfb8856606 	tp->mops = ((double)num_to_process / 1000000.0) /
1563d30ea906Sjfb8856606 			((double)total_time / (double)rte_get_tsc_hz());
1564d30ea906Sjfb8856606 	tp->mbps = ((double)num_to_process * in_len * 8 / 1000000.0) /
1565d30ea906Sjfb8856606 			((double)total_time / (double)rte_get_tsc_hz());
1566d30ea906Sjfb8856606 
1567d30ea906Sjfb8856606 	return TEST_SUCCESS;
1568d30ea906Sjfb8856606 }
1569d30ea906Sjfb8856606 static void
1570d30ea906Sjfb8856606 print_throughput(struct thread_params *t_params, unsigned int used_cores)
1571d30ea906Sjfb8856606 {
1572d30ea906Sjfb8856606 	unsigned int lcore_id, iter = 0;
1573d30ea906Sjfb8856606 	double total_mops = 0, total_mbps = 0;
1574d30ea906Sjfb8856606 
1575d30ea906Sjfb8856606 	RTE_LCORE_FOREACH(lcore_id) {
1576d30ea906Sjfb8856606 		if (iter++ >= used_cores)
1577d30ea906Sjfb8856606 			break;
1578d30ea906Sjfb8856606 		printf("\tlcore_id: %u, throughput: %.8lg MOPS, %.8lg Mbps\n",
1579d30ea906Sjfb8856606 		lcore_id, t_params[lcore_id].mops, t_params[lcore_id].mbps);
1580d30ea906Sjfb8856606 		total_mops += t_params[lcore_id].mops;
1581d30ea906Sjfb8856606 		total_mbps += t_params[lcore_id].mbps;
1582d30ea906Sjfb8856606 	}
1583d30ea906Sjfb8856606 	printf(
1584d30ea906Sjfb8856606 		"\n\tTotal stats for %u cores: throughput: %.8lg MOPS, %.8lg Mbps\n",
1585d30ea906Sjfb8856606 		used_cores, total_mops, total_mbps);
1586d30ea906Sjfb8856606 }
1587d30ea906Sjfb8856606 
1588d30ea906Sjfb8856606 /*
1589d30ea906Sjfb8856606  * Test function that determines how long an enqueue + dequeue of a burst
1590d30ea906Sjfb8856606  * takes on available lcores.
1591d30ea906Sjfb8856606  */
1592d30ea906Sjfb8856606 static int
1593d30ea906Sjfb8856606 throughput_test(struct active_device *ad,
1594d30ea906Sjfb8856606 		struct test_op_params *op_params)
1595d30ea906Sjfb8856606 {
1596d30ea906Sjfb8856606 	int ret;
1597d30ea906Sjfb8856606 	unsigned int lcore_id, used_cores = 0;
1598d30ea906Sjfb8856606 	struct thread_params t_params[MAX_QUEUES];
1599d30ea906Sjfb8856606 	struct rte_bbdev_info info;
1600d30ea906Sjfb8856606 	lcore_function_t *throughput_function;
1601d30ea906Sjfb8856606 	struct thread_params *tp;
1602d30ea906Sjfb8856606 	uint16_t num_lcores;
1603d30ea906Sjfb8856606 	const char *op_type_str;
1604d30ea906Sjfb8856606 
1605d30ea906Sjfb8856606 	rte_bbdev_info_get(ad->dev_id, &info);
1606d30ea906Sjfb8856606 
1607d30ea906Sjfb8856606 	op_type_str = rte_bbdev_op_type_str(test_vector.op_type);
1608d30ea906Sjfb8856606 	TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u",
1609d30ea906Sjfb8856606 			test_vector.op_type);
1610d30ea906Sjfb8856606 
1611d30ea906Sjfb8856606 	printf(
1612d30ea906Sjfb8856606 		"Throughput test: dev: %s, nb_queues: %u, burst size: %u, num ops: %u, num_lcores: %u, op type: %s, int mode: %s, GHz: %lg\n",
1613d30ea906Sjfb8856606 			info.dev_name, ad->nb_queues, op_params->burst_sz,
1614d30ea906Sjfb8856606 			op_params->num_to_process, op_params->num_lcores,
1615d30ea906Sjfb8856606 			op_type_str,
1616d30ea906Sjfb8856606 			intr_enabled ? "Interrupt mode" : "PMD mode",
1617d30ea906Sjfb8856606 			(double)rte_get_tsc_hz() / 1000000000.0);
1618d30ea906Sjfb8856606 
1619d30ea906Sjfb8856606 	/* Set number of lcores */
1620d30ea906Sjfb8856606 	num_lcores = (ad->nb_queues < (op_params->num_lcores))
1621d30ea906Sjfb8856606 			? ad->nb_queues
1622d30ea906Sjfb8856606 			: op_params->num_lcores;
1623d30ea906Sjfb8856606 
1624d30ea906Sjfb8856606 	if (intr_enabled) {
1625d30ea906Sjfb8856606 		if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
1626d30ea906Sjfb8856606 			throughput_function = throughput_intr_lcore_dec;
1627d30ea906Sjfb8856606 		else
1628d30ea906Sjfb8856606 			throughput_function = throughput_intr_lcore_enc;
1629d30ea906Sjfb8856606 
1630d30ea906Sjfb8856606 		/* Dequeue interrupt callback registration */
1631d30ea906Sjfb8856606 		ret = rte_bbdev_callback_register(ad->dev_id,
1632d30ea906Sjfb8856606 				RTE_BBDEV_EVENT_DEQUEUE, dequeue_event_callback,
1633d30ea906Sjfb8856606 				&t_params);
1634d30ea906Sjfb8856606 		if (ret < 0)
1635d30ea906Sjfb8856606 			return ret;
1636d30ea906Sjfb8856606 	} else {
1637d30ea906Sjfb8856606 		if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
1638d30ea906Sjfb8856606 			throughput_function = throughput_pmd_lcore_dec;
1639d30ea906Sjfb8856606 		else
1640d30ea906Sjfb8856606 			throughput_function = throughput_pmd_lcore_enc;
1641d30ea906Sjfb8856606 	}
1642d30ea906Sjfb8856606 
1643d30ea906Sjfb8856606 	rte_atomic16_set(&op_params->sync, SYNC_WAIT);
1644d30ea906Sjfb8856606 
1645d30ea906Sjfb8856606 	t_params[rte_lcore_id()].dev_id = ad->dev_id;
1646d30ea906Sjfb8856606 	t_params[rte_lcore_id()].op_params = op_params;
1647d30ea906Sjfb8856606 	t_params[rte_lcore_id()].queue_id =
1648d30ea906Sjfb8856606 			ad->queue_ids[used_cores++];
1649d30ea906Sjfb8856606 
1650d30ea906Sjfb8856606 	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
1651d30ea906Sjfb8856606 		if (used_cores >= num_lcores)
1652d30ea906Sjfb8856606 			break;
1653d30ea906Sjfb8856606 
1654d30ea906Sjfb8856606 		t_params[lcore_id].dev_id = ad->dev_id;
1655d30ea906Sjfb8856606 		t_params[lcore_id].op_params = op_params;
1656d30ea906Sjfb8856606 		t_params[lcore_id].queue_id = ad->queue_ids[used_cores++];
1657d30ea906Sjfb8856606 
1658d30ea906Sjfb8856606 		rte_eal_remote_launch(throughput_function, &t_params[lcore_id],
1659d30ea906Sjfb8856606 				lcore_id);
1660d30ea906Sjfb8856606 	}
1661d30ea906Sjfb8856606 
1662d30ea906Sjfb8856606 	rte_atomic16_set(&op_params->sync, SYNC_START);
1663d30ea906Sjfb8856606 	ret = throughput_function(&t_params[rte_lcore_id()]);
1664d30ea906Sjfb8856606 
1665d30ea906Sjfb8856606 	/* Master core is always used */
1666d30ea906Sjfb8856606 	used_cores = 1;
1667d30ea906Sjfb8856606 	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
1668d30ea906Sjfb8856606 		if (used_cores++ >= num_lcores)
1669d30ea906Sjfb8856606 			break;
1670d30ea906Sjfb8856606 
1671d30ea906Sjfb8856606 		ret |= rte_eal_wait_lcore(lcore_id);
1672d30ea906Sjfb8856606 	}
1673d30ea906Sjfb8856606 
1674d30ea906Sjfb8856606 	/* Return if test failed */
1675d30ea906Sjfb8856606 	if (ret)
1676d30ea906Sjfb8856606 		return ret;
1677d30ea906Sjfb8856606 
1678d30ea906Sjfb8856606 	/* Print throughput if interrupts are disabled and test passed */
1679d30ea906Sjfb8856606 	if (!intr_enabled) {
1680d30ea906Sjfb8856606 		if (test_vector.op_type != RTE_BBDEV_OP_NONE)
1681d30ea906Sjfb8856606 			print_throughput(t_params, num_lcores);
1682d30ea906Sjfb8856606 		return ret;
1683d30ea906Sjfb8856606 	}
1684d30ea906Sjfb8856606 
1685d30ea906Sjfb8856606 	/* In interrupt TC we need to wait for the interrupt callback to deqeue
1686d30ea906Sjfb8856606 	 * all pending operations. Skip waiting for queues which reported an
1687d30ea906Sjfb8856606 	 * error using processing_status variable.
1688d30ea906Sjfb8856606 	 * Wait for master lcore operations.
1689d30ea906Sjfb8856606 	 */
1690d30ea906Sjfb8856606 	tp = &t_params[rte_lcore_id()];
1691d30ea906Sjfb8856606 	while ((rte_atomic16_read(&tp->nb_dequeued) <
1692d30ea906Sjfb8856606 			op_params->num_to_process) &&
1693d30ea906Sjfb8856606 			(rte_atomic16_read(&tp->processing_status) !=
1694d30ea906Sjfb8856606 			TEST_FAILED))
1695d30ea906Sjfb8856606 		rte_pause();
1696d30ea906Sjfb8856606 
1697d30ea906Sjfb8856606 	ret |= rte_atomic16_read(&tp->processing_status);
1698d30ea906Sjfb8856606 
1699d30ea906Sjfb8856606 	/* Wait for slave lcores operations */
1700d30ea906Sjfb8856606 	used_cores = 1;
1701d30ea906Sjfb8856606 	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
1702d30ea906Sjfb8856606 		tp = &t_params[lcore_id];
1703d30ea906Sjfb8856606 		if (used_cores++ >= num_lcores)
1704d30ea906Sjfb8856606 			break;
1705d30ea906Sjfb8856606 
1706d30ea906Sjfb8856606 		while ((rte_atomic16_read(&tp->nb_dequeued) <
1707d30ea906Sjfb8856606 				op_params->num_to_process) &&
1708d30ea906Sjfb8856606 				(rte_atomic16_read(&tp->processing_status) !=
1709d30ea906Sjfb8856606 				TEST_FAILED))
1710d30ea906Sjfb8856606 			rte_pause();
1711d30ea906Sjfb8856606 
1712d30ea906Sjfb8856606 		ret |= rte_atomic16_read(&tp->processing_status);
1713d30ea906Sjfb8856606 	}
1714d30ea906Sjfb8856606 
1715d30ea906Sjfb8856606 	/* Print throughput if test passed */
1716d30ea906Sjfb8856606 	if (!ret && test_vector.op_type != RTE_BBDEV_OP_NONE)
1717d30ea906Sjfb8856606 		print_throughput(t_params, num_lcores);
1718d30ea906Sjfb8856606 
1719d30ea906Sjfb8856606 	return ret;
1720d30ea906Sjfb8856606 }
1721d30ea906Sjfb8856606 
1722d30ea906Sjfb8856606 static int
1723d30ea906Sjfb8856606 latency_test_dec(struct rte_mempool *mempool,
1724d30ea906Sjfb8856606 		struct test_buffers *bufs, struct rte_bbdev_dec_op *ref_op,
1725d30ea906Sjfb8856606 		int vector_mask, uint16_t dev_id, uint16_t queue_id,
1726d30ea906Sjfb8856606 		const uint16_t num_to_process, uint16_t burst_sz,
1727d30ea906Sjfb8856606 		uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
1728d30ea906Sjfb8856606 {
1729d30ea906Sjfb8856606 	int ret = TEST_SUCCESS;
1730d30ea906Sjfb8856606 	uint16_t i, j, dequeued;
1731d30ea906Sjfb8856606 	struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
1732d30ea906Sjfb8856606 	uint64_t start_time = 0, last_time = 0;
1733d30ea906Sjfb8856606 
1734d30ea906Sjfb8856606 	for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
1735d30ea906Sjfb8856606 		uint16_t enq = 0, deq = 0;
1736d30ea906Sjfb8856606 		bool first_time = true;
1737d30ea906Sjfb8856606 		last_time = 0;
1738d30ea906Sjfb8856606 
1739d30ea906Sjfb8856606 		if (unlikely(num_to_process - dequeued < burst_sz))
1740d30ea906Sjfb8856606 			burst_sz = num_to_process - dequeued;
1741d30ea906Sjfb8856606 
1742d30ea906Sjfb8856606 		ret = rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
1743d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(ret,
1744d30ea906Sjfb8856606 				"rte_bbdev_dec_op_alloc_bulk() failed");
1745d30ea906Sjfb8856606 		if (test_vector.op_type != RTE_BBDEV_OP_NONE)
1746d30ea906Sjfb8856606 			copy_reference_dec_op(ops_enq, burst_sz, dequeued,
1747d30ea906Sjfb8856606 					bufs->inputs,
1748d30ea906Sjfb8856606 					bufs->hard_outputs,
1749d30ea906Sjfb8856606 					bufs->soft_outputs,
1750d30ea906Sjfb8856606 					ref_op);
1751d30ea906Sjfb8856606 
1752d30ea906Sjfb8856606 		/* Set counter to validate the ordering */
1753d30ea906Sjfb8856606 		for (j = 0; j < burst_sz; ++j)
1754d30ea906Sjfb8856606 			ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
1755d30ea906Sjfb8856606 
1756d30ea906Sjfb8856606 		start_time = rte_rdtsc_precise();
1757d30ea906Sjfb8856606 
1758d30ea906Sjfb8856606 		enq = rte_bbdev_enqueue_dec_ops(dev_id, queue_id, &ops_enq[enq],
1759d30ea906Sjfb8856606 				burst_sz);
1760d30ea906Sjfb8856606 		TEST_ASSERT(enq == burst_sz,
1761d30ea906Sjfb8856606 				"Error enqueueing burst, expected %u, got %u",
1762d30ea906Sjfb8856606 				burst_sz, enq);
1763d30ea906Sjfb8856606 
1764d30ea906Sjfb8856606 		/* Dequeue */
1765d30ea906Sjfb8856606 		do {
1766d30ea906Sjfb8856606 			deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
1767d30ea906Sjfb8856606 					&ops_deq[deq], burst_sz - deq);
1768d30ea906Sjfb8856606 			if (likely(first_time && (deq > 0))) {
1769d30ea906Sjfb8856606 				last_time = rte_rdtsc_precise() - start_time;
1770d30ea906Sjfb8856606 				first_time = false;
1771d30ea906Sjfb8856606 			}
1772d30ea906Sjfb8856606 		} while (unlikely(burst_sz != deq));
1773d30ea906Sjfb8856606 
1774d30ea906Sjfb8856606 		*max_time = RTE_MAX(*max_time, last_time);
1775d30ea906Sjfb8856606 		*min_time = RTE_MIN(*min_time, last_time);
1776d30ea906Sjfb8856606 		*total_time += last_time;
1777d30ea906Sjfb8856606 
1778d30ea906Sjfb8856606 		if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
1779d30ea906Sjfb8856606 			ret = validate_dec_op(ops_deq, burst_sz, ref_op,
1780d30ea906Sjfb8856606 					vector_mask);
1781d30ea906Sjfb8856606 			TEST_ASSERT_SUCCESS(ret, "Validation failed!");
1782d30ea906Sjfb8856606 		}
1783d30ea906Sjfb8856606 
1784d30ea906Sjfb8856606 		rte_bbdev_dec_op_free_bulk(ops_enq, deq);
1785d30ea906Sjfb8856606 		dequeued += deq;
1786d30ea906Sjfb8856606 	}
1787d30ea906Sjfb8856606 
1788d30ea906Sjfb8856606 	return i;
1789d30ea906Sjfb8856606 }
1790d30ea906Sjfb8856606 
1791d30ea906Sjfb8856606 static int
1792d30ea906Sjfb8856606 latency_test_enc(struct rte_mempool *mempool,
1793d30ea906Sjfb8856606 		struct test_buffers *bufs, struct rte_bbdev_enc_op *ref_op,
1794d30ea906Sjfb8856606 		uint16_t dev_id, uint16_t queue_id,
1795d30ea906Sjfb8856606 		const uint16_t num_to_process, uint16_t burst_sz,
1796d30ea906Sjfb8856606 		uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
1797d30ea906Sjfb8856606 {
1798d30ea906Sjfb8856606 	int ret = TEST_SUCCESS;
1799d30ea906Sjfb8856606 	uint16_t i, j, dequeued;
1800d30ea906Sjfb8856606 	struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
1801d30ea906Sjfb8856606 	uint64_t start_time = 0, last_time = 0;
1802d30ea906Sjfb8856606 
1803d30ea906Sjfb8856606 	for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
1804d30ea906Sjfb8856606 		uint16_t enq = 0, deq = 0;
1805d30ea906Sjfb8856606 		bool first_time = true;
1806d30ea906Sjfb8856606 		last_time = 0;
1807d30ea906Sjfb8856606 
1808d30ea906Sjfb8856606 		if (unlikely(num_to_process - dequeued < burst_sz))
1809d30ea906Sjfb8856606 			burst_sz = num_to_process - dequeued;
1810d30ea906Sjfb8856606 
1811d30ea906Sjfb8856606 		ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
1812d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(ret,
1813d30ea906Sjfb8856606 				"rte_bbdev_enc_op_alloc_bulk() failed");
1814d30ea906Sjfb8856606 		if (test_vector.op_type != RTE_BBDEV_OP_NONE)
1815d30ea906Sjfb8856606 			copy_reference_enc_op(ops_enq, burst_sz, dequeued,
1816d30ea906Sjfb8856606 					bufs->inputs,
1817d30ea906Sjfb8856606 					bufs->hard_outputs,
1818d30ea906Sjfb8856606 					ref_op);
1819d30ea906Sjfb8856606 
1820d30ea906Sjfb8856606 		/* Set counter to validate the ordering */
1821d30ea906Sjfb8856606 		for (j = 0; j < burst_sz; ++j)
1822d30ea906Sjfb8856606 			ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
1823d30ea906Sjfb8856606 
1824d30ea906Sjfb8856606 		start_time = rte_rdtsc_precise();
1825d30ea906Sjfb8856606 
1826d30ea906Sjfb8856606 		enq = rte_bbdev_enqueue_enc_ops(dev_id, queue_id, &ops_enq[enq],
1827d30ea906Sjfb8856606 				burst_sz);
1828d30ea906Sjfb8856606 		TEST_ASSERT(enq == burst_sz,
1829d30ea906Sjfb8856606 				"Error enqueueing burst, expected %u, got %u",
1830d30ea906Sjfb8856606 				burst_sz, enq);
1831d30ea906Sjfb8856606 
1832d30ea906Sjfb8856606 		/* Dequeue */
1833d30ea906Sjfb8856606 		do {
1834d30ea906Sjfb8856606 			deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
1835d30ea906Sjfb8856606 					&ops_deq[deq], burst_sz - deq);
1836d30ea906Sjfb8856606 			if (likely(first_time && (deq > 0))) {
1837d30ea906Sjfb8856606 				last_time += rte_rdtsc_precise() - start_time;
1838d30ea906Sjfb8856606 				first_time = false;
1839d30ea906Sjfb8856606 			}
1840d30ea906Sjfb8856606 		} while (unlikely(burst_sz != deq));
1841d30ea906Sjfb8856606 
1842d30ea906Sjfb8856606 		*max_time = RTE_MAX(*max_time, last_time);
1843d30ea906Sjfb8856606 		*min_time = RTE_MIN(*min_time, last_time);
1844d30ea906Sjfb8856606 		*total_time += last_time;
1845d30ea906Sjfb8856606 
1846d30ea906Sjfb8856606 		if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
1847d30ea906Sjfb8856606 			ret = validate_enc_op(ops_deq, burst_sz, ref_op);
1848d30ea906Sjfb8856606 			TEST_ASSERT_SUCCESS(ret, "Validation failed!");
1849d30ea906Sjfb8856606 		}
1850d30ea906Sjfb8856606 
1851d30ea906Sjfb8856606 		rte_bbdev_enc_op_free_bulk(ops_enq, deq);
1852d30ea906Sjfb8856606 		dequeued += deq;
1853d30ea906Sjfb8856606 	}
1854d30ea906Sjfb8856606 
1855d30ea906Sjfb8856606 	return i;
1856d30ea906Sjfb8856606 }
1857d30ea906Sjfb8856606 
1858d30ea906Sjfb8856606 static int
1859d30ea906Sjfb8856606 latency_test(struct active_device *ad,
1860d30ea906Sjfb8856606 		struct test_op_params *op_params)
1861d30ea906Sjfb8856606 {
1862d30ea906Sjfb8856606 	int iter;
1863d30ea906Sjfb8856606 	uint16_t burst_sz = op_params->burst_sz;
1864d30ea906Sjfb8856606 	const uint16_t num_to_process = op_params->num_to_process;
1865d30ea906Sjfb8856606 	const enum rte_bbdev_op_type op_type = test_vector.op_type;
1866d30ea906Sjfb8856606 	const uint16_t queue_id = ad->queue_ids[0];
1867d30ea906Sjfb8856606 	struct test_buffers *bufs = NULL;
1868d30ea906Sjfb8856606 	struct rte_bbdev_info info;
1869d30ea906Sjfb8856606 	uint64_t total_time, min_time, max_time;
1870d30ea906Sjfb8856606 	const char *op_type_str;
1871d30ea906Sjfb8856606 
1872d30ea906Sjfb8856606 	total_time = max_time = 0;
1873d30ea906Sjfb8856606 	min_time = UINT64_MAX;
1874d30ea906Sjfb8856606 
1875d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
1876d30ea906Sjfb8856606 			"BURST_SIZE should be <= %u", MAX_BURST);
1877d30ea906Sjfb8856606 
1878d30ea906Sjfb8856606 	rte_bbdev_info_get(ad->dev_id, &info);
1879d30ea906Sjfb8856606 	bufs = &op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
1880d30ea906Sjfb8856606 
1881d30ea906Sjfb8856606 	op_type_str = rte_bbdev_op_type_str(op_type);
1882d30ea906Sjfb8856606 	TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
1883d30ea906Sjfb8856606 
1884d30ea906Sjfb8856606 	printf(
1885d30ea906Sjfb8856606 		"Validation/Latency test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
1886d30ea906Sjfb8856606 			info.dev_name, burst_sz, num_to_process, op_type_str);
1887d30ea906Sjfb8856606 
1888d30ea906Sjfb8856606 	if (op_type == RTE_BBDEV_OP_TURBO_DEC)
1889d30ea906Sjfb8856606 		iter = latency_test_dec(op_params->mp, bufs,
1890d30ea906Sjfb8856606 				op_params->ref_dec_op, op_params->vector_mask,
1891d30ea906Sjfb8856606 				ad->dev_id, queue_id, num_to_process,
1892d30ea906Sjfb8856606 				burst_sz, &total_time, &min_time, &max_time);
1893d30ea906Sjfb8856606 	else
1894d30ea906Sjfb8856606 		iter = latency_test_enc(op_params->mp, bufs,
1895d30ea906Sjfb8856606 				op_params->ref_enc_op, ad->dev_id, queue_id,
1896d30ea906Sjfb8856606 				num_to_process, burst_sz, &total_time,
1897d30ea906Sjfb8856606 				&min_time, &max_time);
1898d30ea906Sjfb8856606 
1899d30ea906Sjfb8856606 	if (iter <= 0)
1900d30ea906Sjfb8856606 		return TEST_FAILED;
1901d30ea906Sjfb8856606 
1902d30ea906Sjfb8856606 	printf("\toperation latency:\n"
1903d30ea906Sjfb8856606 			"\t\tavg latency: %lg cycles, %lg us\n"
1904d30ea906Sjfb8856606 			"\t\tmin latency: %lg cycles, %lg us\n"
1905d30ea906Sjfb8856606 			"\t\tmax latency: %lg cycles, %lg us\n",
1906d30ea906Sjfb8856606 			(double)total_time / (double)iter,
1907d30ea906Sjfb8856606 			(double)(total_time * 1000000) / (double)iter /
1908d30ea906Sjfb8856606 			(double)rte_get_tsc_hz(), (double)min_time,
1909d30ea906Sjfb8856606 			(double)(min_time * 1000000) / (double)rte_get_tsc_hz(),
1910d30ea906Sjfb8856606 			(double)max_time, (double)(max_time * 1000000) /
1911d30ea906Sjfb8856606 			(double)rte_get_tsc_hz());
1912d30ea906Sjfb8856606 
1913d30ea906Sjfb8856606 	return TEST_SUCCESS;
1914d30ea906Sjfb8856606 }
1915d30ea906Sjfb8856606 
1916d30ea906Sjfb8856606 #ifdef RTE_BBDEV_OFFLOAD_COST
1917d30ea906Sjfb8856606 static int
1918d30ea906Sjfb8856606 get_bbdev_queue_stats(uint16_t dev_id, uint16_t queue_id,
1919d30ea906Sjfb8856606 		struct rte_bbdev_stats *stats)
1920d30ea906Sjfb8856606 {
1921d30ea906Sjfb8856606 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
1922d30ea906Sjfb8856606 	struct rte_bbdev_stats *q_stats;
1923d30ea906Sjfb8856606 
1924d30ea906Sjfb8856606 	if (queue_id >= dev->data->num_queues)
1925d30ea906Sjfb8856606 		return -1;
1926d30ea906Sjfb8856606 
1927d30ea906Sjfb8856606 	q_stats = &dev->data->queues[queue_id].queue_stats;
1928d30ea906Sjfb8856606 
1929d30ea906Sjfb8856606 	stats->enqueued_count = q_stats->enqueued_count;
1930d30ea906Sjfb8856606 	stats->dequeued_count = q_stats->dequeued_count;
1931d30ea906Sjfb8856606 	stats->enqueue_err_count = q_stats->enqueue_err_count;
1932d30ea906Sjfb8856606 	stats->dequeue_err_count = q_stats->dequeue_err_count;
1933d30ea906Sjfb8856606 	stats->offload_time = q_stats->offload_time;
1934d30ea906Sjfb8856606 
1935d30ea906Sjfb8856606 	return 0;
1936d30ea906Sjfb8856606 }
1937d30ea906Sjfb8856606 
1938d30ea906Sjfb8856606 static int
1939d30ea906Sjfb8856606 offload_latency_test_dec(struct rte_mempool *mempool, struct test_buffers *bufs,
1940d30ea906Sjfb8856606 		struct rte_bbdev_dec_op *ref_op, uint16_t dev_id,
1941d30ea906Sjfb8856606 		uint16_t queue_id, const uint16_t num_to_process,
1942d30ea906Sjfb8856606 		uint16_t burst_sz, struct test_time_stats *time_st)
1943d30ea906Sjfb8856606 {
1944d30ea906Sjfb8856606 	int i, dequeued, ret;
1945d30ea906Sjfb8856606 	struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
1946d30ea906Sjfb8856606 	uint64_t enq_start_time, deq_start_time;
1947d30ea906Sjfb8856606 	uint64_t enq_sw_last_time, deq_last_time;
1948d30ea906Sjfb8856606 	struct rte_bbdev_stats stats;
1949d30ea906Sjfb8856606 
1950d30ea906Sjfb8856606 	for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
1951d30ea906Sjfb8856606 		uint16_t enq = 0, deq = 0;
1952d30ea906Sjfb8856606 
1953d30ea906Sjfb8856606 		if (unlikely(num_to_process - dequeued < burst_sz))
1954d30ea906Sjfb8856606 			burst_sz = num_to_process - dequeued;
1955d30ea906Sjfb8856606 
1956*1646932aSjfb8856606 		ret = rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
1957*1646932aSjfb8856606 		TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
1958*1646932aSjfb8856606 				burst_sz);
1959*1646932aSjfb8856606 
1960d30ea906Sjfb8856606 		if (test_vector.op_type != RTE_BBDEV_OP_NONE)
1961d30ea906Sjfb8856606 			copy_reference_dec_op(ops_enq, burst_sz, dequeued,
1962d30ea906Sjfb8856606 					bufs->inputs,
1963d30ea906Sjfb8856606 					bufs->hard_outputs,
1964d30ea906Sjfb8856606 					bufs->soft_outputs,
1965d30ea906Sjfb8856606 					ref_op);
1966d30ea906Sjfb8856606 
1967d30ea906Sjfb8856606 		/* Start time meas for enqueue function offload latency */
1968d30ea906Sjfb8856606 		enq_start_time = rte_rdtsc_precise();
1969d30ea906Sjfb8856606 		do {
1970d30ea906Sjfb8856606 			enq += rte_bbdev_enqueue_dec_ops(dev_id, queue_id,
1971d30ea906Sjfb8856606 					&ops_enq[enq], burst_sz - enq);
1972d30ea906Sjfb8856606 		} while (unlikely(burst_sz != enq));
1973d30ea906Sjfb8856606 
1974d30ea906Sjfb8856606 		ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
1975d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(ret,
1976d30ea906Sjfb8856606 				"Failed to get stats for queue (%u) of device (%u)",
1977d30ea906Sjfb8856606 				queue_id, dev_id);
1978d30ea906Sjfb8856606 
1979d30ea906Sjfb8856606 		enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
1980d30ea906Sjfb8856606 				stats.offload_time;
1981d30ea906Sjfb8856606 		time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
1982d30ea906Sjfb8856606 				enq_sw_last_time);
1983d30ea906Sjfb8856606 		time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
1984d30ea906Sjfb8856606 				enq_sw_last_time);
1985d30ea906Sjfb8856606 		time_st->enq_sw_tot_time += enq_sw_last_time;
1986d30ea906Sjfb8856606 
1987d30ea906Sjfb8856606 		time_st->enq_tur_max_time = RTE_MAX(time_st->enq_tur_max_time,
1988d30ea906Sjfb8856606 				stats.offload_time);
1989d30ea906Sjfb8856606 		time_st->enq_tur_min_time = RTE_MIN(time_st->enq_tur_min_time,
1990d30ea906Sjfb8856606 				stats.offload_time);
1991d30ea906Sjfb8856606 		time_st->enq_tur_tot_time += stats.offload_time;
1992d30ea906Sjfb8856606 
1993d30ea906Sjfb8856606 		/* ensure enqueue has been completed */
1994d30ea906Sjfb8856606 		rte_delay_ms(10);
1995d30ea906Sjfb8856606 
1996d30ea906Sjfb8856606 		/* Start time meas for dequeue function offload latency */
1997d30ea906Sjfb8856606 		deq_start_time = rte_rdtsc_precise();
1998d30ea906Sjfb8856606 		/* Dequeue one operation */
1999d30ea906Sjfb8856606 		do {
2000d30ea906Sjfb8856606 			deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
2001d30ea906Sjfb8856606 					&ops_deq[deq], 1);
2002d30ea906Sjfb8856606 		} while (unlikely(deq != 1));
2003d30ea906Sjfb8856606 
2004d30ea906Sjfb8856606 		deq_last_time = rte_rdtsc_precise() - deq_start_time;
2005d30ea906Sjfb8856606 		time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
2006d30ea906Sjfb8856606 				deq_last_time);
2007d30ea906Sjfb8856606 		time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
2008d30ea906Sjfb8856606 				deq_last_time);
2009d30ea906Sjfb8856606 		time_st->deq_tot_time += deq_last_time;
2010d30ea906Sjfb8856606 
2011d30ea906Sjfb8856606 		/* Dequeue remaining operations if needed*/
2012d30ea906Sjfb8856606 		while (burst_sz != deq)
2013d30ea906Sjfb8856606 			deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
2014d30ea906Sjfb8856606 					&ops_deq[deq], burst_sz - deq);
2015d30ea906Sjfb8856606 
2016d30ea906Sjfb8856606 		rte_bbdev_dec_op_free_bulk(ops_enq, deq);
2017d30ea906Sjfb8856606 		dequeued += deq;
2018d30ea906Sjfb8856606 	}
2019d30ea906Sjfb8856606 
2020d30ea906Sjfb8856606 	return i;
2021d30ea906Sjfb8856606 }
2022d30ea906Sjfb8856606 
2023d30ea906Sjfb8856606 static int
2024d30ea906Sjfb8856606 offload_latency_test_enc(struct rte_mempool *mempool, struct test_buffers *bufs,
2025d30ea906Sjfb8856606 		struct rte_bbdev_enc_op *ref_op, uint16_t dev_id,
2026d30ea906Sjfb8856606 		uint16_t queue_id, const uint16_t num_to_process,
2027d30ea906Sjfb8856606 		uint16_t burst_sz, struct test_time_stats *time_st)
2028d30ea906Sjfb8856606 {
2029d30ea906Sjfb8856606 	int i, dequeued, ret;
2030d30ea906Sjfb8856606 	struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
2031d30ea906Sjfb8856606 	uint64_t enq_start_time, deq_start_time;
2032d30ea906Sjfb8856606 	uint64_t enq_sw_last_time, deq_last_time;
2033d30ea906Sjfb8856606 	struct rte_bbdev_stats stats;
2034d30ea906Sjfb8856606 
2035d30ea906Sjfb8856606 	for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
2036d30ea906Sjfb8856606 		uint16_t enq = 0, deq = 0;
2037d30ea906Sjfb8856606 
2038d30ea906Sjfb8856606 		if (unlikely(num_to_process - dequeued < burst_sz))
2039d30ea906Sjfb8856606 			burst_sz = num_to_process - dequeued;
2040d30ea906Sjfb8856606 
2041*1646932aSjfb8856606 		ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
2042*1646932aSjfb8856606 		TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
2043*1646932aSjfb8856606 				burst_sz);
2044*1646932aSjfb8856606 
2045d30ea906Sjfb8856606 		if (test_vector.op_type != RTE_BBDEV_OP_NONE)
2046d30ea906Sjfb8856606 			copy_reference_enc_op(ops_enq, burst_sz, dequeued,
2047d30ea906Sjfb8856606 					bufs->inputs,
2048d30ea906Sjfb8856606 					bufs->hard_outputs,
2049d30ea906Sjfb8856606 					ref_op);
2050d30ea906Sjfb8856606 
2051d30ea906Sjfb8856606 		/* Start time meas for enqueue function offload latency */
2052d30ea906Sjfb8856606 		enq_start_time = rte_rdtsc_precise();
2053d30ea906Sjfb8856606 		do {
2054d30ea906Sjfb8856606 			enq += rte_bbdev_enqueue_enc_ops(dev_id, queue_id,
2055d30ea906Sjfb8856606 					&ops_enq[enq], burst_sz - enq);
2056d30ea906Sjfb8856606 		} while (unlikely(burst_sz != enq));
2057d30ea906Sjfb8856606 
2058d30ea906Sjfb8856606 		ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
2059d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(ret,
2060d30ea906Sjfb8856606 				"Failed to get stats for queue (%u) of device (%u)",
2061d30ea906Sjfb8856606 				queue_id, dev_id);
2062d30ea906Sjfb8856606 
2063d30ea906Sjfb8856606 		enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
2064d30ea906Sjfb8856606 				stats.offload_time;
2065d30ea906Sjfb8856606 		time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
2066d30ea906Sjfb8856606 				enq_sw_last_time);
2067d30ea906Sjfb8856606 		time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
2068d30ea906Sjfb8856606 				enq_sw_last_time);
2069d30ea906Sjfb8856606 		time_st->enq_sw_tot_time += enq_sw_last_time;
2070d30ea906Sjfb8856606 
2071d30ea906Sjfb8856606 		time_st->enq_tur_max_time = RTE_MAX(time_st->enq_tur_max_time,
2072d30ea906Sjfb8856606 				stats.offload_time);
2073d30ea906Sjfb8856606 		time_st->enq_tur_min_time = RTE_MIN(time_st->enq_tur_min_time,
2074d30ea906Sjfb8856606 				stats.offload_time);
2075d30ea906Sjfb8856606 		time_st->enq_tur_tot_time += stats.offload_time;
2076d30ea906Sjfb8856606 
2077d30ea906Sjfb8856606 		/* ensure enqueue has been completed */
2078d30ea906Sjfb8856606 		rte_delay_ms(10);
2079d30ea906Sjfb8856606 
2080d30ea906Sjfb8856606 		/* Start time meas for dequeue function offload latency */
2081d30ea906Sjfb8856606 		deq_start_time = rte_rdtsc_precise();
2082d30ea906Sjfb8856606 		/* Dequeue one operation */
2083d30ea906Sjfb8856606 		do {
2084d30ea906Sjfb8856606 			deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
2085d30ea906Sjfb8856606 					&ops_deq[deq], 1);
2086d30ea906Sjfb8856606 		} while (unlikely(deq != 1));
2087d30ea906Sjfb8856606 
2088d30ea906Sjfb8856606 		deq_last_time = rte_rdtsc_precise() - deq_start_time;
2089d30ea906Sjfb8856606 		time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
2090d30ea906Sjfb8856606 				deq_last_time);
2091d30ea906Sjfb8856606 		time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
2092d30ea906Sjfb8856606 				deq_last_time);
2093d30ea906Sjfb8856606 		time_st->deq_tot_time += deq_last_time;
2094d30ea906Sjfb8856606 
2095d30ea906Sjfb8856606 		while (burst_sz != deq)
2096d30ea906Sjfb8856606 			deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
2097d30ea906Sjfb8856606 					&ops_deq[deq], burst_sz - deq);
2098d30ea906Sjfb8856606 
2099d30ea906Sjfb8856606 		rte_bbdev_enc_op_free_bulk(ops_enq, deq);
2100d30ea906Sjfb8856606 		dequeued += deq;
2101d30ea906Sjfb8856606 	}
2102d30ea906Sjfb8856606 
2103d30ea906Sjfb8856606 	return i;
2104d30ea906Sjfb8856606 }
2105d30ea906Sjfb8856606 #endif
2106d30ea906Sjfb8856606 
2107d30ea906Sjfb8856606 static int
2108d30ea906Sjfb8856606 offload_cost_test(struct active_device *ad,
2109d30ea906Sjfb8856606 		struct test_op_params *op_params)
2110d30ea906Sjfb8856606 {
2111d30ea906Sjfb8856606 #ifndef RTE_BBDEV_OFFLOAD_COST
2112d30ea906Sjfb8856606 	RTE_SET_USED(ad);
2113d30ea906Sjfb8856606 	RTE_SET_USED(op_params);
2114d30ea906Sjfb8856606 	printf("Offload latency test is disabled.\n");
2115d30ea906Sjfb8856606 	printf("Set RTE_BBDEV_OFFLOAD_COST to 'y' to turn the test on.\n");
2116d30ea906Sjfb8856606 	return TEST_SKIPPED;
2117d30ea906Sjfb8856606 #else
2118d30ea906Sjfb8856606 	int iter;
2119d30ea906Sjfb8856606 	uint16_t burst_sz = op_params->burst_sz;
2120d30ea906Sjfb8856606 	const uint16_t num_to_process = op_params->num_to_process;
2121d30ea906Sjfb8856606 	const enum rte_bbdev_op_type op_type = test_vector.op_type;
2122d30ea906Sjfb8856606 	const uint16_t queue_id = ad->queue_ids[0];
2123d30ea906Sjfb8856606 	struct test_buffers *bufs = NULL;
2124d30ea906Sjfb8856606 	struct rte_bbdev_info info;
2125d30ea906Sjfb8856606 	const char *op_type_str;
2126d30ea906Sjfb8856606 	struct test_time_stats time_st;
2127d30ea906Sjfb8856606 
2128d30ea906Sjfb8856606 	memset(&time_st, 0, sizeof(struct test_time_stats));
2129d30ea906Sjfb8856606 	time_st.enq_sw_min_time = UINT64_MAX;
2130d30ea906Sjfb8856606 	time_st.enq_tur_min_time = UINT64_MAX;
2131d30ea906Sjfb8856606 	time_st.deq_min_time = UINT64_MAX;
2132d30ea906Sjfb8856606 
2133d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
2134d30ea906Sjfb8856606 			"BURST_SIZE should be <= %u", MAX_BURST);
2135d30ea906Sjfb8856606 
2136d30ea906Sjfb8856606 	rte_bbdev_info_get(ad->dev_id, &info);
2137d30ea906Sjfb8856606 	bufs = &op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
2138d30ea906Sjfb8856606 
2139d30ea906Sjfb8856606 	op_type_str = rte_bbdev_op_type_str(op_type);
2140d30ea906Sjfb8856606 	TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
2141d30ea906Sjfb8856606 
2142d30ea906Sjfb8856606 	printf(
2143d30ea906Sjfb8856606 		"Offload latency test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
2144d30ea906Sjfb8856606 			info.dev_name, burst_sz, num_to_process, op_type_str);
2145d30ea906Sjfb8856606 
2146d30ea906Sjfb8856606 	if (op_type == RTE_BBDEV_OP_TURBO_DEC)
2147d30ea906Sjfb8856606 		iter = offload_latency_test_dec(op_params->mp, bufs,
2148d30ea906Sjfb8856606 				op_params->ref_dec_op, ad->dev_id, queue_id,
2149d30ea906Sjfb8856606 				num_to_process, burst_sz, &time_st);
2150d30ea906Sjfb8856606 	else
2151d30ea906Sjfb8856606 		iter = offload_latency_test_enc(op_params->mp, bufs,
2152d30ea906Sjfb8856606 				op_params->ref_enc_op, ad->dev_id, queue_id,
2153d30ea906Sjfb8856606 				num_to_process, burst_sz, &time_st);
2154d30ea906Sjfb8856606 
2155d30ea906Sjfb8856606 	if (iter <= 0)
2156d30ea906Sjfb8856606 		return TEST_FAILED;
2157d30ea906Sjfb8856606 
2158d30ea906Sjfb8856606 	printf("\tenq offload cost latency:\n"
2159d30ea906Sjfb8856606 			"\t\tsoftware avg %lg cycles, %lg us\n"
2160d30ea906Sjfb8856606 			"\t\tsoftware min %lg cycles, %lg us\n"
2161d30ea906Sjfb8856606 			"\t\tsoftware max %lg cycles, %lg us\n"
2162d30ea906Sjfb8856606 			"\t\tturbo avg %lg cycles, %lg us\n"
2163d30ea906Sjfb8856606 			"\t\tturbo min %lg cycles, %lg us\n"
2164d30ea906Sjfb8856606 			"\t\tturbo max %lg cycles, %lg us\n",
2165d30ea906Sjfb8856606 			(double)time_st.enq_sw_tot_time / (double)iter,
2166d30ea906Sjfb8856606 			(double)(time_st.enq_sw_tot_time * 1000000) /
2167d30ea906Sjfb8856606 			(double)iter / (double)rte_get_tsc_hz(),
2168d30ea906Sjfb8856606 			(double)time_st.enq_sw_min_time,
2169d30ea906Sjfb8856606 			(double)(time_st.enq_sw_min_time * 1000000) /
2170d30ea906Sjfb8856606 			rte_get_tsc_hz(), (double)time_st.enq_sw_max_time,
2171d30ea906Sjfb8856606 			(double)(time_st.enq_sw_max_time * 1000000) /
2172d30ea906Sjfb8856606 			rte_get_tsc_hz(), (double)time_st.enq_tur_tot_time /
2173d30ea906Sjfb8856606 			(double)iter,
2174d30ea906Sjfb8856606 			(double)(time_st.enq_tur_tot_time * 1000000) /
2175d30ea906Sjfb8856606 			(double)iter / (double)rte_get_tsc_hz(),
2176d30ea906Sjfb8856606 			(double)time_st.enq_tur_min_time,
2177d30ea906Sjfb8856606 			(double)(time_st.enq_tur_min_time * 1000000) /
2178d30ea906Sjfb8856606 			rte_get_tsc_hz(), (double)time_st.enq_tur_max_time,
2179d30ea906Sjfb8856606 			(double)(time_st.enq_tur_max_time * 1000000) /
2180d30ea906Sjfb8856606 			rte_get_tsc_hz());
2181d30ea906Sjfb8856606 
2182d30ea906Sjfb8856606 	printf("\tdeq offload cost latency - one op:\n"
2183d30ea906Sjfb8856606 			"\t\tavg %lg cycles, %lg us\n"
2184d30ea906Sjfb8856606 			"\t\tmin %lg cycles, %lg us\n"
2185d30ea906Sjfb8856606 			"\t\tmax %lg cycles, %lg us\n",
2186d30ea906Sjfb8856606 			(double)time_st.deq_tot_time / (double)iter,
2187d30ea906Sjfb8856606 			(double)(time_st.deq_tot_time * 1000000) /
2188d30ea906Sjfb8856606 			(double)iter / (double)rte_get_tsc_hz(),
2189d30ea906Sjfb8856606 			(double)time_st.deq_min_time,
2190d30ea906Sjfb8856606 			(double)(time_st.deq_min_time * 1000000) /
2191d30ea906Sjfb8856606 			rte_get_tsc_hz(), (double)time_st.deq_max_time,
2192d30ea906Sjfb8856606 			(double)(time_st.deq_max_time * 1000000) /
2193d30ea906Sjfb8856606 			rte_get_tsc_hz());
2194d30ea906Sjfb8856606 
2195d30ea906Sjfb8856606 	return TEST_SUCCESS;
2196d30ea906Sjfb8856606 #endif
2197d30ea906Sjfb8856606 }
2198d30ea906Sjfb8856606 
2199d30ea906Sjfb8856606 #ifdef RTE_BBDEV_OFFLOAD_COST
2200d30ea906Sjfb8856606 static int
2201d30ea906Sjfb8856606 offload_latency_empty_q_test_dec(uint16_t dev_id, uint16_t queue_id,
2202d30ea906Sjfb8856606 		const uint16_t num_to_process, uint16_t burst_sz,
2203d30ea906Sjfb8856606 		uint64_t *deq_tot_time, uint64_t *deq_min_time,
2204d30ea906Sjfb8856606 		uint64_t *deq_max_time)
2205d30ea906Sjfb8856606 {
2206d30ea906Sjfb8856606 	int i, deq_total;
2207d30ea906Sjfb8856606 	struct rte_bbdev_dec_op *ops[MAX_BURST];
2208d30ea906Sjfb8856606 	uint64_t deq_start_time, deq_last_time;
2209d30ea906Sjfb8856606 
2210d30ea906Sjfb8856606 	/* Test deq offload latency from an empty queue */
2211d30ea906Sjfb8856606 
2212d30ea906Sjfb8856606 	for (i = 0, deq_total = 0; deq_total < num_to_process;
2213d30ea906Sjfb8856606 			++i, deq_total += burst_sz) {
2214d30ea906Sjfb8856606 		deq_start_time = rte_rdtsc_precise();
2215d30ea906Sjfb8856606 
2216d30ea906Sjfb8856606 		if (unlikely(num_to_process - deq_total < burst_sz))
2217d30ea906Sjfb8856606 			burst_sz = num_to_process - deq_total;
2218d30ea906Sjfb8856606 		rte_bbdev_dequeue_dec_ops(dev_id, queue_id, ops, burst_sz);
2219d30ea906Sjfb8856606 
2220d30ea906Sjfb8856606 		deq_last_time = rte_rdtsc_precise() - deq_start_time;
2221d30ea906Sjfb8856606 		*deq_max_time = RTE_MAX(*deq_max_time, deq_last_time);
2222d30ea906Sjfb8856606 		*deq_min_time = RTE_MIN(*deq_min_time, deq_last_time);
2223d30ea906Sjfb8856606 		*deq_tot_time += deq_last_time;
2224d30ea906Sjfb8856606 	}
2225d30ea906Sjfb8856606 
2226d30ea906Sjfb8856606 	return i;
2227d30ea906Sjfb8856606 }
2228d30ea906Sjfb8856606 
2229d30ea906Sjfb8856606 static int
2230d30ea906Sjfb8856606 offload_latency_empty_q_test_enc(uint16_t dev_id, uint16_t queue_id,
2231d30ea906Sjfb8856606 		const uint16_t num_to_process, uint16_t burst_sz,
2232d30ea906Sjfb8856606 		uint64_t *deq_tot_time, uint64_t *deq_min_time,
2233d30ea906Sjfb8856606 		uint64_t *deq_max_time)
2234d30ea906Sjfb8856606 {
2235d30ea906Sjfb8856606 	int i, deq_total;
2236d30ea906Sjfb8856606 	struct rte_bbdev_enc_op *ops[MAX_BURST];
2237d30ea906Sjfb8856606 	uint64_t deq_start_time, deq_last_time;
2238d30ea906Sjfb8856606 
2239d30ea906Sjfb8856606 	/* Test deq offload latency from an empty queue */
2240d30ea906Sjfb8856606 	for (i = 0, deq_total = 0; deq_total < num_to_process;
2241d30ea906Sjfb8856606 			++i, deq_total += burst_sz) {
2242d30ea906Sjfb8856606 		deq_start_time = rte_rdtsc_precise();
2243d30ea906Sjfb8856606 
2244d30ea906Sjfb8856606 		if (unlikely(num_to_process - deq_total < burst_sz))
2245d30ea906Sjfb8856606 			burst_sz = num_to_process - deq_total;
2246d30ea906Sjfb8856606 		rte_bbdev_dequeue_enc_ops(dev_id, queue_id, ops, burst_sz);
2247d30ea906Sjfb8856606 
2248d30ea906Sjfb8856606 		deq_last_time = rte_rdtsc_precise() - deq_start_time;
2249d30ea906Sjfb8856606 		*deq_max_time = RTE_MAX(*deq_max_time, deq_last_time);
2250d30ea906Sjfb8856606 		*deq_min_time = RTE_MIN(*deq_min_time, deq_last_time);
2251d30ea906Sjfb8856606 		*deq_tot_time += deq_last_time;
2252d30ea906Sjfb8856606 	}
2253d30ea906Sjfb8856606 
2254d30ea906Sjfb8856606 	return i;
2255d30ea906Sjfb8856606 }
2256d30ea906Sjfb8856606 #endif
2257d30ea906Sjfb8856606 
2258d30ea906Sjfb8856606 static int
2259d30ea906Sjfb8856606 offload_latency_empty_q_test(struct active_device *ad,
2260d30ea906Sjfb8856606 		struct test_op_params *op_params)
2261d30ea906Sjfb8856606 {
2262d30ea906Sjfb8856606 #ifndef RTE_BBDEV_OFFLOAD_COST
2263d30ea906Sjfb8856606 	RTE_SET_USED(ad);
2264d30ea906Sjfb8856606 	RTE_SET_USED(op_params);
2265d30ea906Sjfb8856606 	printf("Offload latency empty dequeue test is disabled.\n");
2266d30ea906Sjfb8856606 	printf("Set RTE_BBDEV_OFFLOAD_COST to 'y' to turn the test on.\n");
2267d30ea906Sjfb8856606 	return TEST_SKIPPED;
2268d30ea906Sjfb8856606 #else
2269d30ea906Sjfb8856606 	int iter;
2270d30ea906Sjfb8856606 	uint64_t deq_tot_time, deq_min_time, deq_max_time;
2271d30ea906Sjfb8856606 	uint16_t burst_sz = op_params->burst_sz;
2272d30ea906Sjfb8856606 	const uint16_t num_to_process = op_params->num_to_process;
2273d30ea906Sjfb8856606 	const enum rte_bbdev_op_type op_type = test_vector.op_type;
2274d30ea906Sjfb8856606 	const uint16_t queue_id = ad->queue_ids[0];
2275d30ea906Sjfb8856606 	struct rte_bbdev_info info;
2276d30ea906Sjfb8856606 	const char *op_type_str;
2277d30ea906Sjfb8856606 
2278d30ea906Sjfb8856606 	deq_tot_time = deq_max_time = 0;
2279d30ea906Sjfb8856606 	deq_min_time = UINT64_MAX;
2280d30ea906Sjfb8856606 
2281d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
2282d30ea906Sjfb8856606 			"BURST_SIZE should be <= %u", MAX_BURST);
2283d30ea906Sjfb8856606 
2284d30ea906Sjfb8856606 	rte_bbdev_info_get(ad->dev_id, &info);
2285d30ea906Sjfb8856606 
2286d30ea906Sjfb8856606 	op_type_str = rte_bbdev_op_type_str(op_type);
2287d30ea906Sjfb8856606 	TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
2288d30ea906Sjfb8856606 
2289d30ea906Sjfb8856606 	printf(
2290d30ea906Sjfb8856606 		"Offload latency empty dequeue test: dev: %s, burst size: %u, num ops: %u, op type: %s\n",
2291d30ea906Sjfb8856606 			info.dev_name, burst_sz, num_to_process, op_type_str);
2292d30ea906Sjfb8856606 
2293d30ea906Sjfb8856606 	if (op_type == RTE_BBDEV_OP_TURBO_DEC)
2294d30ea906Sjfb8856606 		iter = offload_latency_empty_q_test_dec(ad->dev_id, queue_id,
2295d30ea906Sjfb8856606 				num_to_process, burst_sz, &deq_tot_time,
2296d30ea906Sjfb8856606 				&deq_min_time, &deq_max_time);
2297d30ea906Sjfb8856606 	else
2298d30ea906Sjfb8856606 		iter = offload_latency_empty_q_test_enc(ad->dev_id, queue_id,
2299d30ea906Sjfb8856606 				num_to_process, burst_sz, &deq_tot_time,
2300d30ea906Sjfb8856606 				&deq_min_time, &deq_max_time);
2301d30ea906Sjfb8856606 
2302d30ea906Sjfb8856606 	if (iter <= 0)
2303d30ea906Sjfb8856606 		return TEST_FAILED;
2304d30ea906Sjfb8856606 
2305d30ea906Sjfb8856606 	printf("\tempty deq offload\n"
2306d30ea906Sjfb8856606 			"\t\tavg. latency: %lg cycles, %lg us\n"
2307d30ea906Sjfb8856606 			"\t\tmin. latency: %lg cycles, %lg us\n"
2308d30ea906Sjfb8856606 			"\t\tmax. latency: %lg cycles, %lg us\n",
2309d30ea906Sjfb8856606 			(double)deq_tot_time / (double)iter,
2310d30ea906Sjfb8856606 			(double)(deq_tot_time * 1000000) / (double)iter /
2311d30ea906Sjfb8856606 			(double)rte_get_tsc_hz(), (double)deq_min_time,
2312d30ea906Sjfb8856606 			(double)(deq_min_time * 1000000) / rte_get_tsc_hz(),
2313d30ea906Sjfb8856606 			(double)deq_max_time, (double)(deq_max_time * 1000000) /
2314d30ea906Sjfb8856606 			rte_get_tsc_hz());
2315d30ea906Sjfb8856606 
2316d30ea906Sjfb8856606 	return TEST_SUCCESS;
2317d30ea906Sjfb8856606 #endif
2318d30ea906Sjfb8856606 }
2319d30ea906Sjfb8856606 
2320d30ea906Sjfb8856606 static int
2321d30ea906Sjfb8856606 throughput_tc(void)
2322d30ea906Sjfb8856606 {
2323d30ea906Sjfb8856606 	return run_test_case(throughput_test);
2324d30ea906Sjfb8856606 }
2325d30ea906Sjfb8856606 
2326d30ea906Sjfb8856606 static int
2327d30ea906Sjfb8856606 offload_cost_tc(void)
2328d30ea906Sjfb8856606 {
2329d30ea906Sjfb8856606 	return run_test_case(offload_cost_test);
2330d30ea906Sjfb8856606 }
2331d30ea906Sjfb8856606 
2332d30ea906Sjfb8856606 static int
2333d30ea906Sjfb8856606 offload_latency_empty_q_tc(void)
2334d30ea906Sjfb8856606 {
2335d30ea906Sjfb8856606 	return run_test_case(offload_latency_empty_q_test);
2336d30ea906Sjfb8856606 }
2337d30ea906Sjfb8856606 
2338d30ea906Sjfb8856606 static int
2339d30ea906Sjfb8856606 latency_tc(void)
2340d30ea906Sjfb8856606 {
2341d30ea906Sjfb8856606 	return run_test_case(latency_test);
2342d30ea906Sjfb8856606 }
2343d30ea906Sjfb8856606 
2344d30ea906Sjfb8856606 static int
2345d30ea906Sjfb8856606 interrupt_tc(void)
2346d30ea906Sjfb8856606 {
2347d30ea906Sjfb8856606 	return run_test_case(throughput_test);
2348d30ea906Sjfb8856606 }
2349d30ea906Sjfb8856606 
2350d30ea906Sjfb8856606 static struct unit_test_suite bbdev_throughput_testsuite = {
2351d30ea906Sjfb8856606 	.suite_name = "BBdev Throughput Tests",
2352d30ea906Sjfb8856606 	.setup = testsuite_setup,
2353d30ea906Sjfb8856606 	.teardown = testsuite_teardown,
2354d30ea906Sjfb8856606 	.unit_test_cases = {
2355d30ea906Sjfb8856606 		TEST_CASE_ST(ut_setup, ut_teardown, throughput_tc),
2356d30ea906Sjfb8856606 		TEST_CASES_END() /**< NULL terminate unit test array */
2357d30ea906Sjfb8856606 	}
2358d30ea906Sjfb8856606 };
2359d30ea906Sjfb8856606 
2360d30ea906Sjfb8856606 static struct unit_test_suite bbdev_validation_testsuite = {
2361d30ea906Sjfb8856606 	.suite_name = "BBdev Validation Tests",
2362d30ea906Sjfb8856606 	.setup = testsuite_setup,
2363d30ea906Sjfb8856606 	.teardown = testsuite_teardown,
2364d30ea906Sjfb8856606 	.unit_test_cases = {
2365d30ea906Sjfb8856606 		TEST_CASE_ST(ut_setup, ut_teardown, latency_tc),
2366d30ea906Sjfb8856606 		TEST_CASES_END() /**< NULL terminate unit test array */
2367d30ea906Sjfb8856606 	}
2368d30ea906Sjfb8856606 };
2369d30ea906Sjfb8856606 
2370d30ea906Sjfb8856606 static struct unit_test_suite bbdev_latency_testsuite = {
2371d30ea906Sjfb8856606 	.suite_name = "BBdev Latency Tests",
2372d30ea906Sjfb8856606 	.setup = testsuite_setup,
2373d30ea906Sjfb8856606 	.teardown = testsuite_teardown,
2374d30ea906Sjfb8856606 	.unit_test_cases = {
2375d30ea906Sjfb8856606 		TEST_CASE_ST(ut_setup, ut_teardown, latency_tc),
2376d30ea906Sjfb8856606 		TEST_CASES_END() /**< NULL terminate unit test array */
2377d30ea906Sjfb8856606 	}
2378d30ea906Sjfb8856606 };
2379d30ea906Sjfb8856606 
2380d30ea906Sjfb8856606 static struct unit_test_suite bbdev_offload_cost_testsuite = {
2381d30ea906Sjfb8856606 	.suite_name = "BBdev Offload Cost Tests",
2382d30ea906Sjfb8856606 	.setup = testsuite_setup,
2383d30ea906Sjfb8856606 	.teardown = testsuite_teardown,
2384d30ea906Sjfb8856606 	.unit_test_cases = {
2385d30ea906Sjfb8856606 		TEST_CASE_ST(ut_setup, ut_teardown, offload_cost_tc),
2386d30ea906Sjfb8856606 		TEST_CASE_ST(ut_setup, ut_teardown, offload_latency_empty_q_tc),
2387d30ea906Sjfb8856606 		TEST_CASES_END() /**< NULL terminate unit test array */
2388d30ea906Sjfb8856606 	}
2389d30ea906Sjfb8856606 };
2390d30ea906Sjfb8856606 
2391d30ea906Sjfb8856606 static struct unit_test_suite bbdev_interrupt_testsuite = {
2392d30ea906Sjfb8856606 	.suite_name = "BBdev Interrupt Tests",
2393d30ea906Sjfb8856606 	.setup = interrupt_testsuite_setup,
2394d30ea906Sjfb8856606 	.teardown = testsuite_teardown,
2395d30ea906Sjfb8856606 	.unit_test_cases = {
2396d30ea906Sjfb8856606 		TEST_CASE_ST(ut_setup, ut_teardown, interrupt_tc),
2397d30ea906Sjfb8856606 		TEST_CASES_END() /**< NULL terminate unit test array */
2398d30ea906Sjfb8856606 	}
2399d30ea906Sjfb8856606 };
2400d30ea906Sjfb8856606 
2401d30ea906Sjfb8856606 REGISTER_TEST_COMMAND(throughput, bbdev_throughput_testsuite);
2402d30ea906Sjfb8856606 REGISTER_TEST_COMMAND(validation, bbdev_validation_testsuite);
2403d30ea906Sjfb8856606 REGISTER_TEST_COMMAND(latency, bbdev_latency_testsuite);
2404d30ea906Sjfb8856606 REGISTER_TEST_COMMAND(offload, bbdev_offload_cost_testsuite);
2405d30ea906Sjfb8856606 REGISTER_TEST_COMMAND(interrupt, bbdev_interrupt_testsuite);
2406