1d30ea906Sjfb8856606 /* SPDX-License-Identifier: BSD-3-Clause
2d30ea906Sjfb8856606  * Copyright(c) 2017 Intel Corporation
3d30ea906Sjfb8856606  */
4d30ea906Sjfb8856606 
5d30ea906Sjfb8856606 #include <stdio.h>
6d30ea906Sjfb8856606 #include <inttypes.h>
7d30ea906Sjfb8856606 #include <math.h>
8d30ea906Sjfb8856606 
9d30ea906Sjfb8856606 #include <rte_eal.h>
10d30ea906Sjfb8856606 #include <rte_common.h>
11d30ea906Sjfb8856606 #include <rte_dev.h>
12d30ea906Sjfb8856606 #include <rte_launch.h>
13d30ea906Sjfb8856606 #include <rte_bbdev.h>
14d30ea906Sjfb8856606 #include <rte_cycles.h>
15d30ea906Sjfb8856606 #include <rte_lcore.h>
16d30ea906Sjfb8856606 #include <rte_malloc.h>
17d30ea906Sjfb8856606 #include <rte_random.h>
18d30ea906Sjfb8856606 #include <rte_hexdump.h>
194418919fSjohnjiang #include <rte_interrupts.h>
204418919fSjohnjiang 
21d30ea906Sjfb8856606 #include "main.h"
22d30ea906Sjfb8856606 #include "test_bbdev_vector.h"
23d30ea906Sjfb8856606 
24d30ea906Sjfb8856606 #define GET_SOCKET(socket_id) (((socket_id) == SOCKET_ID_ANY) ? 0 : (socket_id))
25d30ea906Sjfb8856606 
26d30ea906Sjfb8856606 #define MAX_QUEUES RTE_MAX_LCORE
27*2d9fd380Sjfb8856606 #define TEST_REPETITIONS 100
28*2d9fd380Sjfb8856606 #define WAIT_OFFLOAD_US 1000
294418919fSjohnjiang 
30*2d9fd380Sjfb8856606 #ifdef RTE_BASEBAND_FPGA_LTE_FEC
31*2d9fd380Sjfb8856606 #include <fpga_lte_fec.h>
32*2d9fd380Sjfb8856606 #define FPGA_LTE_PF_DRIVER_NAME ("intel_fpga_lte_fec_pf")
33*2d9fd380Sjfb8856606 #define FPGA_LTE_VF_DRIVER_NAME ("intel_fpga_lte_fec_vf")
34*2d9fd380Sjfb8856606 #define VF_UL_4G_QUEUE_VALUE 4
35*2d9fd380Sjfb8856606 #define VF_DL_4G_QUEUE_VALUE 4
36*2d9fd380Sjfb8856606 #define UL_4G_BANDWIDTH 3
37*2d9fd380Sjfb8856606 #define DL_4G_BANDWIDTH 3
38*2d9fd380Sjfb8856606 #define UL_4G_LOAD_BALANCE 128
39*2d9fd380Sjfb8856606 #define DL_4G_LOAD_BALANCE 128
40*2d9fd380Sjfb8856606 #define FLR_4G_TIMEOUT 610
41*2d9fd380Sjfb8856606 #endif
42*2d9fd380Sjfb8856606 
43*2d9fd380Sjfb8856606 #ifdef RTE_BASEBAND_FPGA_5GNR_FEC
44*2d9fd380Sjfb8856606 #include <rte_pmd_fpga_5gnr_fec.h>
45*2d9fd380Sjfb8856606 #define FPGA_5GNR_PF_DRIVER_NAME ("intel_fpga_5gnr_fec_pf")
46*2d9fd380Sjfb8856606 #define FPGA_5GNR_VF_DRIVER_NAME ("intel_fpga_5gnr_fec_vf")
47*2d9fd380Sjfb8856606 #define VF_UL_5G_QUEUE_VALUE 4
48*2d9fd380Sjfb8856606 #define VF_DL_5G_QUEUE_VALUE 4
49*2d9fd380Sjfb8856606 #define UL_5G_BANDWIDTH 3
50*2d9fd380Sjfb8856606 #define DL_5G_BANDWIDTH 3
51*2d9fd380Sjfb8856606 #define UL_5G_LOAD_BALANCE 128
52*2d9fd380Sjfb8856606 #define DL_5G_LOAD_BALANCE 128
53*2d9fd380Sjfb8856606 #define FLR_5G_TIMEOUT 610
54*2d9fd380Sjfb8856606 #endif
55*2d9fd380Sjfb8856606 
56*2d9fd380Sjfb8856606 #ifdef RTE_BASEBAND_ACC100
57*2d9fd380Sjfb8856606 #include <rte_acc100_cfg.h>
58*2d9fd380Sjfb8856606 #define ACC100PF_DRIVER_NAME   ("intel_acc100_pf")
59*2d9fd380Sjfb8856606 #define ACC100VF_DRIVER_NAME   ("intel_acc100_vf")
60*2d9fd380Sjfb8856606 #define ACC100_QMGR_NUM_AQS 16
61*2d9fd380Sjfb8856606 #define ACC100_QMGR_NUM_QGS 2
62*2d9fd380Sjfb8856606 #define ACC100_QMGR_AQ_DEPTH 5
63*2d9fd380Sjfb8856606 #define ACC100_QMGR_INVALID_IDX -1
64*2d9fd380Sjfb8856606 #define ACC100_QMGR_RR 1
65*2d9fd380Sjfb8856606 #define ACC100_QOS_GBR 0
664418919fSjohnjiang #endif
67d30ea906Sjfb8856606 
68d30ea906Sjfb8856606 #define OPS_CACHE_SIZE 256U
69d30ea906Sjfb8856606 #define OPS_POOL_SIZE_MIN 511U /* 0.5K per queue */
70d30ea906Sjfb8856606 
71d30ea906Sjfb8856606 #define SYNC_WAIT 0
72d30ea906Sjfb8856606 #define SYNC_START 1
73*2d9fd380Sjfb8856606 #define INVALID_OPAQUE -1
74d30ea906Sjfb8856606 
75d30ea906Sjfb8856606 #define INVALID_QUEUE_ID -1
76*2d9fd380Sjfb8856606 /* Increment for next code block in external HARQ memory */
77*2d9fd380Sjfb8856606 #define HARQ_INCR 32768
78*2d9fd380Sjfb8856606 /* Headroom for filler LLRs insertion in HARQ buffer */
79*2d9fd380Sjfb8856606 #define FILLER_HEADROOM 1024
80*2d9fd380Sjfb8856606 /* Constants from K0 computation from 3GPP 38.212 Table 5.4.2.1-2 */
81*2d9fd380Sjfb8856606 #define N_ZC_1 66 /* N = 66 Zc for BG 1 */
82*2d9fd380Sjfb8856606 #define N_ZC_2 50 /* N = 50 Zc for BG 2 */
83*2d9fd380Sjfb8856606 #define K0_1_1 17 /* K0 fraction numerator for rv 1 and BG 1 */
84*2d9fd380Sjfb8856606 #define K0_1_2 13 /* K0 fraction numerator for rv 1 and BG 2 */
85*2d9fd380Sjfb8856606 #define K0_2_1 33 /* K0 fraction numerator for rv 2 and BG 1 */
86*2d9fd380Sjfb8856606 #define K0_2_2 25 /* K0 fraction numerator for rv 2 and BG 2 */
87*2d9fd380Sjfb8856606 #define K0_3_1 56 /* K0 fraction numerator for rv 3 and BG 1 */
88*2d9fd380Sjfb8856606 #define K0_3_2 43 /* K0 fraction numerator for rv 3 and BG 2 */
89d30ea906Sjfb8856606 
/* Test vector loaded from file and shared by all test cases */
static struct test_bbdev_vector test_vector;

/* Switch between PMD and Interrupt for throughput TC */
static bool intr_enabled;

/* LLR arithmetic representation for numerical conversion */
static int ldpc_llr_decimals;
static int ldpc_llr_size;
/* Keep track of the LDPC decoder device capability flag */
static uint32_t ldpc_cap_flags;
100*2d9fd380Sjfb8856606 
/* Represents tested active devices */
static struct active_device {
	const char *driver_name;	/* Driver name of the device */
	uint8_t dev_id;			/* bbdev device id */
	/* Bitmap of supported op types, indexed by rte_bbdev_op_type
	 * (see set_avail_op()/is_avail_op()).
	 */
	uint16_t supported_ops;
	uint16_t queue_ids[MAX_QUEUES];	/* Ids of the configured queues */
	uint16_t nb_queues;		/* Number of valid entries in queue_ids */
	struct rte_mempool *ops_mempool;	/* Pool of bbdev operations */
	struct rte_mempool *in_mbuf_pool;	/* Pool for input mbufs */
	struct rte_mempool *hard_out_mbuf_pool;	/* Pool for hard output mbufs */
	struct rte_mempool *soft_out_mbuf_pool;	/* Pool for soft output mbufs */
	struct rte_mempool *harq_in_mbuf_pool;	/* Pool for HARQ input mbufs */
	struct rte_mempool *harq_out_mbuf_pool;	/* Pool for HARQ output mbufs */
} active_devs[RTE_BBDEV_MAX_DEVS];

/* Number of valid entries in active_devs */
static uint8_t nb_active_devs;
117d30ea906Sjfb8856606 
/* Data buffers used by BBDEV ops */
struct test_buffers {
	struct rte_bbdev_op_data *inputs;	/* Input data */
	struct rte_bbdev_op_data *hard_outputs;	/* Hard decision outputs */
	struct rte_bbdev_op_data *soft_outputs;	/* Soft decision outputs */
	struct rte_bbdev_op_data *harq_inputs;	/* HARQ combine inputs */
	struct rte_bbdev_op_data *harq_outputs;	/* HARQ combine outputs */
};
126d30ea906Sjfb8856606 
/* Operation parameters specific for given test case */
struct test_op_params {
	struct rte_mempool *mp;			/* Operations mempool */
	struct rte_bbdev_dec_op *ref_dec_op;	/* Reference decode operation */
	struct rte_bbdev_enc_op *ref_enc_op;	/* Reference encode operation */
	uint16_t burst_sz;			/* Enqueue/dequeue burst size */
	uint16_t num_to_process;		/* Total number of ops to process */
	uint16_t num_lcores;			/* Number of lcores used */
	int vector_mask;			/* Test-case specific vector mask */
	/* Start gate for worker lcores: SYNC_WAIT until the master sets
	 * SYNC_START.
	 */
	rte_atomic16_t sync;
	/* Per-socket, per-queue data buffers */
	struct test_buffers q_bufs[RTE_MAX_NUMA_NODES][MAX_QUEUES];
};
139d30ea906Sjfb8856606 
/* Contains per lcore params */
struct thread_params {
	uint8_t dev_id;			/* Device under test */
	uint16_t queue_id;		/* Queue used by this lcore */
	uint32_t lcore_id;		/* Id of this lcore */
	uint64_t start_time;		/* Start timestamp of the measurement */
	double ops_per_sec;		/* Measured operations per second */
	double mbps;			/* Measured throughput in Mbps */
	uint8_t iter_count;		/* Decoder iteration count */
	double iter_average;		/* Average decoder iteration count */
	double bler;			/* Block error rate */
	rte_atomic16_t nb_dequeued;	/* Number of ops dequeued so far */
	rte_atomic16_t processing_status;	/* Result status of this thread */
	rte_atomic16_t burst_sz;	/* Burst size used by this thread */
	struct test_op_params *op_params;	/* Shared test parameters */
	struct rte_bbdev_dec_op *dec_ops[MAX_BURST];	/* Dequeued decode ops */
	struct rte_bbdev_enc_op *enc_ops[MAX_BURST];	/* Dequeued encode ops */
};
158d30ea906Sjfb8856606 
#ifdef RTE_BBDEV_OFFLOAD_COST
/* Stores time statistics */
struct test_time_stats {
	/* Stores software enqueue total working time */
	uint64_t enq_sw_total_time;
	/* Stores minimum value of software enqueue working time */
	uint64_t enq_sw_min_time;
	/* Stores maximum value of software enqueue working time */
	uint64_t enq_sw_max_time;
	/* Stores accelerator enqueue total working time */
	uint64_t enq_acc_total_time;
	/* Stores minimum value of accelerator enqueue working time */
	uint64_t enq_acc_min_time;
	/* Stores maximum value of accelerator enqueue working time */
	uint64_t enq_acc_max_time;
	/* Stores dequeue total working time */
	uint64_t deq_total_time;
	/* Stores minimum value of dequeue working time */
	uint64_t deq_min_time;
	/* Stores maximum value of dequeue working time */
	uint64_t deq_max_time;
};
#endif
182d30ea906Sjfb8856606 
/* Signature of a test case: runs against the given active device with
 * the given operation parameters, returns TEST_SUCCESS/TEST_FAILED.
 */
typedef int (test_case_function)(struct active_device *ad,
		struct test_op_params *op_params);
185d30ea906Sjfb8856606 
186d30ea906Sjfb8856606 static inline void
mbuf_reset(struct rte_mbuf * m)1874418919fSjohnjiang mbuf_reset(struct rte_mbuf *m)
1884418919fSjohnjiang {
1894418919fSjohnjiang 	m->pkt_len = 0;
1904418919fSjohnjiang 
1914418919fSjohnjiang 	do {
1924418919fSjohnjiang 		m->data_len = 0;
1934418919fSjohnjiang 		m = m->next;
1944418919fSjohnjiang 	} while (m != NULL);
1954418919fSjohnjiang }
1964418919fSjohnjiang 
1974418919fSjohnjiang /* Read flag value 0/1 from bitmap */
1984418919fSjohnjiang static inline bool
check_bit(uint32_t bitmap,uint32_t bitmask)1994418919fSjohnjiang check_bit(uint32_t bitmap, uint32_t bitmask)
2004418919fSjohnjiang {
2014418919fSjohnjiang 	return bitmap & bitmask;
2024418919fSjohnjiang }
2034418919fSjohnjiang 
2044418919fSjohnjiang static inline void
set_avail_op(struct active_device * ad,enum rte_bbdev_op_type op_type)205d30ea906Sjfb8856606 set_avail_op(struct active_device *ad, enum rte_bbdev_op_type op_type)
206d30ea906Sjfb8856606 {
207d30ea906Sjfb8856606 	ad->supported_ops |= (1 << op_type);
208d30ea906Sjfb8856606 }
209d30ea906Sjfb8856606 
210d30ea906Sjfb8856606 static inline bool
is_avail_op(struct active_device * ad,enum rte_bbdev_op_type op_type)211d30ea906Sjfb8856606 is_avail_op(struct active_device *ad, enum rte_bbdev_op_type op_type)
212d30ea906Sjfb8856606 {
213d30ea906Sjfb8856606 	return ad->supported_ops & (1 << op_type);
214d30ea906Sjfb8856606 }
215d30ea906Sjfb8856606 
/* Check that every requested flag is present in the capability set. */
static inline bool
flags_match(uint32_t flags_req, uint32_t flags_present)
{
	/* Equivalent to (flags_req & flags_present) == flags_req:
	 * no requested flag may be missing from flags_present.
	 */
	return (flags_req & ~flags_present) == 0;
}
221d30ea906Sjfb8856606 
222d30ea906Sjfb8856606 static void
clear_soft_out_cap(uint32_t * op_flags)223d30ea906Sjfb8856606 clear_soft_out_cap(uint32_t *op_flags)
224d30ea906Sjfb8856606 {
225d30ea906Sjfb8856606 	*op_flags &= ~RTE_BBDEV_TURBO_SOFT_OUTPUT;
226d30ea906Sjfb8856606 	*op_flags &= ~RTE_BBDEV_TURBO_POS_LLR_1_BIT_SOFT_OUT;
227d30ea906Sjfb8856606 	*op_flags &= ~RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT;
228d30ea906Sjfb8856606 }
229d30ea906Sjfb8856606 
230d30ea906Sjfb8856606 static int
check_dev_cap(const struct rte_bbdev_info * dev_info)231d30ea906Sjfb8856606 check_dev_cap(const struct rte_bbdev_info *dev_info)
232d30ea906Sjfb8856606 {
233d30ea906Sjfb8856606 	unsigned int i;
2344418919fSjohnjiang 	unsigned int nb_inputs, nb_soft_outputs, nb_hard_outputs,
2354418919fSjohnjiang 		nb_harq_inputs, nb_harq_outputs;
236d30ea906Sjfb8856606 	const struct rte_bbdev_op_cap *op_cap = dev_info->drv.capabilities;
237d30ea906Sjfb8856606 
238d30ea906Sjfb8856606 	nb_inputs = test_vector.entries[DATA_INPUT].nb_segments;
239d30ea906Sjfb8856606 	nb_soft_outputs = test_vector.entries[DATA_SOFT_OUTPUT].nb_segments;
240d30ea906Sjfb8856606 	nb_hard_outputs = test_vector.entries[DATA_HARD_OUTPUT].nb_segments;
2414418919fSjohnjiang 	nb_harq_inputs  = test_vector.entries[DATA_HARQ_INPUT].nb_segments;
2424418919fSjohnjiang 	nb_harq_outputs = test_vector.entries[DATA_HARQ_OUTPUT].nb_segments;
243d30ea906Sjfb8856606 
244d30ea906Sjfb8856606 	for (i = 0; op_cap->type != RTE_BBDEV_OP_NONE; ++i, ++op_cap) {
245d30ea906Sjfb8856606 		if (op_cap->type != test_vector.op_type)
246d30ea906Sjfb8856606 			continue;
247d30ea906Sjfb8856606 
248d30ea906Sjfb8856606 		if (op_cap->type == RTE_BBDEV_OP_TURBO_DEC) {
249d30ea906Sjfb8856606 			const struct rte_bbdev_op_cap_turbo_dec *cap =
250d30ea906Sjfb8856606 					&op_cap->cap.turbo_dec;
251d30ea906Sjfb8856606 			/* Ignore lack of soft output capability, just skip
252d30ea906Sjfb8856606 			 * checking if soft output is valid.
253d30ea906Sjfb8856606 			 */
254d30ea906Sjfb8856606 			if ((test_vector.turbo_dec.op_flags &
255d30ea906Sjfb8856606 					RTE_BBDEV_TURBO_SOFT_OUTPUT) &&
256d30ea906Sjfb8856606 					!(cap->capability_flags &
257d30ea906Sjfb8856606 					RTE_BBDEV_TURBO_SOFT_OUTPUT)) {
258d30ea906Sjfb8856606 				printf(
2594418919fSjohnjiang 					"INFO: Device \"%s\" does not support soft output - soft output flags will be ignored.\n",
260d30ea906Sjfb8856606 					dev_info->dev_name);
261d30ea906Sjfb8856606 				clear_soft_out_cap(
262d30ea906Sjfb8856606 					&test_vector.turbo_dec.op_flags);
263d30ea906Sjfb8856606 			}
264d30ea906Sjfb8856606 
265d30ea906Sjfb8856606 			if (!flags_match(test_vector.turbo_dec.op_flags,
266d30ea906Sjfb8856606 					cap->capability_flags))
267d30ea906Sjfb8856606 				return TEST_FAILED;
268d30ea906Sjfb8856606 			if (nb_inputs > cap->num_buffers_src) {
269d30ea906Sjfb8856606 				printf("Too many inputs defined: %u, max: %u\n",
270d30ea906Sjfb8856606 					nb_inputs, cap->num_buffers_src);
271d30ea906Sjfb8856606 				return TEST_FAILED;
272d30ea906Sjfb8856606 			}
273d30ea906Sjfb8856606 			if (nb_soft_outputs > cap->num_buffers_soft_out &&
274d30ea906Sjfb8856606 					(test_vector.turbo_dec.op_flags &
275d30ea906Sjfb8856606 					RTE_BBDEV_TURBO_SOFT_OUTPUT)) {
276d30ea906Sjfb8856606 				printf(
277d30ea906Sjfb8856606 					"Too many soft outputs defined: %u, max: %u\n",
278d30ea906Sjfb8856606 						nb_soft_outputs,
279d30ea906Sjfb8856606 						cap->num_buffers_soft_out);
280d30ea906Sjfb8856606 				return TEST_FAILED;
281d30ea906Sjfb8856606 			}
282d30ea906Sjfb8856606 			if (nb_hard_outputs > cap->num_buffers_hard_out) {
283d30ea906Sjfb8856606 				printf(
284d30ea906Sjfb8856606 					"Too many hard outputs defined: %u, max: %u\n",
285d30ea906Sjfb8856606 						nb_hard_outputs,
286d30ea906Sjfb8856606 						cap->num_buffers_hard_out);
287d30ea906Sjfb8856606 				return TEST_FAILED;
288d30ea906Sjfb8856606 			}
289d30ea906Sjfb8856606 			if (intr_enabled && !(cap->capability_flags &
290d30ea906Sjfb8856606 					RTE_BBDEV_TURBO_DEC_INTERRUPTS)) {
291d30ea906Sjfb8856606 				printf(
292d30ea906Sjfb8856606 					"Dequeue interrupts are not supported!\n");
293d30ea906Sjfb8856606 				return TEST_FAILED;
294d30ea906Sjfb8856606 			}
295d30ea906Sjfb8856606 
296d30ea906Sjfb8856606 			return TEST_SUCCESS;
297d30ea906Sjfb8856606 		} else if (op_cap->type == RTE_BBDEV_OP_TURBO_ENC) {
298d30ea906Sjfb8856606 			const struct rte_bbdev_op_cap_turbo_enc *cap =
299d30ea906Sjfb8856606 					&op_cap->cap.turbo_enc;
300d30ea906Sjfb8856606 
301d30ea906Sjfb8856606 			if (!flags_match(test_vector.turbo_enc.op_flags,
302d30ea906Sjfb8856606 					cap->capability_flags))
303d30ea906Sjfb8856606 				return TEST_FAILED;
304d30ea906Sjfb8856606 			if (nb_inputs > cap->num_buffers_src) {
305d30ea906Sjfb8856606 				printf("Too many inputs defined: %u, max: %u\n",
306d30ea906Sjfb8856606 					nb_inputs, cap->num_buffers_src);
307d30ea906Sjfb8856606 				return TEST_FAILED;
308d30ea906Sjfb8856606 			}
309d30ea906Sjfb8856606 			if (nb_hard_outputs > cap->num_buffers_dst) {
310d30ea906Sjfb8856606 				printf(
311d30ea906Sjfb8856606 					"Too many hard outputs defined: %u, max: %u\n",
3124418919fSjohnjiang 					nb_hard_outputs, cap->num_buffers_dst);
313d30ea906Sjfb8856606 				return TEST_FAILED;
314d30ea906Sjfb8856606 			}
315d30ea906Sjfb8856606 			if (intr_enabled && !(cap->capability_flags &
316d30ea906Sjfb8856606 					RTE_BBDEV_TURBO_ENC_INTERRUPTS)) {
317d30ea906Sjfb8856606 				printf(
318d30ea906Sjfb8856606 					"Dequeue interrupts are not supported!\n");
319d30ea906Sjfb8856606 				return TEST_FAILED;
320d30ea906Sjfb8856606 			}
321d30ea906Sjfb8856606 
322d30ea906Sjfb8856606 			return TEST_SUCCESS;
3234418919fSjohnjiang 		} else if (op_cap->type == RTE_BBDEV_OP_LDPC_ENC) {
3244418919fSjohnjiang 			const struct rte_bbdev_op_cap_ldpc_enc *cap =
3254418919fSjohnjiang 					&op_cap->cap.ldpc_enc;
3264418919fSjohnjiang 
3274418919fSjohnjiang 			if (!flags_match(test_vector.ldpc_enc.op_flags,
3284418919fSjohnjiang 					cap->capability_flags)){
3294418919fSjohnjiang 				printf("Flag Mismatch\n");
3304418919fSjohnjiang 				return TEST_FAILED;
3314418919fSjohnjiang 			}
3324418919fSjohnjiang 			if (nb_inputs > cap->num_buffers_src) {
3334418919fSjohnjiang 				printf("Too many inputs defined: %u, max: %u\n",
3344418919fSjohnjiang 					nb_inputs, cap->num_buffers_src);
3354418919fSjohnjiang 				return TEST_FAILED;
3364418919fSjohnjiang 			}
3374418919fSjohnjiang 			if (nb_hard_outputs > cap->num_buffers_dst) {
3384418919fSjohnjiang 				printf(
3394418919fSjohnjiang 					"Too many hard outputs defined: %u, max: %u\n",
3404418919fSjohnjiang 					nb_hard_outputs, cap->num_buffers_dst);
3414418919fSjohnjiang 				return TEST_FAILED;
3424418919fSjohnjiang 			}
3434418919fSjohnjiang 			if (intr_enabled && !(cap->capability_flags &
344*2d9fd380Sjfb8856606 					RTE_BBDEV_LDPC_ENC_INTERRUPTS)) {
3454418919fSjohnjiang 				printf(
3464418919fSjohnjiang 					"Dequeue interrupts are not supported!\n");
3474418919fSjohnjiang 				return TEST_FAILED;
3484418919fSjohnjiang 			}
3494418919fSjohnjiang 
3504418919fSjohnjiang 			return TEST_SUCCESS;
3514418919fSjohnjiang 		} else if (op_cap->type == RTE_BBDEV_OP_LDPC_DEC) {
3524418919fSjohnjiang 			const struct rte_bbdev_op_cap_ldpc_dec *cap =
3534418919fSjohnjiang 					&op_cap->cap.ldpc_dec;
3544418919fSjohnjiang 
3554418919fSjohnjiang 			if (!flags_match(test_vector.ldpc_dec.op_flags,
3564418919fSjohnjiang 					cap->capability_flags)){
3574418919fSjohnjiang 				printf("Flag Mismatch\n");
3584418919fSjohnjiang 				return TEST_FAILED;
3594418919fSjohnjiang 			}
3604418919fSjohnjiang 			if (nb_inputs > cap->num_buffers_src) {
3614418919fSjohnjiang 				printf("Too many inputs defined: %u, max: %u\n",
3624418919fSjohnjiang 					nb_inputs, cap->num_buffers_src);
3634418919fSjohnjiang 				return TEST_FAILED;
3644418919fSjohnjiang 			}
3654418919fSjohnjiang 			if (nb_hard_outputs > cap->num_buffers_hard_out) {
3664418919fSjohnjiang 				printf(
3674418919fSjohnjiang 					"Too many hard outputs defined: %u, max: %u\n",
3684418919fSjohnjiang 					nb_hard_outputs,
3694418919fSjohnjiang 					cap->num_buffers_hard_out);
3704418919fSjohnjiang 				return TEST_FAILED;
3714418919fSjohnjiang 			}
3724418919fSjohnjiang 			if (nb_harq_inputs > cap->num_buffers_hard_out) {
3734418919fSjohnjiang 				printf(
3744418919fSjohnjiang 					"Too many HARQ inputs defined: %u, max: %u\n",
3754418919fSjohnjiang 					nb_hard_outputs,
3764418919fSjohnjiang 					cap->num_buffers_hard_out);
3774418919fSjohnjiang 				return TEST_FAILED;
3784418919fSjohnjiang 			}
3794418919fSjohnjiang 			if (nb_harq_outputs > cap->num_buffers_hard_out) {
3804418919fSjohnjiang 				printf(
3814418919fSjohnjiang 					"Too many HARQ outputs defined: %u, max: %u\n",
3824418919fSjohnjiang 					nb_hard_outputs,
3834418919fSjohnjiang 					cap->num_buffers_hard_out);
3844418919fSjohnjiang 				return TEST_FAILED;
3854418919fSjohnjiang 			}
3864418919fSjohnjiang 			if (intr_enabled && !(cap->capability_flags &
387*2d9fd380Sjfb8856606 					RTE_BBDEV_LDPC_DEC_INTERRUPTS)) {
3884418919fSjohnjiang 				printf(
3894418919fSjohnjiang 					"Dequeue interrupts are not supported!\n");
3904418919fSjohnjiang 				return TEST_FAILED;
3914418919fSjohnjiang 			}
392*2d9fd380Sjfb8856606 			if (intr_enabled && (test_vector.ldpc_dec.op_flags &
393*2d9fd380Sjfb8856606 				(RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE |
394*2d9fd380Sjfb8856606 				RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE |
395*2d9fd380Sjfb8856606 				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK
396*2d9fd380Sjfb8856606 					))) {
397*2d9fd380Sjfb8856606 				printf("Skip loop-back with interrupt\n");
398*2d9fd380Sjfb8856606 				return TEST_FAILED;
399*2d9fd380Sjfb8856606 			}
4004418919fSjohnjiang 			return TEST_SUCCESS;
401d30ea906Sjfb8856606 		}
402d30ea906Sjfb8856606 	}
403d30ea906Sjfb8856606 
404d30ea906Sjfb8856606 	if ((i == 0) && (test_vector.op_type == RTE_BBDEV_OP_NONE))
405d30ea906Sjfb8856606 		return TEST_SUCCESS; /* Special case for NULL device */
406d30ea906Sjfb8856606 
407d30ea906Sjfb8856606 	return TEST_FAILED;
408d30ea906Sjfb8856606 }
409d30ea906Sjfb8856606 
/* calculates optimal mempool size not smaller than the val */
static unsigned int
optimal_mempool_size(unsigned int val)
{
	/* Round up to the next power of two and subtract one so the
	 * resulting size has the 2^n - 1 form.
	 */
	const unsigned int pow2_above = rte_align32pow2(val + 1);

	return pow2_above - 1;
}
416d30ea906Sjfb8856606 
417d30ea906Sjfb8856606 /* allocates mbuf mempool for inputs and outputs */
418d30ea906Sjfb8856606 static struct rte_mempool *
create_mbuf_pool(struct op_data_entries * entries,uint8_t dev_id,int socket_id,unsigned int mbuf_pool_size,const char * op_type_str)419d30ea906Sjfb8856606 create_mbuf_pool(struct op_data_entries *entries, uint8_t dev_id,
420d30ea906Sjfb8856606 		int socket_id, unsigned int mbuf_pool_size,
421d30ea906Sjfb8856606 		const char *op_type_str)
422d30ea906Sjfb8856606 {
423d30ea906Sjfb8856606 	unsigned int i;
424d30ea906Sjfb8856606 	uint32_t max_seg_sz = 0;
425d30ea906Sjfb8856606 	char pool_name[RTE_MEMPOOL_NAMESIZE];
426d30ea906Sjfb8856606 
427d30ea906Sjfb8856606 	/* find max input segment size */
428d30ea906Sjfb8856606 	for (i = 0; i < entries->nb_segments; ++i)
429d30ea906Sjfb8856606 		if (entries->segments[i].length > max_seg_sz)
430d30ea906Sjfb8856606 			max_seg_sz = entries->segments[i].length;
431d30ea906Sjfb8856606 
432d30ea906Sjfb8856606 	snprintf(pool_name, sizeof(pool_name), "%s_pool_%u", op_type_str,
433d30ea906Sjfb8856606 			dev_id);
434d30ea906Sjfb8856606 	return rte_pktmbuf_pool_create(pool_name, mbuf_pool_size, 0, 0,
435*2d9fd380Sjfb8856606 			RTE_MAX(max_seg_sz + RTE_PKTMBUF_HEADROOM
436*2d9fd380Sjfb8856606 					+ FILLER_HEADROOM,
437d30ea906Sjfb8856606 			(unsigned int)RTE_MBUF_DEFAULT_BUF_SIZE), socket_id);
438d30ea906Sjfb8856606 }
439d30ea906Sjfb8856606 
/* Create the operations mempool plus one pktmbuf pool for every data
 * direction the loaded test vector actually uses, storing the pools in
 * *ad. Returns TEST_SUCCESS; on any allocation failure the
 * TEST_ASSERT_NOT_NULL macro fails the test from within this function.
 */
static int
create_mempools(struct active_device *ad, int socket_id,
		enum rte_bbdev_op_type org_op_type, uint16_t num_ops)
{
	struct rte_mempool *mp;
	unsigned int ops_pool_size, mbuf_pool_size = 0;
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	const char *op_type_str;
	enum rte_bbdev_op_type op_type = org_op_type;

	struct op_data_entries *in = &test_vector.entries[DATA_INPUT];
	struct op_data_entries *hard_out =
			&test_vector.entries[DATA_HARD_OUTPUT];
	struct op_data_entries *soft_out =
			&test_vector.entries[DATA_SOFT_OUTPUT];
	struct op_data_entries *harq_in =
			&test_vector.entries[DATA_HARQ_INPUT];
	struct op_data_entries *harq_out =
			&test_vector.entries[DATA_HARQ_OUTPUT];

	/* allocate ops mempool */
	ops_pool_size = optimal_mempool_size(RTE_MAX(
			/* Ops used plus 1 reference op */
			RTE_MAX((unsigned int)(ad->nb_queues * num_ops + 1),
			/* Minimal cache size plus 1 reference op */
			(unsigned int)(1.5 * rte_lcore_count() *
					OPS_CACHE_SIZE + 1)),
			OPS_POOL_SIZE_MIN));

	/* A NONE vector still needs an ops pool; borrow the TURBO_ENC type */
	if (org_op_type == RTE_BBDEV_OP_NONE)
		op_type = RTE_BBDEV_OP_TURBO_ENC;

	op_type_str = rte_bbdev_op_type_str(op_type);
	TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);

	snprintf(pool_name, sizeof(pool_name), "%s_pool_%u", op_type_str,
			ad->dev_id);
	mp = rte_bbdev_op_pool_create(pool_name, op_type,
			ops_pool_size, OPS_CACHE_SIZE, socket_id);
	TEST_ASSERT_NOT_NULL(mp,
			"ERROR Failed to create %u items ops pool for dev %u on socket %u.",
			ops_pool_size,
			ad->dev_id,
			socket_id);
	ad->ops_mempool = mp;

	/* Do not create inputs and outputs mbufs for BaseBand Null Device */
	if (org_op_type == RTE_BBDEV_OP_NONE)
		return TEST_SUCCESS;

	/* Inputs */
	if (in->nb_segments > 0) {
		mbuf_pool_size = optimal_mempool_size(ops_pool_size *
				in->nb_segments);
		mp = create_mbuf_pool(in, ad->dev_id, socket_id,
				mbuf_pool_size, "in");
		TEST_ASSERT_NOT_NULL(mp,
				"ERROR Failed to create %u items input pktmbuf pool for dev %u on socket %u.",
				mbuf_pool_size,
				ad->dev_id,
				socket_id);
		ad->in_mbuf_pool = mp;
	}

	/* Hard outputs */
	if (hard_out->nb_segments > 0) {
		mbuf_pool_size = optimal_mempool_size(ops_pool_size *
				hard_out->nb_segments);
		mp = create_mbuf_pool(hard_out, ad->dev_id, socket_id,
				mbuf_pool_size,
				"hard_out");
		TEST_ASSERT_NOT_NULL(mp,
				"ERROR Failed to create %u items hard output pktmbuf pool for dev %u on socket %u.",
				mbuf_pool_size,
				ad->dev_id,
				socket_id);
		ad->hard_out_mbuf_pool = mp;
	}

	/* Soft outputs */
	if (soft_out->nb_segments > 0) {
		mbuf_pool_size = optimal_mempool_size(ops_pool_size *
				soft_out->nb_segments);
		mp = create_mbuf_pool(soft_out, ad->dev_id, socket_id,
				mbuf_pool_size,
				"soft_out");
		TEST_ASSERT_NOT_NULL(mp,
				"ERROR Failed to create %uB soft output pktmbuf pool for dev %u on socket %u.",
				mbuf_pool_size,
				ad->dev_id,
				socket_id);
		ad->soft_out_mbuf_pool = mp;
	}

	/* HARQ inputs */
	if (harq_in->nb_segments > 0) {
		mbuf_pool_size = optimal_mempool_size(ops_pool_size *
				harq_in->nb_segments);
		mp = create_mbuf_pool(harq_in, ad->dev_id, socket_id,
				mbuf_pool_size,
				"harq_in");
		TEST_ASSERT_NOT_NULL(mp,
				"ERROR Failed to create %uB harq input pktmbuf pool for dev %u on socket %u.",
				mbuf_pool_size,
				ad->dev_id,
				socket_id);
		ad->harq_in_mbuf_pool = mp;
	}

	/* HARQ outputs */
	if (harq_out->nb_segments > 0) {
		mbuf_pool_size = optimal_mempool_size(ops_pool_size *
				harq_out->nb_segments);
		mp = create_mbuf_pool(harq_out, ad->dev_id, socket_id,
				mbuf_pool_size,
				"harq_out");
		TEST_ASSERT_NOT_NULL(mp,
				"ERROR Failed to create %uB harq output pktmbuf pool for dev %u on socket %u.",
				mbuf_pool_size,
				ad->dev_id,
				socket_id);
		ad->harq_out_mbuf_pool = mp;
	}

	return TEST_SUCCESS;
}
566d30ea906Sjfb8856606 
567d30ea906Sjfb8856606 static int
add_bbdev_dev(uint8_t dev_id,struct rte_bbdev_info * info,struct test_bbdev_vector * vector)568d30ea906Sjfb8856606 add_bbdev_dev(uint8_t dev_id, struct rte_bbdev_info *info,
569d30ea906Sjfb8856606 		struct test_bbdev_vector *vector)
570d30ea906Sjfb8856606 {
571d30ea906Sjfb8856606 	int ret;
572d30ea906Sjfb8856606 	unsigned int queue_id;
573d30ea906Sjfb8856606 	struct rte_bbdev_queue_conf qconf;
574d30ea906Sjfb8856606 	struct active_device *ad = &active_devs[nb_active_devs];
575d30ea906Sjfb8856606 	unsigned int nb_queues;
576d30ea906Sjfb8856606 	enum rte_bbdev_op_type op_type = vector->op_type;
577d30ea906Sjfb8856606 
5784418919fSjohnjiang /* Configure fpga lte fec with PF & VF values
5794418919fSjohnjiang  * if '-i' flag is set and using fpga device
5804418919fSjohnjiang  */
581*2d9fd380Sjfb8856606 #ifdef RTE_BASEBAND_FPGA_LTE_FEC
5824418919fSjohnjiang 	if ((get_init_device() == true) &&
583*2d9fd380Sjfb8856606 		(!strcmp(info->drv.driver_name, FPGA_LTE_PF_DRIVER_NAME))) {
584*2d9fd380Sjfb8856606 		struct rte_fpga_lte_fec_conf conf;
5854418919fSjohnjiang 		unsigned int i;
5864418919fSjohnjiang 
587*2d9fd380Sjfb8856606 		printf("Configure FPGA LTE FEC Driver %s with default values\n",
5884418919fSjohnjiang 				info->drv.driver_name);
5894418919fSjohnjiang 
5904418919fSjohnjiang 		/* clear default configuration before initialization */
591*2d9fd380Sjfb8856606 		memset(&conf, 0, sizeof(struct rte_fpga_lte_fec_conf));
5924418919fSjohnjiang 
5934418919fSjohnjiang 		/* Set PF mode :
5944418919fSjohnjiang 		 * true if PF is used for data plane
5954418919fSjohnjiang 		 * false for VFs
5964418919fSjohnjiang 		 */
5974418919fSjohnjiang 		conf.pf_mode_en = true;
5984418919fSjohnjiang 
5994418919fSjohnjiang 		for (i = 0; i < FPGA_LTE_FEC_NUM_VFS; ++i) {
6004418919fSjohnjiang 			/* Number of UL queues per VF (fpga supports 8 VFs) */
601*2d9fd380Sjfb8856606 			conf.vf_ul_queues_number[i] = VF_UL_4G_QUEUE_VALUE;
6024418919fSjohnjiang 			/* Number of DL queues per VF (fpga supports 8 VFs) */
603*2d9fd380Sjfb8856606 			conf.vf_dl_queues_number[i] = VF_DL_4G_QUEUE_VALUE;
6044418919fSjohnjiang 		}
6054418919fSjohnjiang 
6064418919fSjohnjiang 		/* UL bandwidth. Needed for schedule algorithm */
607*2d9fd380Sjfb8856606 		conf.ul_bandwidth = UL_4G_BANDWIDTH;
6084418919fSjohnjiang 		/* DL bandwidth */
609*2d9fd380Sjfb8856606 		conf.dl_bandwidth = DL_4G_BANDWIDTH;
6104418919fSjohnjiang 
6114418919fSjohnjiang 		/* UL & DL load Balance Factor to 64 */
612*2d9fd380Sjfb8856606 		conf.ul_load_balance = UL_4G_LOAD_BALANCE;
613*2d9fd380Sjfb8856606 		conf.dl_load_balance = DL_4G_LOAD_BALANCE;
6144418919fSjohnjiang 
6154418919fSjohnjiang 		/**< FLR timeout value */
616*2d9fd380Sjfb8856606 		conf.flr_time_out = FLR_4G_TIMEOUT;
6174418919fSjohnjiang 
6184418919fSjohnjiang 		/* setup FPGA PF with configuration information */
619*2d9fd380Sjfb8856606 		ret = rte_fpga_lte_fec_configure(info->dev_name, &conf);
6204418919fSjohnjiang 		TEST_ASSERT_SUCCESS(ret,
6214418919fSjohnjiang 				"Failed to configure 4G FPGA PF for bbdev %s",
6224418919fSjohnjiang 				info->dev_name);
6234418919fSjohnjiang 	}
6244418919fSjohnjiang #endif
625*2d9fd380Sjfb8856606 #ifdef RTE_BASEBAND_FPGA_5GNR_FEC
626*2d9fd380Sjfb8856606 	if ((get_init_device() == true) &&
627*2d9fd380Sjfb8856606 		(!strcmp(info->drv.driver_name, FPGA_5GNR_PF_DRIVER_NAME))) {
628*2d9fd380Sjfb8856606 		struct rte_fpga_5gnr_fec_conf conf;
629*2d9fd380Sjfb8856606 		unsigned int i;
630*2d9fd380Sjfb8856606 
631*2d9fd380Sjfb8856606 		printf("Configure FPGA 5GNR FEC Driver %s with default values\n",
632*2d9fd380Sjfb8856606 				info->drv.driver_name);
633*2d9fd380Sjfb8856606 
634*2d9fd380Sjfb8856606 		/* clear default configuration before initialization */
635*2d9fd380Sjfb8856606 		memset(&conf, 0, sizeof(struct rte_fpga_5gnr_fec_conf));
636*2d9fd380Sjfb8856606 
637*2d9fd380Sjfb8856606 		/* Set PF mode :
638*2d9fd380Sjfb8856606 		 * true if PF is used for data plane
639*2d9fd380Sjfb8856606 		 * false for VFs
640*2d9fd380Sjfb8856606 		 */
641*2d9fd380Sjfb8856606 		conf.pf_mode_en = true;
642*2d9fd380Sjfb8856606 
643*2d9fd380Sjfb8856606 		for (i = 0; i < FPGA_5GNR_FEC_NUM_VFS; ++i) {
644*2d9fd380Sjfb8856606 			/* Number of UL queues per VF (fpga supports 8 VFs) */
645*2d9fd380Sjfb8856606 			conf.vf_ul_queues_number[i] = VF_UL_5G_QUEUE_VALUE;
646*2d9fd380Sjfb8856606 			/* Number of DL queues per VF (fpga supports 8 VFs) */
647*2d9fd380Sjfb8856606 			conf.vf_dl_queues_number[i] = VF_DL_5G_QUEUE_VALUE;
648*2d9fd380Sjfb8856606 		}
649*2d9fd380Sjfb8856606 
650*2d9fd380Sjfb8856606 		/* UL bandwidth. Needed for schedule algorithm */
651*2d9fd380Sjfb8856606 		conf.ul_bandwidth = UL_5G_BANDWIDTH;
652*2d9fd380Sjfb8856606 		/* DL bandwidth */
653*2d9fd380Sjfb8856606 		conf.dl_bandwidth = DL_5G_BANDWIDTH;
654*2d9fd380Sjfb8856606 
655*2d9fd380Sjfb8856606 		/* UL & DL load Balance Factor to 64 */
656*2d9fd380Sjfb8856606 		conf.ul_load_balance = UL_5G_LOAD_BALANCE;
657*2d9fd380Sjfb8856606 		conf.dl_load_balance = DL_5G_LOAD_BALANCE;
658*2d9fd380Sjfb8856606 
659*2d9fd380Sjfb8856606 		/**< FLR timeout value */
660*2d9fd380Sjfb8856606 		conf.flr_time_out = FLR_5G_TIMEOUT;
661*2d9fd380Sjfb8856606 
662*2d9fd380Sjfb8856606 		/* setup FPGA PF with configuration information */
663*2d9fd380Sjfb8856606 		ret = rte_fpga_5gnr_fec_configure(info->dev_name, &conf);
664*2d9fd380Sjfb8856606 		TEST_ASSERT_SUCCESS(ret,
665*2d9fd380Sjfb8856606 				"Failed to configure 5G FPGA PF for bbdev %s",
666*2d9fd380Sjfb8856606 				info->dev_name);
667*2d9fd380Sjfb8856606 	}
668*2d9fd380Sjfb8856606 #endif
669*2d9fd380Sjfb8856606 #ifdef RTE_BASEBAND_ACC100
670*2d9fd380Sjfb8856606 	if ((get_init_device() == true) &&
671*2d9fd380Sjfb8856606 		(!strcmp(info->drv.driver_name, ACC100PF_DRIVER_NAME))) {
672*2d9fd380Sjfb8856606 		struct rte_acc100_conf conf;
673*2d9fd380Sjfb8856606 		unsigned int i;
674*2d9fd380Sjfb8856606 
675*2d9fd380Sjfb8856606 		printf("Configure ACC100 FEC Driver %s with default values\n",
676*2d9fd380Sjfb8856606 				info->drv.driver_name);
677*2d9fd380Sjfb8856606 
678*2d9fd380Sjfb8856606 		/* clear default configuration before initialization */
679*2d9fd380Sjfb8856606 		memset(&conf, 0, sizeof(struct rte_acc100_conf));
680*2d9fd380Sjfb8856606 
681*2d9fd380Sjfb8856606 		/* Always set in PF mode for built-in configuration */
682*2d9fd380Sjfb8856606 		conf.pf_mode_en = true;
683*2d9fd380Sjfb8856606 		for (i = 0; i < RTE_ACC100_NUM_VFS; ++i) {
684*2d9fd380Sjfb8856606 			conf.arb_dl_4g[i].gbr_threshold1 = ACC100_QOS_GBR;
685*2d9fd380Sjfb8856606 			conf.arb_dl_4g[i].gbr_threshold1 = ACC100_QOS_GBR;
686*2d9fd380Sjfb8856606 			conf.arb_dl_4g[i].round_robin_weight = ACC100_QMGR_RR;
687*2d9fd380Sjfb8856606 			conf.arb_ul_4g[i].gbr_threshold1 = ACC100_QOS_GBR;
688*2d9fd380Sjfb8856606 			conf.arb_ul_4g[i].gbr_threshold1 = ACC100_QOS_GBR;
689*2d9fd380Sjfb8856606 			conf.arb_ul_4g[i].round_robin_weight = ACC100_QMGR_RR;
690*2d9fd380Sjfb8856606 			conf.arb_dl_5g[i].gbr_threshold1 = ACC100_QOS_GBR;
691*2d9fd380Sjfb8856606 			conf.arb_dl_5g[i].gbr_threshold1 = ACC100_QOS_GBR;
692*2d9fd380Sjfb8856606 			conf.arb_dl_5g[i].round_robin_weight = ACC100_QMGR_RR;
693*2d9fd380Sjfb8856606 			conf.arb_ul_5g[i].gbr_threshold1 = ACC100_QOS_GBR;
694*2d9fd380Sjfb8856606 			conf.arb_ul_5g[i].gbr_threshold1 = ACC100_QOS_GBR;
695*2d9fd380Sjfb8856606 			conf.arb_ul_5g[i].round_robin_weight = ACC100_QMGR_RR;
696*2d9fd380Sjfb8856606 		}
697*2d9fd380Sjfb8856606 
698*2d9fd380Sjfb8856606 		conf.input_pos_llr_1_bit = true;
699*2d9fd380Sjfb8856606 		conf.output_pos_llr_1_bit = true;
700*2d9fd380Sjfb8856606 		conf.num_vf_bundles = 1; /**< Number of VF bundles to setup */
701*2d9fd380Sjfb8856606 
702*2d9fd380Sjfb8856606 		conf.q_ul_4g.num_qgroups = ACC100_QMGR_NUM_QGS;
703*2d9fd380Sjfb8856606 		conf.q_ul_4g.first_qgroup_index = ACC100_QMGR_INVALID_IDX;
704*2d9fd380Sjfb8856606 		conf.q_ul_4g.num_aqs_per_groups = ACC100_QMGR_NUM_AQS;
705*2d9fd380Sjfb8856606 		conf.q_ul_4g.aq_depth_log2 = ACC100_QMGR_AQ_DEPTH;
706*2d9fd380Sjfb8856606 		conf.q_dl_4g.num_qgroups = ACC100_QMGR_NUM_QGS;
707*2d9fd380Sjfb8856606 		conf.q_dl_4g.first_qgroup_index = ACC100_QMGR_INVALID_IDX;
708*2d9fd380Sjfb8856606 		conf.q_dl_4g.num_aqs_per_groups = ACC100_QMGR_NUM_AQS;
709*2d9fd380Sjfb8856606 		conf.q_dl_4g.aq_depth_log2 = ACC100_QMGR_AQ_DEPTH;
710*2d9fd380Sjfb8856606 		conf.q_ul_5g.num_qgroups = ACC100_QMGR_NUM_QGS;
711*2d9fd380Sjfb8856606 		conf.q_ul_5g.first_qgroup_index = ACC100_QMGR_INVALID_IDX;
712*2d9fd380Sjfb8856606 		conf.q_ul_5g.num_aqs_per_groups = ACC100_QMGR_NUM_AQS;
713*2d9fd380Sjfb8856606 		conf.q_ul_5g.aq_depth_log2 = ACC100_QMGR_AQ_DEPTH;
714*2d9fd380Sjfb8856606 		conf.q_dl_5g.num_qgroups = ACC100_QMGR_NUM_QGS;
715*2d9fd380Sjfb8856606 		conf.q_dl_5g.first_qgroup_index = ACC100_QMGR_INVALID_IDX;
716*2d9fd380Sjfb8856606 		conf.q_dl_5g.num_aqs_per_groups = ACC100_QMGR_NUM_AQS;
717*2d9fd380Sjfb8856606 		conf.q_dl_5g.aq_depth_log2 = ACC100_QMGR_AQ_DEPTH;
718*2d9fd380Sjfb8856606 
719*2d9fd380Sjfb8856606 		/* setup PF with configuration information */
720*2d9fd380Sjfb8856606 		ret = rte_acc100_configure(info->dev_name, &conf);
721*2d9fd380Sjfb8856606 		TEST_ASSERT_SUCCESS(ret,
722*2d9fd380Sjfb8856606 				"Failed to configure ACC100 PF for bbdev %s",
723*2d9fd380Sjfb8856606 				info->dev_name);
724*2d9fd380Sjfb8856606 	}
725*2d9fd380Sjfb8856606 #endif
726*2d9fd380Sjfb8856606 	/* Let's refresh this now this is configured */
727*2d9fd380Sjfb8856606 	rte_bbdev_info_get(dev_id, info);
728d30ea906Sjfb8856606 	nb_queues = RTE_MIN(rte_lcore_count(), info->drv.max_num_queues);
7294418919fSjohnjiang 	nb_queues = RTE_MIN(nb_queues, (unsigned int) MAX_QUEUES);
7304418919fSjohnjiang 
731d30ea906Sjfb8856606 	/* setup device */
732d30ea906Sjfb8856606 	ret = rte_bbdev_setup_queues(dev_id, nb_queues, info->socket_id);
733d30ea906Sjfb8856606 	if (ret < 0) {
734d30ea906Sjfb8856606 		printf("rte_bbdev_setup_queues(%u, %u, %d) ret %i\n",
735d30ea906Sjfb8856606 				dev_id, nb_queues, info->socket_id, ret);
736d30ea906Sjfb8856606 		return TEST_FAILED;
737d30ea906Sjfb8856606 	}
738d30ea906Sjfb8856606 
739d30ea906Sjfb8856606 	/* configure interrupts if needed */
740d30ea906Sjfb8856606 	if (intr_enabled) {
741d30ea906Sjfb8856606 		ret = rte_bbdev_intr_enable(dev_id);
742d30ea906Sjfb8856606 		if (ret < 0) {
743d30ea906Sjfb8856606 			printf("rte_bbdev_intr_enable(%u) ret %i\n", dev_id,
744d30ea906Sjfb8856606 					ret);
745d30ea906Sjfb8856606 			return TEST_FAILED;
746d30ea906Sjfb8856606 		}
747d30ea906Sjfb8856606 	}
748d30ea906Sjfb8856606 
749d30ea906Sjfb8856606 	/* setup device queues */
750d30ea906Sjfb8856606 	qconf.socket = info->socket_id;
751d30ea906Sjfb8856606 	qconf.queue_size = info->drv.default_queue_conf.queue_size;
752d30ea906Sjfb8856606 	qconf.priority = 0;
753d30ea906Sjfb8856606 	qconf.deferred_start = 0;
754d30ea906Sjfb8856606 	qconf.op_type = op_type;
755d30ea906Sjfb8856606 
756d30ea906Sjfb8856606 	for (queue_id = 0; queue_id < nb_queues; ++queue_id) {
757d30ea906Sjfb8856606 		ret = rte_bbdev_queue_configure(dev_id, queue_id, &qconf);
758d30ea906Sjfb8856606 		if (ret != 0) {
759d30ea906Sjfb8856606 			printf(
760d30ea906Sjfb8856606 					"Allocated all queues (id=%u) at prio%u on dev%u\n",
761d30ea906Sjfb8856606 					queue_id, qconf.priority, dev_id);
762d30ea906Sjfb8856606 			qconf.priority++;
763d30ea906Sjfb8856606 			ret = rte_bbdev_queue_configure(ad->dev_id, queue_id,
764d30ea906Sjfb8856606 					&qconf);
765d30ea906Sjfb8856606 		}
766d30ea906Sjfb8856606 		if (ret != 0) {
767d30ea906Sjfb8856606 			printf("All queues on dev %u allocated: %u\n",
768d30ea906Sjfb8856606 					dev_id, queue_id);
769d30ea906Sjfb8856606 			break;
770d30ea906Sjfb8856606 		}
771d30ea906Sjfb8856606 		ad->queue_ids[queue_id] = queue_id;
772d30ea906Sjfb8856606 	}
773d30ea906Sjfb8856606 	TEST_ASSERT(queue_id != 0,
774d30ea906Sjfb8856606 			"ERROR Failed to configure any queues on dev %u",
775d30ea906Sjfb8856606 			dev_id);
776d30ea906Sjfb8856606 	ad->nb_queues = queue_id;
777d30ea906Sjfb8856606 
778d30ea906Sjfb8856606 	set_avail_op(ad, op_type);
779d30ea906Sjfb8856606 
780d30ea906Sjfb8856606 	return TEST_SUCCESS;
781d30ea906Sjfb8856606 }
782d30ea906Sjfb8856606 
783d30ea906Sjfb8856606 static int
add_active_device(uint8_t dev_id,struct rte_bbdev_info * info,struct test_bbdev_vector * vector)784d30ea906Sjfb8856606 add_active_device(uint8_t dev_id, struct rte_bbdev_info *info,
785d30ea906Sjfb8856606 		struct test_bbdev_vector *vector)
786d30ea906Sjfb8856606 {
787d30ea906Sjfb8856606 	int ret;
788d30ea906Sjfb8856606 
789d30ea906Sjfb8856606 	active_devs[nb_active_devs].driver_name = info->drv.driver_name;
790d30ea906Sjfb8856606 	active_devs[nb_active_devs].dev_id = dev_id;
791d30ea906Sjfb8856606 
792d30ea906Sjfb8856606 	ret = add_bbdev_dev(dev_id, info, vector);
793d30ea906Sjfb8856606 	if (ret == TEST_SUCCESS)
794d30ea906Sjfb8856606 		++nb_active_devs;
795d30ea906Sjfb8856606 	return ret;
796d30ea906Sjfb8856606 }
797d30ea906Sjfb8856606 
798d30ea906Sjfb8856606 static uint8_t
populate_active_devices(void)799d30ea906Sjfb8856606 populate_active_devices(void)
800d30ea906Sjfb8856606 {
801d30ea906Sjfb8856606 	int ret;
802d30ea906Sjfb8856606 	uint8_t dev_id;
803d30ea906Sjfb8856606 	uint8_t nb_devs_added = 0;
804d30ea906Sjfb8856606 	struct rte_bbdev_info info;
805d30ea906Sjfb8856606 
806d30ea906Sjfb8856606 	RTE_BBDEV_FOREACH(dev_id) {
807d30ea906Sjfb8856606 		rte_bbdev_info_get(dev_id, &info);
808d30ea906Sjfb8856606 
809d30ea906Sjfb8856606 		if (check_dev_cap(&info)) {
810d30ea906Sjfb8856606 			printf(
811d30ea906Sjfb8856606 				"Device %d (%s) does not support specified capabilities\n",
812d30ea906Sjfb8856606 					dev_id, info.dev_name);
813d30ea906Sjfb8856606 			continue;
814d30ea906Sjfb8856606 		}
815d30ea906Sjfb8856606 
816d30ea906Sjfb8856606 		ret = add_active_device(dev_id, &info, &test_vector);
817d30ea906Sjfb8856606 		if (ret != 0) {
818d30ea906Sjfb8856606 			printf("Adding active bbdev %s skipped\n",
819d30ea906Sjfb8856606 					info.dev_name);
820d30ea906Sjfb8856606 			continue;
821d30ea906Sjfb8856606 		}
822d30ea906Sjfb8856606 		nb_devs_added++;
823d30ea906Sjfb8856606 	}
824d30ea906Sjfb8856606 
825d30ea906Sjfb8856606 	return nb_devs_added;
826d30ea906Sjfb8856606 }
827d30ea906Sjfb8856606 
828d30ea906Sjfb8856606 static int
read_test_vector(void)829d30ea906Sjfb8856606 read_test_vector(void)
830d30ea906Sjfb8856606 {
831d30ea906Sjfb8856606 	int ret;
832d30ea906Sjfb8856606 
833d30ea906Sjfb8856606 	memset(&test_vector, 0, sizeof(test_vector));
834d30ea906Sjfb8856606 	printf("Test vector file = %s\n", get_vector_filename());
835d30ea906Sjfb8856606 	ret = test_bbdev_vector_read(get_vector_filename(), &test_vector);
836d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS(ret, "Failed to parse file %s\n",
837d30ea906Sjfb8856606 			get_vector_filename());
838d30ea906Sjfb8856606 
839d30ea906Sjfb8856606 	return TEST_SUCCESS;
840d30ea906Sjfb8856606 }
841d30ea906Sjfb8856606 
842d30ea906Sjfb8856606 static int
testsuite_setup(void)843d30ea906Sjfb8856606 testsuite_setup(void)
844d30ea906Sjfb8856606 {
845d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS(read_test_vector(), "Test suite setup failed\n");
846d30ea906Sjfb8856606 
847d30ea906Sjfb8856606 	if (populate_active_devices() == 0) {
848d30ea906Sjfb8856606 		printf("No suitable devices found!\n");
849d30ea906Sjfb8856606 		return TEST_SKIPPED;
850d30ea906Sjfb8856606 	}
851d30ea906Sjfb8856606 
852d30ea906Sjfb8856606 	return TEST_SUCCESS;
853d30ea906Sjfb8856606 }
854d30ea906Sjfb8856606 
855d30ea906Sjfb8856606 static int
interrupt_testsuite_setup(void)856d30ea906Sjfb8856606 interrupt_testsuite_setup(void)
857d30ea906Sjfb8856606 {
858d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS(read_test_vector(), "Test suite setup failed\n");
859d30ea906Sjfb8856606 
860d30ea906Sjfb8856606 	/* Enable interrupts */
861d30ea906Sjfb8856606 	intr_enabled = true;
862d30ea906Sjfb8856606 
863d30ea906Sjfb8856606 	/* Special case for NULL device (RTE_BBDEV_OP_NONE) */
864d30ea906Sjfb8856606 	if (populate_active_devices() == 0 ||
865d30ea906Sjfb8856606 			test_vector.op_type == RTE_BBDEV_OP_NONE) {
866d30ea906Sjfb8856606 		intr_enabled = false;
867d30ea906Sjfb8856606 		printf("No suitable devices found!\n");
868d30ea906Sjfb8856606 		return TEST_SKIPPED;
869d30ea906Sjfb8856606 	}
870d30ea906Sjfb8856606 
871d30ea906Sjfb8856606 	return TEST_SUCCESS;
872d30ea906Sjfb8856606 }
873d30ea906Sjfb8856606 
874d30ea906Sjfb8856606 static void
testsuite_teardown(void)875d30ea906Sjfb8856606 testsuite_teardown(void)
876d30ea906Sjfb8856606 {
877d30ea906Sjfb8856606 	uint8_t dev_id;
878d30ea906Sjfb8856606 
879d30ea906Sjfb8856606 	/* Unconfigure devices */
880d30ea906Sjfb8856606 	RTE_BBDEV_FOREACH(dev_id)
881d30ea906Sjfb8856606 		rte_bbdev_close(dev_id);
882d30ea906Sjfb8856606 
883d30ea906Sjfb8856606 	/* Clear active devices structs. */
884d30ea906Sjfb8856606 	memset(active_devs, 0, sizeof(active_devs));
885d30ea906Sjfb8856606 	nb_active_devs = 0;
886*2d9fd380Sjfb8856606 
887*2d9fd380Sjfb8856606 	/* Disable interrupts */
888*2d9fd380Sjfb8856606 	intr_enabled = false;
889d30ea906Sjfb8856606 }
890d30ea906Sjfb8856606 
891d30ea906Sjfb8856606 static int
ut_setup(void)892d30ea906Sjfb8856606 ut_setup(void)
893d30ea906Sjfb8856606 {
894d30ea906Sjfb8856606 	uint8_t i, dev_id;
895d30ea906Sjfb8856606 
896d30ea906Sjfb8856606 	for (i = 0; i < nb_active_devs; i++) {
897d30ea906Sjfb8856606 		dev_id = active_devs[i].dev_id;
898d30ea906Sjfb8856606 		/* reset bbdev stats */
899d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(rte_bbdev_stats_reset(dev_id),
900d30ea906Sjfb8856606 				"Failed to reset stats of bbdev %u", dev_id);
901d30ea906Sjfb8856606 		/* start the device */
902d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(rte_bbdev_start(dev_id),
903d30ea906Sjfb8856606 				"Failed to start bbdev %u", dev_id);
904d30ea906Sjfb8856606 	}
905d30ea906Sjfb8856606 
906d30ea906Sjfb8856606 	return TEST_SUCCESS;
907d30ea906Sjfb8856606 }
908d30ea906Sjfb8856606 
909d30ea906Sjfb8856606 static void
ut_teardown(void)910d30ea906Sjfb8856606 ut_teardown(void)
911d30ea906Sjfb8856606 {
912d30ea906Sjfb8856606 	uint8_t i, dev_id;
913d30ea906Sjfb8856606 	struct rte_bbdev_stats stats;
914d30ea906Sjfb8856606 
915d30ea906Sjfb8856606 	for (i = 0; i < nb_active_devs; i++) {
916d30ea906Sjfb8856606 		dev_id = active_devs[i].dev_id;
917d30ea906Sjfb8856606 		/* read stats and print */
918d30ea906Sjfb8856606 		rte_bbdev_stats_get(dev_id, &stats);
919d30ea906Sjfb8856606 		/* Stop the device */
920d30ea906Sjfb8856606 		rte_bbdev_stop(dev_id);
921d30ea906Sjfb8856606 	}
922d30ea906Sjfb8856606 }
923d30ea906Sjfb8856606 
924d30ea906Sjfb8856606 static int
init_op_data_objs(struct rte_bbdev_op_data * bufs,struct op_data_entries * ref_entries,struct rte_mempool * mbuf_pool,const uint16_t n,enum op_data_type op_type,uint16_t min_alignment)925d30ea906Sjfb8856606 init_op_data_objs(struct rte_bbdev_op_data *bufs,
926d30ea906Sjfb8856606 		struct op_data_entries *ref_entries,
927d30ea906Sjfb8856606 		struct rte_mempool *mbuf_pool, const uint16_t n,
928d30ea906Sjfb8856606 		enum op_data_type op_type, uint16_t min_alignment)
929d30ea906Sjfb8856606 {
930d30ea906Sjfb8856606 	int ret;
931d30ea906Sjfb8856606 	unsigned int i, j;
932*2d9fd380Sjfb8856606 	bool large_input = false;
933d30ea906Sjfb8856606 
934d30ea906Sjfb8856606 	for (i = 0; i < n; ++i) {
935d30ea906Sjfb8856606 		char *data;
936d30ea906Sjfb8856606 		struct op_data_buf *seg = &ref_entries->segments[0];
937d30ea906Sjfb8856606 		struct rte_mbuf *m_head = rte_pktmbuf_alloc(mbuf_pool);
938d30ea906Sjfb8856606 		TEST_ASSERT_NOT_NULL(m_head,
939d30ea906Sjfb8856606 				"Not enough mbufs in %d data type mbuf pool (needed %u, available %u)",
940d30ea906Sjfb8856606 				op_type, n * ref_entries->nb_segments,
941d30ea906Sjfb8856606 				mbuf_pool->size);
942d30ea906Sjfb8856606 
943*2d9fd380Sjfb8856606 		if (seg->length > RTE_BBDEV_LDPC_E_MAX_MBUF) {
944*2d9fd380Sjfb8856606 			/*
945*2d9fd380Sjfb8856606 			 * Special case when DPDK mbuf cannot handle
946*2d9fd380Sjfb8856606 			 * the required input size
947*2d9fd380Sjfb8856606 			 */
948*2d9fd380Sjfb8856606 			printf("Warning: Larger input size than DPDK mbuf %d\n",
949*2d9fd380Sjfb8856606 					seg->length);
950*2d9fd380Sjfb8856606 			large_input = true;
951*2d9fd380Sjfb8856606 		}
952d30ea906Sjfb8856606 		bufs[i].data = m_head;
953d30ea906Sjfb8856606 		bufs[i].offset = 0;
954d30ea906Sjfb8856606 		bufs[i].length = 0;
955d30ea906Sjfb8856606 
9564418919fSjohnjiang 		if ((op_type == DATA_INPUT) || (op_type == DATA_HARQ_INPUT)) {
957*2d9fd380Sjfb8856606 			if ((op_type == DATA_INPUT) && large_input) {
958*2d9fd380Sjfb8856606 				/* Allocate a fake overused mbuf */
959*2d9fd380Sjfb8856606 				data = rte_malloc(NULL, seg->length, 0);
960*2d9fd380Sjfb8856606 				memcpy(data, seg->addr, seg->length);
961*2d9fd380Sjfb8856606 				m_head->buf_addr = data;
962*2d9fd380Sjfb8856606 				m_head->buf_iova = rte_malloc_virt2iova(data);
963*2d9fd380Sjfb8856606 				m_head->data_off = 0;
964*2d9fd380Sjfb8856606 				m_head->data_len = seg->length;
965*2d9fd380Sjfb8856606 			} else {
966d30ea906Sjfb8856606 				data = rte_pktmbuf_append(m_head, seg->length);
967d30ea906Sjfb8856606 				TEST_ASSERT_NOT_NULL(data,
968d30ea906Sjfb8856606 					"Couldn't append %u bytes to mbuf from %d data type mbuf pool",
969d30ea906Sjfb8856606 					seg->length, op_type);
970d30ea906Sjfb8856606 
971*2d9fd380Sjfb8856606 				TEST_ASSERT(data == RTE_PTR_ALIGN(
972*2d9fd380Sjfb8856606 						data, min_alignment),
973d30ea906Sjfb8856606 					"Data addr in mbuf (%p) is not aligned to device min alignment (%u)",
974d30ea906Sjfb8856606 					data, min_alignment);
975d30ea906Sjfb8856606 				rte_memcpy(data, seg->addr, seg->length);
976*2d9fd380Sjfb8856606 			}
977*2d9fd380Sjfb8856606 
978d30ea906Sjfb8856606 			bufs[i].length += seg->length;
979d30ea906Sjfb8856606 
980d30ea906Sjfb8856606 			for (j = 1; j < ref_entries->nb_segments; ++j) {
981d30ea906Sjfb8856606 				struct rte_mbuf *m_tail =
982d30ea906Sjfb8856606 						rte_pktmbuf_alloc(mbuf_pool);
983d30ea906Sjfb8856606 				TEST_ASSERT_NOT_NULL(m_tail,
984d30ea906Sjfb8856606 						"Not enough mbufs in %d data type mbuf pool (needed %u, available %u)",
985d30ea906Sjfb8856606 						op_type,
986d30ea906Sjfb8856606 						n * ref_entries->nb_segments,
987d30ea906Sjfb8856606 						mbuf_pool->size);
988d30ea906Sjfb8856606 				seg += 1;
989d30ea906Sjfb8856606 
990d30ea906Sjfb8856606 				data = rte_pktmbuf_append(m_tail, seg->length);
991d30ea906Sjfb8856606 				TEST_ASSERT_NOT_NULL(data,
992d30ea906Sjfb8856606 						"Couldn't append %u bytes to mbuf from %d data type mbuf pool",
993d30ea906Sjfb8856606 						seg->length, op_type);
994d30ea906Sjfb8856606 
995d30ea906Sjfb8856606 				TEST_ASSERT(data == RTE_PTR_ALIGN(data,
996d30ea906Sjfb8856606 						min_alignment),
997d30ea906Sjfb8856606 						"Data addr in mbuf (%p) is not aligned to device min alignment (%u)",
998d30ea906Sjfb8856606 						data, min_alignment);
999d30ea906Sjfb8856606 				rte_memcpy(data, seg->addr, seg->length);
1000d30ea906Sjfb8856606 				bufs[i].length += seg->length;
1001d30ea906Sjfb8856606 
1002d30ea906Sjfb8856606 				ret = rte_pktmbuf_chain(m_head, m_tail);
1003d30ea906Sjfb8856606 				TEST_ASSERT_SUCCESS(ret,
1004d30ea906Sjfb8856606 						"Couldn't chain mbufs from %d data type mbuf pool",
1005d30ea906Sjfb8856606 						op_type);
1006d30ea906Sjfb8856606 			}
10074418919fSjohnjiang 		} else {
10084418919fSjohnjiang 
10094418919fSjohnjiang 			/* allocate chained-mbuf for output buffer */
10104418919fSjohnjiang 			for (j = 1; j < ref_entries->nb_segments; ++j) {
10114418919fSjohnjiang 				struct rte_mbuf *m_tail =
10124418919fSjohnjiang 						rte_pktmbuf_alloc(mbuf_pool);
10134418919fSjohnjiang 				TEST_ASSERT_NOT_NULL(m_tail,
10144418919fSjohnjiang 						"Not enough mbufs in %d data type mbuf pool (needed %u, available %u)",
10154418919fSjohnjiang 						op_type,
10164418919fSjohnjiang 						n * ref_entries->nb_segments,
10174418919fSjohnjiang 						mbuf_pool->size);
10184418919fSjohnjiang 
10194418919fSjohnjiang 				ret = rte_pktmbuf_chain(m_head, m_tail);
10204418919fSjohnjiang 				TEST_ASSERT_SUCCESS(ret,
10214418919fSjohnjiang 						"Couldn't chain mbufs from %d data type mbuf pool",
10224418919fSjohnjiang 						op_type);
10234418919fSjohnjiang 			}
1024d30ea906Sjfb8856606 		}
1025d30ea906Sjfb8856606 	}
1026d30ea906Sjfb8856606 
1027d30ea906Sjfb8856606 	return 0;
1028d30ea906Sjfb8856606 }
1029d30ea906Sjfb8856606 
1030d30ea906Sjfb8856606 static int
allocate_buffers_on_socket(struct rte_bbdev_op_data ** buffers,const int len,const int socket)1031d30ea906Sjfb8856606 allocate_buffers_on_socket(struct rte_bbdev_op_data **buffers, const int len,
1032d30ea906Sjfb8856606 		const int socket)
1033d30ea906Sjfb8856606 {
1034d30ea906Sjfb8856606 	int i;
1035d30ea906Sjfb8856606 
1036d30ea906Sjfb8856606 	*buffers = rte_zmalloc_socket(NULL, len, 0, socket);
1037d30ea906Sjfb8856606 	if (*buffers == NULL) {
1038d30ea906Sjfb8856606 		printf("WARNING: Failed to allocate op_data on socket %d\n",
1039d30ea906Sjfb8856606 				socket);
1040d30ea906Sjfb8856606 		/* try to allocate memory on other detected sockets */
1041d30ea906Sjfb8856606 		for (i = 0; i < socket; i++) {
1042d30ea906Sjfb8856606 			*buffers = rte_zmalloc_socket(NULL, len, 0, i);
1043d30ea906Sjfb8856606 			if (*buffers != NULL)
1044d30ea906Sjfb8856606 				break;
1045d30ea906Sjfb8856606 		}
1046d30ea906Sjfb8856606 	}
1047d30ea906Sjfb8856606 
1048d30ea906Sjfb8856606 	return (*buffers == NULL) ? TEST_FAILED : TEST_SUCCESS;
1049d30ea906Sjfb8856606 }
1050d30ea906Sjfb8856606 
1051d30ea906Sjfb8856606 static void
limit_input_llr_val_range(struct rte_bbdev_op_data * input_ops,const uint16_t n,const int8_t max_llr_modulus)1052d30ea906Sjfb8856606 limit_input_llr_val_range(struct rte_bbdev_op_data *input_ops,
10534418919fSjohnjiang 		const uint16_t n, const int8_t max_llr_modulus)
1054d30ea906Sjfb8856606 {
1055d30ea906Sjfb8856606 	uint16_t i, byte_idx;
1056d30ea906Sjfb8856606 
1057d30ea906Sjfb8856606 	for (i = 0; i < n; ++i) {
1058d30ea906Sjfb8856606 		struct rte_mbuf *m = input_ops[i].data;
1059d30ea906Sjfb8856606 		while (m != NULL) {
1060d30ea906Sjfb8856606 			int8_t *llr = rte_pktmbuf_mtod_offset(m, int8_t *,
1061d30ea906Sjfb8856606 					input_ops[i].offset);
10624418919fSjohnjiang 			for (byte_idx = 0; byte_idx < rte_pktmbuf_data_len(m);
1063d30ea906Sjfb8856606 					++byte_idx)
1064d30ea906Sjfb8856606 				llr[byte_idx] = round((double)max_llr_modulus *
1065d30ea906Sjfb8856606 						llr[byte_idx] / INT8_MAX);
1066d30ea906Sjfb8856606 
1067d30ea906Sjfb8856606 			m = m->next;
1068d30ea906Sjfb8856606 		}
1069d30ea906Sjfb8856606 	}
1070d30ea906Sjfb8856606 }
1071d30ea906Sjfb8856606 
/*
 * We may have to insert filler bits
 * when they are required by the HARQ assumption
 */
static void
ldpc_add_filler(struct rte_bbdev_op_data *input_ops,
		const uint16_t n, struct test_op_params *op_params)
{
	struct rte_bbdev_op_ldpc_dec dec = op_params->ref_dec_op->ldpc_dec;

	if (input_ops == NULL)
		return;
	/* No need to add filler if not required by device */
	if (!(ldpc_cap_flags &
			RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_FILLERS))
		return;
	/* No need to add filler for loopback operation */
	if (dec.op_flags & RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)
		return;

	uint16_t i, j, parity_offset;
	for (i = 0; i < n; ++i) {
		struct rte_mbuf *m = input_ops[i].data;
		int8_t *llr = rte_pktmbuf_mtod_offset(m, int8_t *,
				input_ops[i].offset);
		/* Position where fillers are inserted: (20|8) * Zc minus
		 * the filler count (20 for basegraph 1, 8 for basegraph 2)
		 */
		parity_offset = (dec.basegraph == 1 ? 20 : 8)
				* dec.z_c - dec.n_filler;
		/* Grow the buffer in place by n_filler bytes; assumes the
		 * mbuf has that much tailroom -- TODO confirm for all vectors
		 */
		uint16_t new_hin_size = input_ops[i].length + dec.n_filler;
		m->data_len = new_hin_size;
		input_ops[i].length = new_hin_size;
		/* Shift the trailing LLRs up by n_filler positions; the
		 * copy runs backwards so each source byte is read before
		 * it can be overwritten
		 */
		for (j = new_hin_size - 1; j >= parity_offset + dec.n_filler;
				j--)
			llr[j] = llr[j - dec.n_filler];
		/* Fillers are known bits: mark them with the maximum
		 * positive LLR expressible in ldpc_llr_size bits
		 */
		uint16_t llr_max_pre_scaling = (1 << (ldpc_llr_size - 1)) - 1;
		for (j = 0; j < dec.n_filler; j++)
			llr[parity_offset + j] = llr_max_pre_scaling;
	}
}
1110*2d9fd380Sjfb8856606 
11114418919fSjohnjiang static void
ldpc_input_llr_scaling(struct rte_bbdev_op_data * input_ops,const uint16_t n,const int8_t llr_size,const int8_t llr_decimals)11124418919fSjohnjiang ldpc_input_llr_scaling(struct rte_bbdev_op_data *input_ops,
11134418919fSjohnjiang 		const uint16_t n, const int8_t llr_size,
11144418919fSjohnjiang 		const int8_t llr_decimals)
11154418919fSjohnjiang {
11164418919fSjohnjiang 	if (input_ops == NULL)
11174418919fSjohnjiang 		return;
11184418919fSjohnjiang 
11194418919fSjohnjiang 	uint16_t i, byte_idx;
11204418919fSjohnjiang 
11214418919fSjohnjiang 	int16_t llr_max, llr_min, llr_tmp;
11224418919fSjohnjiang 	llr_max = (1 << (llr_size - 1)) - 1;
11234418919fSjohnjiang 	llr_min = -llr_max;
11244418919fSjohnjiang 	for (i = 0; i < n; ++i) {
11254418919fSjohnjiang 		struct rte_mbuf *m = input_ops[i].data;
11264418919fSjohnjiang 		while (m != NULL) {
11274418919fSjohnjiang 			int8_t *llr = rte_pktmbuf_mtod_offset(m, int8_t *,
11284418919fSjohnjiang 					input_ops[i].offset);
11294418919fSjohnjiang 			for (byte_idx = 0; byte_idx < rte_pktmbuf_data_len(m);
11304418919fSjohnjiang 					++byte_idx) {
11314418919fSjohnjiang 
11324418919fSjohnjiang 				llr_tmp = llr[byte_idx];
1133*2d9fd380Sjfb8856606 				if (llr_decimals == 4)
1134*2d9fd380Sjfb8856606 					llr_tmp *= 8;
1135*2d9fd380Sjfb8856606 				else if (llr_decimals == 2)
11364418919fSjohnjiang 					llr_tmp *= 2;
11374418919fSjohnjiang 				else if (llr_decimals == 0)
11384418919fSjohnjiang 					llr_tmp /= 2;
11394418919fSjohnjiang 				llr_tmp = RTE_MIN(llr_max,
11404418919fSjohnjiang 						RTE_MAX(llr_min, llr_tmp));
11414418919fSjohnjiang 				llr[byte_idx] = (int8_t) llr_tmp;
11424418919fSjohnjiang 			}
11434418919fSjohnjiang 
11444418919fSjohnjiang 			m = m->next;
11454418919fSjohnjiang 		}
11464418919fSjohnjiang 	}
11474418919fSjohnjiang }
11484418919fSjohnjiang 
11494418919fSjohnjiang 
11504418919fSjohnjiang 
/*
 * Allocate and initialise all per-queue op_data buffer arrays (inputs,
 * soft/hard outputs, HARQ inputs/outputs) for op_params->num_to_process
 * operations, then apply the LLR pre-processing the operation type needs
 * (Turbo range limiting, or LDPC scaling plus HARQ filler insertion).
 * Also latches the device's LDPC LLR format into the file-scope globals
 * ldpc_llr_decimals / ldpc_llr_size / ldpc_cap_flags.
 * Returns 0 on success; asserts (TEST_FAILED) on allocation failure.
 */
static int
fill_queue_buffers(struct test_op_params *op_params,
		struct rte_mempool *in_mp, struct rte_mempool *hard_out_mp,
		struct rte_mempool *soft_out_mp,
		struct rte_mempool *harq_in_mp, struct rte_mempool *harq_out_mp,
		uint16_t queue_id,
		const struct rte_bbdev_op_cap *capabilities,
		uint16_t min_alignment, const int socket_id)
{
	int ret;
	enum op_data_type type;
	const uint16_t n = op_params->num_to_process;

	/* Indexed by enum op_data_type: order must match queue_ops[] below */
	struct rte_mempool *mbuf_pools[DATA_NUM_TYPES] = {
		in_mp,
		soft_out_mp,
		hard_out_mp,
		harq_in_mp,
		harq_out_mp,
	};

	struct rte_bbdev_op_data **queue_ops[DATA_NUM_TYPES] = {
		&op_params->q_bufs[socket_id][queue_id].inputs,
		&op_params->q_bufs[socket_id][queue_id].soft_outputs,
		&op_params->q_bufs[socket_id][queue_id].hard_outputs,
		&op_params->q_bufs[socket_id][queue_id].harq_inputs,
		&op_params->q_bufs[socket_id][queue_id].harq_outputs,
	};

	for (type = DATA_INPUT; type < DATA_NUM_TYPES; ++type) {
		struct op_data_entries *ref_entries =
				&test_vector.entries[type];
		/* Vector supplies no data of this type: nothing to set up */
		if (ref_entries->nb_segments == 0)
			continue;

		ret = allocate_buffers_on_socket(queue_ops[type],
				n * sizeof(struct rte_bbdev_op_data),
				socket_id);
		TEST_ASSERT_SUCCESS(ret,
				"Couldn't allocate memory for rte_bbdev_op_data structs");

		ret = init_op_data_objs(*queue_ops[type], ref_entries,
				mbuf_pools[type], n, type, min_alignment);
		TEST_ASSERT_SUCCESS(ret,
				"Couldn't init rte_bbdev_op_data structs");
	}

	/* Turbo decode: clamp LLRs to the device's supported modulus */
	if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
		limit_input_llr_val_range(*queue_ops[DATA_INPUT], n,
			capabilities->cap.turbo_dec.max_llr_modulus);

	/* LDPC decode: scale LLRs unless loopback/compression makes the
	 * raw values pass through unchanged, and add HARQ filler bits
	 */
	if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC) {
		bool loopback = op_params->ref_dec_op->ldpc_dec.op_flags &
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK;
		bool llr_comp = op_params->ref_dec_op->ldpc_dec.op_flags &
				RTE_BBDEV_LDPC_LLR_COMPRESSION;
		bool harq_comp = op_params->ref_dec_op->ldpc_dec.op_flags &
				RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION;
		/* Remember the device LLR format for later processing */
		ldpc_llr_decimals = capabilities->cap.ldpc_dec.llr_decimals;
		ldpc_llr_size = capabilities->cap.ldpc_dec.llr_size;
		ldpc_cap_flags = capabilities->cap.ldpc_dec.capability_flags;
		if (!loopback && !llr_comp)
			ldpc_input_llr_scaling(*queue_ops[DATA_INPUT], n,
					ldpc_llr_size, ldpc_llr_decimals);
		if (!loopback && !harq_comp)
			ldpc_input_llr_scaling(*queue_ops[DATA_HARQ_INPUT], n,
					ldpc_llr_size, ldpc_llr_decimals);
		if (!loopback)
			ldpc_add_filler(*queue_ops[DATA_HARQ_INPUT], n,
					op_params);
	}

	return 0;
}
1225d30ea906Sjfb8856606 
1226d30ea906Sjfb8856606 static void
free_buffers(struct active_device * ad,struct test_op_params * op_params)1227d30ea906Sjfb8856606 free_buffers(struct active_device *ad, struct test_op_params *op_params)
1228d30ea906Sjfb8856606 {
1229d30ea906Sjfb8856606 	unsigned int i, j;
1230d30ea906Sjfb8856606 
1231d30ea906Sjfb8856606 	rte_mempool_free(ad->ops_mempool);
1232d30ea906Sjfb8856606 	rte_mempool_free(ad->in_mbuf_pool);
1233d30ea906Sjfb8856606 	rte_mempool_free(ad->hard_out_mbuf_pool);
1234d30ea906Sjfb8856606 	rte_mempool_free(ad->soft_out_mbuf_pool);
12354418919fSjohnjiang 	rte_mempool_free(ad->harq_in_mbuf_pool);
12364418919fSjohnjiang 	rte_mempool_free(ad->harq_out_mbuf_pool);
1237d30ea906Sjfb8856606 
1238d30ea906Sjfb8856606 	for (i = 0; i < rte_lcore_count(); ++i) {
1239d30ea906Sjfb8856606 		for (j = 0; j < RTE_MAX_NUMA_NODES; ++j) {
1240d30ea906Sjfb8856606 			rte_free(op_params->q_bufs[j][i].inputs);
1241d30ea906Sjfb8856606 			rte_free(op_params->q_bufs[j][i].hard_outputs);
1242d30ea906Sjfb8856606 			rte_free(op_params->q_bufs[j][i].soft_outputs);
12434418919fSjohnjiang 			rte_free(op_params->q_bufs[j][i].harq_inputs);
12444418919fSjohnjiang 			rte_free(op_params->q_bufs[j][i].harq_outputs);
1245d30ea906Sjfb8856606 		}
1246d30ea906Sjfb8856606 	}
1247d30ea906Sjfb8856606 }
1248d30ea906Sjfb8856606 
1249d30ea906Sjfb8856606 static void
copy_reference_dec_op(struct rte_bbdev_dec_op ** ops,unsigned int n,unsigned int start_idx,struct rte_bbdev_op_data * inputs,struct rte_bbdev_op_data * hard_outputs,struct rte_bbdev_op_data * soft_outputs,struct rte_bbdev_dec_op * ref_op)1250d30ea906Sjfb8856606 copy_reference_dec_op(struct rte_bbdev_dec_op **ops, unsigned int n,
1251d30ea906Sjfb8856606 		unsigned int start_idx,
1252d30ea906Sjfb8856606 		struct rte_bbdev_op_data *inputs,
1253d30ea906Sjfb8856606 		struct rte_bbdev_op_data *hard_outputs,
1254d30ea906Sjfb8856606 		struct rte_bbdev_op_data *soft_outputs,
1255d30ea906Sjfb8856606 		struct rte_bbdev_dec_op *ref_op)
1256d30ea906Sjfb8856606 {
1257d30ea906Sjfb8856606 	unsigned int i;
1258d30ea906Sjfb8856606 	struct rte_bbdev_op_turbo_dec *turbo_dec = &ref_op->turbo_dec;
1259d30ea906Sjfb8856606 
1260d30ea906Sjfb8856606 	for (i = 0; i < n; ++i) {
1261d30ea906Sjfb8856606 		if (turbo_dec->code_block_mode == 0) {
1262d30ea906Sjfb8856606 			ops[i]->turbo_dec.tb_params.ea =
1263d30ea906Sjfb8856606 					turbo_dec->tb_params.ea;
1264d30ea906Sjfb8856606 			ops[i]->turbo_dec.tb_params.eb =
1265d30ea906Sjfb8856606 					turbo_dec->tb_params.eb;
1266d30ea906Sjfb8856606 			ops[i]->turbo_dec.tb_params.k_pos =
1267d30ea906Sjfb8856606 					turbo_dec->tb_params.k_pos;
1268d30ea906Sjfb8856606 			ops[i]->turbo_dec.tb_params.k_neg =
1269d30ea906Sjfb8856606 					turbo_dec->tb_params.k_neg;
1270d30ea906Sjfb8856606 			ops[i]->turbo_dec.tb_params.c =
1271d30ea906Sjfb8856606 					turbo_dec->tb_params.c;
1272d30ea906Sjfb8856606 			ops[i]->turbo_dec.tb_params.c_neg =
1273d30ea906Sjfb8856606 					turbo_dec->tb_params.c_neg;
1274d30ea906Sjfb8856606 			ops[i]->turbo_dec.tb_params.cab =
1275d30ea906Sjfb8856606 					turbo_dec->tb_params.cab;
12764418919fSjohnjiang 			ops[i]->turbo_dec.tb_params.r =
12774418919fSjohnjiang 					turbo_dec->tb_params.r;
1278d30ea906Sjfb8856606 		} else {
1279d30ea906Sjfb8856606 			ops[i]->turbo_dec.cb_params.e = turbo_dec->cb_params.e;
1280d30ea906Sjfb8856606 			ops[i]->turbo_dec.cb_params.k = turbo_dec->cb_params.k;
1281d30ea906Sjfb8856606 		}
1282d30ea906Sjfb8856606 
1283d30ea906Sjfb8856606 		ops[i]->turbo_dec.ext_scale = turbo_dec->ext_scale;
1284d30ea906Sjfb8856606 		ops[i]->turbo_dec.iter_max = turbo_dec->iter_max;
1285d30ea906Sjfb8856606 		ops[i]->turbo_dec.iter_min = turbo_dec->iter_min;
1286d30ea906Sjfb8856606 		ops[i]->turbo_dec.op_flags = turbo_dec->op_flags;
1287d30ea906Sjfb8856606 		ops[i]->turbo_dec.rv_index = turbo_dec->rv_index;
1288d30ea906Sjfb8856606 		ops[i]->turbo_dec.num_maps = turbo_dec->num_maps;
1289d30ea906Sjfb8856606 		ops[i]->turbo_dec.code_block_mode = turbo_dec->code_block_mode;
1290d30ea906Sjfb8856606 
1291d30ea906Sjfb8856606 		ops[i]->turbo_dec.hard_output = hard_outputs[start_idx + i];
1292d30ea906Sjfb8856606 		ops[i]->turbo_dec.input = inputs[start_idx + i];
1293d30ea906Sjfb8856606 		if (soft_outputs != NULL)
1294d30ea906Sjfb8856606 			ops[i]->turbo_dec.soft_output =
1295d30ea906Sjfb8856606 				soft_outputs[start_idx + i];
1296d30ea906Sjfb8856606 	}
1297d30ea906Sjfb8856606 }
1298d30ea906Sjfb8856606 
1299d30ea906Sjfb8856606 static void
copy_reference_enc_op(struct rte_bbdev_enc_op ** ops,unsigned int n,unsigned int start_idx,struct rte_bbdev_op_data * inputs,struct rte_bbdev_op_data * outputs,struct rte_bbdev_enc_op * ref_op)1300d30ea906Sjfb8856606 copy_reference_enc_op(struct rte_bbdev_enc_op **ops, unsigned int n,
1301d30ea906Sjfb8856606 		unsigned int start_idx,
1302d30ea906Sjfb8856606 		struct rte_bbdev_op_data *inputs,
1303d30ea906Sjfb8856606 		struct rte_bbdev_op_data *outputs,
1304d30ea906Sjfb8856606 		struct rte_bbdev_enc_op *ref_op)
1305d30ea906Sjfb8856606 {
1306d30ea906Sjfb8856606 	unsigned int i;
1307d30ea906Sjfb8856606 	struct rte_bbdev_op_turbo_enc *turbo_enc = &ref_op->turbo_enc;
1308d30ea906Sjfb8856606 	for (i = 0; i < n; ++i) {
1309d30ea906Sjfb8856606 		if (turbo_enc->code_block_mode == 0) {
1310d30ea906Sjfb8856606 			ops[i]->turbo_enc.tb_params.ea =
1311d30ea906Sjfb8856606 					turbo_enc->tb_params.ea;
1312d30ea906Sjfb8856606 			ops[i]->turbo_enc.tb_params.eb =
1313d30ea906Sjfb8856606 					turbo_enc->tb_params.eb;
1314d30ea906Sjfb8856606 			ops[i]->turbo_enc.tb_params.k_pos =
1315d30ea906Sjfb8856606 					turbo_enc->tb_params.k_pos;
1316d30ea906Sjfb8856606 			ops[i]->turbo_enc.tb_params.k_neg =
1317d30ea906Sjfb8856606 					turbo_enc->tb_params.k_neg;
1318d30ea906Sjfb8856606 			ops[i]->turbo_enc.tb_params.c =
1319d30ea906Sjfb8856606 					turbo_enc->tb_params.c;
1320d30ea906Sjfb8856606 			ops[i]->turbo_enc.tb_params.c_neg =
1321d30ea906Sjfb8856606 					turbo_enc->tb_params.c_neg;
1322d30ea906Sjfb8856606 			ops[i]->turbo_enc.tb_params.cab =
1323d30ea906Sjfb8856606 					turbo_enc->tb_params.cab;
1324d30ea906Sjfb8856606 			ops[i]->turbo_enc.tb_params.ncb_pos =
1325d30ea906Sjfb8856606 					turbo_enc->tb_params.ncb_pos;
1326d30ea906Sjfb8856606 			ops[i]->turbo_enc.tb_params.ncb_neg =
1327d30ea906Sjfb8856606 					turbo_enc->tb_params.ncb_neg;
1328d30ea906Sjfb8856606 			ops[i]->turbo_enc.tb_params.r = turbo_enc->tb_params.r;
1329d30ea906Sjfb8856606 		} else {
1330d30ea906Sjfb8856606 			ops[i]->turbo_enc.cb_params.e = turbo_enc->cb_params.e;
1331d30ea906Sjfb8856606 			ops[i]->turbo_enc.cb_params.k = turbo_enc->cb_params.k;
1332d30ea906Sjfb8856606 			ops[i]->turbo_enc.cb_params.ncb =
1333d30ea906Sjfb8856606 					turbo_enc->cb_params.ncb;
1334d30ea906Sjfb8856606 		}
1335d30ea906Sjfb8856606 		ops[i]->turbo_enc.rv_index = turbo_enc->rv_index;
1336d30ea906Sjfb8856606 		ops[i]->turbo_enc.op_flags = turbo_enc->op_flags;
1337d30ea906Sjfb8856606 		ops[i]->turbo_enc.code_block_mode = turbo_enc->code_block_mode;
1338d30ea906Sjfb8856606 
1339d30ea906Sjfb8856606 		ops[i]->turbo_enc.output = outputs[start_idx + i];
1340d30ea906Sjfb8856606 		ops[i]->turbo_enc.input = inputs[start_idx + i];
1341d30ea906Sjfb8856606 	}
1342d30ea906Sjfb8856606 }
1343d30ea906Sjfb8856606 
1344*2d9fd380Sjfb8856606 
1345*2d9fd380Sjfb8856606 /* Returns a random number drawn from a normal distribution
1346*2d9fd380Sjfb8856606  * with mean of 0 and variance of 1
1347*2d9fd380Sjfb8856606  * Marsaglia algorithm
1348*2d9fd380Sjfb8856606  */
1349*2d9fd380Sjfb8856606 static double
randn(int n)1350*2d9fd380Sjfb8856606 randn(int n)
1351*2d9fd380Sjfb8856606 {
1352*2d9fd380Sjfb8856606 	double S, Z, U1, U2, u, v, fac;
1353*2d9fd380Sjfb8856606 
1354*2d9fd380Sjfb8856606 	do {
1355*2d9fd380Sjfb8856606 		U1 = (double)rand() / RAND_MAX;
1356*2d9fd380Sjfb8856606 		U2 = (double)rand() / RAND_MAX;
1357*2d9fd380Sjfb8856606 		u = 2. * U1 - 1.;
1358*2d9fd380Sjfb8856606 		v = 2. * U2 - 1.;
1359*2d9fd380Sjfb8856606 		S = u * u + v * v;
1360*2d9fd380Sjfb8856606 	} while (S >= 1 || S == 0);
1361*2d9fd380Sjfb8856606 	fac = sqrt(-2. * log(S) / S);
1362*2d9fd380Sjfb8856606 	Z = (n % 2) ? u * fac : v * fac;
1363*2d9fd380Sjfb8856606 	return Z;
1364*2d9fd380Sjfb8856606 }
1365*2d9fd380Sjfb8856606 
1366*2d9fd380Sjfb8856606 static inline double
maxstar(double A,double B)1367*2d9fd380Sjfb8856606 maxstar(double A, double B)
1368*2d9fd380Sjfb8856606 {
1369*2d9fd380Sjfb8856606 	if (fabs(A - B) > 5)
1370*2d9fd380Sjfb8856606 		return RTE_MAX(A, B);
1371*2d9fd380Sjfb8856606 	else
1372*2d9fd380Sjfb8856606 		return RTE_MAX(A, B) + log1p(exp(-fabs(A - B)));
1373*2d9fd380Sjfb8856606 }
1374*2d9fd380Sjfb8856606 
/*
 * Generate Qm LLRs for Qm==8 (256QAM).
 * Re-modulates the hard bits implied by the reference LLRs, passes the
 * symbol through an AWGN channel with noise power N0, then produces
 * max-log LLR estimates for each of the 8 bits of symbol @i.
 * llrs[8*i .. 8*i+7] is read for the bit decisions and overwritten with
 * the noisy, quantized LLRs (saturated to +/- llr_max).
 */
static void
gen_qm8_llr(int8_t *llrs, uint32_t i, double N0, double llr_max)
{
	int qm = 8;
	int qam = 256;
	int m, k;
	double I, Q, p0, p1, llr_, b[qm], log_syml_prob[qam];
	/* 256QAM constellation mapping per 5.1.4 of TS38.211 */
	const double symbols_I[256] = {
			5, 5, 7, 7, 5, 5, 7, 7, 3, 3, 1, 1, 3, 3, 1, 1, 5,
			5, 7, 7, 5, 5, 7, 7, 3, 3, 1, 1, 3, 3, 1, 1, 11,
			11, 9, 9, 11, 11, 9, 9, 13, 13, 15, 15, 13, 13,
			15, 15, 11, 11, 9, 9, 11, 11, 9, 9, 13, 13, 15,
			15, 13, 13, 15, 15, 5, 5, 7, 7, 5, 5, 7, 7, 3, 3,
			1, 1, 3, 3, 1, 1, 5, 5, 7, 7, 5, 5, 7, 7, 3, 3, 1,
			1, 3, 3, 1, 1, 11, 11, 9, 9, 11, 11, 9, 9, 13, 13,
			15, 15, 13, 13, 15, 15, 11, 11, 9, 9, 11, 11, 9, 9,
			13, 13, 15, 15, 13, 13, 15, 15, -5, -5, -7, -7, -5,
			-5, -7, -7, -3, -3, -1, -1, -3, -3, -1, -1, -5, -5,
			-7, -7, -5, -5, -7, -7, -3, -3, -1, -1, -3, -3,
			-1, -1, -11, -11, -9, -9, -11, -11, -9, -9, -13,
			-13, -15, -15, -13, -13, -15, -15, -11, -11, -9,
			-9, -11, -11, -9, -9, -13, -13, -15, -15, -13,
			-13, -15, -15, -5, -5, -7, -7, -5, -5, -7, -7, -3,
			-3, -1, -1, -3, -3, -1, -1, -5, -5, -7, -7, -5, -5,
			-7, -7, -3, -3, -1, -1, -3, -3, -1, -1, -11, -11,
			-9, -9, -11, -11, -9, -9, -13, -13, -15, -15, -13,
			-13, -15, -15, -11, -11, -9, -9, -11, -11, -9, -9,
			-13, -13, -15, -15, -13, -13, -15, -15};
	const double symbols_Q[256] = {
			5, 7, 5, 7, 3, 1, 3, 1, 5, 7, 5, 7, 3, 1, 3, 1, 11,
			9, 11, 9, 13, 15, 13, 15, 11, 9, 11, 9, 13, 15, 13,
			15, 5, 7, 5, 7, 3, 1, 3, 1, 5, 7, 5, 7, 3, 1, 3, 1,
			11, 9, 11, 9, 13, 15, 13, 15, 11, 9, 11, 9, 13,
			15, 13, 15, -5, -7, -5, -7, -3, -1, -3, -1, -5,
			-7, -5, -7, -3, -1, -3, -1, -11, -9, -11, -9, -13,
			-15, -13, -15, -11, -9, -11, -9, -13, -15, -13,
			-15, -5, -7, -5, -7, -3, -1, -3, -1, -5, -7, -5,
			-7, -3, -1, -3, -1, -11, -9, -11, -9, -13, -15,
			-13, -15, -11, -9, -11, -9, -13, -15, -13, -15, 5,
			7, 5, 7, 3, 1, 3, 1, 5, 7, 5, 7, 3, 1, 3, 1, 11,
			9, 11, 9, 13, 15, 13, 15, 11, 9, 11, 9, 13, 15,
			13, 15, 5, 7, 5, 7, 3, 1, 3, 1, 5, 7, 5, 7, 3, 1,
			3, 1, 11, 9, 11, 9, 13, 15, 13, 15, 11, 9, 11, 9,
			13, 15, 13, 15, -5, -7, -5, -7, -3, -1, -3, -1,
			-5, -7, -5, -7, -3, -1, -3, -1, -11, -9, -11, -9,
			-13, -15, -13, -15, -11, -9, -11, -9, -13, -15,
			-13, -15, -5, -7, -5, -7, -3, -1, -3, -1, -5, -7,
			-5, -7, -3, -1, -3, -1, -11, -9, -11, -9, -13, -15,
			-13, -15, -11, -9, -11, -9, -13, -15, -13, -15};
	/* Scale noise by the average constellation point energy */
	N0 *= 170.0;
	/* Recover the transmitted bits from the reference LLR signs */
	for (k = 0; k < qm; k++)
		b[k] = llrs[qm * i + k] < 0 ? 1.0 : 0.0;
	/* Map the bits onto a constellation point, 5.1.4 of TS38.211 */
	I = (1 - 2 * b[0]) * (8 - (1 - 2 * b[2]) *
			(4 - (1 - 2 * b[4]) * (2 - (1 - 2 * b[6]))));
	Q = (1 - 2 * b[1]) * (8 - (1 - 2 * b[3]) *
			(4 - (1 - 2 * b[5]) * (2 - (1 - 2 * b[7]))));
	/* AWGN channel */
	I += sqrt(N0 / 2) * randn(0);
	Q += sqrt(N0 / 2) * randn(1);
	/*
	 * Calculate the log of the probability that each of
	 * the constellation points was transmitted
	 */
	for (m = 0; m < qam; m++)
		log_syml_prob[m] = -(pow(I - symbols_I[m], 2.0)
				+ pow(Q - symbols_Q[m], 2.0)) / N0;
	/* Calculate a max-log LLR for each of the qm bits in the set */
	for (k = 0; k < qm; k++) {
		p0 = -999999;
		p1 = -999999;
		/* For each constellation point */
		for (m = 0; m < qam; m++) {
			if ((m >> (qm - k - 1)) & 1)
				p1 = maxstar(p1, log_syml_prob[m]);
			else
				p0 = maxstar(p0, log_syml_prob[m]);
		}
		/* Calculate, quantize and saturate the LLR */
		llr_ = p0 - p1;
		llr_ *= (1 << ldpc_llr_decimals);
		llr_ = round(llr_);
		if (llr_ > llr_max)
			llr_ = llr_max;
		if (llr_ < -llr_max)
			llr_ = -llr_max;
		llrs[qm * i + k] = (int8_t) llr_;
	}
}
1470*2d9fd380Sjfb8856606 
1471*2d9fd380Sjfb8856606 
/*
 * Generate Qm LLRs for Qm==6 (64QAM).
 * Re-modulates the hard bits implied by the reference LLRs, passes the
 * symbol through an AWGN channel with noise power N0, then produces
 * max-log LLR estimates for each of the 6 bits of symbol @i.
 * llrs[6*i .. 6*i+5] is read for the bit decisions and overwritten with
 * the noisy, quantized LLRs (saturated to +/- llr_max).
 */
static void
gen_qm6_llr(int8_t *llrs, uint32_t i, double N0, double llr_max)
{
	int qm = 6;
	int qam = 64;
	int m, k;
	double I, Q, p0, p1, llr_, b[qm], log_syml_prob[qam];
	/* 64QAM constellation mapping per 5.1.4 of TS38.211 */
	const double symbols_I[64] = {
			3, 3, 1, 1, 3, 3, 1, 1, 5, 5, 7, 7, 5, 5, 7, 7,
			3, 3, 1, 1, 3, 3, 1, 1, 5, 5, 7, 7, 5, 5, 7, 7,
			-3, -3, -1, -1, -3, -3, -1, -1, -5, -5, -7, -7,
			-5, -5, -7, -7, -3, -3, -1, -1, -3, -3, -1, -1,
			-5, -5, -7, -7, -5, -5, -7, -7};
	const double symbols_Q[64] = {
			3, 1, 3, 1, 5, 7, 5, 7, 3, 1, 3, 1, 5, 7, 5, 7,
			-3, -1, -3, -1, -5, -7, -5, -7, -3, -1, -3, -1,
			-5, -7, -5, -7, 3, 1, 3, 1, 5, 7, 5, 7, 3, 1, 3, 1,
			5, 7, 5, 7, -3, -1, -3, -1, -5, -7, -5, -7,
			-3, -1, -3, -1, -5, -7, -5, -7};
	/* Scale noise by the average constellation point energy */
	N0 *= 42.0;
	/* Recover the transmitted bits from the reference LLR signs */
	for (k = 0; k < qm; k++)
		b[k] = llrs[qm * i + k] < 0 ? 1.0 : 0.0;
	/* Map the bits onto a constellation point, 5.1.4 of TS38.211 */
	I = (1 - 2 * b[0])*(4 - (1 - 2 * b[2]) * (2 - (1 - 2 * b[4])));
	Q = (1 - 2 * b[1])*(4 - (1 - 2 * b[3]) * (2 - (1 - 2 * b[5])));
	/* AWGN channel */
	I += sqrt(N0 / 2) * randn(0);
	Q += sqrt(N0 / 2) * randn(1);
	/*
	 * Calculate the log of the probability that each of
	 * the constellation points was transmitted
	 */
	for (m = 0; m < qam; m++)
		log_syml_prob[m] = -(pow(I - symbols_I[m], 2.0)
				+ pow(Q - symbols_Q[m], 2.0)) / N0;
	/* Calculate a max-log LLR for each of the qm bits in the set */
	for (k = 0; k < qm; k++) {
		p0 = -999999;
		p1 = -999999;
		/* For each constellation point */
		for (m = 0; m < qam; m++) {
			if ((m >> (qm - k - 1)) & 1)
				p1 = maxstar(p1, log_syml_prob[m]);
			else
				p0 = maxstar(p0, log_syml_prob[m]);
		}
		/* Calculate, quantize and saturate the LLR */
		llr_ = p0 - p1;
		llr_ *= (1 << ldpc_llr_decimals);
		llr_ = round(llr_);
		if (llr_ > llr_max)
			llr_ = llr_max;
		if (llr_ < -llr_max)
			llr_ = -llr_max;
		llrs[qm * i + k] = (int8_t) llr_;
	}
}
1535*2d9fd380Sjfb8856606 
1536*2d9fd380Sjfb8856606 /*
1537*2d9fd380Sjfb8856606  * Generate Qm LLRS for Qm==4
1538*2d9fd380Sjfb8856606  * Modulation, AWGN and LLR estimation from max log development
1539*2d9fd380Sjfb8856606  */
1540*2d9fd380Sjfb8856606 static void
gen_qm4_llr(int8_t * llrs,uint32_t i,double N0,double llr_max)1541*2d9fd380Sjfb8856606 gen_qm4_llr(int8_t *llrs, uint32_t i, double N0, double llr_max)
1542*2d9fd380Sjfb8856606 {
1543*2d9fd380Sjfb8856606 	int qm = 4;
1544*2d9fd380Sjfb8856606 	int qam = 16;
1545*2d9fd380Sjfb8856606 	int m, k;
1546*2d9fd380Sjfb8856606 	double I, Q, p0, p1, llr_, b[qm], log_syml_prob[qam];
1547*2d9fd380Sjfb8856606 	/* 5.1.4 of TS38.211 */
1548*2d9fd380Sjfb8856606 	const double symbols_I[16] = {1, 1, 3, 3, 1, 1, 3, 3,
1549*2d9fd380Sjfb8856606 			-1, -1, -3, -3, -1, -1, -3, -3};
1550*2d9fd380Sjfb8856606 	const double symbols_Q[16] = {1, 3, 1, 3, -1, -3, -1, -3,
1551*2d9fd380Sjfb8856606 			1, 3, 1, 3, -1, -3, -1, -3};
1552*2d9fd380Sjfb8856606 	/* Average constellation point energy */
1553*2d9fd380Sjfb8856606 	N0 *= 10.0;
1554*2d9fd380Sjfb8856606 	for (k = 0; k < qm; k++)
1555*2d9fd380Sjfb8856606 		b[k] = llrs[qm * i + k] < 0 ? 1.0 : 0.0;
1556*2d9fd380Sjfb8856606 	/* 5.1.4 of TS38.211 */
1557*2d9fd380Sjfb8856606 	I = (1 - 2 * b[0]) * (2 - (1 - 2 * b[2]));
1558*2d9fd380Sjfb8856606 	Q = (1 - 2 * b[1]) * (2 - (1 - 2 * b[3]));
1559*2d9fd380Sjfb8856606 	/* AWGN channel */
1560*2d9fd380Sjfb8856606 	I += sqrt(N0 / 2) * randn(0);
1561*2d9fd380Sjfb8856606 	Q += sqrt(N0 / 2) * randn(1);
1562*2d9fd380Sjfb8856606 	/*
1563*2d9fd380Sjfb8856606 	 * Calculate the log of the probability that each of
1564*2d9fd380Sjfb8856606 	 * the constellation points was transmitted
1565*2d9fd380Sjfb8856606 	 */
1566*2d9fd380Sjfb8856606 	for (m = 0; m < qam; m++)
1567*2d9fd380Sjfb8856606 		log_syml_prob[m] = -(pow(I - symbols_I[m], 2.0)
1568*2d9fd380Sjfb8856606 				+ pow(Q - symbols_Q[m], 2.0)) / N0;
1569*2d9fd380Sjfb8856606 	/* Calculate an LLR for each of the k_64QAM bits in the set */
1570*2d9fd380Sjfb8856606 	for (k = 0; k < qm; k++) {
1571*2d9fd380Sjfb8856606 		p0 = -999999;
1572*2d9fd380Sjfb8856606 		p1 = -999999;
1573*2d9fd380Sjfb8856606 		/* For each constellation point */
1574*2d9fd380Sjfb8856606 		for (m = 0; m < qam; m++) {
1575*2d9fd380Sjfb8856606 			if ((m >> (qm - k - 1)) & 1)
1576*2d9fd380Sjfb8856606 				p1 = maxstar(p1, log_syml_prob[m]);
1577*2d9fd380Sjfb8856606 			else
1578*2d9fd380Sjfb8856606 				p0 = maxstar(p0, log_syml_prob[m]);
1579*2d9fd380Sjfb8856606 		}
1580*2d9fd380Sjfb8856606 		/* Calculate the LLR */
1581*2d9fd380Sjfb8856606 		llr_ = p0 - p1;
1582*2d9fd380Sjfb8856606 		llr_ *= (1 << ldpc_llr_decimals);
1583*2d9fd380Sjfb8856606 		llr_ = round(llr_);
1584*2d9fd380Sjfb8856606 		if (llr_ > llr_max)
1585*2d9fd380Sjfb8856606 			llr_ = llr_max;
1586*2d9fd380Sjfb8856606 		if (llr_ < -llr_max)
1587*2d9fd380Sjfb8856606 			llr_ = -llr_max;
1588*2d9fd380Sjfb8856606 		llrs[qm * i + k] = (int8_t) llr_;
1589*2d9fd380Sjfb8856606 	}
1590*2d9fd380Sjfb8856606 }
1591*2d9fd380Sjfb8856606 
1592*2d9fd380Sjfb8856606 static void
gen_qm2_llr(int8_t * llrs,uint32_t j,double N0,double llr_max)1593*2d9fd380Sjfb8856606 gen_qm2_llr(int8_t *llrs, uint32_t j, double N0, double llr_max)
1594*2d9fd380Sjfb8856606 {
1595*2d9fd380Sjfb8856606 	double b, b1, n;
1596*2d9fd380Sjfb8856606 	double coeff = 2.0 * sqrt(N0);
1597*2d9fd380Sjfb8856606 
1598*2d9fd380Sjfb8856606 	/* Ignore in vectors rare quasi null LLRs not to be saturated */
1599*2d9fd380Sjfb8856606 	if (llrs[j] < 8 && llrs[j] > -8)
1600*2d9fd380Sjfb8856606 		return;
1601*2d9fd380Sjfb8856606 
1602*2d9fd380Sjfb8856606 	/* Note don't change sign here */
1603*2d9fd380Sjfb8856606 	n = randn(j % 2);
1604*2d9fd380Sjfb8856606 	b1 = ((llrs[j] > 0 ? 2.0 : -2.0)
1605*2d9fd380Sjfb8856606 			+ coeff * n) / N0;
1606*2d9fd380Sjfb8856606 	b = b1 * (1 << ldpc_llr_decimals);
1607*2d9fd380Sjfb8856606 	b = round(b);
1608*2d9fd380Sjfb8856606 	if (b > llr_max)
1609*2d9fd380Sjfb8856606 		b = llr_max;
1610*2d9fd380Sjfb8856606 	if (b < -llr_max)
1611*2d9fd380Sjfb8856606 		b = -llr_max;
1612*2d9fd380Sjfb8856606 	llrs[j] = (int8_t) b;
1613*2d9fd380Sjfb8856606 }
1614*2d9fd380Sjfb8856606 
1615*2d9fd380Sjfb8856606 /* Generate LLR for a given SNR */
1616*2d9fd380Sjfb8856606 static void
generate_llr_input(uint16_t n,struct rte_bbdev_op_data * inputs,struct rte_bbdev_dec_op * ref_op)1617*2d9fd380Sjfb8856606 generate_llr_input(uint16_t n, struct rte_bbdev_op_data *inputs,
1618*2d9fd380Sjfb8856606 		struct rte_bbdev_dec_op *ref_op)
1619*2d9fd380Sjfb8856606 {
1620*2d9fd380Sjfb8856606 	struct rte_mbuf *m;
1621*2d9fd380Sjfb8856606 	uint16_t qm;
1622*2d9fd380Sjfb8856606 	uint32_t i, j, e, range;
1623*2d9fd380Sjfb8856606 	double N0, llr_max;
1624*2d9fd380Sjfb8856606 
1625*2d9fd380Sjfb8856606 	e = ref_op->ldpc_dec.cb_params.e;
1626*2d9fd380Sjfb8856606 	qm = ref_op->ldpc_dec.q_m;
1627*2d9fd380Sjfb8856606 	llr_max = (1 << (ldpc_llr_size - 1)) - 1;
1628*2d9fd380Sjfb8856606 	range = e / qm;
1629*2d9fd380Sjfb8856606 	N0 = 1.0 / pow(10.0, get_snr() / 10.0);
1630*2d9fd380Sjfb8856606 
1631*2d9fd380Sjfb8856606 	for (i = 0; i < n; ++i) {
1632*2d9fd380Sjfb8856606 		m = inputs[i].data;
1633*2d9fd380Sjfb8856606 		int8_t *llrs = rte_pktmbuf_mtod_offset(m, int8_t *, 0);
1634*2d9fd380Sjfb8856606 		if (qm == 8) {
1635*2d9fd380Sjfb8856606 			for (j = 0; j < range; ++j)
1636*2d9fd380Sjfb8856606 				gen_qm8_llr(llrs, j, N0, llr_max);
1637*2d9fd380Sjfb8856606 		} else if (qm == 6) {
1638*2d9fd380Sjfb8856606 			for (j = 0; j < range; ++j)
1639*2d9fd380Sjfb8856606 				gen_qm6_llr(llrs, j, N0, llr_max);
1640*2d9fd380Sjfb8856606 		} else if (qm == 4) {
1641*2d9fd380Sjfb8856606 			for (j = 0; j < range; ++j)
1642*2d9fd380Sjfb8856606 				gen_qm4_llr(llrs, j, N0, llr_max);
1643*2d9fd380Sjfb8856606 		} else {
1644*2d9fd380Sjfb8856606 			for (j = 0; j < e; ++j)
1645*2d9fd380Sjfb8856606 				gen_qm2_llr(llrs, j, N0, llr_max);
1646*2d9fd380Sjfb8856606 		}
1647*2d9fd380Sjfb8856606 	}
1648*2d9fd380Sjfb8856606 }
1649*2d9fd380Sjfb8856606 
/*
 * Populate @n LDPC decode ops from the reference op and attach the
 * per-op data buffers starting at @start_idx.
 *
 * Any of the buffer arrays may be NULL when the test configuration does
 * not provide that data type, in which case the corresponding op field
 * is left untouched.
 */
static void
copy_reference_ldpc_dec_op(struct rte_bbdev_dec_op **ops, unsigned int n,
		unsigned int start_idx,
		struct rte_bbdev_op_data *inputs,
		struct rte_bbdev_op_data *hard_outputs,
		struct rte_bbdev_op_data *soft_outputs,
		struct rte_bbdev_op_data *harq_inputs,
		struct rte_bbdev_op_data *harq_outputs,
		struct rte_bbdev_dec_op *ref_op)
{
	unsigned int i;
	struct rte_bbdev_op_ldpc_dec *ldpc_dec = &ref_op->ldpc_dec;

	for (i = 0; i < n; ++i) {
		/* code_block_mode == 0 selects transport-block parameters */
		if (ldpc_dec->code_block_mode == 0) {
			ops[i]->ldpc_dec.tb_params.ea =
					ldpc_dec->tb_params.ea;
			ops[i]->ldpc_dec.tb_params.eb =
					ldpc_dec->tb_params.eb;
			ops[i]->ldpc_dec.tb_params.c =
					ldpc_dec->tb_params.c;
			ops[i]->ldpc_dec.tb_params.cab =
					ldpc_dec->tb_params.cab;
			ops[i]->ldpc_dec.tb_params.r =
					ldpc_dec->tb_params.r;
		} else {
			ops[i]->ldpc_dec.cb_params.e = ldpc_dec->cb_params.e;
		}

		ops[i]->ldpc_dec.basegraph = ldpc_dec->basegraph;
		ops[i]->ldpc_dec.z_c = ldpc_dec->z_c;
		ops[i]->ldpc_dec.q_m = ldpc_dec->q_m;
		ops[i]->ldpc_dec.n_filler = ldpc_dec->n_filler;
		ops[i]->ldpc_dec.n_cb = ldpc_dec->n_cb;
		ops[i]->ldpc_dec.iter_max = ldpc_dec->iter_max;
		ops[i]->ldpc_dec.rv_index = ldpc_dec->rv_index;
		ops[i]->ldpc_dec.op_flags = ldpc_dec->op_flags;
		ops[i]->ldpc_dec.code_block_mode = ldpc_dec->code_block_mode;

		/* Attach only the buffers the configuration provides */
		if (hard_outputs != NULL)
			ops[i]->ldpc_dec.hard_output =
					hard_outputs[start_idx + i];
		if (inputs != NULL)
			ops[i]->ldpc_dec.input =
					inputs[start_idx + i];
		if (soft_outputs != NULL)
			ops[i]->ldpc_dec.soft_output =
					soft_outputs[start_idx + i];
		if (harq_inputs != NULL)
			ops[i]->ldpc_dec.harq_combined_input =
					harq_inputs[start_idx + i];
		if (harq_outputs != NULL)
			ops[i]->ldpc_dec.harq_combined_output =
					harq_outputs[start_idx + i];
	}
}
17064418919fSjohnjiang 
17074418919fSjohnjiang 
17084418919fSjohnjiang static void
copy_reference_ldpc_enc_op(struct rte_bbdev_enc_op ** ops,unsigned int n,unsigned int start_idx,struct rte_bbdev_op_data * inputs,struct rte_bbdev_op_data * outputs,struct rte_bbdev_enc_op * ref_op)17094418919fSjohnjiang copy_reference_ldpc_enc_op(struct rte_bbdev_enc_op **ops, unsigned int n,
17104418919fSjohnjiang 		unsigned int start_idx,
17114418919fSjohnjiang 		struct rte_bbdev_op_data *inputs,
17124418919fSjohnjiang 		struct rte_bbdev_op_data *outputs,
17134418919fSjohnjiang 		struct rte_bbdev_enc_op *ref_op)
17144418919fSjohnjiang {
17154418919fSjohnjiang 	unsigned int i;
17164418919fSjohnjiang 	struct rte_bbdev_op_ldpc_enc *ldpc_enc = &ref_op->ldpc_enc;
17174418919fSjohnjiang 	for (i = 0; i < n; ++i) {
17184418919fSjohnjiang 		if (ldpc_enc->code_block_mode == 0) {
17194418919fSjohnjiang 			ops[i]->ldpc_enc.tb_params.ea = ldpc_enc->tb_params.ea;
17204418919fSjohnjiang 			ops[i]->ldpc_enc.tb_params.eb = ldpc_enc->tb_params.eb;
17214418919fSjohnjiang 			ops[i]->ldpc_enc.tb_params.cab =
17224418919fSjohnjiang 					ldpc_enc->tb_params.cab;
17234418919fSjohnjiang 			ops[i]->ldpc_enc.tb_params.c = ldpc_enc->tb_params.c;
17244418919fSjohnjiang 			ops[i]->ldpc_enc.tb_params.r = ldpc_enc->tb_params.r;
17254418919fSjohnjiang 		} else {
17264418919fSjohnjiang 			ops[i]->ldpc_enc.cb_params.e = ldpc_enc->cb_params.e;
17274418919fSjohnjiang 		}
17284418919fSjohnjiang 		ops[i]->ldpc_enc.basegraph = ldpc_enc->basegraph;
17294418919fSjohnjiang 		ops[i]->ldpc_enc.z_c = ldpc_enc->z_c;
17304418919fSjohnjiang 		ops[i]->ldpc_enc.q_m = ldpc_enc->q_m;
17314418919fSjohnjiang 		ops[i]->ldpc_enc.n_filler = ldpc_enc->n_filler;
17324418919fSjohnjiang 		ops[i]->ldpc_enc.n_cb = ldpc_enc->n_cb;
17334418919fSjohnjiang 		ops[i]->ldpc_enc.rv_index = ldpc_enc->rv_index;
17344418919fSjohnjiang 		ops[i]->ldpc_enc.op_flags = ldpc_enc->op_flags;
17354418919fSjohnjiang 		ops[i]->ldpc_enc.code_block_mode = ldpc_enc->code_block_mode;
17364418919fSjohnjiang 		ops[i]->ldpc_enc.output = outputs[start_idx + i];
17374418919fSjohnjiang 		ops[i]->ldpc_enc.input = inputs[start_idx + i];
17384418919fSjohnjiang 	}
17394418919fSjohnjiang }
17404418919fSjohnjiang 
1741d30ea906Sjfb8856606 static int
check_dec_status_and_ordering(struct rte_bbdev_dec_op * op,unsigned int order_idx,const int expected_status)1742d30ea906Sjfb8856606 check_dec_status_and_ordering(struct rte_bbdev_dec_op *op,
1743d30ea906Sjfb8856606 		unsigned int order_idx, const int expected_status)
1744d30ea906Sjfb8856606 {
1745*2d9fd380Sjfb8856606 	int status = op->status;
1746*2d9fd380Sjfb8856606 	/* ignore parity mismatch false alarms for long iterations */
1747*2d9fd380Sjfb8856606 	if (get_iter_max() >= 10) {
1748*2d9fd380Sjfb8856606 		if (!(expected_status & (1 << RTE_BBDEV_SYNDROME_ERROR)) &&
1749*2d9fd380Sjfb8856606 				(status & (1 << RTE_BBDEV_SYNDROME_ERROR))) {
1750*2d9fd380Sjfb8856606 			printf("WARNING: Ignore Syndrome Check mismatch\n");
1751*2d9fd380Sjfb8856606 			status -= (1 << RTE_BBDEV_SYNDROME_ERROR);
1752*2d9fd380Sjfb8856606 		}
1753*2d9fd380Sjfb8856606 		if ((expected_status & (1 << RTE_BBDEV_SYNDROME_ERROR)) &&
1754*2d9fd380Sjfb8856606 				!(status & (1 << RTE_BBDEV_SYNDROME_ERROR))) {
1755*2d9fd380Sjfb8856606 			printf("WARNING: Ignore Syndrome Check mismatch\n");
1756*2d9fd380Sjfb8856606 			status += (1 << RTE_BBDEV_SYNDROME_ERROR);
1757*2d9fd380Sjfb8856606 		}
1758*2d9fd380Sjfb8856606 	}
1759*2d9fd380Sjfb8856606 
1760*2d9fd380Sjfb8856606 	TEST_ASSERT(status == expected_status,
1761d30ea906Sjfb8856606 			"op_status (%d) != expected_status (%d)",
1762d30ea906Sjfb8856606 			op->status, expected_status);
1763d30ea906Sjfb8856606 
1764d30ea906Sjfb8856606 	TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data,
1765d30ea906Sjfb8856606 			"Ordering error, expected %p, got %p",
1766d30ea906Sjfb8856606 			(void *)(uintptr_t)order_idx, op->opaque_data);
1767d30ea906Sjfb8856606 
1768d30ea906Sjfb8856606 	return TEST_SUCCESS;
1769d30ea906Sjfb8856606 }
1770d30ea906Sjfb8856606 
1771d30ea906Sjfb8856606 static int
check_enc_status_and_ordering(struct rte_bbdev_enc_op * op,unsigned int order_idx,const int expected_status)1772d30ea906Sjfb8856606 check_enc_status_and_ordering(struct rte_bbdev_enc_op *op,
1773d30ea906Sjfb8856606 		unsigned int order_idx, const int expected_status)
1774d30ea906Sjfb8856606 {
1775d30ea906Sjfb8856606 	TEST_ASSERT(op->status == expected_status,
1776d30ea906Sjfb8856606 			"op_status (%d) != expected_status (%d)",
1777d30ea906Sjfb8856606 			op->status, expected_status);
1778d30ea906Sjfb8856606 
1779*2d9fd380Sjfb8856606 	if (op->opaque_data != (void *)(uintptr_t)INVALID_OPAQUE)
1780d30ea906Sjfb8856606 		TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data,
1781d30ea906Sjfb8856606 				"Ordering error, expected %p, got %p",
1782d30ea906Sjfb8856606 				(void *)(uintptr_t)order_idx, op->opaque_data);
1783d30ea906Sjfb8856606 
1784d30ea906Sjfb8856606 	return TEST_SUCCESS;
1785d30ea906Sjfb8856606 }
1786d30ea906Sjfb8856606 
1787d30ea906Sjfb8856606 static inline int
validate_op_chain(struct rte_bbdev_op_data * op,struct op_data_entries * orig_op)1788d30ea906Sjfb8856606 validate_op_chain(struct rte_bbdev_op_data *op,
1789d30ea906Sjfb8856606 		struct op_data_entries *orig_op)
1790d30ea906Sjfb8856606 {
1791d30ea906Sjfb8856606 	uint8_t i;
1792d30ea906Sjfb8856606 	struct rte_mbuf *m = op->data;
1793d30ea906Sjfb8856606 	uint8_t nb_dst_segments = orig_op->nb_segments;
17944418919fSjohnjiang 	uint32_t total_data_size = 0;
1795d30ea906Sjfb8856606 
1796d30ea906Sjfb8856606 	TEST_ASSERT(nb_dst_segments == m->nb_segs,
1797d30ea906Sjfb8856606 			"Number of segments differ in original (%u) and filled (%u) op",
1798d30ea906Sjfb8856606 			nb_dst_segments, m->nb_segs);
1799d30ea906Sjfb8856606 
18004418919fSjohnjiang 	/* Validate each mbuf segment length */
1801d30ea906Sjfb8856606 	for (i = 0; i < nb_dst_segments; ++i) {
1802d30ea906Sjfb8856606 		/* Apply offset to the first mbuf segment */
1803d30ea906Sjfb8856606 		uint16_t offset = (i == 0) ? op->offset : 0;
18044418919fSjohnjiang 		uint16_t data_len = rte_pktmbuf_data_len(m) - offset;
18054418919fSjohnjiang 		total_data_size += orig_op->segments[i].length;
1806d30ea906Sjfb8856606 
1807d30ea906Sjfb8856606 		TEST_ASSERT(orig_op->segments[i].length == data_len,
1808d30ea906Sjfb8856606 				"Length of segment differ in original (%u) and filled (%u) op",
1809d30ea906Sjfb8856606 				orig_op->segments[i].length, data_len);
1810d30ea906Sjfb8856606 		TEST_ASSERT_BUFFERS_ARE_EQUAL(orig_op->segments[i].addr,
1811d30ea906Sjfb8856606 				rte_pktmbuf_mtod_offset(m, uint32_t *, offset),
1812d30ea906Sjfb8856606 				data_len,
1813d30ea906Sjfb8856606 				"Output buffers (CB=%u) are not equal", i);
1814d30ea906Sjfb8856606 		m = m->next;
1815d30ea906Sjfb8856606 	}
1816d30ea906Sjfb8856606 
18174418919fSjohnjiang 	/* Validate total mbuf pkt length */
18184418919fSjohnjiang 	uint32_t pkt_len = rte_pktmbuf_pkt_len(op->data) - op->offset;
18194418919fSjohnjiang 	TEST_ASSERT(total_data_size == pkt_len,
18204418919fSjohnjiang 			"Length of data differ in original (%u) and filled (%u) op",
18214418919fSjohnjiang 			total_data_size, pkt_len);
1822d30ea906Sjfb8856606 
1823d30ea906Sjfb8856606 	return TEST_SUCCESS;
1824d30ea906Sjfb8856606 }
1825d30ea906Sjfb8856606 
1826*2d9fd380Sjfb8856606 /*
1827*2d9fd380Sjfb8856606  * Compute K0 for a given configuration for HARQ output length computation
1828*2d9fd380Sjfb8856606  * As per definition in 3GPP 38.212 Table 5.4.2.1-2
1829*2d9fd380Sjfb8856606  */
1830*2d9fd380Sjfb8856606 static inline uint16_t
get_k0(uint16_t n_cb,uint16_t z_c,uint8_t bg,uint8_t rv_index)1831*2d9fd380Sjfb8856606 get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
1832*2d9fd380Sjfb8856606 {
1833*2d9fd380Sjfb8856606 	if (rv_index == 0)
1834*2d9fd380Sjfb8856606 		return 0;
1835*2d9fd380Sjfb8856606 	uint16_t n = (bg == 1 ? N_ZC_1 : N_ZC_2) * z_c;
1836*2d9fd380Sjfb8856606 	if (n_cb == n) {
1837*2d9fd380Sjfb8856606 		if (rv_index == 1)
1838*2d9fd380Sjfb8856606 			return (bg == 1 ? K0_1_1 : K0_1_2) * z_c;
1839*2d9fd380Sjfb8856606 		else if (rv_index == 2)
1840*2d9fd380Sjfb8856606 			return (bg == 1 ? K0_2_1 : K0_2_2) * z_c;
1841*2d9fd380Sjfb8856606 		else
1842*2d9fd380Sjfb8856606 			return (bg == 1 ? K0_3_1 : K0_3_2) * z_c;
1843*2d9fd380Sjfb8856606 	}
1844*2d9fd380Sjfb8856606 	/* LBRM case - includes a division by N */
1845*2d9fd380Sjfb8856606 	if (rv_index == 1)
1846*2d9fd380Sjfb8856606 		return (((bg == 1 ? K0_1_1 : K0_1_2) * n_cb)
1847*2d9fd380Sjfb8856606 				/ n) * z_c;
1848*2d9fd380Sjfb8856606 	else if (rv_index == 2)
1849*2d9fd380Sjfb8856606 		return (((bg == 1 ? K0_2_1 : K0_2_2) * n_cb)
1850*2d9fd380Sjfb8856606 				/ n) * z_c;
1851*2d9fd380Sjfb8856606 	else
1852*2d9fd380Sjfb8856606 		return (((bg == 1 ? K0_3_1 : K0_3_2) * n_cb)
1853*2d9fd380Sjfb8856606 				/ n) * z_c;
1854*2d9fd380Sjfb8856606 }
1855*2d9fd380Sjfb8856606 
1856*2d9fd380Sjfb8856606 /* HARQ output length including the Filler bits */
1857*2d9fd380Sjfb8856606 static inline uint16_t
compute_harq_len(struct rte_bbdev_op_ldpc_dec * ops_ld)1858*2d9fd380Sjfb8856606 compute_harq_len(struct rte_bbdev_op_ldpc_dec *ops_ld)
1859*2d9fd380Sjfb8856606 {
1860*2d9fd380Sjfb8856606 	uint16_t k0 = 0;
1861*2d9fd380Sjfb8856606 	uint8_t max_rv = (ops_ld->rv_index == 1) ? 3 : ops_ld->rv_index;
1862*2d9fd380Sjfb8856606 	k0 = get_k0(ops_ld->n_cb, ops_ld->z_c, ops_ld->basegraph, max_rv);
1863*2d9fd380Sjfb8856606 	/* Compute RM out size and number of rows */
1864*2d9fd380Sjfb8856606 	uint16_t parity_offset = (ops_ld->basegraph == 1 ? 20 : 8)
1865*2d9fd380Sjfb8856606 			* ops_ld->z_c - ops_ld->n_filler;
1866*2d9fd380Sjfb8856606 	uint16_t deRmOutSize = RTE_MIN(
1867*2d9fd380Sjfb8856606 			k0 + ops_ld->cb_params.e +
1868*2d9fd380Sjfb8856606 			((k0 > parity_offset) ?
1869*2d9fd380Sjfb8856606 					0 : ops_ld->n_filler),
1870*2d9fd380Sjfb8856606 					ops_ld->n_cb);
1871*2d9fd380Sjfb8856606 	uint16_t numRows = ((deRmOutSize + ops_ld->z_c - 1)
1872*2d9fd380Sjfb8856606 			/ ops_ld->z_c);
1873*2d9fd380Sjfb8856606 	uint16_t harq_output_len = numRows * ops_ld->z_c;
1874*2d9fd380Sjfb8856606 	return harq_output_len;
1875*2d9fd380Sjfb8856606 }
1876*2d9fd380Sjfb8856606 
/*
 * Validate a HARQ combined output mbuf chain against the reference data.
 *
 * Unlike validate_op_chain() this is a tolerant comparison: HARQ LLRs may
 * differ slightly from the reference because of integer quantization and
 * scaling, so small per-byte deviations are accepted and only larger ones
 * are counted as errors. NOTE: both harq_out and harq_orig are rescaled /
 * saturated in place during the comparison.
 *
 * @param op       HARQ combined output operand (mbuf chain + offset).
 * @param orig_op  Reference HARQ data from the test vector.
 * @param ops_ld   LDPC decode parameters, used to derive the expected
 *                 rate-matching output length and the filler position.
 * @return TEST_SUCCESS or TEST_FAILED (via TEST_ASSERT macros).
 */
static inline int
validate_op_harq_chain(struct rte_bbdev_op_data *op,
		struct op_data_entries *orig_op,
		struct rte_bbdev_op_ldpc_dec *ops_ld)
{
	uint8_t i;
	uint32_t j, jj, k;
	struct rte_mbuf *m = op->data;
	uint8_t nb_dst_segments = orig_op->nb_segments;
	uint32_t total_data_size = 0;
	int8_t *harq_orig, *harq_out, abs_harq_origin;
	uint32_t byte_error = 0, cum_error = 0, error;
	/* Max LLR magnitude after scaling down by ldpc_llr_decimals */
	int16_t llr_max = (1 << (ldpc_llr_size - ldpc_llr_decimals)) - 1;
	/* Max LLR magnitude before scaling (full fractional width) */
	int16_t llr_max_pre_scaling = (1 << (ldpc_llr_size - 1)) - 1;
	uint16_t parity_offset;

	TEST_ASSERT(nb_dst_segments == m->nb_segs,
			"Number of segments differ in original (%u) and filled (%u) op",
			nb_dst_segments, m->nb_segs);

	/* Validate each mbuf segment length */
	for (i = 0; i < nb_dst_segments; ++i) {
		/* Apply offset to the first mbuf segment */
		uint16_t offset = (i == 0) ? op->offset : 0;
		uint16_t data_len = rte_pktmbuf_data_len(m) - offset;
		total_data_size += orig_op->segments[i].length;

		/* Lengths need only roughly match: up to 64 bytes of
		 * device padding is tolerated.
		 */
		TEST_ASSERT(orig_op->segments[i].length <
				(uint32_t)(data_len + 64),
				"Length of segment differ in original (%u) and filled (%u) op",
				orig_op->segments[i].length, data_len);
		harq_orig = (int8_t *) orig_op->segments[i].addr;
		harq_out = rte_pktmbuf_mtod_offset(m, int8_t *, offset);

		/* When the device does not retain filler bits in HARQ
		 * memory (or in loop-back mode), drop the trailing Zc
		 * bytes and compare only up to that point.
		 */
		if (!(ldpc_cap_flags &
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_FILLERS
				) || (ops_ld->op_flags &
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
			data_len -= ops_ld->z_c;
			parity_offset = data_len;
		} else {
			/* Compute RM out size and number of rows */
			parity_offset = (ops_ld->basegraph == 1 ? 20 : 8)
					* ops_ld->z_c - ops_ld->n_filler;
			uint16_t deRmOutSize = compute_harq_len(ops_ld) -
					ops_ld->n_filler;
			if (data_len > deRmOutSize)
				data_len = deRmOutSize;
			if (data_len > orig_op->segments[i].length)
				data_len = orig_op->segments[i].length;
		}
		/*
		 * HARQ output can have minor differences
		 * due to integer representation and related scaling
		 */
		for (j = 0, jj = 0; j < data_len; j++, jj++) {
			if (j == parity_offset) {
				/* Special Handling of the filler bits */
				for (k = 0; k < ops_ld->n_filler; k++) {
					/* Filler positions must hold the
					 * pre-scaling saturation value.
					 * NOTE(review): the message prints
					 * llr_max although the comparison
					 * uses llr_max_pre_scaling.
					 */
					if (harq_out[jj] !=
							llr_max_pre_scaling) {
						printf("HARQ Filler issue %d: %d %d\n",
							jj, harq_out[jj],
							llr_max);
						byte_error++;
					}
					jj++;
				}
			}
			if (!(ops_ld->op_flags &
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
				/* Rescale device output and saturate the
				 * reference so both sides share the same
				 * dynamic range before comparing.
				 */
				if (ldpc_llr_decimals > 1)
					harq_out[jj] = (harq_out[jj] + 1)
						>> (ldpc_llr_decimals - 1);
				/* Saturated to S7 */
				if (harq_orig[j] > llr_max)
					harq_orig[j] = llr_max;
				if (harq_orig[j] < -llr_max)
					harq_orig[j] = -llr_max;
			}
			if (harq_orig[j] != harq_out[jj]) {
				error = (harq_orig[j] > harq_out[jj]) ?
						harq_orig[j] - harq_out[jj] :
						harq_out[jj] - harq_orig[j];
				abs_harq_origin = harq_orig[j] > 0 ?
							harq_orig[j] :
							-harq_orig[j];
				/* Residual quantization error */
				if ((error > 8 && (abs_harq_origin <
						(llr_max - 16))) ||
						(error > 16)) {
					printf("HARQ mismatch %d: exp %d act %d => %d\n",
							j, harq_orig[j],
							harq_out[jj], error);
					byte_error++;
					cum_error += error;
				}
			}
		}
		m = m->next;
	}

	/* A single deviant byte is tolerated across the whole chain */
	if (byte_error)
		TEST_ASSERT(byte_error <= 1,
				"HARQ output mismatch (%d) %d",
				byte_error, cum_error);

	/* Validate total mbuf pkt length */
	uint32_t pkt_len = rte_pktmbuf_pkt_len(op->data) - op->offset;
	/* Total length also allows up to 64 bytes of padding */
	TEST_ASSERT(total_data_size < pkt_len + 64,
			"Length of data differ in original (%u) and filled (%u) op",
			total_data_size, pkt_len);

	return TEST_SUCCESS;
}
1992*2d9fd380Sjfb8856606 
1993d30ea906Sjfb8856606 static int
validate_dec_op(struct rte_bbdev_dec_op ** ops,const uint16_t n,struct rte_bbdev_dec_op * ref_op,const int vector_mask)1994d30ea906Sjfb8856606 validate_dec_op(struct rte_bbdev_dec_op **ops, const uint16_t n,
1995d30ea906Sjfb8856606 		struct rte_bbdev_dec_op *ref_op, const int vector_mask)
1996d30ea906Sjfb8856606 {
1997d30ea906Sjfb8856606 	unsigned int i;
1998d30ea906Sjfb8856606 	int ret;
1999d30ea906Sjfb8856606 	struct op_data_entries *hard_data_orig =
2000d30ea906Sjfb8856606 			&test_vector.entries[DATA_HARD_OUTPUT];
2001d30ea906Sjfb8856606 	struct op_data_entries *soft_data_orig =
2002d30ea906Sjfb8856606 			&test_vector.entries[DATA_SOFT_OUTPUT];
2003d30ea906Sjfb8856606 	struct rte_bbdev_op_turbo_dec *ops_td;
2004d30ea906Sjfb8856606 	struct rte_bbdev_op_data *hard_output;
2005d30ea906Sjfb8856606 	struct rte_bbdev_op_data *soft_output;
2006d30ea906Sjfb8856606 	struct rte_bbdev_op_turbo_dec *ref_td = &ref_op->turbo_dec;
2007d30ea906Sjfb8856606 
2008d30ea906Sjfb8856606 	for (i = 0; i < n; ++i) {
2009d30ea906Sjfb8856606 		ops_td = &ops[i]->turbo_dec;
2010d30ea906Sjfb8856606 		hard_output = &ops_td->hard_output;
2011d30ea906Sjfb8856606 		soft_output = &ops_td->soft_output;
2012d30ea906Sjfb8856606 
2013d30ea906Sjfb8856606 		if (vector_mask & TEST_BBDEV_VF_EXPECTED_ITER_COUNT)
2014d30ea906Sjfb8856606 			TEST_ASSERT(ops_td->iter_count <= ref_td->iter_count,
2015d30ea906Sjfb8856606 					"Returned iter_count (%d) > expected iter_count (%d)",
2016d30ea906Sjfb8856606 					ops_td->iter_count, ref_td->iter_count);
2017d30ea906Sjfb8856606 		ret = check_dec_status_and_ordering(ops[i], i, ref_op->status);
2018d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(ret,
2019d30ea906Sjfb8856606 				"Checking status and ordering for decoder failed");
2020d30ea906Sjfb8856606 
2021d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(validate_op_chain(hard_output,
2022d30ea906Sjfb8856606 				hard_data_orig),
2023d30ea906Sjfb8856606 				"Hard output buffers (CB=%u) are not equal",
2024d30ea906Sjfb8856606 				i);
2025d30ea906Sjfb8856606 
2026d30ea906Sjfb8856606 		if (ref_op->turbo_dec.op_flags & RTE_BBDEV_TURBO_SOFT_OUTPUT)
2027d30ea906Sjfb8856606 			TEST_ASSERT_SUCCESS(validate_op_chain(soft_output,
2028d30ea906Sjfb8856606 					soft_data_orig),
2029d30ea906Sjfb8856606 					"Soft output buffers (CB=%u) are not equal",
2030d30ea906Sjfb8856606 					i);
2031d30ea906Sjfb8856606 	}
2032d30ea906Sjfb8856606 
2033d30ea906Sjfb8856606 	return TEST_SUCCESS;
2034d30ea906Sjfb8856606 }
2035d30ea906Sjfb8856606 
2036*2d9fd380Sjfb8856606 /* Check Number of code blocks errors */
2037*2d9fd380Sjfb8856606 static int
validate_ldpc_bler(struct rte_bbdev_dec_op ** ops,const uint16_t n)2038*2d9fd380Sjfb8856606 validate_ldpc_bler(struct rte_bbdev_dec_op **ops, const uint16_t n)
2039*2d9fd380Sjfb8856606 {
2040*2d9fd380Sjfb8856606 	unsigned int i;
2041*2d9fd380Sjfb8856606 	struct op_data_entries *hard_data_orig =
2042*2d9fd380Sjfb8856606 			&test_vector.entries[DATA_HARD_OUTPUT];
2043*2d9fd380Sjfb8856606 	struct rte_bbdev_op_ldpc_dec *ops_td;
2044*2d9fd380Sjfb8856606 	struct rte_bbdev_op_data *hard_output;
2045*2d9fd380Sjfb8856606 	int errors = 0;
2046*2d9fd380Sjfb8856606 	struct rte_mbuf *m;
2047*2d9fd380Sjfb8856606 
2048*2d9fd380Sjfb8856606 	for (i = 0; i < n; ++i) {
2049*2d9fd380Sjfb8856606 		ops_td = &ops[i]->ldpc_dec;
2050*2d9fd380Sjfb8856606 		hard_output = &ops_td->hard_output;
2051*2d9fd380Sjfb8856606 		m = hard_output->data;
2052*2d9fd380Sjfb8856606 		if (memcmp(rte_pktmbuf_mtod_offset(m, uint32_t *, 0),
2053*2d9fd380Sjfb8856606 				hard_data_orig->segments[0].addr,
2054*2d9fd380Sjfb8856606 				hard_data_orig->segments[0].length))
2055*2d9fd380Sjfb8856606 			errors++;
2056*2d9fd380Sjfb8856606 	}
2057*2d9fd380Sjfb8856606 	return errors;
2058*2d9fd380Sjfb8856606 }
20594418919fSjohnjiang 
/*
 * Validate dequeued LDPC decode ops against the reference vector.
 * Checks status/ordering, iteration count (when the vector defines one),
 * hard output, optional soft output, and HARQ combined output for both
 * the HQ-combine-out and internal-loopback cases.
 */
static int
validate_ldpc_dec_op(struct rte_bbdev_dec_op **ops, const uint16_t n,
		struct rte_bbdev_dec_op *ref_op, const int vector_mask)
{
	unsigned int i;
	int ret;
	struct op_data_entries *hard_data_orig =
			&test_vector.entries[DATA_HARD_OUTPUT];
	struct op_data_entries *soft_data_orig =
			&test_vector.entries[DATA_SOFT_OUTPUT];
	struct op_data_entries *harq_data_orig =
				&test_vector.entries[DATA_HARQ_OUTPUT];
	struct rte_bbdev_op_ldpc_dec *ops_td;
	struct rte_bbdev_op_data *hard_output;
	struct rte_bbdev_op_data *harq_output;
	struct rte_bbdev_op_data *soft_output;
	struct rte_bbdev_op_ldpc_dec *ref_td = &ref_op->ldpc_dec;

	for (i = 0; i < n; ++i) {
		ops_td = &ops[i]->ldpc_dec;
		hard_output = &ops_td->hard_output;
		harq_output = &ops_td->harq_combined_output;
		soft_output = &ops_td->soft_output;

		ret = check_dec_status_and_ordering(ops[i], i, ref_op->status);
		TEST_ASSERT_SUCCESS(ret,
				"Checking status and ordering for decoder failed");
		if (vector_mask & TEST_BBDEV_VF_EXPECTED_ITER_COUNT)
			TEST_ASSERT(ops_td->iter_count <= ref_td->iter_count,
					"Returned iter_count (%d) > expected iter_count (%d)",
					ops_td->iter_count, ref_td->iter_count);
		/*
		 * We can ignore output data when the decoding failed to
		 * converge or for loop-back cases
		 */
		if (!check_bit(ops[i]->ldpc_dec.op_flags,
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK
				) && (
				ops[i]->status & (1 << RTE_BBDEV_SYNDROME_ERROR
						)) == 0)
			TEST_ASSERT_SUCCESS(validate_op_chain(hard_output,
					hard_data_orig),
					"Hard output buffers (CB=%u) are not equal",
					i);

		/* Soft output is only produced when explicitly enabled */
		if (ref_op->ldpc_dec.op_flags & RTE_BBDEV_LDPC_SOFT_OUT_ENABLE)
			TEST_ASSERT_SUCCESS(validate_op_chain(soft_output,
					soft_data_orig),
					"Soft output buffers (CB=%u) are not equal",
					i);
		/* HARQ output uses the tolerant (quantization-aware) check */
		if (ref_op->ldpc_dec.op_flags &
				RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE) {
			TEST_ASSERT_SUCCESS(validate_op_harq_chain(harq_output,
					harq_data_orig, ops_td),
					"HARQ output buffers (CB=%u) are not equal",
					i);
		}
		/* Loop-back mode also returns data through the HARQ output */
		if (ref_op->ldpc_dec.op_flags &
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)
			TEST_ASSERT_SUCCESS(validate_op_harq_chain(harq_output,
					harq_data_orig, ops_td),
					"HARQ output buffers (CB=%u) are not equal",
					i);

	}

	return TEST_SUCCESS;
}
21284418919fSjohnjiang 
21294418919fSjohnjiang 
2130d30ea906Sjfb8856606 static int
validate_enc_op(struct rte_bbdev_enc_op ** ops,const uint16_t n,struct rte_bbdev_enc_op * ref_op)2131d30ea906Sjfb8856606 validate_enc_op(struct rte_bbdev_enc_op **ops, const uint16_t n,
2132d30ea906Sjfb8856606 		struct rte_bbdev_enc_op *ref_op)
2133d30ea906Sjfb8856606 {
2134d30ea906Sjfb8856606 	unsigned int i;
2135d30ea906Sjfb8856606 	int ret;
2136d30ea906Sjfb8856606 	struct op_data_entries *hard_data_orig =
2137d30ea906Sjfb8856606 			&test_vector.entries[DATA_HARD_OUTPUT];
2138d30ea906Sjfb8856606 
2139d30ea906Sjfb8856606 	for (i = 0; i < n; ++i) {
2140d30ea906Sjfb8856606 		ret = check_enc_status_and_ordering(ops[i], i, ref_op->status);
2141d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(ret,
2142d30ea906Sjfb8856606 				"Checking status and ordering for encoder failed");
2143d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(validate_op_chain(
2144d30ea906Sjfb8856606 				&ops[i]->turbo_enc.output,
2145d30ea906Sjfb8856606 				hard_data_orig),
2146d30ea906Sjfb8856606 				"Output buffers (CB=%u) are not equal",
2147d30ea906Sjfb8856606 				i);
2148d30ea906Sjfb8856606 	}
2149d30ea906Sjfb8856606 
2150d30ea906Sjfb8856606 	return TEST_SUCCESS;
2151d30ea906Sjfb8856606 }
2152d30ea906Sjfb8856606 
21534418919fSjohnjiang static int
validate_ldpc_enc_op(struct rte_bbdev_enc_op ** ops,const uint16_t n,struct rte_bbdev_enc_op * ref_op)21544418919fSjohnjiang validate_ldpc_enc_op(struct rte_bbdev_enc_op **ops, const uint16_t n,
21554418919fSjohnjiang 		struct rte_bbdev_enc_op *ref_op)
21564418919fSjohnjiang {
21574418919fSjohnjiang 	unsigned int i;
21584418919fSjohnjiang 	int ret;
21594418919fSjohnjiang 	struct op_data_entries *hard_data_orig =
21604418919fSjohnjiang 			&test_vector.entries[DATA_HARD_OUTPUT];
21614418919fSjohnjiang 
21624418919fSjohnjiang 	for (i = 0; i < n; ++i) {
21634418919fSjohnjiang 		ret = check_enc_status_and_ordering(ops[i], i, ref_op->status);
21644418919fSjohnjiang 		TEST_ASSERT_SUCCESS(ret,
21654418919fSjohnjiang 				"Checking status and ordering for encoder failed");
21664418919fSjohnjiang 		TEST_ASSERT_SUCCESS(validate_op_chain(
21674418919fSjohnjiang 				&ops[i]->ldpc_enc.output,
21684418919fSjohnjiang 				hard_data_orig),
21694418919fSjohnjiang 				"Output buffers (CB=%u) are not equal",
21704418919fSjohnjiang 				i);
21714418919fSjohnjiang 	}
21724418919fSjohnjiang 
21734418919fSjohnjiang 	return TEST_SUCCESS;
21744418919fSjohnjiang }
21754418919fSjohnjiang 
2176d30ea906Sjfb8856606 static void
create_reference_dec_op(struct rte_bbdev_dec_op * op)2177d30ea906Sjfb8856606 create_reference_dec_op(struct rte_bbdev_dec_op *op)
2178d30ea906Sjfb8856606 {
2179d30ea906Sjfb8856606 	unsigned int i;
2180d30ea906Sjfb8856606 	struct op_data_entries *entry;
2181d30ea906Sjfb8856606 
2182d30ea906Sjfb8856606 	op->turbo_dec = test_vector.turbo_dec;
2183d30ea906Sjfb8856606 	entry = &test_vector.entries[DATA_INPUT];
2184d30ea906Sjfb8856606 	for (i = 0; i < entry->nb_segments; ++i)
2185d30ea906Sjfb8856606 		op->turbo_dec.input.length +=
2186d30ea906Sjfb8856606 				entry->segments[i].length;
2187d30ea906Sjfb8856606 }
2188d30ea906Sjfb8856606 
2189d30ea906Sjfb8856606 static void
create_reference_ldpc_dec_op(struct rte_bbdev_dec_op * op)21904418919fSjohnjiang create_reference_ldpc_dec_op(struct rte_bbdev_dec_op *op)
21914418919fSjohnjiang {
21924418919fSjohnjiang 	unsigned int i;
21934418919fSjohnjiang 	struct op_data_entries *entry;
21944418919fSjohnjiang 
21954418919fSjohnjiang 	op->ldpc_dec = test_vector.ldpc_dec;
21964418919fSjohnjiang 	entry = &test_vector.entries[DATA_INPUT];
21974418919fSjohnjiang 	for (i = 0; i < entry->nb_segments; ++i)
21984418919fSjohnjiang 		op->ldpc_dec.input.length +=
21994418919fSjohnjiang 				entry->segments[i].length;
22004418919fSjohnjiang 	if (test_vector.ldpc_dec.op_flags &
22014418919fSjohnjiang 			RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE) {
22024418919fSjohnjiang 		entry = &test_vector.entries[DATA_HARQ_INPUT];
22034418919fSjohnjiang 		for (i = 0; i < entry->nb_segments; ++i)
22044418919fSjohnjiang 			op->ldpc_dec.harq_combined_input.length +=
22054418919fSjohnjiang 				entry->segments[i].length;
22064418919fSjohnjiang 	}
22074418919fSjohnjiang }
22084418919fSjohnjiang 
22094418919fSjohnjiang 
22104418919fSjohnjiang static void
create_reference_enc_op(struct rte_bbdev_enc_op * op)2211d30ea906Sjfb8856606 create_reference_enc_op(struct rte_bbdev_enc_op *op)
2212d30ea906Sjfb8856606 {
2213d30ea906Sjfb8856606 	unsigned int i;
2214d30ea906Sjfb8856606 	struct op_data_entries *entry;
2215d30ea906Sjfb8856606 
2216d30ea906Sjfb8856606 	op->turbo_enc = test_vector.turbo_enc;
2217d30ea906Sjfb8856606 	entry = &test_vector.entries[DATA_INPUT];
2218d30ea906Sjfb8856606 	for (i = 0; i < entry->nb_segments; ++i)
2219d30ea906Sjfb8856606 		op->turbo_enc.input.length +=
2220d30ea906Sjfb8856606 				entry->segments[i].length;
2221d30ea906Sjfb8856606 }
2222d30ea906Sjfb8856606 
22234418919fSjohnjiang static void
create_reference_ldpc_enc_op(struct rte_bbdev_enc_op * op)22244418919fSjohnjiang create_reference_ldpc_enc_op(struct rte_bbdev_enc_op *op)
22254418919fSjohnjiang {
22264418919fSjohnjiang 	unsigned int i;
22274418919fSjohnjiang 	struct op_data_entries *entry;
22284418919fSjohnjiang 
22294418919fSjohnjiang 	op->ldpc_enc = test_vector.ldpc_enc;
22304418919fSjohnjiang 	entry = &test_vector.entries[DATA_INPUT];
22314418919fSjohnjiang 	for (i = 0; i < entry->nb_segments; ++i)
22324418919fSjohnjiang 		op->ldpc_enc.input.length +=
22334418919fSjohnjiang 				entry->segments[i].length;
22344418919fSjohnjiang }
22354418919fSjohnjiang 
22364418919fSjohnjiang static uint32_t
calc_dec_TB_size(struct rte_bbdev_dec_op * op)22374418919fSjohnjiang calc_dec_TB_size(struct rte_bbdev_dec_op *op)
22384418919fSjohnjiang {
22394418919fSjohnjiang 	uint8_t i;
22404418919fSjohnjiang 	uint32_t c, r, tb_size = 0;
22414418919fSjohnjiang 
22424418919fSjohnjiang 	if (op->turbo_dec.code_block_mode) {
22434418919fSjohnjiang 		tb_size = op->turbo_dec.tb_params.k_neg;
22444418919fSjohnjiang 	} else {
22454418919fSjohnjiang 		c = op->turbo_dec.tb_params.c;
22464418919fSjohnjiang 		r = op->turbo_dec.tb_params.r;
22474418919fSjohnjiang 		for (i = 0; i < c-r; i++)
22484418919fSjohnjiang 			tb_size += (r < op->turbo_dec.tb_params.c_neg) ?
22494418919fSjohnjiang 				op->turbo_dec.tb_params.k_neg :
22504418919fSjohnjiang 				op->turbo_dec.tb_params.k_pos;
22514418919fSjohnjiang 	}
22524418919fSjohnjiang 	return tb_size;
22534418919fSjohnjiang }
22544418919fSjohnjiang 
22554418919fSjohnjiang static uint32_t
calc_ldpc_dec_TB_size(struct rte_bbdev_dec_op * op)22564418919fSjohnjiang calc_ldpc_dec_TB_size(struct rte_bbdev_dec_op *op)
22574418919fSjohnjiang {
22584418919fSjohnjiang 	uint8_t i;
22594418919fSjohnjiang 	uint32_t c, r, tb_size = 0;
22604418919fSjohnjiang 	uint16_t sys_cols = (op->ldpc_dec.basegraph == 1) ? 22 : 10;
22614418919fSjohnjiang 
22624418919fSjohnjiang 	if (op->ldpc_dec.code_block_mode) {
22634418919fSjohnjiang 		tb_size = sys_cols * op->ldpc_dec.z_c - op->ldpc_dec.n_filler;
22644418919fSjohnjiang 	} else {
22654418919fSjohnjiang 		c = op->ldpc_dec.tb_params.c;
22664418919fSjohnjiang 		r = op->ldpc_dec.tb_params.r;
22674418919fSjohnjiang 		for (i = 0; i < c-r; i++)
22684418919fSjohnjiang 			tb_size += sys_cols * op->ldpc_dec.z_c
22694418919fSjohnjiang 					- op->ldpc_dec.n_filler;
22704418919fSjohnjiang 	}
22714418919fSjohnjiang 	return tb_size;
22724418919fSjohnjiang }
22734418919fSjohnjiang 
22744418919fSjohnjiang static uint32_t
calc_enc_TB_size(struct rte_bbdev_enc_op * op)22754418919fSjohnjiang calc_enc_TB_size(struct rte_bbdev_enc_op *op)
22764418919fSjohnjiang {
22774418919fSjohnjiang 	uint8_t i;
22784418919fSjohnjiang 	uint32_t c, r, tb_size = 0;
22794418919fSjohnjiang 
22804418919fSjohnjiang 	if (op->turbo_enc.code_block_mode) {
22814418919fSjohnjiang 		tb_size = op->turbo_enc.tb_params.k_neg;
22824418919fSjohnjiang 	} else {
22834418919fSjohnjiang 		c = op->turbo_enc.tb_params.c;
22844418919fSjohnjiang 		r = op->turbo_enc.tb_params.r;
22854418919fSjohnjiang 		for (i = 0; i < c-r; i++)
22864418919fSjohnjiang 			tb_size += (r < op->turbo_enc.tb_params.c_neg) ?
22874418919fSjohnjiang 				op->turbo_enc.tb_params.k_neg :
22884418919fSjohnjiang 				op->turbo_enc.tb_params.k_pos;
22894418919fSjohnjiang 	}
22904418919fSjohnjiang 	return tb_size;
22914418919fSjohnjiang }
22924418919fSjohnjiang 
22934418919fSjohnjiang static uint32_t
calc_ldpc_enc_TB_size(struct rte_bbdev_enc_op * op)22944418919fSjohnjiang calc_ldpc_enc_TB_size(struct rte_bbdev_enc_op *op)
22954418919fSjohnjiang {
22964418919fSjohnjiang 	uint8_t i;
22974418919fSjohnjiang 	uint32_t c, r, tb_size = 0;
22984418919fSjohnjiang 	uint16_t sys_cols = (op->ldpc_enc.basegraph == 1) ? 22 : 10;
22994418919fSjohnjiang 
23004418919fSjohnjiang 	if (op->turbo_enc.code_block_mode) {
23014418919fSjohnjiang 		tb_size = sys_cols * op->ldpc_enc.z_c - op->ldpc_enc.n_filler;
23024418919fSjohnjiang 	} else {
23034418919fSjohnjiang 		c = op->turbo_enc.tb_params.c;
23044418919fSjohnjiang 		r = op->turbo_enc.tb_params.r;
23054418919fSjohnjiang 		for (i = 0; i < c-r; i++)
23064418919fSjohnjiang 			tb_size += sys_cols * op->ldpc_enc.z_c
23074418919fSjohnjiang 					- op->ldpc_enc.n_filler;
23084418919fSjohnjiang 	}
23094418919fSjohnjiang 	return tb_size;
23104418919fSjohnjiang }
23114418919fSjohnjiang 
23124418919fSjohnjiang 
2313d30ea906Sjfb8856606 static int
init_test_op_params(struct test_op_params * op_params,enum rte_bbdev_op_type op_type,const int expected_status,const int vector_mask,struct rte_mempool * ops_mp,uint16_t burst_sz,uint16_t num_to_process,uint16_t num_lcores)2314d30ea906Sjfb8856606 init_test_op_params(struct test_op_params *op_params,
2315d30ea906Sjfb8856606 		enum rte_bbdev_op_type op_type, const int expected_status,
2316d30ea906Sjfb8856606 		const int vector_mask, struct rte_mempool *ops_mp,
2317d30ea906Sjfb8856606 		uint16_t burst_sz, uint16_t num_to_process, uint16_t num_lcores)
2318d30ea906Sjfb8856606 {
2319d30ea906Sjfb8856606 	int ret = 0;
23204418919fSjohnjiang 	if (op_type == RTE_BBDEV_OP_TURBO_DEC ||
23214418919fSjohnjiang 			op_type == RTE_BBDEV_OP_LDPC_DEC)
2322d30ea906Sjfb8856606 		ret = rte_bbdev_dec_op_alloc_bulk(ops_mp,
2323d30ea906Sjfb8856606 				&op_params->ref_dec_op, 1);
2324d30ea906Sjfb8856606 	else
2325d30ea906Sjfb8856606 		ret = rte_bbdev_enc_op_alloc_bulk(ops_mp,
2326d30ea906Sjfb8856606 				&op_params->ref_enc_op, 1);
2327d30ea906Sjfb8856606 
2328d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS(ret, "rte_bbdev_op_alloc_bulk() failed");
2329d30ea906Sjfb8856606 
2330d30ea906Sjfb8856606 	op_params->mp = ops_mp;
2331d30ea906Sjfb8856606 	op_params->burst_sz = burst_sz;
2332d30ea906Sjfb8856606 	op_params->num_to_process = num_to_process;
2333d30ea906Sjfb8856606 	op_params->num_lcores = num_lcores;
2334d30ea906Sjfb8856606 	op_params->vector_mask = vector_mask;
23354418919fSjohnjiang 	if (op_type == RTE_BBDEV_OP_TURBO_DEC ||
23364418919fSjohnjiang 			op_type == RTE_BBDEV_OP_LDPC_DEC)
2337d30ea906Sjfb8856606 		op_params->ref_dec_op->status = expected_status;
23384418919fSjohnjiang 	else if (op_type == RTE_BBDEV_OP_TURBO_ENC
23394418919fSjohnjiang 			|| op_type == RTE_BBDEV_OP_LDPC_ENC)
2340d30ea906Sjfb8856606 		op_params->ref_enc_op->status = expected_status;
2341d30ea906Sjfb8856606 	return 0;
2342d30ea906Sjfb8856606 }
2343d30ea906Sjfb8856606 
2344d30ea906Sjfb8856606 static int
run_test_case_on_device(test_case_function * test_case_func,uint8_t dev_id,struct test_op_params * op_params)2345d30ea906Sjfb8856606 run_test_case_on_device(test_case_function *test_case_func, uint8_t dev_id,
2346d30ea906Sjfb8856606 		struct test_op_params *op_params)
2347d30ea906Sjfb8856606 {
2348d30ea906Sjfb8856606 	int t_ret, f_ret, socket_id = SOCKET_ID_ANY;
2349d30ea906Sjfb8856606 	unsigned int i;
2350d30ea906Sjfb8856606 	struct active_device *ad;
2351d30ea906Sjfb8856606 	unsigned int burst_sz = get_burst_sz();
2352d30ea906Sjfb8856606 	enum rte_bbdev_op_type op_type = test_vector.op_type;
2353d30ea906Sjfb8856606 	const struct rte_bbdev_op_cap *capabilities = NULL;
2354d30ea906Sjfb8856606 
2355d30ea906Sjfb8856606 	ad = &active_devs[dev_id];
2356d30ea906Sjfb8856606 
2357d30ea906Sjfb8856606 	/* Check if device supports op_type */
2358d30ea906Sjfb8856606 	if (!is_avail_op(ad, test_vector.op_type))
2359d30ea906Sjfb8856606 		return TEST_SUCCESS;
2360d30ea906Sjfb8856606 
2361d30ea906Sjfb8856606 	struct rte_bbdev_info info;
2362d30ea906Sjfb8856606 	rte_bbdev_info_get(ad->dev_id, &info);
2363d30ea906Sjfb8856606 	socket_id = GET_SOCKET(info.socket_id);
2364d30ea906Sjfb8856606 
2365d30ea906Sjfb8856606 	f_ret = create_mempools(ad, socket_id, op_type,
2366d30ea906Sjfb8856606 			get_num_ops());
2367d30ea906Sjfb8856606 	if (f_ret != TEST_SUCCESS) {
2368d30ea906Sjfb8856606 		printf("Couldn't create mempools");
2369d30ea906Sjfb8856606 		goto fail;
2370d30ea906Sjfb8856606 	}
2371d30ea906Sjfb8856606 	if (op_type == RTE_BBDEV_OP_NONE)
2372d30ea906Sjfb8856606 		op_type = RTE_BBDEV_OP_TURBO_ENC;
2373d30ea906Sjfb8856606 
2374d30ea906Sjfb8856606 	f_ret = init_test_op_params(op_params, test_vector.op_type,
2375d30ea906Sjfb8856606 			test_vector.expected_status,
2376d30ea906Sjfb8856606 			test_vector.mask,
2377d30ea906Sjfb8856606 			ad->ops_mempool,
2378d30ea906Sjfb8856606 			burst_sz,
2379d30ea906Sjfb8856606 			get_num_ops(),
2380d30ea906Sjfb8856606 			get_num_lcores());
2381d30ea906Sjfb8856606 	if (f_ret != TEST_SUCCESS) {
2382d30ea906Sjfb8856606 		printf("Couldn't init test op params");
2383d30ea906Sjfb8856606 		goto fail;
2384d30ea906Sjfb8856606 	}
2385d30ea906Sjfb8856606 
23864418919fSjohnjiang 
23874418919fSjohnjiang 	/* Find capabilities */
2388d30ea906Sjfb8856606 	const struct rte_bbdev_op_cap *cap = info.drv.capabilities;
23894418919fSjohnjiang 	for (i = 0; i < RTE_BBDEV_OP_TYPE_COUNT; i++) {
23904418919fSjohnjiang 		if (cap->type == test_vector.op_type) {
2391d30ea906Sjfb8856606 			capabilities = cap;
2392d30ea906Sjfb8856606 			break;
2393d30ea906Sjfb8856606 		}
23944418919fSjohnjiang 		cap++;
2395d30ea906Sjfb8856606 	}
2396d30ea906Sjfb8856606 	TEST_ASSERT_NOT_NULL(capabilities,
23974418919fSjohnjiang 			"Couldn't find capabilities");
2398d30ea906Sjfb8856606 
23994418919fSjohnjiang 	if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) {
2400d30ea906Sjfb8856606 		create_reference_dec_op(op_params->ref_dec_op);
2401d30ea906Sjfb8856606 	} else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
2402d30ea906Sjfb8856606 		create_reference_enc_op(op_params->ref_enc_op);
24034418919fSjohnjiang 	else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
24044418919fSjohnjiang 		create_reference_ldpc_enc_op(op_params->ref_enc_op);
24054418919fSjohnjiang 	else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
24064418919fSjohnjiang 		create_reference_ldpc_dec_op(op_params->ref_dec_op);
2407d30ea906Sjfb8856606 
2408d30ea906Sjfb8856606 	for (i = 0; i < ad->nb_queues; ++i) {
2409d30ea906Sjfb8856606 		f_ret = fill_queue_buffers(op_params,
2410d30ea906Sjfb8856606 				ad->in_mbuf_pool,
2411d30ea906Sjfb8856606 				ad->hard_out_mbuf_pool,
2412d30ea906Sjfb8856606 				ad->soft_out_mbuf_pool,
24134418919fSjohnjiang 				ad->harq_in_mbuf_pool,
24144418919fSjohnjiang 				ad->harq_out_mbuf_pool,
2415d30ea906Sjfb8856606 				ad->queue_ids[i],
2416d30ea906Sjfb8856606 				capabilities,
2417d30ea906Sjfb8856606 				info.drv.min_alignment,
2418d30ea906Sjfb8856606 				socket_id);
2419d30ea906Sjfb8856606 		if (f_ret != TEST_SUCCESS) {
2420d30ea906Sjfb8856606 			printf("Couldn't init queue buffers");
2421d30ea906Sjfb8856606 			goto fail;
2422d30ea906Sjfb8856606 		}
2423d30ea906Sjfb8856606 	}
2424d30ea906Sjfb8856606 
2425d30ea906Sjfb8856606 	/* Run test case function */
2426d30ea906Sjfb8856606 	t_ret = test_case_func(ad, op_params);
2427d30ea906Sjfb8856606 
2428d30ea906Sjfb8856606 	/* Free active device resources and return */
2429d30ea906Sjfb8856606 	free_buffers(ad, op_params);
2430d30ea906Sjfb8856606 	return t_ret;
2431d30ea906Sjfb8856606 
2432d30ea906Sjfb8856606 fail:
2433d30ea906Sjfb8856606 	free_buffers(ad, op_params);
2434d30ea906Sjfb8856606 	return TEST_FAILED;
2435d30ea906Sjfb8856606 }
2436d30ea906Sjfb8856606 
2437d30ea906Sjfb8856606 /* Run given test function per active device per supported op type
2438d30ea906Sjfb8856606  * per burst size.
2439d30ea906Sjfb8856606  */
2440d30ea906Sjfb8856606 static int
run_test_case(test_case_function * test_case_func)2441d30ea906Sjfb8856606 run_test_case(test_case_function *test_case_func)
2442d30ea906Sjfb8856606 {
2443d30ea906Sjfb8856606 	int ret = 0;
2444d30ea906Sjfb8856606 	uint8_t dev;
2445d30ea906Sjfb8856606 
2446d30ea906Sjfb8856606 	/* Alloc op_params */
2447d30ea906Sjfb8856606 	struct test_op_params *op_params = rte_zmalloc(NULL,
2448d30ea906Sjfb8856606 			sizeof(struct test_op_params), RTE_CACHE_LINE_SIZE);
2449d30ea906Sjfb8856606 	TEST_ASSERT_NOT_NULL(op_params, "Failed to alloc %zuB for op_params",
2450d30ea906Sjfb8856606 			RTE_ALIGN(sizeof(struct test_op_params),
2451d30ea906Sjfb8856606 				RTE_CACHE_LINE_SIZE));
2452d30ea906Sjfb8856606 
2453d30ea906Sjfb8856606 	/* For each device run test case function */
2454d30ea906Sjfb8856606 	for (dev = 0; dev < nb_active_devs; ++dev)
2455d30ea906Sjfb8856606 		ret |= run_test_case_on_device(test_case_func, dev, op_params);
2456d30ea906Sjfb8856606 
2457d30ea906Sjfb8856606 	rte_free(op_params);
2458d30ea906Sjfb8856606 
2459d30ea906Sjfb8856606 	return ret;
2460d30ea906Sjfb8856606 }
2461d30ea906Sjfb8856606 
2462*2d9fd380Sjfb8856606 
/* Push back the HARQ output from DDR to host.
 *
 * For each op whose HARQ combined output ended up in device-internal
 * memory (loopback + mem_out, or hc_out), issue a one-op loopback
 * enqueue/dequeue that copies the data from the device DDR region back
 * into the host-visible harq_combined_output mbuf. The op's original
 * flags and status are saved and restored around the loopback, so the
 * caller's ops are left semantically untouched apart from the copied
 * data.
 */
static void
retrieve_harq_ddr(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops,
		const uint16_t n)
{
	uint16_t j;
	int save_status, ret;
	/* Each queue owns a contiguous device-DDR window of MAX_OPS slots */
	uint32_t harq_offset = (uint32_t) queue_id * HARQ_INCR * MAX_OPS;
	struct rte_bbdev_dec_op *ops_deq[MAX_BURST];
	uint32_t flags = ops[0]->ldpc_dec.op_flags;
	bool loopback = flags & RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK;
	bool mem_out = flags & RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE;
	bool hc_out = flags & RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE;
	bool h_comp = flags & RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION;
	for (j = 0; j < n; ++j) {
		if ((loopback && mem_out) || hc_out) {
			save_status = ops[j]->status;
			/* Temporarily turn the op into a pure DDR->host
			 * loopback read; keep compression if it was used
			 * when the data was written.
			 */
			ops[j]->ldpc_dec.op_flags =
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK +
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE;
			if (h_comp)
				ops[j]->ldpc_dec.op_flags +=
					RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION;
			/* Read from this op's DDR slot, write to host offset 0 */
			ops[j]->ldpc_dec.harq_combined_input.offset =
					harq_offset;
			ops[j]->ldpc_dec.harq_combined_output.offset = 0;
			harq_offset += HARQ_INCR;
			if (!loopback)
				ops[j]->ldpc_dec.harq_combined_input.length =
				ops[j]->ldpc_dec.harq_combined_output.length;
			rte_bbdev_enqueue_ldpc_dec_ops(dev_id, queue_id,
					&ops[j], 1);
			/* Busy-poll until the single loopback op completes */
			ret = 0;
			while (ret == 0)
				ret = rte_bbdev_dequeue_ldpc_dec_ops(
						dev_id, queue_id,
						&ops_deq[j], 1);
			/* Restore the op's original flags and status */
			ops[j]->ldpc_dec.op_flags = flags;
			ops[j]->status = save_status;
		}
	}
}
2506*2d9fd380Sjfb8856606 
/*
 * Push back the HARQ output from HW DDR to Host
 * Preload HARQ memory input and adjust HARQ offset
 *
 * When preload is set and the ops take HARQ input (mem_in or hc_in),
 * first run a loopback batch that writes each op's host-side HARQ
 * input into the device's internal DDR region, saving and restoring
 * the ops' input/output descriptors and flags around the batch. In
 * all cases, finish by pointing each op's HARQ input and/or output
 * offset at its per-op slot in the queue's DDR window.
 */
static void
preload_harq_ddr(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, const uint16_t n,
		bool preload)
{
	uint16_t j;
	int deq;
	/* Each queue owns a contiguous device-DDR window of MAX_OPS slots */
	uint32_t harq_offset = (uint32_t) queue_id * HARQ_INCR * MAX_OPS;
	struct rte_bbdev_op_data save_hc_in[MAX_OPS], save_hc_out[MAX_OPS];
	struct rte_bbdev_dec_op *ops_deq[MAX_OPS];
	uint32_t flags = ops[0]->ldpc_dec.op_flags;
	bool mem_in = flags & RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE;
	bool hc_in = flags & RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE;
	bool mem_out = flags & RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE;
	bool hc_out = flags & RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE;
	bool h_comp = flags & RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION;
	if ((mem_in || hc_in) && preload) {
		for (j = 0; j < n; ++j) {
			/* Save descriptors so the ops can be restored below */
			save_hc_in[j] = ops[j]->ldpc_dec.harq_combined_input;
			save_hc_out[j] = ops[j]->ldpc_dec.harq_combined_output;
			/* Turn the op into a host->DDR loopback write; keep
			 * compression if the real run will use it.
			 */
			ops[j]->ldpc_dec.op_flags =
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK +
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE;
			if (h_comp)
				ops[j]->ldpc_dec.op_flags +=
					RTE_BBDEV_LDPC_HARQ_6BIT_COMPRESSION;
			ops[j]->ldpc_dec.harq_combined_output.offset =
					harq_offset;
			ops[j]->ldpc_dec.harq_combined_input.offset = 0;
			harq_offset += HARQ_INCR;
		}
		rte_bbdev_enqueue_ldpc_dec_ops(dev_id, queue_id, &ops[0], n);
		/* Busy-poll until the whole preload batch has completed */
		deq = 0;
		while (deq != n)
			deq += rte_bbdev_dequeue_ldpc_dec_ops(
					dev_id, queue_id, &ops_deq[deq],
					n - deq);
		/* Restore the operations */
		for (j = 0; j < n; ++j) {
			ops[j]->ldpc_dec.op_flags = flags;
			ops[j]->ldpc_dec.harq_combined_input = save_hc_in[j];
			ops[j]->ldpc_dec.harq_combined_output = save_hc_out[j];
		}
	}
	/* Rewind to the start of this queue's DDR window */
	harq_offset = (uint32_t) queue_id * HARQ_INCR * MAX_OPS;
	for (j = 0; j < n; ++j) {
		/* Adjust HARQ offset when we reach external DDR */
		if (mem_in || hc_in)
			ops[j]->ldpc_dec.harq_combined_input.offset
				= harq_offset;
		if (mem_out || hc_out)
			ops[j]->ldpc_dec.harq_combined_output.offset
				= harq_offset;
		harq_offset += HARQ_INCR;
	}
}
2567*2d9fd380Sjfb8856606 
/* Interrupt-mode dequeue callback for the throughput tests.
 *
 * Invoked on RTE_BBDEV_EVENT_DEQUEUE. cb_arg points to the first
 * element of an array of thread_params; the matching entry is found
 * by queue_id taken from ret_param. Dequeues the batch announced by
 * the enqueuing lcore (tp->burst_sz), and once all num_to_process ops
 * of the run have been dequeued, validates the outputs, frees the
 * ops and accumulates ops/s and Mbps statistics into tp.
 *
 * NOTE(review): throughput figures use tp->start_time set by the
 * enqueuing lcore; validation failures are reported through
 * tp->processing_status rather than a return value.
 */
static void
dequeue_event_callback(uint16_t dev_id,
		enum rte_bbdev_event_type event, void *cb_arg,
		void *ret_param)
{
	int ret;
	uint16_t i;
	uint64_t total_time;
	uint16_t deq, burst_sz, num_ops;
	uint16_t queue_id = *(uint16_t *) ret_param;
	struct rte_bbdev_info info;
	double tb_len_bits;
	struct thread_params *tp = cb_arg;

	/* Find matching thread params using queue_id */
	for (i = 0; i < MAX_QUEUES; ++i, ++tp)
		if (tp->queue_id == queue_id)
			break;

	if (i == MAX_QUEUES) {
		printf("%s: Queue_id from interrupt details was not found!\n",
				__func__);
		return;
	}

	if (unlikely(event != RTE_BBDEV_EVENT_DEQUEUE)) {
		rte_atomic16_set(&tp->processing_status, TEST_FAILED);
		printf(
			"Dequeue interrupt handler called for incorrect event!\n");
		return;
	}

	/* burst_sz is published by the enqueuing lcore for each batch */
	burst_sz = rte_atomic16_read(&tp->burst_sz);
	num_ops = tp->op_params->num_to_process;

	/* Dequeue with the API matching the op type under test; results
	 * land after the ops already collected in previous interrupts.
	 */
	if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
		deq = rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
				&tp->dec_ops[
					rte_atomic16_read(&tp->nb_dequeued)],
				burst_sz);
	else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
		deq = rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
				&tp->dec_ops[
					rte_atomic16_read(&tp->nb_dequeued)],
				burst_sz);
	else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
		deq = rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
				&tp->enc_ops[
					rte_atomic16_read(&tp->nb_dequeued)],
				burst_sz);
	else /*RTE_BBDEV_OP_TURBO_ENC*/
		deq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
				&tp->enc_ops[
					rte_atomic16_read(&tp->nb_dequeued)],
				burst_sz);

	if (deq < burst_sz) {
		printf(
			"After receiving the interrupt all operations should be dequeued. Expected: %u, got: %u\n",
			burst_sz, deq);
		rte_atomic16_set(&tp->processing_status, TEST_FAILED);
		return;
	}

	/* More batches still outstanding: record progress and wait for
	 * the next interrupt.
	 */
	if (rte_atomic16_read(&tp->nb_dequeued) + deq < num_ops) {
		rte_atomic16_add(&tp->nb_dequeued, deq);
		return;
	}

	/* Whole run dequeued: stop the clock and validate */
	total_time = rte_rdtsc_precise() - tp->start_time;

	rte_bbdev_info_get(dev_id, &info);

	ret = TEST_SUCCESS;

	if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) {
		struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
		ret = validate_dec_op(tp->dec_ops, num_ops, ref_op,
				tp->op_params->vector_mask);
		/* get the max of iter_count for all dequeued ops */
		for (i = 0; i < num_ops; ++i)
			tp->iter_count = RTE_MAX(
					tp->dec_ops[i]->turbo_dec.iter_count,
					tp->iter_count);
		rte_bbdev_dec_op_free_bulk(tp->dec_ops, deq);
	} else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC) {
		struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
		ret = validate_enc_op(tp->enc_ops, num_ops, ref_op);
		rte_bbdev_enc_op_free_bulk(tp->enc_ops, deq);
	} else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC) {
		struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
		ret = validate_ldpc_enc_op(tp->enc_ops, num_ops, ref_op);
		rte_bbdev_enc_op_free_bulk(tp->enc_ops, deq);
	} else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC) {
		struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
		ret = validate_ldpc_dec_op(tp->dec_ops, num_ops, ref_op,
				tp->op_params->vector_mask);
		rte_bbdev_dec_op_free_bulk(tp->dec_ops, deq);
	}

	if (ret) {
		printf("Buffers validation failed\n");
		rte_atomic16_set(&tp->processing_status, TEST_FAILED);
	}

	/* TB length in bits is derived from the reference op and is used
	 * to convert ops/s into Mbps below.
	 */
	switch (test_vector.op_type) {
	case RTE_BBDEV_OP_TURBO_DEC:
		tb_len_bits = calc_dec_TB_size(tp->op_params->ref_dec_op);
		break;
	case RTE_BBDEV_OP_TURBO_ENC:
		tb_len_bits = calc_enc_TB_size(tp->op_params->ref_enc_op);
		break;
	case RTE_BBDEV_OP_LDPC_DEC:
		tb_len_bits = calc_ldpc_dec_TB_size(tp->op_params->ref_dec_op);
		break;
	case RTE_BBDEV_OP_LDPC_ENC:
		tb_len_bits = calc_ldpc_enc_TB_size(tp->op_params->ref_enc_op);
		break;
	case RTE_BBDEV_OP_NONE:
		tb_len_bits = 0.0;
		break;
	default:
		printf("Unknown op type: %d\n", test_vector.op_type);
		rte_atomic16_set(&tp->processing_status, TEST_FAILED);
		return;
	}

	/* Accumulate throughput for this completed repetition */
	tp->ops_per_sec += ((double)num_ops) /
			((double)total_time / (double)rte_get_tsc_hz());
	tp->mbps += (((double)(num_ops * tb_len_bits)) / 1000000.0) /
			((double)total_time / (double)rte_get_tsc_hz());

	rte_atomic16_add(&tp->nb_dequeued, deq);
}
2702d30ea906Sjfb8856606 
/* Interrupt-driven LDPC decode throughput worker (one lcore/queue).
 *
 * Allocates num_to_process ops, copies the reference LDPC decode op
 * into them, then enqueues the whole set TEST_REPETITIONS times in
 * bursts. Actual dequeuing, validation and statistics happen in
 * dequeue_event_callback(); this lcore synchronizes with it through
 * the tp->burst_sz and tp->nb_dequeued atomics.
 *
 * NOTE(review): the allocated ops appear to be released by the
 * callback (rte_bbdev_dec_op_free_bulk) rather than here - confirm
 * against dequeue_event_callback.
 */
static int
throughput_intr_lcore_ldpc_dec(void *arg)
{
	struct thread_params *tp = arg;
	unsigned int enqueued;
	const uint16_t queue_id = tp->queue_id;
	const uint16_t burst_sz = tp->op_params->burst_sz;
	const uint16_t num_to_process = tp->op_params->num_to_process;
	struct rte_bbdev_dec_op *ops[num_to_process];
	struct test_buffers *bufs = NULL;
	struct rte_bbdev_info info;
	int ret, i, j;
	struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
	uint16_t num_to_enq, enq;

	bool loopback = check_bit(ref_op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK);
	bool hc_out = check_bit(ref_op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE);

	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
			"BURST_SIZE should be <= %u", MAX_BURST);

	TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id),
			"Failed to enable interrupts for dev: %u, queue_id: %u",
			tp->dev_id, queue_id);

	rte_bbdev_info_get(tp->dev_id, &info);

	TEST_ASSERT_SUCCESS((num_to_process > info.drv.queue_size_lim),
			"NUM_OPS cannot exceed %u for this device",
			info.drv.queue_size_lim);

	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];

	rte_atomic16_clear(&tp->processing_status);
	rte_atomic16_clear(&tp->nb_dequeued);

	/* Wait for the coordinating thread to start all workers at once */
	while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
		rte_pause();

	ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
				num_to_process);
	TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
			num_to_process);
	if (test_vector.op_type != RTE_BBDEV_OP_NONE)
		copy_reference_ldpc_dec_op(ops, num_to_process, 0, bufs->inputs,
				bufs->hard_outputs, bufs->soft_outputs,
				bufs->harq_inputs, bufs->harq_outputs, ref_op);

	/* Set counter to validate the ordering */
	for (j = 0; j < num_to_process; ++j)
		ops[j]->opaque_data = (void *)(uintptr_t)j;

	for (j = 0; j < TEST_REPETITIONS; ++j) {
		/* Reset output mbufs so each repetition starts clean */
		for (i = 0; i < num_to_process; ++i) {
			if (!loopback)
				rte_pktmbuf_reset(
					ops[i]->ldpc_dec.hard_output.data);
			if (hc_out || loopback)
				mbuf_reset(
				ops[i]->ldpc_dec.harq_combined_output.data);
		}

		tp->start_time = rte_rdtsc_precise();
		for (enqueued = 0; enqueued < num_to_process;) {
			num_to_enq = burst_sz;

			if (unlikely(num_to_process - enqueued < num_to_enq))
				num_to_enq = num_to_process - enqueued;

			/* Retry until the full burst is accepted */
			enq = 0;
			do {
				enq += rte_bbdev_enqueue_ldpc_dec_ops(
						tp->dev_id,
						queue_id, &ops[enqueued],
						num_to_enq);
			} while (unlikely(num_to_enq != enq));
			enqueued += enq;

			/* Write to thread burst_sz current number of enqueued
			 * descriptors. It ensures that proper number of
			 * descriptors will be dequeued in callback
			 * function - needed for last batch in case where
			 * the number of operations is not a multiple of
			 * burst size.
			 */
			rte_atomic16_set(&tp->burst_sz, num_to_enq);

			/* Wait until processing of previous batch is
			 * completed
			 */
			while (rte_atomic16_read(&tp->nb_dequeued) !=
					(int16_t) enqueued)
				rte_pause();
		}
		/* Keep the final repetition's count so the callback can
		 * detect the end of the run and report statistics.
		 */
		if (j != TEST_REPETITIONS - 1)
			rte_atomic16_clear(&tp->nb_dequeued);
	}

	return TEST_SUCCESS;
}
2805*2d9fd380Sjfb8856606 
/* Interrupt-driven turbo decode throughput worker (one lcore/queue).
 *
 * Mirrors throughput_intr_lcore_ldpc_dec for the turbo decode API:
 * allocates num_to_process ops, copies the reference decode op into
 * them and enqueues them TEST_REPETITIONS times in bursts, while
 * dequeue_event_callback() performs the dequeue, validation and
 * statistics on interrupt. Synchronization is via the tp->burst_sz
 * and tp->nb_dequeued atomics.
 */
static int
throughput_intr_lcore_dec(void *arg)
{
	struct thread_params *tp = arg;
	unsigned int enqueued;
	const uint16_t queue_id = tp->queue_id;
	const uint16_t burst_sz = tp->op_params->burst_sz;
	const uint16_t num_to_process = tp->op_params->num_to_process;
	struct rte_bbdev_dec_op *ops[num_to_process];
	struct test_buffers *bufs = NULL;
	struct rte_bbdev_info info;
	int ret, i, j;
	uint16_t num_to_enq, enq;

	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
			"BURST_SIZE should be <= %u", MAX_BURST);

	TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id),
			"Failed to enable interrupts for dev: %u, queue_id: %u",
			tp->dev_id, queue_id);

	rte_bbdev_info_get(tp->dev_id, &info);

	TEST_ASSERT_SUCCESS((num_to_process > info.drv.queue_size_lim),
			"NUM_OPS cannot exceed %u for this device",
			info.drv.queue_size_lim);

	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];

	rte_atomic16_clear(&tp->processing_status);
	rte_atomic16_clear(&tp->nb_dequeued);

	/* Wait for the coordinating thread to start all workers at once */
	while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
		rte_pause();

	ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops,
				num_to_process);
	TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
			num_to_process);
	if (test_vector.op_type != RTE_BBDEV_OP_NONE)
		copy_reference_dec_op(ops, num_to_process, 0, bufs->inputs,
				bufs->hard_outputs, bufs->soft_outputs,
				tp->op_params->ref_dec_op);

	/* Set counter to validate the ordering */
	for (j = 0; j < num_to_process; ++j)
		ops[j]->opaque_data = (void *)(uintptr_t)j;

	for (j = 0; j < TEST_REPETITIONS; ++j) {
		/* Reset output mbufs so each repetition starts clean */
		for (i = 0; i < num_to_process; ++i)
			rte_pktmbuf_reset(ops[i]->turbo_dec.hard_output.data);

		tp->start_time = rte_rdtsc_precise();
		for (enqueued = 0; enqueued < num_to_process;) {
			num_to_enq = burst_sz;

			if (unlikely(num_to_process - enqueued < num_to_enq))
				num_to_enq = num_to_process - enqueued;

			/* Retry until the full burst is accepted */
			enq = 0;
			do {
				enq += rte_bbdev_enqueue_dec_ops(tp->dev_id,
						queue_id, &ops[enqueued],
						num_to_enq);
			} while (unlikely(num_to_enq != enq));
			enqueued += enq;

			/* Write to thread burst_sz current number of enqueued
			 * descriptors. It ensures that proper number of
			 * descriptors will be dequeued in callback
			 * function - needed for last batch in case where
			 * the number of operations is not a multiple of
			 * burst size.
			 */
			rte_atomic16_set(&tp->burst_sz, num_to_enq);

			/* Wait until processing of previous batch is
			 * completed
			 */
			while (rte_atomic16_read(&tp->nb_dequeued) !=
					(int16_t) enqueued)
				rte_pause();
		}
		/* Keep the final repetition's count so the callback can
		 * detect the end of the run and report statistics.
		 */
		if (j != TEST_REPETITIONS - 1)
			rte_atomic16_clear(&tp->nb_dequeued);
	}

	return TEST_SUCCESS;
}
2895d30ea906Sjfb8856606 
2896d30ea906Sjfb8856606 static int
throughput_intr_lcore_enc(void * arg)2897d30ea906Sjfb8856606 throughput_intr_lcore_enc(void *arg)
2898d30ea906Sjfb8856606 {
2899d30ea906Sjfb8856606 	struct thread_params *tp = arg;
2900d30ea906Sjfb8856606 	unsigned int enqueued;
2901d30ea906Sjfb8856606 	const uint16_t queue_id = tp->queue_id;
2902d30ea906Sjfb8856606 	const uint16_t burst_sz = tp->op_params->burst_sz;
2903d30ea906Sjfb8856606 	const uint16_t num_to_process = tp->op_params->num_to_process;
29044418919fSjohnjiang 	struct rte_bbdev_enc_op *ops[num_to_process];
2905d30ea906Sjfb8856606 	struct test_buffers *bufs = NULL;
2906d30ea906Sjfb8856606 	struct rte_bbdev_info info;
29074418919fSjohnjiang 	int ret, i, j;
29084418919fSjohnjiang 	uint16_t num_to_enq, enq;
2909d30ea906Sjfb8856606 
2910d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
2911d30ea906Sjfb8856606 			"BURST_SIZE should be <= %u", MAX_BURST);
2912d30ea906Sjfb8856606 
2913d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id),
2914d30ea906Sjfb8856606 			"Failed to enable interrupts for dev: %u, queue_id: %u",
2915d30ea906Sjfb8856606 			tp->dev_id, queue_id);
2916d30ea906Sjfb8856606 
2917d30ea906Sjfb8856606 	rte_bbdev_info_get(tp->dev_id, &info);
29184418919fSjohnjiang 
29194418919fSjohnjiang 	TEST_ASSERT_SUCCESS((num_to_process > info.drv.queue_size_lim),
29204418919fSjohnjiang 			"NUM_OPS cannot exceed %u for this device",
29214418919fSjohnjiang 			info.drv.queue_size_lim);
29224418919fSjohnjiang 
2923d30ea906Sjfb8856606 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
2924d30ea906Sjfb8856606 
2925d30ea906Sjfb8856606 	rte_atomic16_clear(&tp->processing_status);
2926d30ea906Sjfb8856606 	rte_atomic16_clear(&tp->nb_dequeued);
2927d30ea906Sjfb8856606 
2928d30ea906Sjfb8856606 	while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
2929d30ea906Sjfb8856606 		rte_pause();
2930d30ea906Sjfb8856606 
29314418919fSjohnjiang 	ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
29324418919fSjohnjiang 			num_to_process);
29334418919fSjohnjiang 	TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
29344418919fSjohnjiang 			num_to_process);
29354418919fSjohnjiang 	if (test_vector.op_type != RTE_BBDEV_OP_NONE)
29364418919fSjohnjiang 		copy_reference_enc_op(ops, num_to_process, 0, bufs->inputs,
29374418919fSjohnjiang 				bufs->hard_outputs, tp->op_params->ref_enc_op);
29384418919fSjohnjiang 
29394418919fSjohnjiang 	/* Set counter to validate the ordering */
29404418919fSjohnjiang 	for (j = 0; j < num_to_process; ++j)
29414418919fSjohnjiang 		ops[j]->opaque_data = (void *)(uintptr_t)j;
29424418919fSjohnjiang 
29434418919fSjohnjiang 	for (j = 0; j < TEST_REPETITIONS; ++j) {
29444418919fSjohnjiang 		for (i = 0; i < num_to_process; ++i)
29454418919fSjohnjiang 			rte_pktmbuf_reset(ops[i]->turbo_enc.output.data);
29464418919fSjohnjiang 
2947d30ea906Sjfb8856606 		tp->start_time = rte_rdtsc_precise();
2948d30ea906Sjfb8856606 		for (enqueued = 0; enqueued < num_to_process;) {
29494418919fSjohnjiang 			num_to_enq = burst_sz;
2950d30ea906Sjfb8856606 
2951d30ea906Sjfb8856606 			if (unlikely(num_to_process - enqueued < num_to_enq))
2952d30ea906Sjfb8856606 				num_to_enq = num_to_process - enqueued;
2953d30ea906Sjfb8856606 
29544418919fSjohnjiang 			enq = 0;
29554418919fSjohnjiang 			do {
29564418919fSjohnjiang 				enq += rte_bbdev_enqueue_enc_ops(tp->dev_id,
29574418919fSjohnjiang 						queue_id, &ops[enqueued],
2958d30ea906Sjfb8856606 						num_to_enq);
29594418919fSjohnjiang 			} while (unlikely(enq != num_to_enq));
29604418919fSjohnjiang 			enqueued += enq;
29614418919fSjohnjiang 
29624418919fSjohnjiang 			/* Write to thread burst_sz current number of enqueued
29634418919fSjohnjiang 			 * descriptors. It ensures that proper number of
29644418919fSjohnjiang 			 * descriptors will be dequeued in callback
29654418919fSjohnjiang 			 * function - needed for last batch in case where
29664418919fSjohnjiang 			 * the number of operations is not a multiple of
29674418919fSjohnjiang 			 * burst size.
29684418919fSjohnjiang 			 */
29694418919fSjohnjiang 			rte_atomic16_set(&tp->burst_sz, num_to_enq);
29704418919fSjohnjiang 
29714418919fSjohnjiang 			/* Wait until processing of previous batch is
29724418919fSjohnjiang 			 * completed
29734418919fSjohnjiang 			 */
29744418919fSjohnjiang 			while (rte_atomic16_read(&tp->nb_dequeued) !=
29754418919fSjohnjiang 					(int16_t) enqueued)
29764418919fSjohnjiang 				rte_pause();
2977d30ea906Sjfb8856606 		}
29784418919fSjohnjiang 		if (j != TEST_REPETITIONS - 1)
29794418919fSjohnjiang 			rte_atomic16_clear(&tp->nb_dequeued);
2980d30ea906Sjfb8856606 	}
2981d30ea906Sjfb8856606 
2982d30ea906Sjfb8856606 	return TEST_SUCCESS;
2983d30ea906Sjfb8856606 }
2984d30ea906Sjfb8856606 
2985*2d9fd380Sjfb8856606 
2986*2d9fd380Sjfb8856606 static int
throughput_intr_lcore_ldpc_enc(void * arg)2987*2d9fd380Sjfb8856606 throughput_intr_lcore_ldpc_enc(void *arg)
2988*2d9fd380Sjfb8856606 {
2989*2d9fd380Sjfb8856606 	struct thread_params *tp = arg;
2990*2d9fd380Sjfb8856606 	unsigned int enqueued;
2991*2d9fd380Sjfb8856606 	const uint16_t queue_id = tp->queue_id;
2992*2d9fd380Sjfb8856606 	const uint16_t burst_sz = tp->op_params->burst_sz;
2993*2d9fd380Sjfb8856606 	const uint16_t num_to_process = tp->op_params->num_to_process;
2994*2d9fd380Sjfb8856606 	struct rte_bbdev_enc_op *ops[num_to_process];
2995*2d9fd380Sjfb8856606 	struct test_buffers *bufs = NULL;
2996*2d9fd380Sjfb8856606 	struct rte_bbdev_info info;
2997*2d9fd380Sjfb8856606 	int ret, i, j;
2998*2d9fd380Sjfb8856606 	uint16_t num_to_enq, enq;
2999*2d9fd380Sjfb8856606 
3000*2d9fd380Sjfb8856606 	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
3001*2d9fd380Sjfb8856606 			"BURST_SIZE should be <= %u", MAX_BURST);
3002*2d9fd380Sjfb8856606 
3003*2d9fd380Sjfb8856606 	TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id),
3004*2d9fd380Sjfb8856606 			"Failed to enable interrupts for dev: %u, queue_id: %u",
3005*2d9fd380Sjfb8856606 			tp->dev_id, queue_id);
3006*2d9fd380Sjfb8856606 
3007*2d9fd380Sjfb8856606 	rte_bbdev_info_get(tp->dev_id, &info);
3008*2d9fd380Sjfb8856606 
3009*2d9fd380Sjfb8856606 	TEST_ASSERT_SUCCESS((num_to_process > info.drv.queue_size_lim),
3010*2d9fd380Sjfb8856606 			"NUM_OPS cannot exceed %u for this device",
3011*2d9fd380Sjfb8856606 			info.drv.queue_size_lim);
3012*2d9fd380Sjfb8856606 
3013*2d9fd380Sjfb8856606 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
3014*2d9fd380Sjfb8856606 
3015*2d9fd380Sjfb8856606 	rte_atomic16_clear(&tp->processing_status);
3016*2d9fd380Sjfb8856606 	rte_atomic16_clear(&tp->nb_dequeued);
3017*2d9fd380Sjfb8856606 
3018*2d9fd380Sjfb8856606 	while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
3019*2d9fd380Sjfb8856606 		rte_pause();
3020*2d9fd380Sjfb8856606 
3021*2d9fd380Sjfb8856606 	ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops,
3022*2d9fd380Sjfb8856606 			num_to_process);
3023*2d9fd380Sjfb8856606 	TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
3024*2d9fd380Sjfb8856606 			num_to_process);
3025*2d9fd380Sjfb8856606 	if (test_vector.op_type != RTE_BBDEV_OP_NONE)
3026*2d9fd380Sjfb8856606 		copy_reference_ldpc_enc_op(ops, num_to_process, 0,
3027*2d9fd380Sjfb8856606 				bufs->inputs, bufs->hard_outputs,
3028*2d9fd380Sjfb8856606 				tp->op_params->ref_enc_op);
3029*2d9fd380Sjfb8856606 
3030*2d9fd380Sjfb8856606 	/* Set counter to validate the ordering */
3031*2d9fd380Sjfb8856606 	for (j = 0; j < num_to_process; ++j)
3032*2d9fd380Sjfb8856606 		ops[j]->opaque_data = (void *)(uintptr_t)j;
3033*2d9fd380Sjfb8856606 
3034*2d9fd380Sjfb8856606 	for (j = 0; j < TEST_REPETITIONS; ++j) {
3035*2d9fd380Sjfb8856606 		for (i = 0; i < num_to_process; ++i)
3036*2d9fd380Sjfb8856606 			rte_pktmbuf_reset(ops[i]->turbo_enc.output.data);
3037*2d9fd380Sjfb8856606 
3038*2d9fd380Sjfb8856606 		tp->start_time = rte_rdtsc_precise();
3039*2d9fd380Sjfb8856606 		for (enqueued = 0; enqueued < num_to_process;) {
3040*2d9fd380Sjfb8856606 			num_to_enq = burst_sz;
3041*2d9fd380Sjfb8856606 
3042*2d9fd380Sjfb8856606 			if (unlikely(num_to_process - enqueued < num_to_enq))
3043*2d9fd380Sjfb8856606 				num_to_enq = num_to_process - enqueued;
3044*2d9fd380Sjfb8856606 
3045*2d9fd380Sjfb8856606 			enq = 0;
3046*2d9fd380Sjfb8856606 			do {
3047*2d9fd380Sjfb8856606 				enq += rte_bbdev_enqueue_ldpc_enc_ops(
3048*2d9fd380Sjfb8856606 						tp->dev_id,
3049*2d9fd380Sjfb8856606 						queue_id, &ops[enqueued],
3050*2d9fd380Sjfb8856606 						num_to_enq);
3051*2d9fd380Sjfb8856606 			} while (unlikely(enq != num_to_enq));
3052*2d9fd380Sjfb8856606 			enqueued += enq;
3053*2d9fd380Sjfb8856606 
3054*2d9fd380Sjfb8856606 			/* Write to thread burst_sz current number of enqueued
3055*2d9fd380Sjfb8856606 			 * descriptors. It ensures that proper number of
3056*2d9fd380Sjfb8856606 			 * descriptors will be dequeued in callback
3057*2d9fd380Sjfb8856606 			 * function - needed for last batch in case where
3058*2d9fd380Sjfb8856606 			 * the number of operations is not a multiple of
3059*2d9fd380Sjfb8856606 			 * burst size.
3060*2d9fd380Sjfb8856606 			 */
3061*2d9fd380Sjfb8856606 			rte_atomic16_set(&tp->burst_sz, num_to_enq);
3062*2d9fd380Sjfb8856606 
3063*2d9fd380Sjfb8856606 			/* Wait until processing of previous batch is
3064*2d9fd380Sjfb8856606 			 * completed
3065*2d9fd380Sjfb8856606 			 */
3066*2d9fd380Sjfb8856606 			while (rte_atomic16_read(&tp->nb_dequeued) !=
3067*2d9fd380Sjfb8856606 					(int16_t) enqueued)
3068*2d9fd380Sjfb8856606 				rte_pause();
3069*2d9fd380Sjfb8856606 		}
3070*2d9fd380Sjfb8856606 		if (j != TEST_REPETITIONS - 1)
3071*2d9fd380Sjfb8856606 			rte_atomic16_clear(&tp->nb_dequeued);
3072*2d9fd380Sjfb8856606 	}
3073*2d9fd380Sjfb8856606 
3074*2d9fd380Sjfb8856606 	return TEST_SUCCESS;
3075*2d9fd380Sjfb8856606 }
3076*2d9fd380Sjfb8856606 
3077d30ea906Sjfb8856606 static int
throughput_pmd_lcore_dec(void * arg)3078d30ea906Sjfb8856606 throughput_pmd_lcore_dec(void *arg)
3079d30ea906Sjfb8856606 {
3080d30ea906Sjfb8856606 	struct thread_params *tp = arg;
30814418919fSjohnjiang 	uint16_t enq, deq;
30824418919fSjohnjiang 	uint64_t total_time = 0, start_time;
3083d30ea906Sjfb8856606 	const uint16_t queue_id = tp->queue_id;
3084d30ea906Sjfb8856606 	const uint16_t burst_sz = tp->op_params->burst_sz;
30854418919fSjohnjiang 	const uint16_t num_ops = tp->op_params->num_to_process;
30864418919fSjohnjiang 	struct rte_bbdev_dec_op *ops_enq[num_ops];
30874418919fSjohnjiang 	struct rte_bbdev_dec_op *ops_deq[num_ops];
3088d30ea906Sjfb8856606 	struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
3089d30ea906Sjfb8856606 	struct test_buffers *bufs = NULL;
30904418919fSjohnjiang 	int i, j, ret;
3091d30ea906Sjfb8856606 	struct rte_bbdev_info info;
30924418919fSjohnjiang 	uint16_t num_to_enq;
3093d30ea906Sjfb8856606 
3094d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
3095d30ea906Sjfb8856606 			"BURST_SIZE should be <= %u", MAX_BURST);
3096d30ea906Sjfb8856606 
3097d30ea906Sjfb8856606 	rte_bbdev_info_get(tp->dev_id, &info);
30984418919fSjohnjiang 
30994418919fSjohnjiang 	TEST_ASSERT_SUCCESS((num_ops > info.drv.queue_size_lim),
31004418919fSjohnjiang 			"NUM_OPS cannot exceed %u for this device",
31014418919fSjohnjiang 			info.drv.queue_size_lim);
31024418919fSjohnjiang 
3103d30ea906Sjfb8856606 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
3104d30ea906Sjfb8856606 
3105d30ea906Sjfb8856606 	while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
3106d30ea906Sjfb8856606 		rte_pause();
3107d30ea906Sjfb8856606 
31084418919fSjohnjiang 	ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
31094418919fSjohnjiang 	TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
3110d30ea906Sjfb8856606 
3111d30ea906Sjfb8856606 	if (test_vector.op_type != RTE_BBDEV_OP_NONE)
31124418919fSjohnjiang 		copy_reference_dec_op(ops_enq, num_ops, 0, bufs->inputs,
31134418919fSjohnjiang 				bufs->hard_outputs, bufs->soft_outputs, ref_op);
3114d30ea906Sjfb8856606 
31154418919fSjohnjiang 	/* Set counter to validate the ordering */
31164418919fSjohnjiang 	for (j = 0; j < num_ops; ++j)
31174418919fSjohnjiang 		ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
31184418919fSjohnjiang 
31194418919fSjohnjiang 	for (i = 0; i < TEST_REPETITIONS; ++i) {
31204418919fSjohnjiang 
31214418919fSjohnjiang 		for (j = 0; j < num_ops; ++j)
31224418919fSjohnjiang 			mbuf_reset(ops_enq[j]->turbo_dec.hard_output.data);
31234418919fSjohnjiang 
31244418919fSjohnjiang 		start_time = rte_rdtsc_precise();
31254418919fSjohnjiang 
31264418919fSjohnjiang 		for (enq = 0, deq = 0; enq < num_ops;) {
31274418919fSjohnjiang 			num_to_enq = burst_sz;
31284418919fSjohnjiang 
31294418919fSjohnjiang 			if (unlikely(num_ops - enq < num_to_enq))
31304418919fSjohnjiang 				num_to_enq = num_ops - enq;
31314418919fSjohnjiang 
31324418919fSjohnjiang 			enq += rte_bbdev_enqueue_dec_ops(tp->dev_id,
31334418919fSjohnjiang 					queue_id, &ops_enq[enq], num_to_enq);
31344418919fSjohnjiang 
31354418919fSjohnjiang 			deq += rte_bbdev_dequeue_dec_ops(tp->dev_id,
31364418919fSjohnjiang 					queue_id, &ops_deq[deq], enq - deq);
3137d30ea906Sjfb8856606 		}
31384418919fSjohnjiang 
31394418919fSjohnjiang 		/* dequeue the remaining */
31404418919fSjohnjiang 		while (deq < enq) {
31414418919fSjohnjiang 			deq += rte_bbdev_dequeue_dec_ops(tp->dev_id,
31424418919fSjohnjiang 					queue_id, &ops_deq[deq], enq - deq);
3143d30ea906Sjfb8856606 		}
3144d30ea906Sjfb8856606 
31454418919fSjohnjiang 		total_time += rte_rdtsc_precise() - start_time;
31464418919fSjohnjiang 	}
3147d30ea906Sjfb8856606 
31484418919fSjohnjiang 	tp->iter_count = 0;
31494418919fSjohnjiang 	/* get the max of iter_count for all dequeued ops */
31504418919fSjohnjiang 	for (i = 0; i < num_ops; ++i) {
31514418919fSjohnjiang 		tp->iter_count = RTE_MAX(ops_enq[i]->turbo_dec.iter_count,
31524418919fSjohnjiang 				tp->iter_count);
31534418919fSjohnjiang 	}
3154d30ea906Sjfb8856606 
3155d30ea906Sjfb8856606 	if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
31564418919fSjohnjiang 		ret = validate_dec_op(ops_deq, num_ops, ref_op,
31574418919fSjohnjiang 				tp->op_params->vector_mask);
31584418919fSjohnjiang 		TEST_ASSERT_SUCCESS(ret, "Validation failed!");
3159d30ea906Sjfb8856606 	}
3160d30ea906Sjfb8856606 
31614418919fSjohnjiang 	rte_bbdev_dec_op_free_bulk(ops_enq, num_ops);
31624418919fSjohnjiang 
31634418919fSjohnjiang 	double tb_len_bits = calc_dec_TB_size(ref_op);
31644418919fSjohnjiang 
31654418919fSjohnjiang 	tp->ops_per_sec = ((double)num_ops * TEST_REPETITIONS) /
3166d30ea906Sjfb8856606 			((double)total_time / (double)rte_get_tsc_hz());
31674418919fSjohnjiang 	tp->mbps = (((double)(num_ops * TEST_REPETITIONS * tb_len_bits)) /
31684418919fSjohnjiang 			1000000.0) / ((double)total_time /
31694418919fSjohnjiang 			(double)rte_get_tsc_hz());
31704418919fSjohnjiang 
31714418919fSjohnjiang 	return TEST_SUCCESS;
31724418919fSjohnjiang }
31734418919fSjohnjiang 
/* BLER (Block Error Rate) measurement worker for LDPC decode on one
 * device queue. Runs the op set through the decoder once, then reports
 * into thread_params: the max and average iteration count, the block
 * error rate (both a syndrome-based and a payload-validation-based
 * figure) and the resulting throughput numbers. Returns TEST_SUCCESS
 * or fails via TEST_ASSERT_*.
 */
static int
bler_pmd_lcore_ldpc_dec(void *arg)
{
	struct thread_params *tp = arg;
	uint16_t enq, deq;
	uint64_t total_time = 0, start_time;
	const uint16_t queue_id = tp->queue_id;
	const uint16_t burst_sz = tp->op_params->burst_sz;
	const uint16_t num_ops = tp->op_params->num_to_process;
	struct rte_bbdev_dec_op *ops_enq[num_ops];
	struct rte_bbdev_dec_op *ops_deq[num_ops];
	struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
	struct test_buffers *bufs = NULL;
	int i, j, ret;
	float parity_bler = 0;
	struct rte_bbdev_info info;
	uint16_t num_to_enq;
	/* Capability/vector flags that steer the buffer handling below */
	bool extDdr = check_bit(ldpc_cap_flags,
			RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE);
	bool loopback = check_bit(ref_op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK);
	bool hc_out = check_bit(ref_op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE);

	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
			"BURST_SIZE should be <= %u", MAX_BURST);

	rte_bbdev_info_get(tp->dev_id, &info);

	TEST_ASSERT_SUCCESS((num_ops > info.drv.queue_size_lim),
			"NUM_OPS cannot exceed %u for this device",
			info.drv.queue_size_lim);

	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];

	/* Wait for the main lcore to start all workers simultaneously */
	while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
		rte_pause();

	ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
	TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);

	/* For BLER tests we need to enable early termination */
	if (!check_bit(ref_op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE))
		/* Guarded by the check_bit above, so '+=' acts as a
		 * bit-set ('|=') here.
		 */
		ref_op->ldpc_dec.op_flags +=
				RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE;
	ref_op->ldpc_dec.iter_max = get_iter_max();
	ref_op->ldpc_dec.iter_count = ref_op->ldpc_dec.iter_max;

	if (test_vector.op_type != RTE_BBDEV_OP_NONE)
		copy_reference_ldpc_dec_op(ops_enq, num_ops, 0, bufs->inputs,
				bufs->hard_outputs, bufs->soft_outputs,
				bufs->harq_inputs, bufs->harq_outputs, ref_op);
	/* NOTE(review): presumably perturbs the input LLRs for the BLER
	 * sweep - behavior defined elsewhere in this file; confirm there.
	 */
	generate_llr_input(num_ops, bufs->inputs, ref_op);

	/* Set counter to validate the ordering */
	for (j = 0; j < num_ops; ++j)
		ops_enq[j]->opaque_data = (void *)(uintptr_t)j;

	for (i = 0; i < 1; ++i) { /* Could add more iterations */
		for (j = 0; j < num_ops; ++j) {
			/* In loopback mode the hard output carries data */
			if (!loopback)
				mbuf_reset(
				ops_enq[j]->ldpc_dec.hard_output.data);
			if (hc_out || loopback)
				mbuf_reset(
				ops_enq[j]->ldpc_dec.harq_combined_output.data);
		}
		/* Stage HARQ data in device DDR before timing starts */
		if (extDdr)
			preload_harq_ddr(tp->dev_id, queue_id, ops_enq,
					num_ops, true);
		start_time = rte_rdtsc_precise();

		/* Enqueue in bursts, dequeueing completions as we go */
		for (enq = 0, deq = 0; enq < num_ops;) {
			num_to_enq = burst_sz;

			if (unlikely(num_ops - enq < num_to_enq))
				num_to_enq = num_ops - enq;

			enq += rte_bbdev_enqueue_ldpc_dec_ops(tp->dev_id,
					queue_id, &ops_enq[enq], num_to_enq);

			deq += rte_bbdev_dequeue_ldpc_dec_ops(tp->dev_id,
					queue_id, &ops_deq[deq], enq - deq);
		}

		/* dequeue the remaining */
		while (deq < enq) {
			deq += rte_bbdev_dequeue_ldpc_dec_ops(tp->dev_id,
					queue_id, &ops_deq[deq], enq - deq);
		}

		total_time += rte_rdtsc_precise() - start_time;
	}

	tp->iter_count = 0;
	tp->iter_average = 0;
	/* get the max of iter_count for all dequeued ops */
	for (i = 0; i < num_ops; ++i) {
		tp->iter_count = RTE_MAX(ops_enq[i]->ldpc_dec.iter_count,
				tp->iter_count);
		tp->iter_average += (double) ops_enq[i]->ldpc_dec.iter_count;
		/* Count blocks the device flagged with a syndrome error */
		if (ops_enq[i]->status & (1 << RTE_BBDEV_SYNDROME_ERROR))
			parity_bler += 1.0;
	}

	parity_bler /= num_ops; /* This one is based on SYND */
	tp->iter_average /= num_ops;
	/* BLER from payload comparison against the reference output */
	tp->bler = (double) validate_ldpc_bler(ops_deq, num_ops) / num_ops;

	/* Only fully validate when everything decoded cleanly and the
	 * HARQ combined output does not make the comparison ambiguous.
	 */
	if (test_vector.op_type != RTE_BBDEV_OP_NONE
			&& tp->bler == 0
			&& parity_bler == 0
			&& !hc_out) {
		ret = validate_ldpc_dec_op(ops_deq, num_ops, ref_op,
				tp->op_params->vector_mask);
		TEST_ASSERT_SUCCESS(ret, "Validation failed!");
	}

	rte_bbdev_dec_op_free_bulk(ops_enq, num_ops);

	/* Single repetition, hence the '* 1' factors below */
	double tb_len_bits = calc_ldpc_dec_TB_size(ref_op);
	tp->ops_per_sec = ((double)num_ops * 1) /
			((double)total_time / (double)rte_get_tsc_hz());
	tp->mbps = (((double)(num_ops * 1 * tb_len_bits)) /
			1000000.0) / ((double)total_time /
			(double)rte_get_tsc_hz());

	return TEST_SUCCESS;
}
3304*2d9fd380Sjfb8856606 
/* Polling-mode throughput worker for LDPC decode.
 *
 * Same enqueue/dequeue lock-step scheme as throughput_pmd_lcore_dec,
 * plus LDPC specifics: early termination is forced off so every op
 * runs iter_max iterations, and HARQ data is preloaded to / retrieved
 * from external DDR when the device exposes internal HARQ memory.
 * Publishes max iteration count, ops/s and Mbps into thread_params.
 * Returns TEST_SUCCESS or fails via TEST_ASSERT_*.
 */
static int
throughput_pmd_lcore_ldpc_dec(void *arg)
{
	struct thread_params *tp = arg;
	uint16_t enq, deq;
	uint64_t total_time = 0, start_time;
	const uint16_t queue_id = tp->queue_id;
	const uint16_t burst_sz = tp->op_params->burst_sz;
	const uint16_t num_ops = tp->op_params->num_to_process;
	struct rte_bbdev_dec_op *ops_enq[num_ops];
	struct rte_bbdev_dec_op *ops_deq[num_ops];
	struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
	struct test_buffers *bufs = NULL;
	int i, j, ret;
	struct rte_bbdev_info info;
	uint16_t num_to_enq;
	/* Capability/vector flags that steer the buffer handling below */
	bool extDdr = check_bit(ldpc_cap_flags,
			RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE);
	bool loopback = check_bit(ref_op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK);
	bool hc_out = check_bit(ref_op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE);

	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
			"BURST_SIZE should be <= %u", MAX_BURST);

	rte_bbdev_info_get(tp->dev_id, &info);

	TEST_ASSERT_SUCCESS((num_ops > info.drv.queue_size_lim),
			"NUM_OPS cannot exceed %u for this device",
			info.drv.queue_size_lim);

	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];

	/* Wait for the main lcore to start all workers simultaneously */
	while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
		rte_pause();

	ret = rte_bbdev_dec_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
	TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);

	/* For throughput tests we need to disable early termination */
	if (check_bit(ref_op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE))
		/* Guarded by the check_bit above, so '-=' acts as a
		 * bit-clear here.
		 */
		ref_op->ldpc_dec.op_flags -=
				RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE;
	ref_op->ldpc_dec.iter_max = get_iter_max();
	ref_op->ldpc_dec.iter_count = ref_op->ldpc_dec.iter_max;

	if (test_vector.op_type != RTE_BBDEV_OP_NONE)
		copy_reference_ldpc_dec_op(ops_enq, num_ops, 0, bufs->inputs,
				bufs->hard_outputs, bufs->soft_outputs,
				bufs->harq_inputs, bufs->harq_outputs, ref_op);

	/* Set counter to validate the ordering */
	for (j = 0; j < num_ops; ++j)
		ops_enq[j]->opaque_data = (void *)(uintptr_t)j;

	for (i = 0; i < TEST_REPETITIONS; ++i) {
		for (j = 0; j < num_ops; ++j) {
			/* In loopback mode the hard output carries data */
			if (!loopback)
				mbuf_reset(
				ops_enq[j]->ldpc_dec.hard_output.data);
			if (hc_out || loopback)
				mbuf_reset(
				ops_enq[j]->ldpc_dec.harq_combined_output.data);
		}
		/* Stage HARQ data in device DDR before timing starts */
		if (extDdr)
			preload_harq_ddr(tp->dev_id, queue_id, ops_enq,
					num_ops, true);
		start_time = rte_rdtsc_precise();

		/* Enqueue in bursts, dequeueing completions as we go */
		for (enq = 0, deq = 0; enq < num_ops;) {
			num_to_enq = burst_sz;

			if (unlikely(num_ops - enq < num_to_enq))
				num_to_enq = num_ops - enq;

			enq += rte_bbdev_enqueue_ldpc_dec_ops(tp->dev_id,
					queue_id, &ops_enq[enq], num_to_enq);

			deq += rte_bbdev_dequeue_ldpc_dec_ops(tp->dev_id,
					queue_id, &ops_deq[deq], enq - deq);
		}

		/* dequeue the remaining */
		while (deq < enq) {
			deq += rte_bbdev_dequeue_ldpc_dec_ops(tp->dev_id,
					queue_id, &ops_deq[deq], enq - deq);
		}

		total_time += rte_rdtsc_precise() - start_time;
	}

	tp->iter_count = 0;
	/* get the max of iter_count for all dequeued ops */
	for (i = 0; i < num_ops; ++i) {
		tp->iter_count = RTE_MAX(ops_enq[i]->ldpc_dec.iter_count,
				tp->iter_count);
	}
	if (extDdr) {
		/* Read loopback is not thread safe */
		retrieve_harq_ddr(tp->dev_id, queue_id, ops_enq, num_ops);
	}

	if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
		ret = validate_ldpc_dec_op(ops_deq, num_ops, ref_op,
				tp->op_params->vector_mask);
		TEST_ASSERT_SUCCESS(ret, "Validation failed!");
	}

	rte_bbdev_dec_op_free_bulk(ops_enq, num_ops);

	double tb_len_bits = calc_ldpc_dec_TB_size(ref_op);

	tp->ops_per_sec = ((double)num_ops * TEST_REPETITIONS) /
			((double)total_time / (double)rte_get_tsc_hz());
	tp->mbps = (((double)(num_ops * TEST_REPETITIONS * tb_len_bits)) /
			1000000.0) / ((double)total_time /
			(double)rte_get_tsc_hz());

	return TEST_SUCCESS;
}
3427d30ea906Sjfb8856606 
3428d30ea906Sjfb8856606 static int
throughput_pmd_lcore_enc(void * arg)3429d30ea906Sjfb8856606 throughput_pmd_lcore_enc(void *arg)
3430d30ea906Sjfb8856606 {
3431d30ea906Sjfb8856606 	struct thread_params *tp = arg;
34324418919fSjohnjiang 	uint16_t enq, deq;
34334418919fSjohnjiang 	uint64_t total_time = 0, start_time;
3434d30ea906Sjfb8856606 	const uint16_t queue_id = tp->queue_id;
3435d30ea906Sjfb8856606 	const uint16_t burst_sz = tp->op_params->burst_sz;
34364418919fSjohnjiang 	const uint16_t num_ops = tp->op_params->num_to_process;
34374418919fSjohnjiang 	struct rte_bbdev_enc_op *ops_enq[num_ops];
34384418919fSjohnjiang 	struct rte_bbdev_enc_op *ops_deq[num_ops];
3439d30ea906Sjfb8856606 	struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
3440d30ea906Sjfb8856606 	struct test_buffers *bufs = NULL;
34414418919fSjohnjiang 	int i, j, ret;
3442d30ea906Sjfb8856606 	struct rte_bbdev_info info;
34434418919fSjohnjiang 	uint16_t num_to_enq;
3444d30ea906Sjfb8856606 
3445d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
3446d30ea906Sjfb8856606 			"BURST_SIZE should be <= %u", MAX_BURST);
3447d30ea906Sjfb8856606 
3448d30ea906Sjfb8856606 	rte_bbdev_info_get(tp->dev_id, &info);
34494418919fSjohnjiang 
34504418919fSjohnjiang 	TEST_ASSERT_SUCCESS((num_ops > info.drv.queue_size_lim),
34514418919fSjohnjiang 			"NUM_OPS cannot exceed %u for this device",
34524418919fSjohnjiang 			info.drv.queue_size_lim);
34534418919fSjohnjiang 
3454d30ea906Sjfb8856606 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
3455d30ea906Sjfb8856606 
3456d30ea906Sjfb8856606 	while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
3457d30ea906Sjfb8856606 		rte_pause();
3458d30ea906Sjfb8856606 
34594418919fSjohnjiang 	ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
34604418919fSjohnjiang 			num_ops);
34614418919fSjohnjiang 	TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
34624418919fSjohnjiang 			num_ops);
34634418919fSjohnjiang 	if (test_vector.op_type != RTE_BBDEV_OP_NONE)
34644418919fSjohnjiang 		copy_reference_enc_op(ops_enq, num_ops, 0, bufs->inputs,
34654418919fSjohnjiang 				bufs->hard_outputs, ref_op);
3466d30ea906Sjfb8856606 
34674418919fSjohnjiang 	/* Set counter to validate the ordering */
34684418919fSjohnjiang 	for (j = 0; j < num_ops; ++j)
34694418919fSjohnjiang 		ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
3470d30ea906Sjfb8856606 
34714418919fSjohnjiang 	for (i = 0; i < TEST_REPETITIONS; ++i) {
3472d30ea906Sjfb8856606 
3473d30ea906Sjfb8856606 		if (test_vector.op_type != RTE_BBDEV_OP_NONE)
34744418919fSjohnjiang 			for (j = 0; j < num_ops; ++j)
34754418919fSjohnjiang 				mbuf_reset(ops_enq[j]->turbo_enc.output.data);
3476d30ea906Sjfb8856606 
34774418919fSjohnjiang 		start_time = rte_rdtsc_precise();
34784418919fSjohnjiang 
34794418919fSjohnjiang 		for (enq = 0, deq = 0; enq < num_ops;) {
34804418919fSjohnjiang 			num_to_enq = burst_sz;
34814418919fSjohnjiang 
34824418919fSjohnjiang 			if (unlikely(num_ops - enq < num_to_enq))
34834418919fSjohnjiang 				num_to_enq = num_ops - enq;
34844418919fSjohnjiang 
34854418919fSjohnjiang 			enq += rte_bbdev_enqueue_enc_ops(tp->dev_id,
34864418919fSjohnjiang 					queue_id, &ops_enq[enq], num_to_enq);
34874418919fSjohnjiang 
34884418919fSjohnjiang 			deq += rte_bbdev_dequeue_enc_ops(tp->dev_id,
34894418919fSjohnjiang 					queue_id, &ops_deq[deq], enq - deq);
3490d30ea906Sjfb8856606 		}
34914418919fSjohnjiang 
34924418919fSjohnjiang 		/* dequeue the remaining */
34934418919fSjohnjiang 		while (deq < enq) {
34944418919fSjohnjiang 			deq += rte_bbdev_dequeue_enc_ops(tp->dev_id,
34954418919fSjohnjiang 					queue_id, &ops_deq[deq], enq - deq);
3496d30ea906Sjfb8856606 		}
3497d30ea906Sjfb8856606 
34984418919fSjohnjiang 		total_time += rte_rdtsc_precise() - start_time;
34994418919fSjohnjiang 	}
3500d30ea906Sjfb8856606 
3501d30ea906Sjfb8856606 	if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
35024418919fSjohnjiang 		ret = validate_enc_op(ops_deq, num_ops, ref_op);
35034418919fSjohnjiang 		TEST_ASSERT_SUCCESS(ret, "Validation failed!");
3504d30ea906Sjfb8856606 	}
3505d30ea906Sjfb8856606 
35064418919fSjohnjiang 	rte_bbdev_enc_op_free_bulk(ops_enq, num_ops);
3507d30ea906Sjfb8856606 
35084418919fSjohnjiang 	double tb_len_bits = calc_enc_TB_size(ref_op);
35094418919fSjohnjiang 
35104418919fSjohnjiang 	tp->ops_per_sec = ((double)num_ops * TEST_REPETITIONS) /
3511d30ea906Sjfb8856606 			((double)total_time / (double)rte_get_tsc_hz());
35124418919fSjohnjiang 	tp->mbps = (((double)(num_ops * TEST_REPETITIONS * tb_len_bits))
35134418919fSjohnjiang 			/ 1000000.0) / ((double)total_time /
35144418919fSjohnjiang 			(double)rte_get_tsc_hz());
3515d30ea906Sjfb8856606 
3516d30ea906Sjfb8856606 	return TEST_SUCCESS;
3517d30ea906Sjfb8856606 }
35184418919fSjohnjiang 
35194418919fSjohnjiang static int
throughput_pmd_lcore_ldpc_enc(void * arg)35204418919fSjohnjiang throughput_pmd_lcore_ldpc_enc(void *arg)
3521d30ea906Sjfb8856606 {
35224418919fSjohnjiang 	struct thread_params *tp = arg;
35234418919fSjohnjiang 	uint16_t enq, deq;
35244418919fSjohnjiang 	uint64_t total_time = 0, start_time;
35254418919fSjohnjiang 	const uint16_t queue_id = tp->queue_id;
35264418919fSjohnjiang 	const uint16_t burst_sz = tp->op_params->burst_sz;
35274418919fSjohnjiang 	const uint16_t num_ops = tp->op_params->num_to_process;
35284418919fSjohnjiang 	struct rte_bbdev_enc_op *ops_enq[num_ops];
35294418919fSjohnjiang 	struct rte_bbdev_enc_op *ops_deq[num_ops];
35304418919fSjohnjiang 	struct rte_bbdev_enc_op *ref_op = tp->op_params->ref_enc_op;
35314418919fSjohnjiang 	struct test_buffers *bufs = NULL;
35324418919fSjohnjiang 	int i, j, ret;
35334418919fSjohnjiang 	struct rte_bbdev_info info;
35344418919fSjohnjiang 	uint16_t num_to_enq;
35354418919fSjohnjiang 
35364418919fSjohnjiang 	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
35374418919fSjohnjiang 			"BURST_SIZE should be <= %u", MAX_BURST);
35384418919fSjohnjiang 
35394418919fSjohnjiang 	rte_bbdev_info_get(tp->dev_id, &info);
35404418919fSjohnjiang 
35414418919fSjohnjiang 	TEST_ASSERT_SUCCESS((num_ops > info.drv.queue_size_lim),
35424418919fSjohnjiang 			"NUM_OPS cannot exceed %u for this device",
35434418919fSjohnjiang 			info.drv.queue_size_lim);
35444418919fSjohnjiang 
35454418919fSjohnjiang 	bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
35464418919fSjohnjiang 
35474418919fSjohnjiang 	while (rte_atomic16_read(&tp->op_params->sync) == SYNC_WAIT)
35484418919fSjohnjiang 		rte_pause();
35494418919fSjohnjiang 
35504418919fSjohnjiang 	ret = rte_bbdev_enc_op_alloc_bulk(tp->op_params->mp, ops_enq,
35514418919fSjohnjiang 			num_ops);
35524418919fSjohnjiang 	TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops",
35534418919fSjohnjiang 			num_ops);
35544418919fSjohnjiang 	if (test_vector.op_type != RTE_BBDEV_OP_NONE)
35554418919fSjohnjiang 		copy_reference_ldpc_enc_op(ops_enq, num_ops, 0, bufs->inputs,
35564418919fSjohnjiang 				bufs->hard_outputs, ref_op);
35574418919fSjohnjiang 
35584418919fSjohnjiang 	/* Set counter to validate the ordering */
35594418919fSjohnjiang 	for (j = 0; j < num_ops; ++j)
35604418919fSjohnjiang 		ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
35614418919fSjohnjiang 
35624418919fSjohnjiang 	for (i = 0; i < TEST_REPETITIONS; ++i) {
35634418919fSjohnjiang 
35644418919fSjohnjiang 		if (test_vector.op_type != RTE_BBDEV_OP_NONE)
35654418919fSjohnjiang 			for (j = 0; j < num_ops; ++j)
35664418919fSjohnjiang 				mbuf_reset(ops_enq[j]->turbo_enc.output.data);
35674418919fSjohnjiang 
35684418919fSjohnjiang 		start_time = rte_rdtsc_precise();
35694418919fSjohnjiang 
35704418919fSjohnjiang 		for (enq = 0, deq = 0; enq < num_ops;) {
35714418919fSjohnjiang 			num_to_enq = burst_sz;
35724418919fSjohnjiang 
35734418919fSjohnjiang 			if (unlikely(num_ops - enq < num_to_enq))
35744418919fSjohnjiang 				num_to_enq = num_ops - enq;
35754418919fSjohnjiang 
35764418919fSjohnjiang 			enq += rte_bbdev_enqueue_ldpc_enc_ops(tp->dev_id,
35774418919fSjohnjiang 					queue_id, &ops_enq[enq], num_to_enq);
35784418919fSjohnjiang 
35794418919fSjohnjiang 			deq += rte_bbdev_dequeue_ldpc_enc_ops(tp->dev_id,
35804418919fSjohnjiang 					queue_id, &ops_deq[deq], enq - deq);
35814418919fSjohnjiang 		}
35824418919fSjohnjiang 
35834418919fSjohnjiang 		/* dequeue the remaining */
35844418919fSjohnjiang 		while (deq < enq) {
35854418919fSjohnjiang 			deq += rte_bbdev_dequeue_ldpc_enc_ops(tp->dev_id,
35864418919fSjohnjiang 					queue_id, &ops_deq[deq], enq - deq);
35874418919fSjohnjiang 		}
35884418919fSjohnjiang 
35894418919fSjohnjiang 		total_time += rte_rdtsc_precise() - start_time;
35904418919fSjohnjiang 	}
35914418919fSjohnjiang 
35924418919fSjohnjiang 	if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
35934418919fSjohnjiang 		ret = validate_ldpc_enc_op(ops_deq, num_ops, ref_op);
35944418919fSjohnjiang 		TEST_ASSERT_SUCCESS(ret, "Validation failed!");
35954418919fSjohnjiang 	}
35964418919fSjohnjiang 
35974418919fSjohnjiang 	rte_bbdev_enc_op_free_bulk(ops_enq, num_ops);
35984418919fSjohnjiang 
35994418919fSjohnjiang 	double tb_len_bits = calc_ldpc_enc_TB_size(ref_op);
36004418919fSjohnjiang 
36014418919fSjohnjiang 	tp->ops_per_sec = ((double)num_ops * TEST_REPETITIONS) /
36024418919fSjohnjiang 			((double)total_time / (double)rte_get_tsc_hz());
36034418919fSjohnjiang 	tp->mbps = (((double)(num_ops * TEST_REPETITIONS * tb_len_bits))
36044418919fSjohnjiang 			/ 1000000.0) / ((double)total_time /
36054418919fSjohnjiang 			(double)rte_get_tsc_hz());
36064418919fSjohnjiang 
36074418919fSjohnjiang 	return TEST_SUCCESS;
36084418919fSjohnjiang }
36094418919fSjohnjiang 
36104418919fSjohnjiang static void
print_enc_throughput(struct thread_params * t_params,unsigned int used_cores)36114418919fSjohnjiang print_enc_throughput(struct thread_params *t_params, unsigned int used_cores)
36124418919fSjohnjiang {
36134418919fSjohnjiang 	unsigned int iter = 0;
3614d30ea906Sjfb8856606 	double total_mops = 0, total_mbps = 0;
3615d30ea906Sjfb8856606 
36164418919fSjohnjiang 	for (iter = 0; iter < used_cores; iter++) {
36174418919fSjohnjiang 		printf(
36184418919fSjohnjiang 			"Throughput for core (%u): %.8lg Ops/s, %.8lg Mbps\n",
36194418919fSjohnjiang 			t_params[iter].lcore_id, t_params[iter].ops_per_sec,
36204418919fSjohnjiang 			t_params[iter].mbps);
36214418919fSjohnjiang 		total_mops += t_params[iter].ops_per_sec;
36224418919fSjohnjiang 		total_mbps += t_params[iter].mbps;
3623d30ea906Sjfb8856606 	}
3624d30ea906Sjfb8856606 	printf(
36254418919fSjohnjiang 		"\nTotal throughput for %u cores: %.8lg MOPS, %.8lg Mbps\n",
3626d30ea906Sjfb8856606 		used_cores, total_mops, total_mbps);
3627d30ea906Sjfb8856606 }
3628d30ea906Sjfb8856606 
3629*2d9fd380Sjfb8856606 /* Aggregate the performance results over the number of cores used */
36304418919fSjohnjiang static void
print_dec_throughput(struct thread_params * t_params,unsigned int used_cores)36314418919fSjohnjiang print_dec_throughput(struct thread_params *t_params, unsigned int used_cores)
36324418919fSjohnjiang {
3633*2d9fd380Sjfb8856606 	unsigned int core_idx = 0;
36344418919fSjohnjiang 	double total_mops = 0, total_mbps = 0;
36354418919fSjohnjiang 	uint8_t iter_count = 0;
36364418919fSjohnjiang 
3637*2d9fd380Sjfb8856606 	for (core_idx = 0; core_idx < used_cores; core_idx++) {
36384418919fSjohnjiang 		printf(
36394418919fSjohnjiang 			"Throughput for core (%u): %.8lg Ops/s, %.8lg Mbps @ max %u iterations\n",
3640*2d9fd380Sjfb8856606 			t_params[core_idx].lcore_id,
3641*2d9fd380Sjfb8856606 			t_params[core_idx].ops_per_sec,
3642*2d9fd380Sjfb8856606 			t_params[core_idx].mbps,
3643*2d9fd380Sjfb8856606 			t_params[core_idx].iter_count);
3644*2d9fd380Sjfb8856606 		total_mops += t_params[core_idx].ops_per_sec;
3645*2d9fd380Sjfb8856606 		total_mbps += t_params[core_idx].mbps;
3646*2d9fd380Sjfb8856606 		iter_count = RTE_MAX(iter_count,
3647*2d9fd380Sjfb8856606 				t_params[core_idx].iter_count);
36484418919fSjohnjiang 	}
36494418919fSjohnjiang 	printf(
36504418919fSjohnjiang 		"\nTotal throughput for %u cores: %.8lg MOPS, %.8lg Mbps @ max %u iterations\n",
36514418919fSjohnjiang 		used_cores, total_mops, total_mbps, iter_count);
36524418919fSjohnjiang }
36534418919fSjohnjiang 
3654*2d9fd380Sjfb8856606 /* Aggregate the performance results over the number of cores used */
3655*2d9fd380Sjfb8856606 static void
print_dec_bler(struct thread_params * t_params,unsigned int used_cores)3656*2d9fd380Sjfb8856606 print_dec_bler(struct thread_params *t_params, unsigned int used_cores)
3657*2d9fd380Sjfb8856606 {
3658*2d9fd380Sjfb8856606 	unsigned int core_idx = 0;
3659*2d9fd380Sjfb8856606 	double total_mbps = 0, total_bler = 0, total_iter = 0;
3660*2d9fd380Sjfb8856606 	double snr = get_snr();
3661*2d9fd380Sjfb8856606 
3662*2d9fd380Sjfb8856606 	for (core_idx = 0; core_idx < used_cores; core_idx++) {
3663*2d9fd380Sjfb8856606 		printf("Core%u BLER %.1f %% - Iters %.1f - Tp %.1f Mbps %s\n",
3664*2d9fd380Sjfb8856606 				t_params[core_idx].lcore_id,
3665*2d9fd380Sjfb8856606 				t_params[core_idx].bler * 100,
3666*2d9fd380Sjfb8856606 				t_params[core_idx].iter_average,
3667*2d9fd380Sjfb8856606 				t_params[core_idx].mbps,
3668*2d9fd380Sjfb8856606 				get_vector_filename());
3669*2d9fd380Sjfb8856606 		total_mbps += t_params[core_idx].mbps;
3670*2d9fd380Sjfb8856606 		total_bler += t_params[core_idx].bler;
3671*2d9fd380Sjfb8856606 		total_iter += t_params[core_idx].iter_average;
3672*2d9fd380Sjfb8856606 	}
3673*2d9fd380Sjfb8856606 	total_bler /= used_cores;
3674*2d9fd380Sjfb8856606 	total_iter /= used_cores;
3675*2d9fd380Sjfb8856606 
3676*2d9fd380Sjfb8856606 	printf("SNR %.2f BLER %.1f %% - Iterations %.1f %d - Tp %.1f Mbps %s\n",
3677*2d9fd380Sjfb8856606 			snr, total_bler * 100, total_iter, get_iter_max(),
3678*2d9fd380Sjfb8856606 			total_mbps, get_vector_filename());
3679*2d9fd380Sjfb8856606 }
3680*2d9fd380Sjfb8856606 
3681*2d9fd380Sjfb8856606 /*
3682*2d9fd380Sjfb8856606  * Test function that determines BLER wireless performance
3683*2d9fd380Sjfb8856606  */
3684*2d9fd380Sjfb8856606 static int
bler_test(struct active_device * ad,struct test_op_params * op_params)3685*2d9fd380Sjfb8856606 bler_test(struct active_device *ad,
3686*2d9fd380Sjfb8856606 		struct test_op_params *op_params)
3687*2d9fd380Sjfb8856606 {
3688*2d9fd380Sjfb8856606 	int ret;
3689*2d9fd380Sjfb8856606 	unsigned int lcore_id, used_cores = 0;
3690*2d9fd380Sjfb8856606 	struct thread_params *t_params;
3691*2d9fd380Sjfb8856606 	struct rte_bbdev_info info;
3692*2d9fd380Sjfb8856606 	lcore_function_t *bler_function;
3693*2d9fd380Sjfb8856606 	uint16_t num_lcores;
3694*2d9fd380Sjfb8856606 	const char *op_type_str;
3695*2d9fd380Sjfb8856606 
3696*2d9fd380Sjfb8856606 	rte_bbdev_info_get(ad->dev_id, &info);
3697*2d9fd380Sjfb8856606 
3698*2d9fd380Sjfb8856606 	op_type_str = rte_bbdev_op_type_str(test_vector.op_type);
3699*2d9fd380Sjfb8856606 	TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u",
3700*2d9fd380Sjfb8856606 			test_vector.op_type);
3701*2d9fd380Sjfb8856606 
3702*2d9fd380Sjfb8856606 	printf("+ ------------------------------------------------------- +\n");
3703*2d9fd380Sjfb8856606 	printf("== test: bler\ndev: %s, nb_queues: %u, burst size: %u, num ops: %u, num_lcores: %u, op type: %s, itr mode: %s, GHz: %lg\n",
3704*2d9fd380Sjfb8856606 			info.dev_name, ad->nb_queues, op_params->burst_sz,
3705*2d9fd380Sjfb8856606 			op_params->num_to_process, op_params->num_lcores,
3706*2d9fd380Sjfb8856606 			op_type_str,
3707*2d9fd380Sjfb8856606 			intr_enabled ? "Interrupt mode" : "PMD mode",
3708*2d9fd380Sjfb8856606 			(double)rte_get_tsc_hz() / 1000000000.0);
3709*2d9fd380Sjfb8856606 
3710*2d9fd380Sjfb8856606 	/* Set number of lcores */
3711*2d9fd380Sjfb8856606 	num_lcores = (ad->nb_queues < (op_params->num_lcores))
3712*2d9fd380Sjfb8856606 			? ad->nb_queues
3713*2d9fd380Sjfb8856606 			: op_params->num_lcores;
3714*2d9fd380Sjfb8856606 
3715*2d9fd380Sjfb8856606 	/* Allocate memory for thread parameters structure */
3716*2d9fd380Sjfb8856606 	t_params = rte_zmalloc(NULL, num_lcores * sizeof(struct thread_params),
3717*2d9fd380Sjfb8856606 			RTE_CACHE_LINE_SIZE);
3718*2d9fd380Sjfb8856606 	TEST_ASSERT_NOT_NULL(t_params, "Failed to alloc %zuB for t_params",
3719*2d9fd380Sjfb8856606 			RTE_ALIGN(sizeof(struct thread_params) * num_lcores,
3720*2d9fd380Sjfb8856606 				RTE_CACHE_LINE_SIZE));
3721*2d9fd380Sjfb8856606 
3722*2d9fd380Sjfb8856606 	if ((test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC) &&
3723*2d9fd380Sjfb8856606 			!check_bit(test_vector.ldpc_dec.op_flags,
3724*2d9fd380Sjfb8856606 			RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)
3725*2d9fd380Sjfb8856606 			&& !check_bit(test_vector.ldpc_dec.op_flags,
3726*2d9fd380Sjfb8856606 			RTE_BBDEV_LDPC_LLR_COMPRESSION))
3727*2d9fd380Sjfb8856606 		bler_function = bler_pmd_lcore_ldpc_dec;
3728*2d9fd380Sjfb8856606 	else
3729*2d9fd380Sjfb8856606 		return TEST_SKIPPED;
3730*2d9fd380Sjfb8856606 
3731*2d9fd380Sjfb8856606 	rte_atomic16_set(&op_params->sync, SYNC_WAIT);
3732*2d9fd380Sjfb8856606 
3733*2d9fd380Sjfb8856606 	/* Main core is set at first entry */
3734*2d9fd380Sjfb8856606 	t_params[0].dev_id = ad->dev_id;
3735*2d9fd380Sjfb8856606 	t_params[0].lcore_id = rte_lcore_id();
3736*2d9fd380Sjfb8856606 	t_params[0].op_params = op_params;
3737*2d9fd380Sjfb8856606 	t_params[0].queue_id = ad->queue_ids[used_cores++];
3738*2d9fd380Sjfb8856606 	t_params[0].iter_count = 0;
3739*2d9fd380Sjfb8856606 
3740*2d9fd380Sjfb8856606 	RTE_LCORE_FOREACH_WORKER(lcore_id) {
3741*2d9fd380Sjfb8856606 		if (used_cores >= num_lcores)
3742*2d9fd380Sjfb8856606 			break;
3743*2d9fd380Sjfb8856606 
3744*2d9fd380Sjfb8856606 		t_params[used_cores].dev_id = ad->dev_id;
3745*2d9fd380Sjfb8856606 		t_params[used_cores].lcore_id = lcore_id;
3746*2d9fd380Sjfb8856606 		t_params[used_cores].op_params = op_params;
3747*2d9fd380Sjfb8856606 		t_params[used_cores].queue_id = ad->queue_ids[used_cores];
3748*2d9fd380Sjfb8856606 		t_params[used_cores].iter_count = 0;
3749*2d9fd380Sjfb8856606 
3750*2d9fd380Sjfb8856606 		rte_eal_remote_launch(bler_function,
3751*2d9fd380Sjfb8856606 				&t_params[used_cores++], lcore_id);
3752*2d9fd380Sjfb8856606 	}
3753*2d9fd380Sjfb8856606 
3754*2d9fd380Sjfb8856606 	rte_atomic16_set(&op_params->sync, SYNC_START);
3755*2d9fd380Sjfb8856606 	ret = bler_function(&t_params[0]);
3756*2d9fd380Sjfb8856606 
3757*2d9fd380Sjfb8856606 	/* Main core is always used */
3758*2d9fd380Sjfb8856606 	for (used_cores = 1; used_cores < num_lcores; used_cores++)
3759*2d9fd380Sjfb8856606 		ret |= rte_eal_wait_lcore(t_params[used_cores].lcore_id);
3760*2d9fd380Sjfb8856606 
3761*2d9fd380Sjfb8856606 	print_dec_bler(t_params, num_lcores);
3762*2d9fd380Sjfb8856606 
3763*2d9fd380Sjfb8856606 	/* Return if test failed */
3764*2d9fd380Sjfb8856606 	if (ret) {
3765*2d9fd380Sjfb8856606 		rte_free(t_params);
3766*2d9fd380Sjfb8856606 		return ret;
3767*2d9fd380Sjfb8856606 	}
3768*2d9fd380Sjfb8856606 
3769*2d9fd380Sjfb8856606 	/* Function to print something  here*/
3770*2d9fd380Sjfb8856606 	rte_free(t_params);
3771*2d9fd380Sjfb8856606 	return ret;
3772*2d9fd380Sjfb8856606 }
3773*2d9fd380Sjfb8856606 
/*
 * Test function that determines how long an enqueue + dequeue of a burst
 * takes on available lcores.
 *
 * Selects a per-lcore worker matching the vector's op type and the dequeue
 * mode (interrupt-driven vs PMD polling), launches it on up to one lcore
 * per configured queue, releases all workers simultaneously via the sync
 * flag, collects results and prints aggregate throughput.
 * Returns TEST_SUCCESS or the OR of the worker return codes on failure.
 */
static int
throughput_test(struct active_device *ad,
		struct test_op_params *op_params)
{
	int ret;
	unsigned int lcore_id, used_cores = 0;
	struct thread_params *t_params, *tp;
	struct rte_bbdev_info info;
	lcore_function_t *throughput_function;
	uint16_t num_lcores;
	const char *op_type_str;

	rte_bbdev_info_get(ad->dev_id, &info);

	op_type_str = rte_bbdev_op_type_str(test_vector.op_type);
	TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u",
			test_vector.op_type);

	printf("+ ------------------------------------------------------- +\n");
	printf("== test: throughput\ndev: %s, nb_queues: %u, burst size: %u, num ops: %u, num_lcores: %u, op type: %s, itr mode: %s, GHz: %lg\n",
			info.dev_name, ad->nb_queues, op_params->burst_sz,
			op_params->num_to_process, op_params->num_lcores,
			op_type_str,
			intr_enabled ? "Interrupt mode" : "PMD mode",
			(double)rte_get_tsc_hz() / 1000000000.0);

	/* Set number of lcores: at most one lcore per configured queue */
	num_lcores = (ad->nb_queues < (op_params->num_lcores))
			? ad->nb_queues
			: op_params->num_lcores;

	/* Allocate memory for thread parameters structure */
	t_params = rte_zmalloc(NULL, num_lcores * sizeof(struct thread_params),
			RTE_CACHE_LINE_SIZE);
	TEST_ASSERT_NOT_NULL(t_params, "Failed to alloc %zuB for t_params",
			RTE_ALIGN(sizeof(struct thread_params) * num_lcores,
				RTE_CACHE_LINE_SIZE));

	/* Pick the worker: interrupt-mode path registers a dequeue-event
	 * callback; the fallthrough default is the Turbo encoder worker.
	 */
	if (intr_enabled) {
		if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
			throughput_function = throughput_intr_lcore_dec;
		else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
			throughput_function = throughput_intr_lcore_ldpc_dec;
		else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
			throughput_function = throughput_intr_lcore_enc;
		else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
			throughput_function = throughput_intr_lcore_ldpc_enc;
		else
			throughput_function = throughput_intr_lcore_enc;

		/* Dequeue interrupt callback registration */
		ret = rte_bbdev_callback_register(ad->dev_id,
				RTE_BBDEV_EVENT_DEQUEUE, dequeue_event_callback,
				t_params);
		if (ret < 0) {
			rte_free(t_params);
			return ret;
		}
	} else {
		if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
			throughput_function = throughput_pmd_lcore_dec;
		else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
			throughput_function = throughput_pmd_lcore_ldpc_dec;
		else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
			throughput_function = throughput_pmd_lcore_enc;
		else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
			throughput_function = throughput_pmd_lcore_ldpc_enc;
		else
			throughput_function = throughput_pmd_lcore_enc;
	}

	/* Workers launched below spin until sync flips to SYNC_START */
	rte_atomic16_set(&op_params->sync, SYNC_WAIT);

	/* Main core is set at first entry */
	t_params[0].dev_id = ad->dev_id;
	t_params[0].lcore_id = rte_lcore_id();
	t_params[0].op_params = op_params;
	t_params[0].queue_id = ad->queue_ids[used_cores++];
	t_params[0].iter_count = 0;

	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (used_cores >= num_lcores)
			break;

		t_params[used_cores].dev_id = ad->dev_id;
		t_params[used_cores].lcore_id = lcore_id;
		t_params[used_cores].op_params = op_params;
		t_params[used_cores].queue_id = ad->queue_ids[used_cores];
		t_params[used_cores].iter_count = 0;

		rte_eal_remote_launch(throughput_function,
				&t_params[used_cores++], lcore_id);
	}

	/* Release all lcores at once, then run on the main core too */
	rte_atomic16_set(&op_params->sync, SYNC_START);
	ret = throughput_function(&t_params[0]);

	/* Main core is always used */
	for (used_cores = 1; used_cores < num_lcores; used_cores++)
		ret |= rte_eal_wait_lcore(t_params[used_cores].lcore_id);

	/* Return if test failed */
	if (ret) {
		rte_free(t_params);
		return ret;
	}

	/* Print throughput if interrupts are disabled and test passed */
	if (!intr_enabled) {
		if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC ||
				test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
			print_dec_throughput(t_params, num_lcores);
		else
			print_enc_throughput(t_params, num_lcores);
		rte_free(t_params);
		return ret;
	}

	/* In interrupt TC we need to wait for the interrupt callback to dequeue
	 * all pending operations. Skip waiting for queues which reported an
	 * error using processing_status variable.
	 * Wait for main lcore operations.
	 */
	tp = &t_params[0];
	while ((rte_atomic16_read(&tp->nb_dequeued) <
			op_params->num_to_process) &&
			(rte_atomic16_read(&tp->processing_status) !=
			TEST_FAILED))
		rte_pause();

	/* Callback accumulated stats over all repetitions; normalize */
	tp->ops_per_sec /= TEST_REPETITIONS;
	tp->mbps /= TEST_REPETITIONS;
	ret |= (int)rte_atomic16_read(&tp->processing_status);

	/* Wait for worker lcores operations */
	for (used_cores = 1; used_cores < num_lcores; used_cores++) {
		tp = &t_params[used_cores];

		while ((rte_atomic16_read(&tp->nb_dequeued) <
				op_params->num_to_process) &&
				(rte_atomic16_read(&tp->processing_status) !=
				TEST_FAILED))
			rte_pause();

		tp->ops_per_sec /= TEST_REPETITIONS;
		tp->mbps /= TEST_REPETITIONS;
		ret |= (int)rte_atomic16_read(&tp->processing_status);
	}

	/* Print throughput if test passed */
	if (!ret) {
		if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC ||
				test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC)
			print_dec_throughput(t_params, num_lcores);
		else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC ||
				test_vector.op_type == RTE_BBDEV_OP_LDPC_ENC)
			print_enc_throughput(t_params, num_lcores);
	}

	rte_free(t_params);
	return ret;
}
3940d30ea906Sjfb8856606 
/* Measure per-burst enqueue+dequeue latency for the Turbo decoder.
 *
 * Processes num_to_process ops in bursts of at most burst_sz on the given
 * device queue. For each burst, latency is measured from just before the
 * enqueue until the FIRST dequeue call that returns any ops; min/max/total
 * latencies are accumulated into the output parameters (caller initializes
 * them). Results are validated against ref_op unless the vector op type is
 * RTE_BBDEV_OP_NONE.
 *
 * Returns the number of bursts measured, or fails via TEST_ASSERT.
 */
static int
latency_test_dec(struct rte_mempool *mempool,
		struct test_buffers *bufs, struct rte_bbdev_dec_op *ref_op,
		int vector_mask, uint16_t dev_id, uint16_t queue_id,
		const uint16_t num_to_process, uint16_t burst_sz,
		uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
{
	int ret = TEST_SUCCESS;
	uint16_t i, j, dequeued;
	struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
	uint64_t start_time = 0, last_time = 0;

	for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
		uint16_t enq = 0, deq = 0;
		bool first_time = true;
		last_time = 0;

		/* The final burst may be smaller than burst_sz */
		if (unlikely(num_to_process - dequeued < burst_sz))
			burst_sz = num_to_process - dequeued;

		ret = rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
		TEST_ASSERT_SUCCESS(ret,
				"rte_bbdev_dec_op_alloc_bulk() failed");
		if (test_vector.op_type != RTE_BBDEV_OP_NONE)
			copy_reference_dec_op(ops_enq, burst_sz, dequeued,
					bufs->inputs,
					bufs->hard_outputs,
					bufs->soft_outputs,
					ref_op);

		/* Set counter to validate the ordering */
		for (j = 0; j < burst_sz; ++j)
			ops_enq[j]->opaque_data = (void *)(uintptr_t)j;

		start_time = rte_rdtsc_precise();

		/* The whole burst must be accepted by a single enqueue call */
		enq = rte_bbdev_enqueue_dec_ops(dev_id, queue_id, &ops_enq[enq],
				burst_sz);
		TEST_ASSERT(enq == burst_sz,
				"Error enqueueing burst, expected %u, got %u",
				burst_sz, enq);

		/* Dequeue; latency is taken at the first non-empty dequeue */
		do {
			deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
					&ops_deq[deq], burst_sz - deq);
			if (likely(first_time && (deq > 0))) {
				last_time = rte_rdtsc_precise() - start_time;
				first_time = false;
			}
		} while (unlikely(burst_sz != deq));

		*max_time = RTE_MAX(*max_time, last_time);
		*min_time = RTE_MIN(*min_time, last_time);
		*total_time += last_time;

		if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
			ret = validate_dec_op(ops_deq, burst_sz, ref_op,
					vector_mask);
			TEST_ASSERT_SUCCESS(ret, "Validation failed!");
		}

		rte_bbdev_dec_op_free_bulk(ops_enq, deq);
		dequeued += deq;
	}

	return i;
}
4009d30ea906Sjfb8856606 
4010*2d9fd380Sjfb8856606 /* Test case for latency/validation for LDPC Decoder */
4011d30ea906Sjfb8856606 static int
latency_test_ldpc_dec(struct rte_mempool * mempool,struct test_buffers * bufs,struct rte_bbdev_dec_op * ref_op,int vector_mask,uint16_t dev_id,uint16_t queue_id,const uint16_t num_to_process,uint16_t burst_sz,uint64_t * total_time,uint64_t * min_time,uint64_t * max_time,bool disable_et)40124418919fSjohnjiang latency_test_ldpc_dec(struct rte_mempool *mempool,
40134418919fSjohnjiang 		struct test_buffers *bufs, struct rte_bbdev_dec_op *ref_op,
40144418919fSjohnjiang 		int vector_mask, uint16_t dev_id, uint16_t queue_id,
40154418919fSjohnjiang 		const uint16_t num_to_process, uint16_t burst_sz,
4016*2d9fd380Sjfb8856606 		uint64_t *total_time, uint64_t *min_time, uint64_t *max_time,
4017*2d9fd380Sjfb8856606 		bool disable_et)
40184418919fSjohnjiang {
40194418919fSjohnjiang 	int ret = TEST_SUCCESS;
40204418919fSjohnjiang 	uint16_t i, j, dequeued;
40214418919fSjohnjiang 	struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
40224418919fSjohnjiang 	uint64_t start_time = 0, last_time = 0;
4023*2d9fd380Sjfb8856606 	bool extDdr = ldpc_cap_flags &
4024*2d9fd380Sjfb8856606 			RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE;
40254418919fSjohnjiang 
40264418919fSjohnjiang 	for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
40274418919fSjohnjiang 		uint16_t enq = 0, deq = 0;
40284418919fSjohnjiang 		bool first_time = true;
40294418919fSjohnjiang 		last_time = 0;
40304418919fSjohnjiang 
40314418919fSjohnjiang 		if (unlikely(num_to_process - dequeued < burst_sz))
40324418919fSjohnjiang 			burst_sz = num_to_process - dequeued;
40334418919fSjohnjiang 
40344418919fSjohnjiang 		ret = rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
40354418919fSjohnjiang 		TEST_ASSERT_SUCCESS(ret,
40364418919fSjohnjiang 				"rte_bbdev_dec_op_alloc_bulk() failed");
4037*2d9fd380Sjfb8856606 
4038*2d9fd380Sjfb8856606 		/* For latency tests we need to disable early termination */
4039*2d9fd380Sjfb8856606 		if (disable_et && check_bit(ref_op->ldpc_dec.op_flags,
4040*2d9fd380Sjfb8856606 				RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE))
4041*2d9fd380Sjfb8856606 			ref_op->ldpc_dec.op_flags -=
4042*2d9fd380Sjfb8856606 					RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE;
4043*2d9fd380Sjfb8856606 		ref_op->ldpc_dec.iter_max = get_iter_max();
4044*2d9fd380Sjfb8856606 		ref_op->ldpc_dec.iter_count = ref_op->ldpc_dec.iter_max;
4045*2d9fd380Sjfb8856606 
40464418919fSjohnjiang 		if (test_vector.op_type != RTE_BBDEV_OP_NONE)
40474418919fSjohnjiang 			copy_reference_ldpc_dec_op(ops_enq, burst_sz, dequeued,
40484418919fSjohnjiang 					bufs->inputs,
40494418919fSjohnjiang 					bufs->hard_outputs,
40504418919fSjohnjiang 					bufs->soft_outputs,
40514418919fSjohnjiang 					bufs->harq_inputs,
40524418919fSjohnjiang 					bufs->harq_outputs,
40534418919fSjohnjiang 					ref_op);
40544418919fSjohnjiang 
4055*2d9fd380Sjfb8856606 		if (extDdr)
4056*2d9fd380Sjfb8856606 			preload_harq_ddr(dev_id, queue_id, ops_enq,
4057*2d9fd380Sjfb8856606 					burst_sz, true);
4058*2d9fd380Sjfb8856606 
40594418919fSjohnjiang 		/* Set counter to validate the ordering */
40604418919fSjohnjiang 		for (j = 0; j < burst_sz; ++j)
40614418919fSjohnjiang 			ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
40624418919fSjohnjiang 
40634418919fSjohnjiang 		start_time = rte_rdtsc_precise();
40644418919fSjohnjiang 
40654418919fSjohnjiang 		enq = rte_bbdev_enqueue_ldpc_dec_ops(dev_id, queue_id,
40664418919fSjohnjiang 				&ops_enq[enq], burst_sz);
40674418919fSjohnjiang 		TEST_ASSERT(enq == burst_sz,
40684418919fSjohnjiang 				"Error enqueueing burst, expected %u, got %u",
40694418919fSjohnjiang 				burst_sz, enq);
40704418919fSjohnjiang 
40714418919fSjohnjiang 		/* Dequeue */
40724418919fSjohnjiang 		do {
40734418919fSjohnjiang 			deq += rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
40744418919fSjohnjiang 					&ops_deq[deq], burst_sz - deq);
40754418919fSjohnjiang 			if (likely(first_time && (deq > 0))) {
40764418919fSjohnjiang 				last_time = rte_rdtsc_precise() - start_time;
40774418919fSjohnjiang 				first_time = false;
40784418919fSjohnjiang 			}
40794418919fSjohnjiang 		} while (unlikely(burst_sz != deq));
40804418919fSjohnjiang 
40814418919fSjohnjiang 		*max_time = RTE_MAX(*max_time, last_time);
40824418919fSjohnjiang 		*min_time = RTE_MIN(*min_time, last_time);
40834418919fSjohnjiang 		*total_time += last_time;
40844418919fSjohnjiang 
4085*2d9fd380Sjfb8856606 		if (extDdr)
4086*2d9fd380Sjfb8856606 			retrieve_harq_ddr(dev_id, queue_id, ops_enq, burst_sz);
4087*2d9fd380Sjfb8856606 
40884418919fSjohnjiang 		if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
40894418919fSjohnjiang 			ret = validate_ldpc_dec_op(ops_deq, burst_sz, ref_op,
40904418919fSjohnjiang 					vector_mask);
40914418919fSjohnjiang 			TEST_ASSERT_SUCCESS(ret, "Validation failed!");
40924418919fSjohnjiang 		}
40934418919fSjohnjiang 
40944418919fSjohnjiang 		rte_bbdev_dec_op_free_bulk(ops_enq, deq);
40954418919fSjohnjiang 		dequeued += deq;
40964418919fSjohnjiang 	}
40974418919fSjohnjiang 	return i;
40984418919fSjohnjiang }
40994418919fSjohnjiang 
41004418919fSjohnjiang static int
latency_test_enc(struct rte_mempool * mempool,struct test_buffers * bufs,struct rte_bbdev_enc_op * ref_op,uint16_t dev_id,uint16_t queue_id,const uint16_t num_to_process,uint16_t burst_sz,uint64_t * total_time,uint64_t * min_time,uint64_t * max_time)4101d30ea906Sjfb8856606 latency_test_enc(struct rte_mempool *mempool,
4102d30ea906Sjfb8856606 		struct test_buffers *bufs, struct rte_bbdev_enc_op *ref_op,
4103d30ea906Sjfb8856606 		uint16_t dev_id, uint16_t queue_id,
4104d30ea906Sjfb8856606 		const uint16_t num_to_process, uint16_t burst_sz,
4105d30ea906Sjfb8856606 		uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
4106d30ea906Sjfb8856606 {
4107d30ea906Sjfb8856606 	int ret = TEST_SUCCESS;
4108d30ea906Sjfb8856606 	uint16_t i, j, dequeued;
4109d30ea906Sjfb8856606 	struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
4110d30ea906Sjfb8856606 	uint64_t start_time = 0, last_time = 0;
4111d30ea906Sjfb8856606 
4112d30ea906Sjfb8856606 	for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
4113d30ea906Sjfb8856606 		uint16_t enq = 0, deq = 0;
4114d30ea906Sjfb8856606 		bool first_time = true;
4115d30ea906Sjfb8856606 		last_time = 0;
4116d30ea906Sjfb8856606 
4117d30ea906Sjfb8856606 		if (unlikely(num_to_process - dequeued < burst_sz))
4118d30ea906Sjfb8856606 			burst_sz = num_to_process - dequeued;
4119d30ea906Sjfb8856606 
4120d30ea906Sjfb8856606 		ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
4121d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(ret,
4122d30ea906Sjfb8856606 				"rte_bbdev_enc_op_alloc_bulk() failed");
4123d30ea906Sjfb8856606 		if (test_vector.op_type != RTE_BBDEV_OP_NONE)
4124d30ea906Sjfb8856606 			copy_reference_enc_op(ops_enq, burst_sz, dequeued,
4125d30ea906Sjfb8856606 					bufs->inputs,
4126d30ea906Sjfb8856606 					bufs->hard_outputs,
4127d30ea906Sjfb8856606 					ref_op);
4128d30ea906Sjfb8856606 
4129d30ea906Sjfb8856606 		/* Set counter to validate the ordering */
4130d30ea906Sjfb8856606 		for (j = 0; j < burst_sz; ++j)
4131d30ea906Sjfb8856606 			ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
4132d30ea906Sjfb8856606 
4133d30ea906Sjfb8856606 		start_time = rte_rdtsc_precise();
4134d30ea906Sjfb8856606 
4135d30ea906Sjfb8856606 		enq = rte_bbdev_enqueue_enc_ops(dev_id, queue_id, &ops_enq[enq],
4136d30ea906Sjfb8856606 				burst_sz);
4137d30ea906Sjfb8856606 		TEST_ASSERT(enq == burst_sz,
4138d30ea906Sjfb8856606 				"Error enqueueing burst, expected %u, got %u",
4139d30ea906Sjfb8856606 				burst_sz, enq);
4140d30ea906Sjfb8856606 
4141d30ea906Sjfb8856606 		/* Dequeue */
4142d30ea906Sjfb8856606 		do {
4143d30ea906Sjfb8856606 			deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
4144d30ea906Sjfb8856606 					&ops_deq[deq], burst_sz - deq);
4145d30ea906Sjfb8856606 			if (likely(first_time && (deq > 0))) {
4146d30ea906Sjfb8856606 				last_time += rte_rdtsc_precise() - start_time;
4147d30ea906Sjfb8856606 				first_time = false;
4148d30ea906Sjfb8856606 			}
4149d30ea906Sjfb8856606 		} while (unlikely(burst_sz != deq));
4150d30ea906Sjfb8856606 
4151d30ea906Sjfb8856606 		*max_time = RTE_MAX(*max_time, last_time);
4152d30ea906Sjfb8856606 		*min_time = RTE_MIN(*min_time, last_time);
4153d30ea906Sjfb8856606 		*total_time += last_time;
4154d30ea906Sjfb8856606 
4155d30ea906Sjfb8856606 		if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
4156d30ea906Sjfb8856606 			ret = validate_enc_op(ops_deq, burst_sz, ref_op);
4157d30ea906Sjfb8856606 			TEST_ASSERT_SUCCESS(ret, "Validation failed!");
4158d30ea906Sjfb8856606 		}
4159d30ea906Sjfb8856606 
4160d30ea906Sjfb8856606 		rte_bbdev_enc_op_free_bulk(ops_enq, deq);
4161d30ea906Sjfb8856606 		dequeued += deq;
4162d30ea906Sjfb8856606 	}
4163d30ea906Sjfb8856606 
4164d30ea906Sjfb8856606 	return i;
4165d30ea906Sjfb8856606 }
4166d30ea906Sjfb8856606 
4167d30ea906Sjfb8856606 static int
latency_test_ldpc_enc(struct rte_mempool * mempool,struct test_buffers * bufs,struct rte_bbdev_enc_op * ref_op,uint16_t dev_id,uint16_t queue_id,const uint16_t num_to_process,uint16_t burst_sz,uint64_t * total_time,uint64_t * min_time,uint64_t * max_time)41684418919fSjohnjiang latency_test_ldpc_enc(struct rte_mempool *mempool,
41694418919fSjohnjiang 		struct test_buffers *bufs, struct rte_bbdev_enc_op *ref_op,
41704418919fSjohnjiang 		uint16_t dev_id, uint16_t queue_id,
41714418919fSjohnjiang 		const uint16_t num_to_process, uint16_t burst_sz,
41724418919fSjohnjiang 		uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
41734418919fSjohnjiang {
41744418919fSjohnjiang 	int ret = TEST_SUCCESS;
41754418919fSjohnjiang 	uint16_t i, j, dequeued;
41764418919fSjohnjiang 	struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
41774418919fSjohnjiang 	uint64_t start_time = 0, last_time = 0;
41784418919fSjohnjiang 
41794418919fSjohnjiang 	for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
41804418919fSjohnjiang 		uint16_t enq = 0, deq = 0;
41814418919fSjohnjiang 		bool first_time = true;
41824418919fSjohnjiang 		last_time = 0;
41834418919fSjohnjiang 
41844418919fSjohnjiang 		if (unlikely(num_to_process - dequeued < burst_sz))
41854418919fSjohnjiang 			burst_sz = num_to_process - dequeued;
41864418919fSjohnjiang 
41874418919fSjohnjiang 		ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
41884418919fSjohnjiang 		TEST_ASSERT_SUCCESS(ret,
41894418919fSjohnjiang 				"rte_bbdev_enc_op_alloc_bulk() failed");
41904418919fSjohnjiang 		if (test_vector.op_type != RTE_BBDEV_OP_NONE)
41914418919fSjohnjiang 			copy_reference_ldpc_enc_op(ops_enq, burst_sz, dequeued,
41924418919fSjohnjiang 					bufs->inputs,
41934418919fSjohnjiang 					bufs->hard_outputs,
41944418919fSjohnjiang 					ref_op);
41954418919fSjohnjiang 
41964418919fSjohnjiang 		/* Set counter to validate the ordering */
41974418919fSjohnjiang 		for (j = 0; j < burst_sz; ++j)
41984418919fSjohnjiang 			ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
41994418919fSjohnjiang 
42004418919fSjohnjiang 		start_time = rte_rdtsc_precise();
42014418919fSjohnjiang 
42024418919fSjohnjiang 		enq = rte_bbdev_enqueue_ldpc_enc_ops(dev_id, queue_id,
42034418919fSjohnjiang 				&ops_enq[enq], burst_sz);
42044418919fSjohnjiang 		TEST_ASSERT(enq == burst_sz,
42054418919fSjohnjiang 				"Error enqueueing burst, expected %u, got %u",
42064418919fSjohnjiang 				burst_sz, enq);
42074418919fSjohnjiang 
42084418919fSjohnjiang 		/* Dequeue */
42094418919fSjohnjiang 		do {
42104418919fSjohnjiang 			deq += rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
42114418919fSjohnjiang 					&ops_deq[deq], burst_sz - deq);
42124418919fSjohnjiang 			if (likely(first_time && (deq > 0))) {
42134418919fSjohnjiang 				last_time += rte_rdtsc_precise() - start_time;
42144418919fSjohnjiang 				first_time = false;
42154418919fSjohnjiang 			}
42164418919fSjohnjiang 		} while (unlikely(burst_sz != deq));
42174418919fSjohnjiang 
42184418919fSjohnjiang 		*max_time = RTE_MAX(*max_time, last_time);
42194418919fSjohnjiang 		*min_time = RTE_MIN(*min_time, last_time);
42204418919fSjohnjiang 		*total_time += last_time;
42214418919fSjohnjiang 
42224418919fSjohnjiang 		if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
42234418919fSjohnjiang 			ret = validate_enc_op(ops_deq, burst_sz, ref_op);
42244418919fSjohnjiang 			TEST_ASSERT_SUCCESS(ret, "Validation failed!");
42254418919fSjohnjiang 		}
42264418919fSjohnjiang 
42274418919fSjohnjiang 		rte_bbdev_enc_op_free_bulk(ops_enq, deq);
42284418919fSjohnjiang 		dequeued += deq;
42294418919fSjohnjiang 	}
42304418919fSjohnjiang 
42314418919fSjohnjiang 	return i;
42324418919fSjohnjiang }
42334418919fSjohnjiang 
4234*2d9fd380Sjfb8856606 /* Common function for running validation and latency test cases */
42354418919fSjohnjiang static int
validation_latency_test(struct active_device * ad,struct test_op_params * op_params,bool latency_flag)4236*2d9fd380Sjfb8856606 validation_latency_test(struct active_device *ad,
4237*2d9fd380Sjfb8856606 		struct test_op_params *op_params, bool latency_flag)
4238d30ea906Sjfb8856606 {
4239d30ea906Sjfb8856606 	int iter;
4240d30ea906Sjfb8856606 	uint16_t burst_sz = op_params->burst_sz;
4241d30ea906Sjfb8856606 	const uint16_t num_to_process = op_params->num_to_process;
4242d30ea906Sjfb8856606 	const enum rte_bbdev_op_type op_type = test_vector.op_type;
4243d30ea906Sjfb8856606 	const uint16_t queue_id = ad->queue_ids[0];
4244d30ea906Sjfb8856606 	struct test_buffers *bufs = NULL;
4245d30ea906Sjfb8856606 	struct rte_bbdev_info info;
4246d30ea906Sjfb8856606 	uint64_t total_time, min_time, max_time;
4247d30ea906Sjfb8856606 	const char *op_type_str;
4248d30ea906Sjfb8856606 
4249d30ea906Sjfb8856606 	total_time = max_time = 0;
4250d30ea906Sjfb8856606 	min_time = UINT64_MAX;
4251d30ea906Sjfb8856606 
4252d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
4253d30ea906Sjfb8856606 			"BURST_SIZE should be <= %u", MAX_BURST);
4254d30ea906Sjfb8856606 
4255d30ea906Sjfb8856606 	rte_bbdev_info_get(ad->dev_id, &info);
4256d30ea906Sjfb8856606 	bufs = &op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
4257d30ea906Sjfb8856606 
4258d30ea906Sjfb8856606 	op_type_str = rte_bbdev_op_type_str(op_type);
4259d30ea906Sjfb8856606 	TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
4260d30ea906Sjfb8856606 
42614418919fSjohnjiang 	printf("+ ------------------------------------------------------- +\n");
4262*2d9fd380Sjfb8856606 	if (latency_flag)
4263*2d9fd380Sjfb8856606 		printf("== test: latency\ndev:");
4264*2d9fd380Sjfb8856606 	else
4265*2d9fd380Sjfb8856606 		printf("== test: validation\ndev:");
4266*2d9fd380Sjfb8856606 	printf("%s, burst size: %u, num ops: %u, op type: %s\n",
4267d30ea906Sjfb8856606 			info.dev_name, burst_sz, num_to_process, op_type_str);
4268d30ea906Sjfb8856606 
4269d30ea906Sjfb8856606 	if (op_type == RTE_BBDEV_OP_TURBO_DEC)
4270d30ea906Sjfb8856606 		iter = latency_test_dec(op_params->mp, bufs,
4271d30ea906Sjfb8856606 				op_params->ref_dec_op, op_params->vector_mask,
4272d30ea906Sjfb8856606 				ad->dev_id, queue_id, num_to_process,
4273d30ea906Sjfb8856606 				burst_sz, &total_time, &min_time, &max_time);
42744418919fSjohnjiang 	else if (op_type == RTE_BBDEV_OP_LDPC_ENC)
42754418919fSjohnjiang 		iter = latency_test_ldpc_enc(op_params->mp, bufs,
42764418919fSjohnjiang 				op_params->ref_enc_op, ad->dev_id, queue_id,
42774418919fSjohnjiang 				num_to_process, burst_sz, &total_time,
42784418919fSjohnjiang 				&min_time, &max_time);
42794418919fSjohnjiang 	else if (op_type == RTE_BBDEV_OP_LDPC_DEC)
42804418919fSjohnjiang 		iter = latency_test_ldpc_dec(op_params->mp, bufs,
42814418919fSjohnjiang 				op_params->ref_dec_op, op_params->vector_mask,
42824418919fSjohnjiang 				ad->dev_id, queue_id, num_to_process,
4283*2d9fd380Sjfb8856606 				burst_sz, &total_time, &min_time, &max_time,
4284*2d9fd380Sjfb8856606 				latency_flag);
4285*2d9fd380Sjfb8856606 	else /* RTE_BBDEV_OP_TURBO_ENC */
42864418919fSjohnjiang 		iter = latency_test_enc(op_params->mp, bufs,
42874418919fSjohnjiang 				op_params->ref_enc_op,
42884418919fSjohnjiang 				ad->dev_id, queue_id,
42894418919fSjohnjiang 				num_to_process, burst_sz, &total_time,
42904418919fSjohnjiang 				&min_time, &max_time);
4291d30ea906Sjfb8856606 
4292d30ea906Sjfb8856606 	if (iter <= 0)
4293d30ea906Sjfb8856606 		return TEST_FAILED;
4294d30ea906Sjfb8856606 
42954418919fSjohnjiang 	printf("Operation latency:\n"
42964418919fSjohnjiang 			"\tavg: %lg cycles, %lg us\n"
42974418919fSjohnjiang 			"\tmin: %lg cycles, %lg us\n"
42984418919fSjohnjiang 			"\tmax: %lg cycles, %lg us\n",
4299d30ea906Sjfb8856606 			(double)total_time / (double)iter,
4300d30ea906Sjfb8856606 			(double)(total_time * 1000000) / (double)iter /
4301d30ea906Sjfb8856606 			(double)rte_get_tsc_hz(), (double)min_time,
4302d30ea906Sjfb8856606 			(double)(min_time * 1000000) / (double)rte_get_tsc_hz(),
4303d30ea906Sjfb8856606 			(double)max_time, (double)(max_time * 1000000) /
4304d30ea906Sjfb8856606 			(double)rte_get_tsc_hz());
4305d30ea906Sjfb8856606 
4306d30ea906Sjfb8856606 	return TEST_SUCCESS;
4307d30ea906Sjfb8856606 }
4308d30ea906Sjfb8856606 
/* Latency test entry point: run the shared validation/latency body in
 * latency mode (latency_flag = true, which the LDPC decode path uses
 * as disable_et to switch off iteration early termination).
 */
static int
latency_test(struct active_device *ad, struct test_op_params *op_params)
{
	return validation_latency_test(ad, op_params, true);
}
4314*2d9fd380Sjfb8856606 
/* Validation test entry point: run the shared validation/latency body
 * in validation mode (latency_flag = false, leaving LDPC early
 * termination as configured by the test vector).
 */
static int
validation_test(struct active_device *ad, struct test_op_params *op_params)
{
	return validation_latency_test(ad, op_params, false);
}
4320*2d9fd380Sjfb8856606 
4321d30ea906Sjfb8856606 #ifdef RTE_BBDEV_OFFLOAD_COST
4322d30ea906Sjfb8856606 static int
get_bbdev_queue_stats(uint16_t dev_id,uint16_t queue_id,struct rte_bbdev_stats * stats)4323d30ea906Sjfb8856606 get_bbdev_queue_stats(uint16_t dev_id, uint16_t queue_id,
4324d30ea906Sjfb8856606 		struct rte_bbdev_stats *stats)
4325d30ea906Sjfb8856606 {
4326d30ea906Sjfb8856606 	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
4327d30ea906Sjfb8856606 	struct rte_bbdev_stats *q_stats;
4328d30ea906Sjfb8856606 
4329d30ea906Sjfb8856606 	if (queue_id >= dev->data->num_queues)
4330d30ea906Sjfb8856606 		return -1;
4331d30ea906Sjfb8856606 
4332d30ea906Sjfb8856606 	q_stats = &dev->data->queues[queue_id].queue_stats;
4333d30ea906Sjfb8856606 
4334d30ea906Sjfb8856606 	stats->enqueued_count = q_stats->enqueued_count;
4335d30ea906Sjfb8856606 	stats->dequeued_count = q_stats->dequeued_count;
4336d30ea906Sjfb8856606 	stats->enqueue_err_count = q_stats->enqueue_err_count;
4337d30ea906Sjfb8856606 	stats->dequeue_err_count = q_stats->dequeue_err_count;
43384418919fSjohnjiang 	stats->acc_offload_cycles = q_stats->acc_offload_cycles;
4339d30ea906Sjfb8856606 
4340d30ea906Sjfb8856606 	return 0;
4341d30ea906Sjfb8856606 }
4342d30ea906Sjfb8856606 
4343d30ea906Sjfb8856606 static int
offload_latency_test_dec(struct rte_mempool * mempool,struct test_buffers * bufs,struct rte_bbdev_dec_op * ref_op,uint16_t dev_id,uint16_t queue_id,const uint16_t num_to_process,uint16_t burst_sz,struct test_time_stats * time_st)4344d30ea906Sjfb8856606 offload_latency_test_dec(struct rte_mempool *mempool, struct test_buffers *bufs,
4345d30ea906Sjfb8856606 		struct rte_bbdev_dec_op *ref_op, uint16_t dev_id,
4346d30ea906Sjfb8856606 		uint16_t queue_id, const uint16_t num_to_process,
4347d30ea906Sjfb8856606 		uint16_t burst_sz, struct test_time_stats *time_st)
4348d30ea906Sjfb8856606 {
4349d30ea906Sjfb8856606 	int i, dequeued, ret;
4350d30ea906Sjfb8856606 	struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
4351d30ea906Sjfb8856606 	uint64_t enq_start_time, deq_start_time;
4352d30ea906Sjfb8856606 	uint64_t enq_sw_last_time, deq_last_time;
4353d30ea906Sjfb8856606 	struct rte_bbdev_stats stats;
4354d30ea906Sjfb8856606 
4355d30ea906Sjfb8856606 	for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
4356d30ea906Sjfb8856606 		uint16_t enq = 0, deq = 0;
4357d30ea906Sjfb8856606 
4358d30ea906Sjfb8856606 		if (unlikely(num_to_process - dequeued < burst_sz))
4359d30ea906Sjfb8856606 			burst_sz = num_to_process - dequeued;
4360d30ea906Sjfb8856606 
43614418919fSjohnjiang 		rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
4362d30ea906Sjfb8856606 		if (test_vector.op_type != RTE_BBDEV_OP_NONE)
4363d30ea906Sjfb8856606 			copy_reference_dec_op(ops_enq, burst_sz, dequeued,
4364d30ea906Sjfb8856606 					bufs->inputs,
4365d30ea906Sjfb8856606 					bufs->hard_outputs,
4366d30ea906Sjfb8856606 					bufs->soft_outputs,
4367d30ea906Sjfb8856606 					ref_op);
4368d30ea906Sjfb8856606 
4369d30ea906Sjfb8856606 		/* Start time meas for enqueue function offload latency */
4370d30ea906Sjfb8856606 		enq_start_time = rte_rdtsc_precise();
4371d30ea906Sjfb8856606 		do {
4372d30ea906Sjfb8856606 			enq += rte_bbdev_enqueue_dec_ops(dev_id, queue_id,
4373d30ea906Sjfb8856606 					&ops_enq[enq], burst_sz - enq);
4374d30ea906Sjfb8856606 		} while (unlikely(burst_sz != enq));
4375d30ea906Sjfb8856606 
4376d30ea906Sjfb8856606 		ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
4377d30ea906Sjfb8856606 		TEST_ASSERT_SUCCESS(ret,
4378d30ea906Sjfb8856606 				"Failed to get stats for queue (%u) of device (%u)",
4379d30ea906Sjfb8856606 				queue_id, dev_id);
4380d30ea906Sjfb8856606 
4381d30ea906Sjfb8856606 		enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
43824418919fSjohnjiang 				stats.acc_offload_cycles;
4383d30ea906Sjfb8856606 		time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
4384d30ea906Sjfb8856606 				enq_sw_last_time);
4385d30ea906Sjfb8856606 		time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
4386d30ea906Sjfb8856606 				enq_sw_last_time);
43874418919fSjohnjiang 		time_st->enq_sw_total_time += enq_sw_last_time;
4388d30ea906Sjfb8856606 
43894418919fSjohnjiang 		time_st->enq_acc_max_time = RTE_MAX(time_st->enq_acc_max_time,
43904418919fSjohnjiang 				stats.acc_offload_cycles);
43914418919fSjohnjiang 		time_st->enq_acc_min_time = RTE_MIN(time_st->enq_acc_min_time,
43924418919fSjohnjiang 				stats.acc_offload_cycles);
43934418919fSjohnjiang 		time_st->enq_acc_total_time += stats.acc_offload_cycles;
4394d30ea906Sjfb8856606 
43954418919fSjohnjiang 		/* give time for device to process ops */
4396*2d9fd380Sjfb8856606 		rte_delay_us(WAIT_OFFLOAD_US);
4397d30ea906Sjfb8856606 
4398d30ea906Sjfb8856606 		/* Start time meas for dequeue function offload latency */
4399d30ea906Sjfb8856606 		deq_start_time = rte_rdtsc_precise();
4400d30ea906Sjfb8856606 		/* Dequeue one operation */
4401d30ea906Sjfb8856606 		do {
4402d30ea906Sjfb8856606 			deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
4403*2d9fd380Sjfb8856606 					&ops_deq[deq], enq);
4404*2d9fd380Sjfb8856606 		} while (unlikely(deq == 0));
4405d30ea906Sjfb8856606 
4406d30ea906Sjfb8856606 		deq_last_time = rte_rdtsc_precise() - deq_start_time;
4407d30ea906Sjfb8856606 		time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
4408d30ea906Sjfb8856606 				deq_last_time);
4409d30ea906Sjfb8856606 		time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
4410d30ea906Sjfb8856606 				deq_last_time);
44114418919fSjohnjiang 		time_st->deq_total_time += deq_last_time;
44124418919fSjohnjiang 
44134418919fSjohnjiang 		/* Dequeue remaining operations if needed*/
44144418919fSjohnjiang 		while (burst_sz != deq)
44154418919fSjohnjiang 			deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
44164418919fSjohnjiang 					&ops_deq[deq], burst_sz - deq);
44174418919fSjohnjiang 
44184418919fSjohnjiang 		rte_bbdev_dec_op_free_bulk(ops_enq, deq);
44194418919fSjohnjiang 		dequeued += deq;
44204418919fSjohnjiang 	}
44214418919fSjohnjiang 
44224418919fSjohnjiang 	return i;
44234418919fSjohnjiang }
44244418919fSjohnjiang 
/* Measure offload cost of LDPC decode operations.
 *
 * For each burst, three timings are accumulated into *time_st:
 *  - SW enqueue overhead: wall-clock time of the enqueue calls minus
 *    the accelerator cycles the driver reports in
 *    stats.acc_offload_cycles,
 *  - accelerator time: stats.acc_offload_cycles itself,
 *  - dequeue time: the cost of the first successful dequeue call only.
 * Returns the number of bursts processed.
 */
static int
offload_latency_test_ldpc_dec(struct rte_mempool *mempool,
		struct test_buffers *bufs,
		struct rte_bbdev_dec_op *ref_op, uint16_t dev_id,
		uint16_t queue_id, const uint16_t num_to_process,
		uint16_t burst_sz, struct test_time_stats *time_st)
{
	int i, dequeued, ret;
	struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
	uint64_t enq_start_time, deq_start_time;
	uint64_t enq_sw_last_time, deq_last_time;
	struct rte_bbdev_stats stats;
	/* True when HARQ data sits in external DDR and must be preloaded
	 * before, and read back after, each burst.
	 */
	bool extDdr = ldpc_cap_flags &
			RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE;

	for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
		uint16_t enq = 0, deq = 0;

		/* Trim the final burst to the number of ops left */
		if (unlikely(num_to_process - dequeued < burst_sz))
			burst_sz = num_to_process - dequeued;

		rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
		if (test_vector.op_type != RTE_BBDEV_OP_NONE)
			copy_reference_ldpc_dec_op(ops_enq, burst_sz, dequeued,
					bufs->inputs,
					bufs->hard_outputs,
					bufs->soft_outputs,
					bufs->harq_inputs,
					bufs->harq_outputs,
					ref_op);

		if (extDdr)
			preload_harq_ddr(dev_id, queue_id, ops_enq,
					burst_sz, true);

		/* Start time meas for enqueue function offload latency */
		enq_start_time = rte_rdtsc_precise();
		do {
			enq += rte_bbdev_enqueue_ldpc_dec_ops(dev_id, queue_id,
					&ops_enq[enq], burst_sz - enq);
		} while (unlikely(burst_sz != enq));

		/* Wall time is captured before the stats query so the query
		 * cost is not counted as enqueue overhead.
		 */
		enq_sw_last_time = rte_rdtsc_precise() - enq_start_time;
		ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
		TEST_ASSERT_SUCCESS(ret,
				"Failed to get stats for queue (%u) of device (%u)",
				queue_id, dev_id);

		/* SW overhead = total enqueue time minus accelerator time */
		enq_sw_last_time -= stats.acc_offload_cycles;
		time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
				enq_sw_last_time);
		time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
				enq_sw_last_time);
		time_st->enq_sw_total_time += enq_sw_last_time;

		time_st->enq_acc_max_time = RTE_MAX(time_st->enq_acc_max_time,
				stats.acc_offload_cycles);
		time_st->enq_acc_min_time = RTE_MIN(time_st->enq_acc_min_time,
				stats.acc_offload_cycles);
		time_st->enq_acc_total_time += stats.acc_offload_cycles;

		/* give time for device to process ops */
		rte_delay_us(WAIT_OFFLOAD_US);

		/* Start time meas for dequeue function offload latency */
		deq_start_time = rte_rdtsc_precise();
		/* Dequeue one operation */
		do {
			deq += rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
					&ops_deq[deq], enq);
		} while (unlikely(deq == 0));

		deq_last_time = rte_rdtsc_precise() - deq_start_time;
		time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
				deq_last_time);
		time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
				deq_last_time);
		time_st->deq_total_time += deq_last_time;

		/* Dequeue remaining operations if needed*/
		while (burst_sz != deq)
			deq += rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
					&ops_deq[deq], burst_sz - deq);

		if (extDdr) {
			/* Read loopback is not thread safe */
			retrieve_harq_ddr(dev_id, queue_id, ops_enq, burst_sz);
		}

		rte_bbdev_dec_op_free_bulk(ops_enq, deq);
		dequeued += deq;
	}

	return i;
}
4520d30ea906Sjfb8856606 
/* Measure offload cost of Turbo encode operations.
 *
 * For each burst, three timings are accumulated into *time_st:
 *  - SW enqueue overhead: wall-clock time of the enqueue calls minus
 *    the accelerator cycles the driver reports in
 *    stats.acc_offload_cycles,
 *  - accelerator time: stats.acc_offload_cycles itself,
 *  - dequeue time: the cost of the first successful dequeue call only.
 * Returns the number of bursts processed.
 */
static int
offload_latency_test_enc(struct rte_mempool *mempool, struct test_buffers *bufs,
		struct rte_bbdev_enc_op *ref_op, uint16_t dev_id,
		uint16_t queue_id, const uint16_t num_to_process,
		uint16_t burst_sz, struct test_time_stats *time_st)
{
	int i, dequeued, ret;
	struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
	uint64_t enq_start_time, deq_start_time;
	uint64_t enq_sw_last_time, deq_last_time;
	struct rte_bbdev_stats stats;

	for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
		uint16_t enq = 0, deq = 0;

		/* Trim the final burst to the number of ops left */
		if (unlikely(num_to_process - dequeued < burst_sz))
			burst_sz = num_to_process - dequeued;

		ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
		TEST_ASSERT_SUCCESS(ret,
				"rte_bbdev_enc_op_alloc_bulk() failed");
		if (test_vector.op_type != RTE_BBDEV_OP_NONE)
			copy_reference_enc_op(ops_enq, burst_sz, dequeued,
					bufs->inputs,
					bufs->hard_outputs,
					ref_op);

		/* Start time meas for enqueue function offload latency */
		enq_start_time = rte_rdtsc_precise();
		do {
			enq += rte_bbdev_enqueue_enc_ops(dev_id, queue_id,
					&ops_enq[enq], burst_sz - enq);
		} while (unlikely(burst_sz != enq));

		/* Wall time is captured before the stats query so the query
		 * cost is not counted as enqueue overhead.
		 */
		enq_sw_last_time = rte_rdtsc_precise() - enq_start_time;

		ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
		TEST_ASSERT_SUCCESS(ret,
				"Failed to get stats for queue (%u) of device (%u)",
				queue_id, dev_id);
		/* SW overhead = total enqueue time minus accelerator time */
		enq_sw_last_time -= stats.acc_offload_cycles;
		time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
				enq_sw_last_time);
		time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
				enq_sw_last_time);
		time_st->enq_sw_total_time += enq_sw_last_time;

		time_st->enq_acc_max_time = RTE_MAX(time_st->enq_acc_max_time,
				stats.acc_offload_cycles);
		time_st->enq_acc_min_time = RTE_MIN(time_st->enq_acc_min_time,
				stats.acc_offload_cycles);
		time_st->enq_acc_total_time += stats.acc_offload_cycles;

		/* give time for device to process ops */
		rte_delay_us(WAIT_OFFLOAD_US);

		/* Start time meas for dequeue function offload latency */
		deq_start_time = rte_rdtsc_precise();
		/* Dequeue one operation */
		do {
			deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
					&ops_deq[deq], enq);
		} while (unlikely(deq == 0));

		deq_last_time = rte_rdtsc_precise() - deq_start_time;
		time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
				deq_last_time);
		time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
				deq_last_time);
		time_st->deq_total_time += deq_last_time;

		/* Dequeue any remaining operations outside the timed window */
		while (burst_sz != deq)
			deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
					&ops_deq[deq], burst_sz - deq);

		rte_bbdev_enc_op_free_bulk(ops_enq, deq);
		dequeued += deq;
	}

	return i;
}
46024418919fSjohnjiang 
/*
 * Measure enqueue/dequeue offload cost for LDPC encode operations.
 *
 * Repeatedly allocates a burst of encode ops from @mempool, enqueues them on
 * (@dev_id, @queue_id) and times both phases with rte_rdtsc_precise().  The
 * software enqueue cost is the measured enqueue time minus the accelerator
 * cycles reported by the driver (stats.acc_offload_cycles), so driver-SW and
 * accelerator costs are accumulated separately in @time_st.
 *
 * Note: the caller is expected to have pre-set the min_time fields of
 * @time_st to UINT64_MAX (see offload_cost_test()), since only RTE_MIN is
 * applied here.
 *
 * Returns the number of burst iterations executed (used by the caller to
 * compute averages).  TEST_ASSERT_* macros return early on failure.
 */
static int
offload_latency_test_ldpc_enc(struct rte_mempool *mempool,
		struct test_buffers *bufs,
		struct rte_bbdev_enc_op *ref_op, uint16_t dev_id,
		uint16_t queue_id, const uint16_t num_to_process,
		uint16_t burst_sz, struct test_time_stats *time_st)
{
	int i, dequeued, ret;
	struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
	uint64_t enq_start_time, deq_start_time;
	uint64_t enq_sw_last_time, deq_last_time;
	struct rte_bbdev_stats stats;

	for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
		uint16_t enq = 0, deq = 0;

		/* Shrink the last burst so exactly num_to_process ops run */
		if (unlikely(num_to_process - dequeued < burst_sz))
			burst_sz = num_to_process - dequeued;

		ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
		TEST_ASSERT_SUCCESS(ret,
				"rte_bbdev_enc_op_alloc_bulk() failed");
		if (test_vector.op_type != RTE_BBDEV_OP_NONE)
			copy_reference_ldpc_enc_op(ops_enq, burst_sz, dequeued,
					bufs->inputs,
					bufs->hard_outputs,
					ref_op);

		/* Start time meas for enqueue function offload latency */
		enq_start_time = rte_rdtsc_precise();
		do {
			enq += rte_bbdev_enqueue_ldpc_enc_ops(dev_id, queue_id,
					&ops_enq[enq], burst_sz - enq);
		} while (unlikely(burst_sz != enq));

		enq_sw_last_time = rte_rdtsc_precise() - enq_start_time;
		ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
		TEST_ASSERT_SUCCESS(ret,
				"Failed to get stats for queue (%u) of device (%u)",
				queue_id, dev_id);

		/* Exclude accelerator processing cycles from the SW cost */
		enq_sw_last_time -= stats.acc_offload_cycles;
		time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
				enq_sw_last_time);
		time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
				enq_sw_last_time);
		time_st->enq_sw_total_time += enq_sw_last_time;

		time_st->enq_acc_max_time = RTE_MAX(time_st->enq_acc_max_time,
				stats.acc_offload_cycles);
		time_st->enq_acc_min_time = RTE_MIN(time_st->enq_acc_min_time,
				stats.acc_offload_cycles);
		time_st->enq_acc_total_time += stats.acc_offload_cycles;

		/* give time for device to process ops */
		rte_delay_us(WAIT_OFFLOAD_US);

		/* Start time meas for dequeue function offload latency */
		deq_start_time = rte_rdtsc_precise();
		/* Dequeue one operation */
		do {
			deq += rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
					&ops_deq[deq], enq);
		} while (unlikely(deq == 0));

		deq_last_time = rte_rdtsc_precise() - deq_start_time;
		time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
				deq_last_time);
		time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
				deq_last_time);
		time_st->deq_total_time += deq_last_time;

		/* Drain the remainder of the burst (untimed) */
		while (burst_sz != deq)
			deq += rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id,
					&ops_deq[deq], burst_sz - deq);

		rte_bbdev_enc_op_free_bulk(ops_enq, deq);
		dequeued += deq;
	}

	return i;
}
4685d30ea906Sjfb8856606 #endif
4686d30ea906Sjfb8856606 
4687d30ea906Sjfb8856606 static int
offload_cost_test(struct active_device * ad,struct test_op_params * op_params)4688d30ea906Sjfb8856606 offload_cost_test(struct active_device *ad,
4689d30ea906Sjfb8856606 		struct test_op_params *op_params)
4690d30ea906Sjfb8856606 {
4691d30ea906Sjfb8856606 #ifndef RTE_BBDEV_OFFLOAD_COST
4692d30ea906Sjfb8856606 	RTE_SET_USED(ad);
4693d30ea906Sjfb8856606 	RTE_SET_USED(op_params);
4694d30ea906Sjfb8856606 	printf("Offload latency test is disabled.\n");
4695d30ea906Sjfb8856606 	printf("Set RTE_BBDEV_OFFLOAD_COST to 'y' to turn the test on.\n");
4696d30ea906Sjfb8856606 	return TEST_SKIPPED;
4697d30ea906Sjfb8856606 #else
4698d30ea906Sjfb8856606 	int iter;
4699d30ea906Sjfb8856606 	uint16_t burst_sz = op_params->burst_sz;
4700d30ea906Sjfb8856606 	const uint16_t num_to_process = op_params->num_to_process;
4701d30ea906Sjfb8856606 	const enum rte_bbdev_op_type op_type = test_vector.op_type;
4702d30ea906Sjfb8856606 	const uint16_t queue_id = ad->queue_ids[0];
4703d30ea906Sjfb8856606 	struct test_buffers *bufs = NULL;
4704d30ea906Sjfb8856606 	struct rte_bbdev_info info;
4705d30ea906Sjfb8856606 	const char *op_type_str;
4706d30ea906Sjfb8856606 	struct test_time_stats time_st;
4707d30ea906Sjfb8856606 
4708d30ea906Sjfb8856606 	memset(&time_st, 0, sizeof(struct test_time_stats));
4709d30ea906Sjfb8856606 	time_st.enq_sw_min_time = UINT64_MAX;
47104418919fSjohnjiang 	time_st.enq_acc_min_time = UINT64_MAX;
4711d30ea906Sjfb8856606 	time_st.deq_min_time = UINT64_MAX;
4712d30ea906Sjfb8856606 
4713d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
4714d30ea906Sjfb8856606 			"BURST_SIZE should be <= %u", MAX_BURST);
4715d30ea906Sjfb8856606 
4716d30ea906Sjfb8856606 	rte_bbdev_info_get(ad->dev_id, &info);
4717d30ea906Sjfb8856606 	bufs = &op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
4718d30ea906Sjfb8856606 
4719d30ea906Sjfb8856606 	op_type_str = rte_bbdev_op_type_str(op_type);
4720d30ea906Sjfb8856606 	TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
4721d30ea906Sjfb8856606 
47224418919fSjohnjiang 	printf("+ ------------------------------------------------------- +\n");
47234418919fSjohnjiang 	printf("== test: offload latency test\ndev: %s, burst size: %u, num ops: %u, op type: %s\n",
4724d30ea906Sjfb8856606 			info.dev_name, burst_sz, num_to_process, op_type_str);
4725d30ea906Sjfb8856606 
4726d30ea906Sjfb8856606 	if (op_type == RTE_BBDEV_OP_TURBO_DEC)
4727d30ea906Sjfb8856606 		iter = offload_latency_test_dec(op_params->mp, bufs,
4728d30ea906Sjfb8856606 				op_params->ref_dec_op, ad->dev_id, queue_id,
4729d30ea906Sjfb8856606 				num_to_process, burst_sz, &time_st);
47304418919fSjohnjiang 	else if (op_type == RTE_BBDEV_OP_TURBO_ENC)
47314418919fSjohnjiang 		iter = offload_latency_test_enc(op_params->mp, bufs,
47324418919fSjohnjiang 				op_params->ref_enc_op, ad->dev_id, queue_id,
47334418919fSjohnjiang 				num_to_process, burst_sz, &time_st);
47344418919fSjohnjiang 	else if (op_type == RTE_BBDEV_OP_LDPC_ENC)
47354418919fSjohnjiang 		iter = offload_latency_test_ldpc_enc(op_params->mp, bufs,
47364418919fSjohnjiang 				op_params->ref_enc_op, ad->dev_id, queue_id,
47374418919fSjohnjiang 				num_to_process, burst_sz, &time_st);
47384418919fSjohnjiang 	else if (op_type == RTE_BBDEV_OP_LDPC_DEC)
47394418919fSjohnjiang 		iter = offload_latency_test_ldpc_dec(op_params->mp, bufs,
47404418919fSjohnjiang 			op_params->ref_dec_op, ad->dev_id, queue_id,
47414418919fSjohnjiang 			num_to_process, burst_sz, &time_st);
4742d30ea906Sjfb8856606 	else
4743d30ea906Sjfb8856606 		iter = offload_latency_test_enc(op_params->mp, bufs,
4744d30ea906Sjfb8856606 				op_params->ref_enc_op, ad->dev_id, queue_id,
4745d30ea906Sjfb8856606 				num_to_process, burst_sz, &time_st);
4746d30ea906Sjfb8856606 
4747d30ea906Sjfb8856606 	if (iter <= 0)
4748d30ea906Sjfb8856606 		return TEST_FAILED;
4749d30ea906Sjfb8856606 
47504418919fSjohnjiang 	printf("Enqueue driver offload cost latency:\n"
47514418919fSjohnjiang 			"\tavg: %lg cycles, %lg us\n"
47524418919fSjohnjiang 			"\tmin: %lg cycles, %lg us\n"
47534418919fSjohnjiang 			"\tmax: %lg cycles, %lg us\n"
47544418919fSjohnjiang 			"Enqueue accelerator offload cost latency:\n"
47554418919fSjohnjiang 			"\tavg: %lg cycles, %lg us\n"
47564418919fSjohnjiang 			"\tmin: %lg cycles, %lg us\n"
47574418919fSjohnjiang 			"\tmax: %lg cycles, %lg us\n",
47584418919fSjohnjiang 			(double)time_st.enq_sw_total_time / (double)iter,
47594418919fSjohnjiang 			(double)(time_st.enq_sw_total_time * 1000000) /
4760d30ea906Sjfb8856606 			(double)iter / (double)rte_get_tsc_hz(),
4761d30ea906Sjfb8856606 			(double)time_st.enq_sw_min_time,
4762d30ea906Sjfb8856606 			(double)(time_st.enq_sw_min_time * 1000000) /
4763d30ea906Sjfb8856606 			rte_get_tsc_hz(), (double)time_st.enq_sw_max_time,
4764d30ea906Sjfb8856606 			(double)(time_st.enq_sw_max_time * 1000000) /
47654418919fSjohnjiang 			rte_get_tsc_hz(), (double)time_st.enq_acc_total_time /
4766d30ea906Sjfb8856606 			(double)iter,
47674418919fSjohnjiang 			(double)(time_st.enq_acc_total_time * 1000000) /
4768d30ea906Sjfb8856606 			(double)iter / (double)rte_get_tsc_hz(),
47694418919fSjohnjiang 			(double)time_st.enq_acc_min_time,
47704418919fSjohnjiang 			(double)(time_st.enq_acc_min_time * 1000000) /
47714418919fSjohnjiang 			rte_get_tsc_hz(), (double)time_st.enq_acc_max_time,
47724418919fSjohnjiang 			(double)(time_st.enq_acc_max_time * 1000000) /
4773d30ea906Sjfb8856606 			rte_get_tsc_hz());
4774d30ea906Sjfb8856606 
47754418919fSjohnjiang 	printf("Dequeue offload cost latency - one op:\n"
47764418919fSjohnjiang 			"\tavg: %lg cycles, %lg us\n"
47774418919fSjohnjiang 			"\tmin: %lg cycles, %lg us\n"
47784418919fSjohnjiang 			"\tmax: %lg cycles, %lg us\n",
47794418919fSjohnjiang 			(double)time_st.deq_total_time / (double)iter,
47804418919fSjohnjiang 			(double)(time_st.deq_total_time * 1000000) /
4781d30ea906Sjfb8856606 			(double)iter / (double)rte_get_tsc_hz(),
4782d30ea906Sjfb8856606 			(double)time_st.deq_min_time,
4783d30ea906Sjfb8856606 			(double)(time_st.deq_min_time * 1000000) /
4784d30ea906Sjfb8856606 			rte_get_tsc_hz(), (double)time_st.deq_max_time,
4785d30ea906Sjfb8856606 			(double)(time_st.deq_max_time * 1000000) /
4786d30ea906Sjfb8856606 			rte_get_tsc_hz());
4787d30ea906Sjfb8856606 
4788*2d9fd380Sjfb8856606 	struct rte_bbdev_stats stats = {0};
4789*2d9fd380Sjfb8856606 	get_bbdev_queue_stats(ad->dev_id, queue_id, &stats);
4790*2d9fd380Sjfb8856606 	if (op_type != RTE_BBDEV_OP_LDPC_DEC) {
4791*2d9fd380Sjfb8856606 		TEST_ASSERT_SUCCESS(stats.enqueued_count != num_to_process,
4792*2d9fd380Sjfb8856606 				"Mismatch in enqueue count %10"PRIu64" %d",
4793*2d9fd380Sjfb8856606 				stats.enqueued_count, num_to_process);
4794*2d9fd380Sjfb8856606 		TEST_ASSERT_SUCCESS(stats.dequeued_count != num_to_process,
4795*2d9fd380Sjfb8856606 				"Mismatch in dequeue count %10"PRIu64" %d",
4796*2d9fd380Sjfb8856606 				stats.dequeued_count, num_to_process);
4797*2d9fd380Sjfb8856606 	}
4798*2d9fd380Sjfb8856606 	TEST_ASSERT_SUCCESS(stats.enqueue_err_count != 0,
4799*2d9fd380Sjfb8856606 			"Enqueue count Error %10"PRIu64"",
4800*2d9fd380Sjfb8856606 			stats.enqueue_err_count);
4801*2d9fd380Sjfb8856606 	TEST_ASSERT_SUCCESS(stats.dequeue_err_count != 0,
4802*2d9fd380Sjfb8856606 			"Dequeue count Error (%10"PRIu64"",
4803*2d9fd380Sjfb8856606 			stats.dequeue_err_count);
4804*2d9fd380Sjfb8856606 
4805d30ea906Sjfb8856606 	return TEST_SUCCESS;
4806d30ea906Sjfb8856606 #endif
4807d30ea906Sjfb8856606 }
4808d30ea906Sjfb8856606 
4809d30ea906Sjfb8856606 #ifdef RTE_BBDEV_OFFLOAD_COST
4810d30ea906Sjfb8856606 static int
offload_latency_empty_q_test_dec(uint16_t dev_id,uint16_t queue_id,const uint16_t num_to_process,uint16_t burst_sz,uint64_t * deq_total_time,uint64_t * deq_min_time,uint64_t * deq_max_time,const enum rte_bbdev_op_type op_type)4811d30ea906Sjfb8856606 offload_latency_empty_q_test_dec(uint16_t dev_id, uint16_t queue_id,
4812d30ea906Sjfb8856606 		const uint16_t num_to_process, uint16_t burst_sz,
48134418919fSjohnjiang 		uint64_t *deq_total_time, uint64_t *deq_min_time,
4814*2d9fd380Sjfb8856606 		uint64_t *deq_max_time, const enum rte_bbdev_op_type op_type)
4815d30ea906Sjfb8856606 {
4816d30ea906Sjfb8856606 	int i, deq_total;
4817d30ea906Sjfb8856606 	struct rte_bbdev_dec_op *ops[MAX_BURST];
4818d30ea906Sjfb8856606 	uint64_t deq_start_time, deq_last_time;
4819d30ea906Sjfb8856606 
4820d30ea906Sjfb8856606 	/* Test deq offload latency from an empty queue */
4821d30ea906Sjfb8856606 
4822d30ea906Sjfb8856606 	for (i = 0, deq_total = 0; deq_total < num_to_process;
4823d30ea906Sjfb8856606 			++i, deq_total += burst_sz) {
4824d30ea906Sjfb8856606 		deq_start_time = rte_rdtsc_precise();
4825d30ea906Sjfb8856606 
4826d30ea906Sjfb8856606 		if (unlikely(num_to_process - deq_total < burst_sz))
4827d30ea906Sjfb8856606 			burst_sz = num_to_process - deq_total;
4828*2d9fd380Sjfb8856606 		if (op_type == RTE_BBDEV_OP_LDPC_DEC)
4829*2d9fd380Sjfb8856606 			rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id, ops,
4830*2d9fd380Sjfb8856606 					burst_sz);
4831*2d9fd380Sjfb8856606 		else
4832*2d9fd380Sjfb8856606 			rte_bbdev_dequeue_dec_ops(dev_id, queue_id, ops,
4833*2d9fd380Sjfb8856606 					burst_sz);
4834d30ea906Sjfb8856606 
4835d30ea906Sjfb8856606 		deq_last_time = rte_rdtsc_precise() - deq_start_time;
4836d30ea906Sjfb8856606 		*deq_max_time = RTE_MAX(*deq_max_time, deq_last_time);
4837d30ea906Sjfb8856606 		*deq_min_time = RTE_MIN(*deq_min_time, deq_last_time);
48384418919fSjohnjiang 		*deq_total_time += deq_last_time;
4839d30ea906Sjfb8856606 	}
4840d30ea906Sjfb8856606 
4841d30ea906Sjfb8856606 	return i;
4842d30ea906Sjfb8856606 }
4843d30ea906Sjfb8856606 
4844d30ea906Sjfb8856606 static int
offload_latency_empty_q_test_enc(uint16_t dev_id,uint16_t queue_id,const uint16_t num_to_process,uint16_t burst_sz,uint64_t * deq_total_time,uint64_t * deq_min_time,uint64_t * deq_max_time,const enum rte_bbdev_op_type op_type)4845d30ea906Sjfb8856606 offload_latency_empty_q_test_enc(uint16_t dev_id, uint16_t queue_id,
4846d30ea906Sjfb8856606 		const uint16_t num_to_process, uint16_t burst_sz,
48474418919fSjohnjiang 		uint64_t *deq_total_time, uint64_t *deq_min_time,
4848*2d9fd380Sjfb8856606 		uint64_t *deq_max_time, const enum rte_bbdev_op_type op_type)
4849d30ea906Sjfb8856606 {
4850d30ea906Sjfb8856606 	int i, deq_total;
4851d30ea906Sjfb8856606 	struct rte_bbdev_enc_op *ops[MAX_BURST];
4852d30ea906Sjfb8856606 	uint64_t deq_start_time, deq_last_time;
4853d30ea906Sjfb8856606 
4854d30ea906Sjfb8856606 	/* Test deq offload latency from an empty queue */
4855d30ea906Sjfb8856606 	for (i = 0, deq_total = 0; deq_total < num_to_process;
4856d30ea906Sjfb8856606 			++i, deq_total += burst_sz) {
4857d30ea906Sjfb8856606 		deq_start_time = rte_rdtsc_precise();
4858d30ea906Sjfb8856606 
4859d30ea906Sjfb8856606 		if (unlikely(num_to_process - deq_total < burst_sz))
4860d30ea906Sjfb8856606 			burst_sz = num_to_process - deq_total;
4861*2d9fd380Sjfb8856606 		if (op_type == RTE_BBDEV_OP_LDPC_ENC)
4862*2d9fd380Sjfb8856606 			rte_bbdev_dequeue_ldpc_enc_ops(dev_id, queue_id, ops,
4863*2d9fd380Sjfb8856606 					burst_sz);
4864*2d9fd380Sjfb8856606 		else
4865*2d9fd380Sjfb8856606 			rte_bbdev_dequeue_enc_ops(dev_id, queue_id, ops,
4866*2d9fd380Sjfb8856606 					burst_sz);
4867d30ea906Sjfb8856606 
4868d30ea906Sjfb8856606 		deq_last_time = rte_rdtsc_precise() - deq_start_time;
4869d30ea906Sjfb8856606 		*deq_max_time = RTE_MAX(*deq_max_time, deq_last_time);
4870d30ea906Sjfb8856606 		*deq_min_time = RTE_MIN(*deq_min_time, deq_last_time);
48714418919fSjohnjiang 		*deq_total_time += deq_last_time;
4872d30ea906Sjfb8856606 	}
4873d30ea906Sjfb8856606 
4874d30ea906Sjfb8856606 	return i;
4875d30ea906Sjfb8856606 }
4876*2d9fd380Sjfb8856606 
4877d30ea906Sjfb8856606 #endif
4878d30ea906Sjfb8856606 
4879d30ea906Sjfb8856606 static int
offload_latency_empty_q_test(struct active_device * ad,struct test_op_params * op_params)4880d30ea906Sjfb8856606 offload_latency_empty_q_test(struct active_device *ad,
4881d30ea906Sjfb8856606 		struct test_op_params *op_params)
4882d30ea906Sjfb8856606 {
4883d30ea906Sjfb8856606 #ifndef RTE_BBDEV_OFFLOAD_COST
4884d30ea906Sjfb8856606 	RTE_SET_USED(ad);
4885d30ea906Sjfb8856606 	RTE_SET_USED(op_params);
4886d30ea906Sjfb8856606 	printf("Offload latency empty dequeue test is disabled.\n");
4887d30ea906Sjfb8856606 	printf("Set RTE_BBDEV_OFFLOAD_COST to 'y' to turn the test on.\n");
4888d30ea906Sjfb8856606 	return TEST_SKIPPED;
4889d30ea906Sjfb8856606 #else
4890d30ea906Sjfb8856606 	int iter;
48914418919fSjohnjiang 	uint64_t deq_total_time, deq_min_time, deq_max_time;
4892d30ea906Sjfb8856606 	uint16_t burst_sz = op_params->burst_sz;
4893d30ea906Sjfb8856606 	const uint16_t num_to_process = op_params->num_to_process;
4894d30ea906Sjfb8856606 	const enum rte_bbdev_op_type op_type = test_vector.op_type;
4895d30ea906Sjfb8856606 	const uint16_t queue_id = ad->queue_ids[0];
4896d30ea906Sjfb8856606 	struct rte_bbdev_info info;
4897d30ea906Sjfb8856606 	const char *op_type_str;
4898d30ea906Sjfb8856606 
48994418919fSjohnjiang 	deq_total_time = deq_max_time = 0;
4900d30ea906Sjfb8856606 	deq_min_time = UINT64_MAX;
4901d30ea906Sjfb8856606 
4902d30ea906Sjfb8856606 	TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
4903d30ea906Sjfb8856606 			"BURST_SIZE should be <= %u", MAX_BURST);
4904d30ea906Sjfb8856606 
4905d30ea906Sjfb8856606 	rte_bbdev_info_get(ad->dev_id, &info);
4906d30ea906Sjfb8856606 
4907d30ea906Sjfb8856606 	op_type_str = rte_bbdev_op_type_str(op_type);
4908d30ea906Sjfb8856606 	TEST_ASSERT_NOT_NULL(op_type_str, "Invalid op type: %u", op_type);
4909d30ea906Sjfb8856606 
49104418919fSjohnjiang 	printf("+ ------------------------------------------------------- +\n");
49114418919fSjohnjiang 	printf("== test: offload latency empty dequeue\ndev: %s, burst size: %u, num ops: %u, op type: %s\n",
4912d30ea906Sjfb8856606 			info.dev_name, burst_sz, num_to_process, op_type_str);
4913d30ea906Sjfb8856606 
4914*2d9fd380Sjfb8856606 	if (op_type == RTE_BBDEV_OP_TURBO_DEC ||
4915*2d9fd380Sjfb8856606 			op_type == RTE_BBDEV_OP_LDPC_DEC)
4916d30ea906Sjfb8856606 		iter = offload_latency_empty_q_test_dec(ad->dev_id, queue_id,
49174418919fSjohnjiang 				num_to_process, burst_sz, &deq_total_time,
4918*2d9fd380Sjfb8856606 				&deq_min_time, &deq_max_time, op_type);
4919d30ea906Sjfb8856606 	else
4920d30ea906Sjfb8856606 		iter = offload_latency_empty_q_test_enc(ad->dev_id, queue_id,
49214418919fSjohnjiang 				num_to_process, burst_sz, &deq_total_time,
4922*2d9fd380Sjfb8856606 				&deq_min_time, &deq_max_time, op_type);
4923d30ea906Sjfb8856606 
4924d30ea906Sjfb8856606 	if (iter <= 0)
4925d30ea906Sjfb8856606 		return TEST_FAILED;
4926d30ea906Sjfb8856606 
49274418919fSjohnjiang 	printf("Empty dequeue offload:\n"
49284418919fSjohnjiang 			"\tavg: %lg cycles, %lg us\n"
49294418919fSjohnjiang 			"\tmin: %lg cycles, %lg us\n"
49304418919fSjohnjiang 			"\tmax: %lg cycles, %lg us\n",
49314418919fSjohnjiang 			(double)deq_total_time / (double)iter,
49324418919fSjohnjiang 			(double)(deq_total_time * 1000000) / (double)iter /
4933d30ea906Sjfb8856606 			(double)rte_get_tsc_hz(), (double)deq_min_time,
4934d30ea906Sjfb8856606 			(double)(deq_min_time * 1000000) / rte_get_tsc_hz(),
4935d30ea906Sjfb8856606 			(double)deq_max_time, (double)(deq_max_time * 1000000) /
4936d30ea906Sjfb8856606 			rte_get_tsc_hz());
4937d30ea906Sjfb8856606 
4938d30ea906Sjfb8856606 	return TEST_SUCCESS;
4939d30ea906Sjfb8856606 #endif
4940d30ea906Sjfb8856606 }
4941d30ea906Sjfb8856606 
/* Test-case entry point: run the BLER test body via the common runner. */
static int
bler_tc(void)
{
	return run_test_case(bler_test);
}
4947*2d9fd380Sjfb8856606 
/* Test-case entry point: run the throughput test body via the common runner. */
static int
throughput_tc(void)
{
	return run_test_case(throughput_test);
}
4953d30ea906Sjfb8856606 
/* Test-case entry point: run the offload-cost test body via the common runner. */
static int
offload_cost_tc(void)
{
	return run_test_case(offload_cost_test);
}
4959d30ea906Sjfb8856606 
/* Test-case entry point: run the empty-queue dequeue latency test body. */
static int
offload_latency_empty_q_tc(void)
{
	return run_test_case(offload_latency_empty_q_test);
}
4965d30ea906Sjfb8856606 
/* Test-case entry point: run the latency test body via the common runner. */
static int
latency_tc(void)
{
	return run_test_case(latency_test);
}
4971d30ea906Sjfb8856606 
/* Test-case entry point: run the validation test body via the common runner. */
static int
validation_tc(void)
{
	return run_test_case(validation_test);
}
4977*2d9fd380Sjfb8856606 
/*
 * Test-case entry point for the interrupt suite.  Note it deliberately reuses
 * throughput_test as the body; interrupt mode is presumably enabled by the
 * suite's interrupt_testsuite_setup — confirm against that setup routine.
 */
static int
interrupt_tc(void)
{
	return run_test_case(throughput_test);
}
4983d30ea906Sjfb8856606 
/* Suite registered as the "bler" test command (see REGISTER_TEST_COMMAND). */
static struct unit_test_suite bbdev_bler_testsuite = {
	.suite_name = "BBdev BLER Tests",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(ut_setup, ut_teardown, bler_tc),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
4993*2d9fd380Sjfb8856606 
/* Suite registered as the "throughput" test command. */
static struct unit_test_suite bbdev_throughput_testsuite = {
	.suite_name = "BBdev Throughput Tests",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(ut_setup, ut_teardown, throughput_tc),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
5003d30ea906Sjfb8856606 
/* Suite registered as the "validation" test command. */
static struct unit_test_suite bbdev_validation_testsuite = {
	.suite_name = "BBdev Validation Tests",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(ut_setup, ut_teardown, validation_tc),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
5013d30ea906Sjfb8856606 
/* Suite registered as the "latency" test command. */
static struct unit_test_suite bbdev_latency_testsuite = {
	.suite_name = "BBdev Latency Tests",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(ut_setup, ut_teardown, latency_tc),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
5023d30ea906Sjfb8856606 
/* Suite registered as the "offload" test command; both cases require
 * RTE_BBDEV_OFFLOAD_COST at build time, otherwise they report TEST_SKIPPED. */
static struct unit_test_suite bbdev_offload_cost_testsuite = {
	.suite_name = "BBdev Offload Cost Tests",
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(ut_setup, ut_teardown, offload_cost_tc),
		TEST_CASE_ST(ut_setup, ut_teardown, offload_latency_empty_q_tc),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
5034d30ea906Sjfb8856606 
/* Suite registered as the "interrupt" test command; uses a dedicated
 * setup (interrupt_testsuite_setup) unlike the other suites. */
static struct unit_test_suite bbdev_interrupt_testsuite = {
	.suite_name = "BBdev Interrupt Tests",
	.setup = interrupt_testsuite_setup,
	.teardown = testsuite_teardown,
	.unit_test_cases = {
		TEST_CASE_ST(ut_setup, ut_teardown, interrupt_tc),
		TEST_CASES_END() /**< NULL terminate unit test array */
	}
};
5044d30ea906Sjfb8856606 
/* Expose each suite as a selectable command of the bbdev test application. */
REGISTER_TEST_COMMAND(bler, bbdev_bler_testsuite);
REGISTER_TEST_COMMAND(throughput, bbdev_throughput_testsuite);
REGISTER_TEST_COMMAND(validation, bbdev_validation_testsuite);
REGISTER_TEST_COMMAND(latency, bbdev_latency_testsuite);
REGISTER_TEST_COMMAND(offload, bbdev_offload_cost_testsuite);
REGISTER_TEST_COMMAND(interrupt, bbdev_interrupt_testsuite);
5051