xref: /dpdk/examples/bbdev_app/main.c (revision 2a5aa6e7)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Intel Corporation
3  */
4 
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <string.h>
8 #include <stdint.h>
9 #include <inttypes.h>
10 #include <sys/types.h>
11 #include <sys/unistd.h>
12 #include <sys/queue.h>
13 #include <stdarg.h>
14 #include <ctype.h>
15 #include <errno.h>
16 #include <math.h>
17 #include <assert.h>
18 #include <getopt.h>
19 #include <signal.h>
20 
21 #include "rte_atomic.h"
22 #include "rte_common.h"
23 #include "rte_eal.h"
24 #include "rte_cycles.h"
25 #include "rte_ether.h"
26 #include "rte_ethdev.h"
27 #include "rte_ip.h"
28 #include "rte_lcore.h"
29 #include "rte_malloc.h"
30 #include "rte_mbuf.h"
31 #include "rte_memory.h"
32 #include "rte_mempool.h"
33 #include "rte_log.h"
34 #include "rte_bbdev.h"
35 #include "rte_bbdev_op.h"
36 
37 /* LLR values - negative value for '1' bit */
38 #define LLR_1_BIT 0x81
39 #define LLR_0_BIT 0x7F
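/* Interpreted as signed 8-bit values: 0x81 is -127 (a '1' bit) and 0x7F is
 * +127 (a '0' bit).
 */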
40 
41 #define MAX_PKT_BURST 32
42 #define NB_MBUF 8191
43 #define MEMPOOL_CACHE_SIZE 256
44 
45 /* Hardcoded K value */
46 #define K 40
47 #define NCB (3 * RTE_ALIGN_CEIL(K + 4, 32))
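/* For the hardcoded K = 40: RTE_ALIGN_CEIL(44, 32) = 64, so NCB = 3 * 64 = 192. */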
48 
49 #define CRC_24B_LEN 3
50 
51 /* Configurable number of RX/TX ring descriptors */
52 #define RTE_TEST_RX_DESC_DEFAULT 128
53 #define RTE_TEST_TX_DESC_DEFAULT 512
54 
55 #define BBDEV_ASSERT(a) do { \
56 	if (!(a)) { \
57 		usage(prgname); \
58 		return -1; \
59 	} \
60 } while (0)
61 
62 static const struct rte_eth_conf port_conf = {
63 	.rxmode = {
64 		.mq_mode = ETH_MQ_RX_NONE,
65 		.max_rx_pkt_len = ETHER_MAX_LEN,
66 		.split_hdr_size = 0,
67 		.header_split = 0, /**< Header Split disabled */
68 		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
69 		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
70 		.jumbo_frame = 0, /**< Jumbo Frame Support disabled */
71 		.hw_strip_crc = 0, /**< CRC stripped by hardware */
72 	},
73 	.txmode = {
74 		.mq_mode = ETH_MQ_TX_NONE,
75 	},
76 };
77 
78 struct rte_bbdev_op_turbo_enc def_op_enc = {
79 	/* These values are arbitrarily chosen and do not map to the real
80 	 * values of the data received from the ethdev ports
81 	 */
82 	.rv_index = 0,
83 	.code_block_mode = 1,
84 	.cb_params = {
85 		.k = K,
86 	},
87 	.op_flags = RTE_BBDEV_TURBO_CRC_24A_ATTACH
88 };
89 
90 struct rte_bbdev_op_turbo_dec def_op_dec = {
91 	/* These values are arbitrarily chosen and do not map to the real
92 	 * values of the data received from the ethdev ports
93 	 */
94 	.code_block_mode = 1,
95 	.cb_params = {
96 		.k = K,
97 	},
98 	.rv_index = 0,
99 	.iter_max = 8,
100 	.iter_min = 4,
101 	.ext_scale = 15,
102 	.num_maps = 0,
103 	.op_flags = RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN
104 };
105 
106 struct app_config_params {
107 	/* Placeholders for app params */
108 	uint16_t port_id;
109 	uint16_t bbdev_id;
110 	uint64_t enc_core_mask;
111 	uint64_t dec_core_mask;
112 
113 	/* Values filled during init time */
114 	uint16_t enc_queue_ids[RTE_MAX_LCORE];
115 	uint16_t dec_queue_ids[RTE_MAX_LCORE];
116 	uint16_t num_enc_cores;
117 	uint16_t num_dec_cores;
118 };
119 
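/* Per-lcore counters; cache alignment keeps each lcore's statistics on its
 * own cache line and avoids false sharing between polling lcores.
 */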
120 struct lcore_statistics {
121 	unsigned int enqueued;
122 	unsigned int dequeued;
123 	unsigned int rx_lost_packets;
124 	unsigned int enc_to_dec_lost_packets;
125 	unsigned int tx_lost_packets;
126 } __rte_cache_aligned;
127 
128 /** each lcore configuration */
129 struct lcore_conf {
130 	uint64_t core_type;
131 
132 	unsigned int port_id;
133 	unsigned int rx_queue_id;
134 	unsigned int tx_queue_id;
135 
136 	unsigned int bbdev_id;
137 	unsigned int enc_queue_id;
138 	unsigned int dec_queue_id;
139 
140 	uint8_t llr_temp_buf[NCB];
141 
142 	struct rte_mempool *bbdev_dec_op_pool;
143 	struct rte_mempool *bbdev_enc_op_pool;
144 	struct rte_mempool *enc_out_pool;
145 	struct rte_ring *enc_to_dec_ring;
146 
147 	struct lcore_statistics *lcore_stats;
148 } __rte_cache_aligned;
149 
150 struct stats_lcore_params {
151 	struct lcore_conf *lconf;
152 	struct app_config_params *app_params;
153 };
154 
155 
156 static const struct app_config_params def_app_config = {
157 	.port_id = 0,
158 	.bbdev_id = 0,
159 	.enc_core_mask = 0x2,
160 	.dec_core_mask = 0x4,
161 	.num_enc_cores = 1,
162 	.num_dec_cores = 1,
163 };
164 
165 static rte_atomic16_t global_exit_flag;
166 
167 /* display usage */
168 static inline void
169 usage(const char *prgname)
170 {
171 	printf("%s [EAL options] "
172 			"  --\n"
173 			"  --enc_core_mask - hexadecimal mask of encoding lcores (default = 0x2)\n"
174 			"  --dec_core_mask - hexadecimal mask of decoding lcores (default = 0x4)\n"
175 			"  --port_id - Ethernet port ID (default = 0)\n"
176 			"  --bbdev_id - BBDev ID (default = 0)\n"
177 			"\n", prgname);
178 }
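/* A hypothetical invocation, assuming one lcore each for encoding (mask 0x2)
 * and decoding (mask 0x4) plus the master lcore and one spare lcore for the
 * statistics loop:
 *   ./bbdev_app -l 0-3 -- -e 0x2 -d 0x4 -p 0 -b 0
 * The short options map to --enc_core_mask, --dec_core_mask, --port_id and
 * --bbdev_id handled below.
 */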
179 
180 /* parse core mask */
181 static inline
182 uint16_t bbdev_parse_mask(const char *mask)
183 {
184 	char *end = NULL;
185 	unsigned long pm;
186 
187 	/* parse hexadecimal string */
188 	pm = strtoul(mask, &end, 16);
189 	if ((mask[0] == '\0') || (end == NULL) || (*end != '\0'))
190 		return 0;
191 
192 	return pm;
193 }
194 
195 /* parse a decimal number */
196 static inline
197 uint16_t bbdev_parse_number(const char *mask)
198 {
199 	char *end = NULL;
200 	unsigned long pm;
201 
202 	/* parse decimal string */
203 	pm = strtoul(mask, &end, 10);
204 	if ((mask[0] == '\0') || (end == NULL) || (*end != '\0'))
205 		return 0;
206 
207 	return pm;
208 }
209 
210 static int
211 bbdev_parse_args(int argc, char **argv,
212 		struct app_config_params *app_params)
213 {
214 	int optind = 0;
215 	int opt;
216 	int opt_indx = 0;
217 	char *prgname = argv[0];
218 
219 	static struct option lgopts[] = {
220 		{ "enc_core_mask", required_argument, 0, 'e' },
221 		{ "dec_core_mask", required_argument, 0, 'd' },
222 		{ "port_id", required_argument, 0, 'p' },
223 		{ "bbdev_id", required_argument, 0, 'b' },
224 		{ NULL, 0, 0, 0 }
225 	};
226 
227 	BBDEV_ASSERT(argc != 0);
228 	BBDEV_ASSERT(argv != NULL);
229 	BBDEV_ASSERT(app_params != NULL);
230 
231 	while ((opt = getopt_long(argc, argv, "e:d:p:b:", lgopts, &opt_indx)) !=
232 		EOF) {
233 		switch (opt) {
234 		case 'e':
235 			app_params->enc_core_mask =
236 				bbdev_parse_mask(optarg);
237 			if (app_params->enc_core_mask == 0) {
238 				usage(prgname);
239 				return -1;
240 			}
241 			app_params->num_enc_cores =
242 				__builtin_popcount(app_params->enc_core_mask);
243 			break;
244 
245 		case 'd':
246 			app_params->dec_core_mask =
247 				bbdev_parse_mask(optarg);
248 			if (app_params->dec_core_mask == 0) {
249 				usage(prgname);
250 				return -1;
251 			}
252 			app_params->num_dec_cores =
253 				__builtin_popcount(app_params->dec_core_mask);
254 			break;
255 
256 		case 'p':
257 			app_params->port_id = bbdev_parse_number(optarg);
258 			break;
259 
260 		case 'b':
261 			app_params->bbdev_id = bbdev_parse_number(optarg);
262 			break;
263 
264 		default:
265 			usage(prgname);
266 			return -1;
267 		}
268 	}
269 	optind = 0;
270 	return optind;
271 }
272 
273 static void
274 signal_handler(int signum)
275 {
276 	printf("\nSignal %d received\n", signum);
277 	rte_atomic16_set(&global_exit_flag, 1);
278 }
279 
280 static void
281 print_mac(unsigned int portid, struct ether_addr *bbdev_ports_eth_address)
282 {
283 	printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
284 			(unsigned int) portid,
285 			bbdev_ports_eth_address->addr_bytes[0],
286 			bbdev_ports_eth_address->addr_bytes[1],
287 			bbdev_ports_eth_address->addr_bytes[2],
288 			bbdev_ports_eth_address->addr_bytes[3],
289 			bbdev_ports_eth_address->addr_bytes[4],
290 			bbdev_ports_eth_address->addr_bytes[5]);
291 }
292 
293 static inline void
294 pktmbuf_free_bulk(struct rte_mbuf **mbufs, unsigned int nb_to_free)
295 {
296 	unsigned int i;
297 	for (i = 0; i < nb_to_free; ++i)
298 		rte_pktmbuf_free(mbufs[i]);
299 }
300 
301 static inline void
302 pktmbuf_userdata_free_bulk(struct rte_mbuf **mbufs, unsigned int nb_to_free)
303 {
304 	unsigned int i;
305 	for (i = 0; i < nb_to_free; ++i) {
306 		struct rte_mbuf *rx_pkt = mbufs[i]->userdata;
307 		rte_pktmbuf_free(rx_pkt);
308 		rte_pktmbuf_free(mbufs[i]);
309 	}
310 }
311 
312 /* Check the link status of the port for up to 9 s and print the result */
313 static int
314 check_port_link_status(uint16_t port_id)
315 {
316 #define CHECK_INTERVAL 100 /* 100ms */
317 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
318 	uint8_t count;
319 	struct rte_eth_link link;
320 
321 	printf("\nChecking link status.");
322 	fflush(stdout);
323 
324 	for (count = 0; count <= MAX_CHECK_TIME &&
325 			!rte_atomic16_read(&global_exit_flag); count++) {
326 		memset(&link, 0, sizeof(link));
327 		rte_eth_link_get_nowait(port_id, &link);
328 
329 		if (link.link_status) {
330 			const char *dp = (link.link_duplex ==
331 				ETH_LINK_FULL_DUPLEX) ?
332 				"full-duplex" : "half-duplex";
333 			printf("\nPort %u Link Up - speed %u Mbps - %s\n",
334 				port_id, link.link_speed, dp);
335 			return 0;
336 		}
337 		printf(".");
338 		fflush(stdout);
339 		rte_delay_ms(CHECK_INTERVAL);
340 	}
341 
342 	printf("\nPort %d Link Down\n", port_id);
343 	return 0;
344 }
345 
346 static inline void
347 add_ether_hdr(struct rte_mbuf *pkt_src, struct rte_mbuf *pkt_dst)
348 {
349 	struct ether_hdr *eth_from;
350 	struct ether_hdr *eth_to;
351 
352 	eth_from = rte_pktmbuf_mtod(pkt_src, struct ether_hdr *);
353 	eth_to = rte_pktmbuf_mtod(pkt_dst, struct ether_hdr *);
354 
355 	/* copy header */
356 	rte_memcpy(eth_to, eth_from, sizeof(struct ether_hdr));
357 }
358 
359 static inline void
360 add_awgn(struct rte_mbuf **mbufs, uint16_t num_pkts)
361 {
362 	RTE_SET_USED(mbufs);
363 	RTE_SET_USED(num_pkts);
364 }
365 
366 /* Encoder output to Decoder input adapter. The Decoder accepts only soft
367  * input, so each bit of the encoder output must be translated into one byte
368  * of LLR. Because the Sub-block Deinterleaver is bypassed here, the padding
369  * bytes must additionally be inserted at the end of each sub-block.
370  */
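/* For the hardcoded K = 40: d = 44, kpi = RTE_ALIGN_CEIL(44, 32) = 64,
 * nd = 20 and ncb = 192, so each of the three sub-blocks below is written as
 * 44 LLR bytes followed by 20 zero padding bytes.
 */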
371 static inline void
372 transform_enc_out_dec_in(struct rte_mbuf **mbufs, uint8_t *temp_buf,
373 		uint16_t num_pkts, uint16_t k)
374 {
375 	uint16_t i, l, j;
376 	uint16_t start_bit_idx;
377 	uint16_t out_idx;
378 	uint16_t d = k + 4;
379 	uint16_t kpi = RTE_ALIGN_CEIL(d, 32);
380 	uint16_t nd = kpi - d;
381 	uint16_t ncb = 3 * kpi;
382 
383 	for (i = 0; i < num_pkts; ++i) {
384 		uint16_t pkt_data_len = rte_pktmbuf_data_len(mbufs[i]) -
385 				sizeof(struct ether_hdr);
386 
387 		/* Resize the packet if needed */
388 		if (pkt_data_len < ncb) {
389 			char *data = rte_pktmbuf_append(mbufs[i],
390 					ncb - pkt_data_len);
391 			if (data == NULL)
392 				printf(
393 					"Not enough space in decoder input packet");
394 		}
395 
396 		/* Translate each bit into 1 LLR byte. */
397 		start_bit_idx = 0;
398 		out_idx = 0;
399 		for (j = 0; j < 3; ++j) {
400 			for (l = start_bit_idx; l < start_bit_idx + d; ++l) {
401 				uint8_t *data = rte_pktmbuf_mtod_offset(
402 					mbufs[i], uint8_t *,
403 					sizeof(struct ether_hdr) + (l >> 3));
404 				if (*data & (0x80 >> (l & 7)))
405 					temp_buf[out_idx] = LLR_1_BIT;
406 				else
407 					temp_buf[out_idx] = LLR_0_BIT;
408 				++out_idx;
409 			}
410 			/* Padding bytes should be at the end of the sub-block.
411 			 */
412 			memset(&temp_buf[out_idx], 0, nd);
413 			out_idx += nd;
414 			start_bit_idx += d;
415 		}
416 
417 		rte_memcpy(rte_pktmbuf_mtod_offset(mbufs[i], uint8_t *,
418 				sizeof(struct ether_hdr)), temp_buf, ncb);
419 	}
420 }
421 
422 static inline void
423 verify_data(struct rte_mbuf **mbufs, uint16_t num_pkts)
424 {
425 	uint16_t i;
426 	for (i = 0; i < num_pkts; ++i) {
427 		struct rte_mbuf *out = mbufs[i];
428 		struct rte_mbuf *in = out->userdata;
429 
430 		if (memcmp(rte_pktmbuf_mtod_offset(in, uint8_t *,
431 				sizeof(struct ether_hdr)),
432 				rte_pktmbuf_mtod_offset(out, uint8_t *,
433 				sizeof(struct ether_hdr)),
434 				K / 8 - CRC_24B_LEN))
435 			printf("Input and output buffers are not equal!\n");
436 	}
437 }
438 
439 static int
440 initialize_ports(struct app_config_params *app_params,
441 		struct rte_mempool *ethdev_mbuf_mempool)
442 {
443 	int ret;
444 	uint16_t port_id = app_params->port_id;
445 	uint16_t q;
446 	/* ethernet addresses of ports */
447 	struct ether_addr bbdev_port_eth_addr;
448 
449 	/* initialize ports */
450 	printf("\nInitializing port %u...\n", app_params->port_id);
451 	ret = rte_eth_dev_configure(port_id, app_params->num_enc_cores,
452 		app_params->num_dec_cores, &port_conf);
453 
454 	if (ret < 0) {
455 		printf("Cannot configure device: err=%d, port=%u\n",
456 			ret, port_id);
457 		return -1;
458 	}
459 
460 	/* initialize RX queues for encoder */
461 	for (q = 0; q < app_params->num_enc_cores; q++) {
462 		ret = rte_eth_rx_queue_setup(port_id, q,
463 			RTE_TEST_RX_DESC_DEFAULT,
464 			rte_eth_dev_socket_id(port_id),
465 			NULL, ethdev_mbuf_mempool);
466 		if (ret < 0) {
467 			printf("rte_eth_rx_queue_setup: err=%d, queue=%u\n",
468 				ret, q);
469 			return -1;
470 		}
471 	}
472 	/* initialize TX queues for decoder */
473 	for (q = 0; q < app_params->num_dec_cores; q++) {
474 		ret = rte_eth_tx_queue_setup(port_id, q,
475 			RTE_TEST_TX_DESC_DEFAULT,
476 			rte_eth_dev_socket_id(port_id), NULL);
477 		if (ret < 0) {
478 			printf("rte_eth_tx_queue_setup: err=%d, queue=%u\n",
479 				ret, q);
480 			return -1;
481 		}
482 	}
483 
484 	rte_eth_promiscuous_enable(port_id);
485 
486 	rte_eth_macaddr_get(port_id, &bbdev_port_eth_addr);
487 	print_mac(port_id, &bbdev_port_eth_addr);
488 
489 	return 0;
490 }
491 
492 static void
493 lcore_conf_init(struct app_config_params *app_params,
494 		struct lcore_conf *lcore_conf,
495 		struct rte_mempool **bbdev_op_pools,
496 		struct rte_mempool *bbdev_mbuf_mempool,
497 		struct rte_ring *enc_to_dec_ring,
498 		struct lcore_statistics *lcore_stats)
499 {
500 	unsigned int lcore_id;
501 	struct lcore_conf *lconf;
502 	uint16_t rx_queue_id = 0;
503 	uint16_t tx_queue_id = 0;
504 	uint16_t enc_q_id = 0;
505 	uint16_t dec_q_id = 0;
506 
507 	/* Configure lcores */
508 	for (lcore_id = 0; lcore_id < 8 * sizeof(uint64_t); ++lcore_id) {
509 		lconf = &lcore_conf[lcore_id];
510 		lconf->core_type = 0;
511 
512 		if ((1ULL << lcore_id) & app_params->enc_core_mask) {
513 			lconf->core_type |= (1 << RTE_BBDEV_OP_TURBO_ENC);
514 			lconf->rx_queue_id = rx_queue_id++;
515 			lconf->enc_queue_id =
516 					app_params->enc_queue_ids[enc_q_id++];
517 		}
518 
519 		if ((1ULL << lcore_id) & app_params->dec_core_mask) {
520 			lconf->core_type |= (1 << RTE_BBDEV_OP_TURBO_DEC);
521 			lconf->tx_queue_id = tx_queue_id++;
522 			lconf->dec_queue_id =
523 					app_params->dec_queue_ids[dec_q_id++];
524 		}
525 
526 		lconf->bbdev_enc_op_pool =
527 				bbdev_op_pools[RTE_BBDEV_OP_TURBO_ENC];
528 		lconf->bbdev_dec_op_pool =
529 				bbdev_op_pools[RTE_BBDEV_OP_TURBO_DEC];
530 		lconf->bbdev_id = app_params->bbdev_id;
531 		lconf->port_id = app_params->port_id;
532 		lconf->enc_out_pool = bbdev_mbuf_mempool;
533 		lconf->enc_to_dec_ring = enc_to_dec_ring;
534 		lconf->lcore_stats = &lcore_stats[lcore_id];
535 	}
536 }
537 
538 static void
539 print_lcore_stats(struct lcore_statistics *lstats, unsigned int lcore_id)
540 {
541 	static const char *stats_border = "_______";
542 
543 	printf("\nLcore %d: %s enqueued count:\t\t%u\n",
544 			lcore_id, stats_border, lstats->enqueued);
545 	printf("Lcore %d: %s dequeued count:\t\t%u\n",
546 			lcore_id, stats_border, lstats->dequeued);
547 	printf("Lcore %d: %s RX lost packets count:\t\t%u\n",
548 			lcore_id, stats_border, lstats->rx_lost_packets);
549 	printf("Lcore %d: %s encoder-to-decoder lost count:\t%u\n",
550 			lcore_id, stats_border,
551 			lstats->enc_to_dec_lost_packets);
552 	printf("Lcore %d: %s TX lost packets count:\t\t%u\n",
553 			lcore_id, stats_border, lstats->tx_lost_packets);
554 }
555 
556 static void
557 print_stats(struct stats_lcore_params *stats_lcore)
558 {
559 	unsigned int l_id;
560 	unsigned int bbdev_id = stats_lcore->app_params->bbdev_id;
561 	unsigned int port_id = stats_lcore->app_params->port_id;
562 	int len, ret, i;
563 
564 	struct rte_eth_xstat *xstats;
565 	struct rte_eth_xstat_name *xstats_names;
566 	struct rte_bbdev_stats bbstats;
567 	static const char *stats_border = "_______";
568 
569 	const char clr[] = { 27, '[', '2', 'J', '\0' };
570 	const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };
571 
572 	/* Clear screen and move to top left */
573 	printf("%s%s", clr, topLeft);
574 
575 	printf("PORT STATISTICS:\n================\n");
576 	len = rte_eth_xstats_get(port_id, NULL, 0);
577 	if (len < 0)
578 		rte_exit(EXIT_FAILURE,
579 				"rte_eth_xstats_get(%u) failed: %d", port_id,
580 				len);
581 
582 	xstats = calloc(len, sizeof(*xstats));
583 	if (xstats == NULL)
584 		rte_exit(EXIT_FAILURE,
585 				"Failed to calloc memory for xstats");
586 
587 	ret = rte_eth_xstats_get(port_id, xstats, len);
588 	if (ret < 0 || ret > len) {
589 		free(xstats);
590 		rte_exit(EXIT_FAILURE,
591 				"rte_eth_xstats_get(%u) len%i failed: %d",
592 				port_id, len, ret);
593 	}
594 
595 	xstats_names = calloc(len, sizeof(*xstats_names));
596 	if (xstats_names == NULL) {
597 		free(xstats);
598 		rte_exit(EXIT_FAILURE,
599 				"Failed to calloc memory for xstats_names");
600 	}
601 
602 	ret = rte_eth_xstats_get_names(port_id, xstats_names, len);
603 	if (ret < 0 || ret > len) {
604 		free(xstats);
605 		free(xstats_names);
606 		rte_exit(EXIT_FAILURE,
607 				"rte_eth_xstats_get_names(%u) len%i failed: %d",
608 				port_id, len, ret);
609 	}
610 
611 	for (i = 0; i < len; i++) {
612 		if (xstats[i].value > 0)
613 			printf("Port %u: %s %s:\t\t%"PRIu64"\n",
614 					port_id, stats_border,
615 					xstats_names[i].name,
616 					xstats[i].value);
617 	}
618 
619 	printf("\nBBDEV STATISTICS:\n=================\n");
620 	rte_bbdev_stats_get(bbdev_id, &bbstats);
621 	printf("BBDEV %u: %s enqueue count:\t\t%"PRIu64"\n",
622 			bbdev_id, stats_border,
623 			bbstats.enqueued_count);
624 	printf("BBDEV %u: %s dequeue count:\t\t%"PRIu64"\n",
625 			bbdev_id, stats_border,
626 			bbstats.dequeued_count);
627 	printf("BBDEV %u: %s enqueue error count:\t\t%"PRIu64"\n",
628 			bbdev_id, stats_border,
629 			bbstats.enqueue_err_count);
630 	printf("BBDEV %u: %s dequeue error count:\t\t%"PRIu64"\n\n",
631 			bbdev_id, stats_border,
632 			bbstats.dequeue_err_count);
633 
634 	printf("LCORE STATISTICS:\n=================\n");
635 	for (l_id = 0; l_id < RTE_MAX_LCORE; ++l_id) {
636 		if (stats_lcore->lconf[l_id].core_type == 0)
637 			continue;
638 		print_lcore_stats(stats_lcore->lconf[l_id].lcore_stats, l_id);
639 	}
640 
641 	free(xstats);
642 	free(xstats_names);
643 }
644 
645 static int
646 stats_loop(void *arg)
647 {
648 	struct stats_lcore_params *stats_lcore = arg;
649 
650 	while (!rte_atomic16_read(&global_exit_flag)) {
651 		print_stats(stats_lcore);
652 		rte_delay_ms(500);
653 	}
654 
655 	return 0;
656 }
657 
658 static inline void
659 run_encoding(struct lcore_conf *lcore_conf)
660 {
661 	uint16_t i;
662 	uint16_t port_id, rx_queue_id;
663 	uint16_t bbdev_id, enc_queue_id;
664 	uint16_t nb_rx, nb_enq, nb_deq, nb_sent;
665 	struct rte_mbuf *rx_pkts_burst[MAX_PKT_BURST];
666 	struct rte_mbuf *enc_out_pkts[MAX_PKT_BURST];
667 	struct rte_bbdev_enc_op *bbdev_ops_burst[MAX_PKT_BURST];
668 	struct lcore_statistics *lcore_stats;
669 	struct rte_mempool *bbdev_op_pool, *enc_out_pool;
670 	struct rte_ring *enc_to_dec_ring;
671 	const int in_data_len = (def_op_enc.cb_params.k / 8) - CRC_24B_LEN;
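	/* With K = 40 this is 40 / 8 - 3 = 2 payload bytes per code block. */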
672 
673 	lcore_stats = lcore_conf->lcore_stats;
674 	port_id = lcore_conf->port_id;
675 	rx_queue_id = lcore_conf->rx_queue_id;
676 	bbdev_id = lcore_conf->bbdev_id;
677 	enc_queue_id = lcore_conf->enc_queue_id;
678 	bbdev_op_pool = lcore_conf->bbdev_enc_op_pool;
679 	enc_out_pool = lcore_conf->enc_out_pool;
680 	enc_to_dec_ring = lcore_conf->enc_to_dec_ring;
681 
682 	/* Read packets from the RX queue */
683 	nb_rx = rte_eth_rx_burst(port_id, rx_queue_id, rx_pkts_burst,
684 			MAX_PKT_BURST);
685 	if (!nb_rx)
686 		return;
687 
688 	if (unlikely(rte_mempool_get_bulk(enc_out_pool, (void **)enc_out_pkts,
689 			nb_rx) != 0)) {
690 		pktmbuf_free_bulk(rx_pkts_burst, nb_rx);
691 		lcore_stats->rx_lost_packets += nb_rx;
692 		return;
693 	}
694 
695 	if (unlikely(rte_bbdev_enc_op_alloc_bulk(bbdev_op_pool, bbdev_ops_burst,
696 			nb_rx) != 0)) {
697 		pktmbuf_free_bulk(enc_out_pkts, nb_rx);
698 		pktmbuf_free_bulk(rx_pkts_burst, nb_rx);
699 		lcore_stats->rx_lost_packets += nb_rx;
700 		return;
701 	}
702 
703 	for (i = 0; i < nb_rx; i++) {
704 		char *data;
705 		const uint16_t pkt_data_len =
706 				rte_pktmbuf_data_len(rx_pkts_burst[i]) -
707 				sizeof(struct ether_hdr);
708 		/* save input mbuf pointer for later comparison */
709 		enc_out_pkts[i]->userdata = rx_pkts_burst[i];
710 
711 		/* copy ethernet header */
712 		rte_pktmbuf_reset(enc_out_pkts[i]);
713 		data = rte_pktmbuf_append(enc_out_pkts[i],
714 				sizeof(struct ether_hdr));
715 		if (data == NULL) {
716 			printf(
717 				"Not enough space for ethernet header in encoder output mbuf\n");
718 			continue;
719 		}
720 		add_ether_hdr(rx_pkts_burst[i], enc_out_pkts[i]);
721 
722 		/* set op */
723 		bbdev_ops_burst[i]->turbo_enc = def_op_enc;
724 
725 		bbdev_ops_burst[i]->turbo_enc.input.data =
726 				rx_pkts_burst[i];
727 		bbdev_ops_burst[i]->turbo_enc.input.offset =
728 				sizeof(struct ether_hdr);
729 		/* Encoder will attach the CRC24B, adjust the length */
730 		bbdev_ops_burst[i]->turbo_enc.input.length = in_data_len;
731 
732 		if (in_data_len < pkt_data_len)
733 			rte_pktmbuf_trim(rx_pkts_burst[i], pkt_data_len -
734 					in_data_len);
735 		else if (in_data_len > pkt_data_len) {
736 			data = rte_pktmbuf_append(rx_pkts_burst[i],
737 					in_data_len - pkt_data_len);
738 			if (data == NULL)
739 				printf(
740 					"Not enough storage in mbuf to perform the encoding\n");
741 		}
742 
743 		bbdev_ops_burst[i]->turbo_enc.output.data =
744 				enc_out_pkts[i];
745 		bbdev_ops_burst[i]->turbo_enc.output.offset =
746 				sizeof(struct ether_hdr);
747 	}
748 
749 	/* Enqueue packets on BBDevice */
750 	nb_enq = rte_bbdev_enqueue_enc_ops(bbdev_id, enc_queue_id,
751 			bbdev_ops_burst, nb_rx);
752 	if (unlikely(nb_enq < nb_rx)) {
753 		pktmbuf_userdata_free_bulk(&enc_out_pkts[nb_enq],
754 				nb_rx - nb_enq);
755 		rte_bbdev_enc_op_free_bulk(&bbdev_ops_burst[nb_enq],
756 				nb_rx - nb_enq);
757 		lcore_stats->rx_lost_packets += nb_rx - nb_enq;
758 
759 		if (!nb_enq)
760 			return;
761 	}
762 
763 	lcore_stats->enqueued += nb_enq;
764 
765 	/* Dequeue packets from the bbdev device */
766 	nb_deq = 0;
767 	do {
768 		nb_deq += rte_bbdev_dequeue_enc_ops(bbdev_id, enc_queue_id,
769 				&bbdev_ops_burst[nb_deq], nb_enq - nb_deq);
770 	} while (unlikely(nb_deq < nb_enq));
771 
772 	lcore_stats->dequeued += nb_deq;
773 
774 	/* Generate and add AWGN */
775 	add_awgn(enc_out_pkts, nb_deq);
776 
777 	rte_bbdev_enc_op_free_bulk(bbdev_ops_burst, nb_deq);
778 
779 	/* Enqueue packets to encoder-to-decoder ring */
780 	nb_sent = rte_ring_enqueue_burst(enc_to_dec_ring, (void **)enc_out_pkts,
781 			nb_deq, NULL);
782 	if (unlikely(nb_sent < nb_deq)) {
783 		pktmbuf_userdata_free_bulk(&enc_out_pkts[nb_sent],
784 				nb_deq - nb_sent);
785 		lcore_stats->enc_to_dec_lost_packets += nb_deq - nb_sent;
786 	}
787 }
788 
789 static void
790 run_decoding(struct lcore_conf *lcore_conf)
791 {
792 	uint16_t i;
793 	uint16_t port_id, tx_queue_id;
794 	uint16_t bbdev_id, bbdev_queue_id;
795 	uint16_t nb_recv, nb_enq, nb_deq, nb_tx;
796 	uint8_t *llr_temp_buf;
797 	struct rte_mbuf *recv_pkts_burst[MAX_PKT_BURST];
798 	struct rte_bbdev_dec_op *bbdev_ops_burst[MAX_PKT_BURST];
799 	struct lcore_statistics *lcore_stats;
800 	struct rte_mempool *bbdev_op_pool;
801 	struct rte_ring *enc_to_dec_ring;
802 
803 	lcore_stats = lcore_conf->lcore_stats;
804 	port_id = lcore_conf->port_id;
805 	tx_queue_id = lcore_conf->tx_queue_id;
806 	bbdev_id = lcore_conf->bbdev_id;
807 	bbdev_queue_id = lcore_conf->dec_queue_id;
808 	bbdev_op_pool = lcore_conf->bbdev_dec_op_pool;
809 	enc_to_dec_ring = lcore_conf->enc_to_dec_ring;
810 	llr_temp_buf = lcore_conf->llr_temp_buf;
811 
812 	/* Dequeue packets from the ring */
813 	nb_recv = rte_ring_dequeue_burst(enc_to_dec_ring,
814 			(void **)recv_pkts_burst, MAX_PKT_BURST, NULL);
815 	if (!nb_recv)
816 		return;
817 
818 	if (unlikely(rte_bbdev_dec_op_alloc_bulk(bbdev_op_pool, bbdev_ops_burst,
819 			nb_recv) != 0)) {
820 		pktmbuf_userdata_free_bulk(recv_pkts_burst, nb_recv);
821 		lcore_stats->rx_lost_packets += nb_recv;
822 		return;
823 	}
824 
825 	transform_enc_out_dec_in(recv_pkts_burst, llr_temp_buf, nb_recv,
826 			def_op_dec.cb_params.k);
827 
828 	for (i = 0; i < nb_recv; i++) {
829 		/* set op */
830 		bbdev_ops_burst[i]->turbo_dec = def_op_dec;
831 
832 		bbdev_ops_burst[i]->turbo_dec.input.data = recv_pkts_burst[i];
833 		bbdev_ops_burst[i]->turbo_dec.input.offset =
834 				sizeof(struct ether_hdr);
835 		bbdev_ops_burst[i]->turbo_dec.input.length =
836 				rte_pktmbuf_data_len(recv_pkts_burst[i])
837 				- sizeof(struct ether_hdr);
838 
839 		bbdev_ops_burst[i]->turbo_dec.hard_output.data =
840 				recv_pkts_burst[i];
841 		bbdev_ops_burst[i]->turbo_dec.hard_output.offset =
842 				sizeof(struct ether_hdr);
843 	}
844 
845 	/* Enqueue packets on BBDevice */
846 	nb_enq = rte_bbdev_enqueue_dec_ops(bbdev_id, bbdev_queue_id,
847 			bbdev_ops_burst, nb_recv);
848 	if (unlikely(nb_enq < nb_recv)) {
849 		pktmbuf_userdata_free_bulk(&recv_pkts_burst[nb_enq],
850 				nb_recv - nb_enq);
851 		rte_bbdev_dec_op_free_bulk(&bbdev_ops_burst[nb_enq],
852 				nb_recv - nb_enq);
853 		lcore_stats->rx_lost_packets += nb_recv - nb_enq;
854 
855 		if (!nb_enq)
856 			return;
857 	}
858 
859 	lcore_stats->enqueued += nb_enq;
860 
861 	/* Dequeue packets from BBDevice */
862 	nb_deq = 0;
863 	do {
864 		nb_deq += rte_bbdev_dequeue_dec_ops(bbdev_id, bbdev_queue_id,
865 				&bbdev_ops_burst[nb_deq], nb_enq - nb_deq);
866 	} while (unlikely(nb_deq < nb_enq));
867 
868 	lcore_stats->dequeued += nb_deq;
869 
870 	rte_bbdev_dec_op_free_bulk(bbdev_ops_burst, nb_deq);
871 
872 	verify_data(recv_pkts_burst, nb_deq);
873 
874 	/* Free the RX mbufs after verification */
875 	for (i = 0; i < nb_deq; ++i)
876 		rte_pktmbuf_free(recv_pkts_burst[i]->userdata);
877 
878 	/* Transmit the packets */
879 	nb_tx = rte_eth_tx_burst(port_id, tx_queue_id, recv_pkts_burst, nb_deq);
880 	if (unlikely(nb_tx < nb_deq)) {
881 		pktmbuf_userdata_free_bulk(&recv_pkts_burst[nb_tx],
882 				nb_deq - nb_tx);
883 		lcore_stats->tx_lost_packets += nb_deq - nb_tx;
884 	}
885 }
886 
887 static int
888 processing_loop(void *arg)
889 {
890 	struct lcore_conf *lcore_conf = arg;
891 	const bool run_encoder = (lcore_conf->core_type &
892 			(1 << RTE_BBDEV_OP_TURBO_ENC));
893 	const bool run_decoder = (lcore_conf->core_type &
894 			(1 << RTE_BBDEV_OP_TURBO_DEC));
895 
896 	while (!rte_atomic16_read(&global_exit_flag)) {
897 		if (run_encoder)
898 			run_encoding(lcore_conf);
899 		if (run_decoder)
900 			run_decoding(lcore_conf);
901 	}
902 
903 	return 0;
904 }
905 
906 static int
907 prepare_bbdev_device(unsigned int dev_id, struct rte_bbdev_info *info,
908 		struct app_config_params *app_params)
909 {
910 	int ret;
911 	unsigned int q_id, dec_q_id, enc_q_id;
912 	struct rte_bbdev_queue_conf qconf = {0};
913 	uint16_t dec_qs_nb = app_params->num_dec_cores;
914 	uint16_t enc_qs_nb = app_params->num_enc_cores;
915 	uint16_t tot_qs = dec_qs_nb + enc_qs_nb;
916 
917 	ret = rte_bbdev_setup_queues(dev_id, tot_qs, info->socket_id);
918 	if (ret < 0)
919 		rte_exit(EXIT_FAILURE,
920 				"ERROR(%d): BBDEV %u not configured properly\n",
921 				ret, dev_id);
922 
923 	/* setup device DEC queues */
924 	qconf.socket = info->socket_id;
925 	qconf.queue_size = info->drv.queue_size_lim;
926 	qconf.op_type = RTE_BBDEV_OP_TURBO_DEC;
927 
928 	for (q_id = 0, dec_q_id = 0; q_id < dec_qs_nb; q_id++) {
929 		ret = rte_bbdev_queue_configure(dev_id, q_id, &qconf);
930 		if (ret < 0)
931 			rte_exit(EXIT_FAILURE,
932 					"ERROR(%d): BBDEV %u DEC queue %u not configured properly\n",
933 					ret, dev_id, q_id);
934 		app_params->dec_queue_ids[dec_q_id++] = q_id;
935 	}
936 
937 	/* setup device ENC queues */
938 	qconf.op_type = RTE_BBDEV_OP_TURBO_ENC;
939 
940 	for (q_id = dec_qs_nb, enc_q_id = 0; q_id < tot_qs; q_id++) {
941 		ret = rte_bbdev_queue_configure(dev_id, q_id, &qconf);
942 		if (ret < 0)
943 			rte_exit(EXIT_FAILURE,
944 					"ERROR(%d): BBDEV %u ENC queue %u not configured properly\n",
945 					ret, dev_id, q_id);
946 		app_params->enc_queue_ids[enc_q_id++] = q_id;
947 	}
948 
949 	ret = rte_bbdev_start(dev_id);
950 
951 	if (ret != 0)
952 		rte_exit(EXIT_FAILURE, "ERROR(%d): BBDEV %u not started\n",
953 			ret, dev_id);
954 
955 	printf("BBdev %u started\n", dev_id);
956 
957 	return 0;
958 }
959 
960 static inline bool
961 check_matching_capabilities(uint64_t mask, uint64_t required_mask)
962 {
963 	return (mask & required_mask) == required_mask;
964 }
965 
966 static void
967 enable_bbdev(struct app_config_params *app_params)
968 {
969 	struct rte_bbdev_info dev_info;
970 	const struct rte_bbdev_op_cap *op_cap;
971 	uint16_t bbdev_id = app_params->bbdev_id;
972 	bool encoder_capable = false;
973 	bool decoder_capable = false;
974 
975 	rte_bbdev_info_get(bbdev_id, &dev_info);
976 	op_cap = dev_info.drv.capabilities;
977 
978 	while (op_cap->type != RTE_BBDEV_OP_NONE) {
979 		if (op_cap->type == RTE_BBDEV_OP_TURBO_ENC) {
980 			if (check_matching_capabilities(
981 					op_cap->cap.turbo_enc.capability_flags,
982 					def_op_enc.op_flags))
983 				encoder_capable = true;
984 		}
985 
986 		if (op_cap->type == RTE_BBDEV_OP_TURBO_DEC) {
987 			if (check_matching_capabilities(
988 					op_cap->cap.turbo_dec.capability_flags,
989 					def_op_dec.op_flags))
990 				decoder_capable = true;
991 		}
992 
993 		op_cap++;
994 	}
995 
996 	if (encoder_capable == false)
997 		rte_exit(EXIT_FAILURE,
998 			"The specified BBDev %u doesn't have required encoder capabilities!\n",
999 			bbdev_id);
1000 	if (decoder_capable == false)
1001 		rte_exit(EXIT_FAILURE,
1002 			"The specified BBDev %u doesn't have required decoder capabilities!\n",
1003 			bbdev_id);
1004 
1005 	prepare_bbdev_device(bbdev_id, &dev_info, app_params);
1006 }
1007 
1008 int
1009 main(int argc, char **argv)
1010 {
1011 	int ret;
1012 	unsigned int nb_bbdevs, nb_ports, flags, lcore_id;
1013 	void *sigret;
1014 	struct app_config_params app_params = def_app_config;
1015 	struct rte_mempool *ethdev_mbuf_mempool, *bbdev_mbuf_mempool;
1016 	struct rte_mempool *bbdev_op_pools[RTE_BBDEV_OP_TYPE_COUNT];
1017 	struct lcore_conf lcore_conf[RTE_MAX_LCORE] = { {0} };
1018 	struct lcore_statistics lcore_stats[RTE_MAX_LCORE] = { {0} };
1019 	struct stats_lcore_params stats_lcore;
1020 	struct rte_ring *enc_to_dec_ring;
1021 	bool stats_thread_started = false;
1022 	unsigned int master_lcore_id = rte_get_master_lcore();
1023 
1024 	rte_atomic16_init(&global_exit_flag);
1025 
1026 	sigret = signal(SIGTERM, signal_handler);
1027 	if (sigret == SIG_ERR)
1028 		rte_exit(EXIT_FAILURE, "signal(%d, ...) failed", SIGTERM);
1029 
1030 	sigret = signal(SIGINT, signal_handler);
1031 	if (sigret == SIG_ERR)
1032 		rte_exit(EXIT_FAILURE, "signal(%d, ...) failed", SIGINT);
1033 
1034 	ret = rte_eal_init(argc, argv);
1035 	if (ret < 0)
1036 		rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
1037 
1038 	argc -= ret;
1039 	argv += ret;
1040 
1041 	/* parse application arguments (after the EAL ones) */
1042 	ret = bbdev_parse_args(argc, argv, &app_params);
1043 	if (ret < 0)
1044 		rte_exit(EXIT_FAILURE, "Invalid BBDEV arguments\n");
1045 
1046 	/* create bbdev op pools */
1047 	bbdev_op_pools[RTE_BBDEV_OP_TURBO_DEC] =
1048 			rte_bbdev_op_pool_create("bbdev_op_pool_dec",
1049 			RTE_BBDEV_OP_TURBO_DEC, NB_MBUF, 128, rte_socket_id());
1050 	bbdev_op_pools[RTE_BBDEV_OP_TURBO_ENC] =
1051 			rte_bbdev_op_pool_create("bbdev_op_pool_enc",
1052 			RTE_BBDEV_OP_TURBO_ENC, NB_MBUF, 128, rte_socket_id());
1053 
1054 	if ((bbdev_op_pools[RTE_BBDEV_OP_TURBO_DEC] == NULL) ||
1055 			(bbdev_op_pools[RTE_BBDEV_OP_TURBO_ENC] == NULL))
1056 		rte_exit(EXIT_FAILURE, "Cannot create bbdev op pools\n");
1057 
1058 	/* Create encoder to decoder ring */
1059 	flags = (app_params.num_enc_cores == 1) ? RING_F_SP_ENQ : 0;
1060 	if (app_params.num_dec_cores == 1)
1061 		flags |= RING_F_SC_DEQ;
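	/* A single encoding lcore lets the ring be single-producer and a single
	 * decoding lcore lets it be single-consumer.
	 */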
1062 
1063 	enc_to_dec_ring = rte_ring_create("enc_to_dec_ring",
1064 		rte_align32pow2(NB_MBUF), rte_socket_id(), flags);
1065 
1066 	/* Get the number of available bbdev devices */
1067 	nb_bbdevs = rte_bbdev_count();
1068 	if (nb_bbdevs <= app_params.bbdev_id)
1069 		rte_exit(EXIT_FAILURE,
1070 				"%u BBDevs detected, cannot use BBDev with ID %u!\n",
1071 				nb_bbdevs, app_params.bbdev_id);
1072 	printf("Number of bbdevs detected: %d\n", nb_bbdevs);
1073 
1074 	/* Get the number of available ethdev devices */
1075 	nb_ports = rte_eth_dev_count();
1076 	if (nb_ports <= app_params.port_id)
1077 		rte_exit(EXIT_FAILURE,
1078 				"%u ports detected, cannot use port with ID %u!\n",
1079 				nb_ports, app_params.port_id);
1080 
1081 	/* create the mbuf mempool for ethdev pkts */
1082 	ethdev_mbuf_mempool = rte_pktmbuf_pool_create("ethdev_mbuf_pool",
1083 			NB_MBUF, MEMPOOL_CACHE_SIZE, 0,
1084 			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
1085 	if (ethdev_mbuf_mempool == NULL)
1086 		rte_exit(EXIT_FAILURE, "Cannot create ethdev mbuf mempool\n");
1087 
1088 	/* create the mbuf mempool for encoder output */
1089 	bbdev_mbuf_mempool = rte_pktmbuf_pool_create("bbdev_mbuf_pool",
1090 			NB_MBUF, MEMPOOL_CACHE_SIZE, 0,
1091 			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
1092 	if (bbdev_mbuf_mempool == NULL)
1093 		rte_exit(EXIT_FAILURE, "Cannot create bbdev mbuf mempool\n");
1094 
1095 	/* initialize ports */
1096 	ret = initialize_ports(&app_params, ethdev_mbuf_mempool);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Cannot initialize ethdev port %u!\n",
				app_params.port_id);
1097 
1098 	/* Check if all requested lcores are available */
1099 	for (lcore_id = 0; lcore_id < 8 * sizeof(uint64_t); ++lcore_id)
1100 		if (((1ULL << lcore_id) & app_params.enc_core_mask) ||
1101 				((1ULL << lcore_id) & app_params.dec_core_mask))
1102 			if (!rte_lcore_is_enabled(lcore_id))
1103 				rte_exit(EXIT_FAILURE,
1104 						"Requested lcore_id %u is not enabled!\n",
1105 						lcore_id);
1106 
1107 	/* Start ethernet port */
1108 	ret = rte_eth_dev_start(app_params.port_id);
1109 	if (ret < 0)
1110 		rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n",
1111 				ret, app_params.port_id);
1112 
1113 	ret = check_port_link_status(app_params.port_id);
1114 	if (ret < 0)
1115 		exit(EXIT_FAILURE);
1116 
1117 	/* start BBDevice and save BBDev queue IDs */
1118 	enable_bbdev(&app_params);
1119 
1120 	/* Initialize the port/queue configuration of each logical core */
1121 	lcore_conf_init(&app_params, lcore_conf, bbdev_op_pools,
1122 			bbdev_mbuf_mempool, enc_to_dec_ring, lcore_stats);
1123 
1124 	stats_lcore.app_params = &app_params;
1125 	stats_lcore.lconf = lcore_conf;
1126 
1127 	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
1128 		if (lcore_conf[lcore_id].core_type != 0)
1129 			/* launch per-lcore processing loop on slave lcores */
1130 			rte_eal_remote_launch(processing_loop,
1131 					&lcore_conf[lcore_id], lcore_id);
1132 		else if (!stats_thread_started) {
1133 			/* launch statistics printing loop */
1134 			rte_eal_remote_launch(stats_loop, &stats_lcore,
1135 					lcore_id);
1136 			stats_thread_started = true;
1137 		}
1138 	}
1139 
1140 	if (!stats_thread_started &&
1141 			lcore_conf[master_lcore_id].core_type != 0)
1142 		rte_exit(EXIT_FAILURE,
1143 				"Not enough lcores to run the statistics printing loop!");
1144 	else if (lcore_conf[master_lcore_id].core_type != 0)
1145 		processing_loop(&lcore_conf[master_lcore_id]);
1146 	else if (!stats_thread_started)
1147 		stats_loop(&stats_lcore);
1148 
1149 	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
1150 		ret |= rte_eal_wait_lcore(lcore_id);
1151 	}
1152 
1153 	return ret;
1154 }
1155