xref: /dpdk/examples/ipsec-secgw/ipsec-secgw.c (revision cf435a07)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <stdint.h>
37 #include <inttypes.h>
38 #include <sys/types.h>
39 #include <netinet/in.h>
40 #include <netinet/ip.h>
41 #include <netinet/ip6.h>
42 #include <string.h>
43 #include <sys/queue.h>
44 #include <stdarg.h>
45 #include <errno.h>
46 #include <getopt.h>
47 
48 #include <rte_common.h>
49 #include <rte_byteorder.h>
50 #include <rte_log.h>
51 #include <rte_eal.h>
52 #include <rte_launch.h>
53 #include <rte_atomic.h>
54 #include <rte_cycles.h>
55 #include <rte_prefetch.h>
56 #include <rte_lcore.h>
57 #include <rte_per_lcore.h>
58 #include <rte_branch_prediction.h>
59 #include <rte_interrupts.h>
60 #include <rte_pci.h>
61 #include <rte_random.h>
62 #include <rte_debug.h>
63 #include <rte_ether.h>
64 #include <rte_ethdev.h>
65 #include <rte_mempool.h>
66 #include <rte_mbuf.h>
67 #include <rte_acl.h>
68 #include <rte_lpm.h>
69 #include <rte_lpm6.h>
70 #include <rte_hash.h>
71 #include <rte_jhash.h>
72 #include <rte_cryptodev.h>
73 
74 #include "ipsec.h"
75 #include "parser.h"
76 
77 #define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
78 
79 #define MAX_JUMBO_PKT_LEN  9600
80 
81 #define MEMPOOL_CACHE_SIZE 256
82 
83 #define NB_MBUF	(32000)
84 
85 #define CDEV_QUEUE_DESC 2048
86 #define CDEV_MAP_ENTRIES 1024
87 #define CDEV_MP_NB_OBJS 2048
88 #define CDEV_MP_CACHE_SZ 64
89 #define MAX_QUEUE_PAIRS 1
90 
91 #define OPTION_CONFIG		"config"
92 #define OPTION_SINGLE_SA	"single-sa"
93 
94 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
95 
96 #define NB_SOCKETS 4
97 
98 /* Configure how many packets ahead to prefetch when reading packets */
99 #define PREFETCH_OFFSET	3
100 
101 #define MAX_RX_QUEUE_PER_LCORE 16
102 
103 #define MAX_LCORE_PARAMS 1024
104 
105 #define UNPROTECTED_PORT(port) (unprotected_port_mask & (1 << (port)))
106 
107 /*
108  * Configurable number of RX/TX ring descriptors
109  */
110 #define IPSEC_SECGW_RX_DESC_DEFAULT 128
111 #define IPSEC_SECGW_TX_DESC_DEFAULT 512
112 static uint16_t nb_rxd = IPSEC_SECGW_RX_DESC_DEFAULT;
113 static uint16_t nb_txd = IPSEC_SECGW_TX_DESC_DEFAULT;
114 
115 #if RTE_BYTE_ORDER != RTE_LITTLE_ENDIAN
116 #define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
117 	(((uint64_t)((a) & 0xff) << 56) | \
118 	((uint64_t)((b) & 0xff) << 48) | \
119 	((uint64_t)((c) & 0xff) << 40) | \
120 	((uint64_t)((d) & 0xff) << 32) | \
121 	((uint64_t)((e) & 0xff) << 24) | \
122 	((uint64_t)((f) & 0xff) << 16) | \
123 	((uint64_t)((g) & 0xff) << 8)  | \
124 	((uint64_t)(h) & 0xff))
125 #else
126 #define __BYTES_TO_UINT64(a, b, c, d, e, f, g, h) \
127 	(((uint64_t)((h) & 0xff) << 56) | \
128 	((uint64_t)((g) & 0xff) << 48) | \
129 	((uint64_t)((f) & 0xff) << 40) | \
130 	((uint64_t)((e) & 0xff) << 32) | \
131 	((uint64_t)((d) & 0xff) << 24) | \
132 	((uint64_t)((c) & 0xff) << 16) | \
133 	((uint64_t)((b) & 0xff) << 8) | \
134 	((uint64_t)(a) & 0xff))
135 #endif
136 #define ETHADDR(a, b, c, d, e, f) (__BYTES_TO_UINT64(a, b, c, d, e, f, 0, 0))
137 
138 #define ETHADDR_TO_UINT64(addr) __BYTES_TO_UINT64( \
139 		addr.addr_bytes[0], addr.addr_bytes[1], \
140 		addr.addr_bytes[2], addr.addr_bytes[3], \
141 		addr.addr_bytes[4], addr.addr_bytes[5], \
142 		0, 0)
143 
144 /* Per-port source and destination Ethernet addresses */
145 struct ethaddr_info {
146 	uint64_t src, dst;
147 };
148 
149 struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
150 	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x7e, 0x94, 0x9a) },
151 	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x22, 0xa1, 0xd9) },
152 	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x08, 0x69, 0x26) },
153 	{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }
154 };
155 
156 /* mask of enabled ports */
157 static uint32_t enabled_port_mask;
158 static uint32_t unprotected_port_mask;
159 static int32_t promiscuous_on = 1;
160 static int32_t numa_on = 1; /**< NUMA is enabled by default. */
161 static uint32_t nb_lcores;
162 static uint32_t single_sa;
163 static uint32_t single_sa_idx;
164 
165 struct lcore_rx_queue {
166 	uint8_t port_id;
167 	uint8_t queue_id;
168 } __rte_cache_aligned;
169 
170 struct lcore_params {
171 	uint8_t port_id;
172 	uint8_t queue_id;
173 	uint8_t lcore_id;
174 } __rte_cache_aligned;
175 
176 static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
177 
178 static struct lcore_params *lcore_params;
179 static uint16_t nb_lcore_params;
180 
181 static struct rte_hash *cdev_map_in;
182 static struct rte_hash *cdev_map_out;
183 
184 struct buffer {
185 	uint16_t len;
186 	struct rte_mbuf *m_table[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
187 };
188 
189 struct lcore_conf {
190 	uint16_t nb_rx_queue;
191 	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
192 	uint16_t tx_queue_id[RTE_MAX_ETHPORTS];
193 	struct buffer tx_mbufs[RTE_MAX_ETHPORTS];
194 	struct ipsec_ctx inbound;
195 	struct ipsec_ctx outbound;
196 	struct rt_ctx *rt4_ctx;
197 	struct rt_ctx *rt6_ctx;
198 } __rte_cache_aligned;
199 
200 static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
201 
202 static struct rte_eth_conf port_conf = {
203 	.rxmode = {
204 		.mq_mode	= ETH_MQ_RX_RSS,
205 		.max_rx_pkt_len = ETHER_MAX_LEN,
206 		.split_hdr_size = 0,
207 		.header_split   = 0, /**< Header Split disabled */
208 		.hw_ip_checksum = 1, /**< IP checksum offload enabled */
209 		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
210 		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
211 		.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
212 	},
213 	.rx_adv_conf = {
214 		.rss_conf = {
215 			.rss_key = NULL,
216 			.rss_hf = ETH_RSS_IP | ETH_RSS_UDP |
217 				ETH_RSS_TCP | ETH_RSS_SCTP,
218 		},
219 	},
220 	.txmode = {
221 		.mq_mode = ETH_MQ_TX_NONE,
222 	},
223 };
224 
225 static struct socket_ctx socket_ctx[NB_SOCKETS];
226 
227 struct traffic_type {
228 	const uint8_t *data[MAX_PKT_BURST * 2];
229 	struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
230 	uint32_t res[MAX_PKT_BURST * 2];
231 	uint32_t num;
232 };
233 
234 struct ipsec_traffic {
235 	struct traffic_type ipsec;
236 	struct traffic_type ip4;
237 	struct traffic_type ip6;
238 };
239 
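/*
 * Classify a received packet: strip the Ethernet header, then queue ESP
 * packets on the ipsec list and other IPv4/IPv6 packets on the ip4/ip6
 * lists. Unsupported ether types are dropped.
 */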
240 static inline void
241 prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t)
242 {
243 	uint8_t *nlp;
244 	struct ether_hdr *eth;
245 
246 	eth = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
247 	if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
248 		nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
249 		nlp = RTE_PTR_ADD(nlp, offsetof(struct ip, ip_p));
250 		if (*nlp == IPPROTO_ESP)
251 			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
252 		else {
253 			t->ip4.data[t->ip4.num] = nlp;
254 			t->ip4.pkts[(t->ip4.num)++] = pkt;
255 		}
256 	} else if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
257 		nlp = (uint8_t *)rte_pktmbuf_adj(pkt, ETHER_HDR_LEN);
258 		nlp = RTE_PTR_ADD(nlp, offsetof(struct ip6_hdr, ip6_nxt));
259 		if (*nlp == IPPROTO_ESP)
260 			t->ipsec.pkts[(t->ipsec.num)++] = pkt;
261 		else {
262 			t->ip6.data[t->ip6.num] = nlp;
263 			t->ip6.pkts[(t->ip6.num)++] = pkt;
264 		}
265 	} else {
266 		/* Unknown/Unsupported type, drop the packet */
267 		RTE_LOG(ERR, IPSEC, "Unsupported packet type\n");
268 		rte_pktmbuf_free(pkt);
269 	}
270 }
271 
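/* Classify a burst of received packets, prefetching PREFETCH_OFFSET packets ahead */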
272 static inline void
273 prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
274 		uint16_t nb_pkts)
275 {
276 	int32_t i;
277 
278 	t->ipsec.num = 0;
279 	t->ip4.num = 0;
280 	t->ip6.num = 0;
281 
282 	for (i = 0; i < (nb_pkts - PREFETCH_OFFSET); i++) {
283 		rte_prefetch0(rte_pktmbuf_mtod(pkts[i + PREFETCH_OFFSET],
284 					void *));
285 		prepare_one_packet(pkts[i], t);
286 	}
287 	/* Process the remaining packets */
288 	for (; i < nb_pkts; i++)
289 		prepare_one_packet(pkts[i], t);
290 }
291 
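/*
 * Prepend the Ethernet header, set the TX offload flags and l2/l3 lengths,
 * and fill in the per-port source/destination MAC addresses.
 */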
292 static inline void
293 prepare_tx_pkt(struct rte_mbuf *pkt, uint8_t port)
294 {
295 	struct ip *ip;
296 	struct ether_hdr *ethhdr;
297 
298 	ip = rte_pktmbuf_mtod(pkt, struct ip *);
299 
300 	ethhdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, ETHER_HDR_LEN);
301 
302 	if (ip->ip_v == IPVERSION) {
303 		pkt->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_IPV4;
304 		pkt->l3_len = sizeof(struct ip);
305 		pkt->l2_len = ETHER_HDR_LEN;
306 
307 		ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
308 	} else {
309 		pkt->ol_flags |= PKT_TX_IPV6;
310 		pkt->l3_len = sizeof(struct ip6_hdr);
311 		pkt->l2_len = ETHER_HDR_LEN;
312 
313 		ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
314 	}
315 
316 	memcpy(&ethhdr->s_addr, &ethaddr_tbl[port].src,
317 			sizeof(struct ether_addr));
318 	memcpy(&ethhdr->d_addr, &ethaddr_tbl[port].dst,
319 			sizeof(struct ether_addr));
320 }
321 
322 static inline void
323 prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint8_t port)
324 {
325 	int32_t i;
326 	const int32_t prefetch_offset = 2;
327 
328 	for (i = 0; i < (nb_pkts - prefetch_offset); i++) {
329 		rte_mbuf_prefetch_part2(pkts[i + prefetch_offset]);
330 		prepare_tx_pkt(pkts[i], port);
331 	}
332 	/* Process left packets */
333 	/* Process the remaining packets */
334 		prepare_tx_pkt(pkts[i], port);
335 }
336 
337 /* Send burst of packets on an output interface */
338 static inline int32_t
339 send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
340 {
341 	struct rte_mbuf **m_table;
342 	int32_t ret;
343 	uint16_t queueid;
344 
345 	queueid = qconf->tx_queue_id[port];
346 	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
347 
348 	prepare_tx_burst(m_table, n, port);
349 
350 	ret = rte_eth_tx_burst(port, queueid, m_table, n);
351 	if (unlikely(ret < n)) {
352 		do {
353 			rte_pktmbuf_free(m_table[ret]);
354 		} while (++ret < n);
355 	}
356 
357 	return 0;
358 }
359 
360 /* Enqueue a single packet, and send burst if queue is filled */
361 static inline int32_t
362 send_single_packet(struct rte_mbuf *m, uint8_t port)
363 {
364 	uint32_t lcore_id;
365 	uint16_t len;
366 	struct lcore_conf *qconf;
367 
368 	lcore_id = rte_lcore_id();
369 
370 	qconf = &lcore_conf[lcore_id];
371 	len = qconf->tx_mbufs[port].len;
372 	qconf->tx_mbufs[port].m_table[len] = m;
373 	len++;
374 
375 	/* enough pkts to be sent */
376 	if (unlikely(len == MAX_PKT_BURST)) {
377 		send_burst(qconf, MAX_PKT_BURST, port);
378 		len = 0;
379 	}
380 
381 	qconf->tx_mbufs[port].len = len;
382 	return 0;
383 }
384 
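/*
 * Inbound SP (ACL) check. The first 'lim' entries of 'ip' are clear-text
 * packets, the remaining ones were just decrypted. BYPASS packets are kept,
 * DISCARD (and clear-text non-BYPASS) packets are dropped, and decrypted
 * PROTECT packets are verified against the SA they were received on.
 */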
385 static inline void
386 inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
387 		uint16_t lim)
388 {
389 	struct rte_mbuf *m;
390 	uint32_t i, j, res, sa_idx;
391 
392 	if (ip->num == 0 || sp == NULL)
393 		return;
394 
395 	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
396 			ip->num, DEFAULT_MAX_CATEGORIES);
397 
398 	j = 0;
399 	for (i = 0; i < ip->num; i++) {
400 		m = ip->pkts[i];
401 		res = ip->res[i];
402 		if (res & BYPASS) {
403 			ip->pkts[j++] = m;
404 			continue;
405 		}
406 		if (res & DISCARD || i < lim) {
407 			rte_pktmbuf_free(m);
408 			continue;
409 		}
410 		/* Only check SPI match for processed IPSec packets */
411 		sa_idx = ip->res[i] & PROTECT_MASK;
412 		if (sa_idx == 0 || !inbound_sa_check(sa, m, sa_idx)) {
413 			rte_pktmbuf_free(m);
414 			continue;
415 		}
416 		ip->pkts[j++] = m;
417 	}
418 	ip->num = j;
419 }
420 
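/*
 * Inbound path: decrypt the ESP packets, append them to the ip4/ip6 lists
 * and run the inbound SP check on both.
 */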
421 static inline void
422 process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
423 		struct ipsec_traffic *traffic)
424 {
425 	struct rte_mbuf *m;
426 	uint16_t idx, nb_pkts_in, i, n_ip4, n_ip6;
427 
428 	nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
429 			traffic->ipsec.num, MAX_PKT_BURST);
430 
431 	n_ip4 = traffic->ip4.num;
432 	n_ip6 = traffic->ip6.num;
433 
434 	/* Split decrypted packets into ip4/ip6, then run the SP/ACL inbound checks */
435 	for (i = 0; i < nb_pkts_in; i++) {
436 		m = traffic->ipsec.pkts[i];
437 		struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
438 		if (ip->ip_v == IPVERSION) {
439 			idx = traffic->ip4.num++;
440 			traffic->ip4.pkts[idx] = m;
441 			traffic->ip4.data[idx] = rte_pktmbuf_mtod_offset(m,
442 					uint8_t *, offsetof(struct ip, ip_p));
443 		} else if (ip->ip_v == IP6_VERSION) {
444 			idx = traffic->ip6.num++;
445 			traffic->ip6.pkts[idx] = m;
446 			traffic->ip6.data[idx] = rte_pktmbuf_mtod_offset(m,
447 					uint8_t *,
448 					offsetof(struct ip6_hdr, ip6_nxt));
449 		} else
450 			rte_pktmbuf_free(m);
451 	}
452 
453 	inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4,
454 			n_ip4);
455 
456 	inbound_sp_sa(ipsec_ctx->sp6_ctx, ipsec_ctx->sa_ctx, &traffic->ip6,
457 			n_ip6);
458 }
459 
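/*
 * Outbound SP (ACL) check: DISCARD and unmatched packets are dropped,
 * PROTECT packets are moved to the ipsec list together with their SA index,
 * BYPASS packets stay on the ip list.
 */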
460 static inline void
461 outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
462 		struct traffic_type *ipsec)
463 {
464 	struct rte_mbuf *m;
465 	uint32_t i, j, sa_idx;
466 
467 	if (ip->num == 0 || sp == NULL)
468 		return;
469 
470 	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res,
471 			ip->num, DEFAULT_MAX_CATEGORIES);
472 
473 	j = 0;
474 	for (i = 0; i < ip->num; i++) {
475 		m = ip->pkts[i];
476 		sa_idx = ip->res[i] & PROTECT_MASK;
477 		if ((ip->res[i] == 0) || (ip->res[i] & DISCARD))
478 			rte_pktmbuf_free(m);
479 		else if (sa_idx != 0) {
480 			ipsec->res[ipsec->num] = sa_idx;
481 			ipsec->pkts[ipsec->num++] = m;
482 		} else /* BYPASS */
483 			ip->pkts[j++] = m;
484 	}
485 	ip->num = j;
486 }
487 
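/*
 * Outbound path: drop stray ESP traffic, run the SP check on ip4/ip6
 * traffic, encrypt the PROTECT packets and put the resulting tunnel packets
 * back on the ip4/ip6 lists for routing.
 */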
488 static inline void
489 process_pkts_outbound(struct ipsec_ctx *ipsec_ctx,
490 		struct ipsec_traffic *traffic)
491 {
492 	struct rte_mbuf *m;
493 	uint16_t idx, nb_pkts_out, i;
494 
495 	/* Drop any IPsec traffic from protected ports */
496 	for (i = 0; i < traffic->ipsec.num; i++)
497 		rte_pktmbuf_free(traffic->ipsec.pkts[i]);
498 
499 	traffic->ipsec.num = 0;
500 
501 	outbound_sp(ipsec_ctx->sp4_ctx, &traffic->ip4, &traffic->ipsec);
502 
503 	outbound_sp(ipsec_ctx->sp6_ctx, &traffic->ip6, &traffic->ipsec);
504 
505 	nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
506 			traffic->ipsec.res, traffic->ipsec.num,
507 			MAX_PKT_BURST);
508 
509 	for (i = 0; i < nb_pkts_out; i++) {
510 		m = traffic->ipsec.pkts[i];
511 		struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
512 		if (ip->ip_v == IPVERSION) {
513 			idx = traffic->ip4.num++;
514 			traffic->ip4.pkts[idx] = m;
515 		} else {
516 			idx = traffic->ip6.num++;
517 			traffic->ip6.pkts[idx] = m;
518 		}
519 	}
520 }
521 
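/*
 * Single SA mode, inbound: drop all clear-text traffic, decrypt the ESP
 * packets and sort them into the ip4/ip6 lists for routing.
 */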
522 static inline void
523 process_pkts_inbound_nosp(struct ipsec_ctx *ipsec_ctx,
524 		struct ipsec_traffic *traffic)
525 {
526 	struct rte_mbuf *m;
527 	uint32_t nb_pkts_in, i, idx;
528 
529 	/* Drop any IPv4 traffic from unprotected ports */
530 	for (i = 0; i < traffic->ip4.num; i++)
531 		rte_pktmbuf_free(traffic->ip4.pkts[i]);
532 
533 	traffic->ip4.num = 0;
534 
535 	/* Drop any IPv6 traffic from unprotected ports */
536 	for (i = 0; i < traffic->ip6.num; i++)
537 		rte_pktmbuf_free(traffic->ip6.pkts[i]);
538 
539 	traffic->ip6.num = 0;
540 
541 	nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
542 			traffic->ipsec.num, MAX_PKT_BURST);
543 
544 	for (i = 0; i < nb_pkts_in; i++) {
545 		m = traffic->ipsec.pkts[i];
546 		struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
547 		if (ip->ip_v == IPVERSION) {
548 			idx = traffic->ip4.num++;
549 			traffic->ip4.pkts[idx] = m;
550 		} else {
551 			idx = traffic->ip6.num++;
552 			traffic->ip6.pkts[idx] = m;
553 		}
554 	}
555 }
556 
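/*
 * Single SA mode, outbound: bypass the SP and encrypt the traffic with the
 * SA selected by --single-sa.
 */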
557 static inline void
558 process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
559 		struct ipsec_traffic *traffic)
560 {
561 	struct rte_mbuf *m;
562 	uint32_t nb_pkts_out, i;
563 	struct ip *ip;
564 
565 	/* Drop any IPsec traffic from protected ports */
566 	for (i = 0; i < traffic->ipsec.num; i++)
567 		rte_pktmbuf_free(traffic->ipsec.pkts[i]);
568 
569 	traffic->ipsec.num = 0;
570 
571 	for (i = 0; i < traffic->ip4.num; i++)
572 		traffic->ip4.res[i] = single_sa_idx;
573 
574 	for (i = 0; i < traffic->ip6.num; i++)
575 		traffic->ip6.res[i] = single_sa_idx;
576 
577 	nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ip4.pkts,
578 			traffic->ip4.res, traffic->ip4.num,
579 			MAX_PKT_BURST);
580 
581 	/* They all use the same SA (ip4 or ip6 tunnel) */
582 	m = traffic->ip4.pkts[0];
583 	ip = rte_pktmbuf_mtod(m, struct ip *);
584 	if (ip->ip_v == IPVERSION)
585 		traffic->ip4.num = nb_pkts_out;
586 	else
587 		traffic->ip6.num = nb_pkts_out;
588 }
589 
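/*
 * Route IPv4 packets: LPM lookup on the destination address, then send on
 * the resulting port; packets with no route are dropped.
 */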
590 static inline void
591 route4_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
592 {
593 	uint32_t hop[MAX_PKT_BURST * 2];
594 	uint32_t dst_ip[MAX_PKT_BURST * 2];
595 	uint16_t i, offset;
596 
597 	if (nb_pkts == 0)
598 		return;
599 
600 	for (i = 0; i < nb_pkts; i++) {
601 		offset = offsetof(struct ip, ip_dst);
602 		dst_ip[i] = *rte_pktmbuf_mtod_offset(pkts[i],
603 				uint32_t *, offset);
604 		dst_ip[i] = rte_be_to_cpu_32(dst_ip[i]);
605 	}
606 
607 	rte_lpm_lookup_bulk((struct rte_lpm *)rt_ctx, dst_ip, hop, nb_pkts);
608 
609 	for (i = 0; i < nb_pkts; i++) {
610 		if ((hop[i] & RTE_LPM_LOOKUP_SUCCESS) == 0) {
611 			rte_pktmbuf_free(pkts[i]);
612 			continue;
613 		}
614 		send_single_packet(pkts[i], hop[i] & 0xff);
615 	}
616 }
617 
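/*
 * Route IPv6 packets: LPM6 lookup on the destination address, then send on
 * the resulting port; packets with no route are dropped.
 */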
618 static inline void
619 route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
620 {
621 	int16_t hop[MAX_PKT_BURST * 2];
622 	uint8_t dst_ip[MAX_PKT_BURST * 2][16];
623 	uint8_t *ip6_dst;
624 	uint16_t i, offset;
625 
626 	if (nb_pkts == 0)
627 		return;
628 
629 	for (i = 0; i < nb_pkts; i++) {
630 		offset = offsetof(struct ip6_hdr, ip6_dst);
631 		ip6_dst = rte_pktmbuf_mtod_offset(pkts[i], uint8_t *, offset);
632 		memcpy(&dst_ip[i][0], ip6_dst, 16);
633 	}
634 
635 	rte_lpm6_lookup_bulk_func((struct rte_lpm6 *)rt_ctx, dst_ip,
636 			hop, nb_pkts);
637 
638 	for (i = 0; i < nb_pkts; i++) {
639 		if (hop[i] == -1) {
640 			rte_pktmbuf_free(pkts[i]);
641 			continue;
642 		}
643 		send_single_packet(pkts[i], hop[i] & 0xff);
644 	}
645 }
646 
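/*
 * Per-burst pipeline: classify the packets, apply inbound or outbound IPsec
 * processing depending on whether the port is protected, then route the
 * resulting packets.
 */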
647 static inline void
648 process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
649 		uint8_t nb_pkts, uint8_t portid)
650 {
651 	struct ipsec_traffic traffic;
652 
653 	prepare_traffic(pkts, &traffic, nb_pkts);
654 
655 	if (unlikely(single_sa)) {
656 		if (UNPROTECTED_PORT(portid))
657 			process_pkts_inbound_nosp(&qconf->inbound, &traffic);
658 		else
659 			process_pkts_outbound_nosp(&qconf->outbound, &traffic);
660 	} else {
661 		if (UNPROTECTED_PORT(portid))
662 			process_pkts_inbound(&qconf->inbound, &traffic);
663 		else
664 			process_pkts_outbound(&qconf->outbound, &traffic);
665 	}
666 
667 	route4_pkts(qconf->rt4_ctx, traffic.ip4.pkts, traffic.ip4.num);
668 	route6_pkts(qconf->rt6_ctx, traffic.ip6.pkts, traffic.ip6.num);
669 }
670 
671 static inline void
672 drain_buffers(struct lcore_conf *qconf)
673 {
674 	struct buffer *buf;
675 	uint32_t portid;
676 
677 	for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
678 		buf = &qconf->tx_mbufs[portid];
679 		if (buf->len == 0)
680 			continue;
681 		send_burst(qconf, buf->len, portid);
682 		buf->len = 0;
683 	}
684 }
685 
686 /* main processing loop */
687 static int32_t
688 main_loop(__attribute__((unused)) void *dummy)
689 {
690 	struct rte_mbuf *pkts[MAX_PKT_BURST];
691 	uint32_t lcore_id;
692 	uint64_t prev_tsc, diff_tsc, cur_tsc;
693 	int32_t i, nb_rx;
694 	uint8_t portid, queueid;
695 	struct lcore_conf *qconf;
696 	int32_t socket_id;
697 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
698 			/ US_PER_S * BURST_TX_DRAIN_US;
699 	struct lcore_rx_queue *rxql;
700 
701 	prev_tsc = 0;
702 	lcore_id = rte_lcore_id();
703 	qconf = &lcore_conf[lcore_id];
704 	rxql = qconf->rx_queue_list;
705 	socket_id = rte_lcore_to_socket_id(lcore_id);
706 
707 	qconf->rt4_ctx = socket_ctx[socket_id].rt_ip4;
708 	qconf->rt6_ctx = socket_ctx[socket_id].rt_ip6;
709 	qconf->inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
710 	qconf->inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
711 	qconf->inbound.sa_ctx = socket_ctx[socket_id].sa_in;
712 	qconf->inbound.cdev_map = cdev_map_in;
713 	qconf->outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
714 	qconf->outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
715 	qconf->outbound.sa_ctx = socket_ctx[socket_id].sa_out;
716 	qconf->outbound.cdev_map = cdev_map_out;
717 
718 	if (qconf->nb_rx_queue == 0) {
719 		RTE_LOG(INFO, IPSEC, "lcore %u has nothing to do\n", lcore_id);
720 		return 0;
721 	}
722 
723 	RTE_LOG(INFO, IPSEC, "entering main loop on lcore %u\n", lcore_id);
724 
725 	for (i = 0; i < qconf->nb_rx_queue; i++) {
726 		portid = rxql[i].port_id;
727 		queueid = rxql[i].queue_id;
728 		RTE_LOG(INFO, IPSEC,
729 			" -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
730 			lcore_id, portid, queueid);
731 	}
732 
733 	while (1) {
734 		cur_tsc = rte_rdtsc();
735 
736 		/* TX queue buffer drain */
737 		diff_tsc = cur_tsc - prev_tsc;
738 
739 		if (unlikely(diff_tsc > drain_tsc)) {
740 			drain_buffers(qconf);
741 			prev_tsc = cur_tsc;
742 		}
743 
744 		/* Read packet from RX queues */
745 		for (i = 0; i < qconf->nb_rx_queue; ++i) {
746 			portid = rxql[i].port_id;
747 			queueid = rxql[i].queue_id;
748 			nb_rx = rte_eth_rx_burst(portid, queueid,
749 					pkts, MAX_PKT_BURST);
750 
751 			if (nb_rx > 0)
752 				process_pkts(qconf, pkts, nb_rx, portid);
753 		}
754 	}
755 }
756 
757 static int32_t
758 check_params(void)
759 {
760 	uint8_t lcore, portid, nb_ports;
761 	uint16_t i;
762 	int32_t socket_id;
763 
764 	if (lcore_params == NULL) {
765 		printf("Error: No port/queue/core mappings\n");
766 		return -1;
767 	}
768 
769 	nb_ports = rte_eth_dev_count();
770 
771 	for (i = 0; i < nb_lcore_params; ++i) {
772 		lcore = lcore_params[i].lcore_id;
773 		if (!rte_lcore_is_enabled(lcore)) {
774 			printf("error: lcore %hhu is not enabled in "
775 				"lcore mask\n", lcore);
776 			return -1;
777 		}
778 		socket_id = rte_lcore_to_socket_id(lcore);
779 		if (socket_id != 0 && numa_on == 0) {
780 			printf("warning: lcore %hhu is on socket %d "
781 				"with numa off\n",
782 				lcore, socket_id);
783 		}
784 		portid = lcore_params[i].port_id;
785 		if ((enabled_port_mask & (1 << portid)) == 0) {
786 			printf("port %u is not enabled in port mask\n", portid);
787 			return -1;
788 		}
789 		if (portid >= nb_ports) {
790 			printf("port %u is not present on the board\n", portid);
791 			return -1;
792 		}
793 	}
794 	return 0;
795 }
796 
797 static uint8_t
798 get_port_nb_rx_queues(const uint8_t port)
799 {
800 	int32_t queue = -1;
801 	uint16_t i;
802 
803 	for (i = 0; i < nb_lcore_params; ++i) {
804 		if (lcore_params[i].port_id == port &&
805 				lcore_params[i].queue_id > queue)
806 			queue = lcore_params[i].queue_id;
807 	}
808 	return (uint8_t)(++queue);
809 }
810 
811 static int32_t
812 init_lcore_rx_queues(void)
813 {
814 	uint16_t i, nb_rx_queue;
815 	uint8_t lcore;
816 
817 	for (i = 0; i < nb_lcore_params; ++i) {
818 		lcore = lcore_params[i].lcore_id;
819 		nb_rx_queue = lcore_conf[lcore].nb_rx_queue;
820 		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
821 			printf("error: too many queues (%u) for lcore: %u\n",
822 					nb_rx_queue + 1, lcore);
823 			return -1;
824 		}
825 		lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
826 			lcore_params[i].port_id;
827 		lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
828 			lcore_params[i].queue_id;
829 		lcore_conf[lcore].nb_rx_queue++;
830 	}
831 	return 0;
832 }
833 
834 /* display usage */
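/*
 * Illustrative invocation (port/queue/lcore numbers are examples only):
 *   ipsec-secgw [EAL options] -- -p 0x3 -P -u 0x1
 *	--config="(0,0,1),(1,0,2)" -f /path/to/config_file
 */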
835 static void
836 print_usage(const char *prgname)
837 {
838 	printf("%s [EAL options] -- -p PORTMASK -P -u PORTMASK"
839 		"  --"OPTION_CONFIG" (port,queue,lcore)[,(port,queue,lcore)]"
840 		" --single-sa SAIDX -f CONFIG_FILE\n"
841 		"  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
842 		"  -P : enable promiscuous mode\n"
843 		"  -u PORTMASK: hexadecimal bitmask of unprotected ports\n"
844 		"  --"OPTION_CONFIG": (port,queue,lcore): "
845 		"rx queues configuration\n"
846 		"  --single-sa SAIDX: use single SA index for outbound, "
847 		"bypassing the SP\n"
848 		"  -f CONFIG_FILE: Configuration file path\n",
849 		prgname);
850 }
851 
852 static int32_t
853 parse_portmask(const char *portmask)
854 {
855 	char *end = NULL;
856 	unsigned long pm;
857 
858 	/* parse hexadecimal string */
859 	pm = strtoul(portmask, &end, 16);
860 	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
861 		return -1;
862 
863 	if ((pm == 0) && errno)
864 		return -1;
865 
866 	return pm;
867 }
868 
869 static int32_t
870 parse_decimal(const char *str)
871 {
872 	char *end = NULL;
873 	unsigned long num;
874 
875 	num = strtoul(str, &end, 10);
876 	if ((str[0] == '\0') || (end == NULL) || (*end != '\0'))
877 		return -1;
878 
879 	return num;
880 }
881 
882 static int32_t
883 parse_config(const char *q_arg)
884 {
885 	char s[256];
886 	const char *p, *p0 = q_arg;
887 	char *end;
888 	enum fieldnames {
889 		FLD_PORT = 0,
890 		FLD_QUEUE,
891 		FLD_LCORE,
892 		_NUM_FLD
893 	};
894 	unsigned long int_fld[_NUM_FLD];
895 	char *str_fld[_NUM_FLD];
896 	int32_t i;
897 	uint32_t size;
898 
899 	nb_lcore_params = 0;
900 
901 	while ((p = strchr(p0, '(')) != NULL) {
902 		++p;
903 		p0 = strchr(p, ')');
904 		if (p0 == NULL)
905 			return -1;
906 
907 		size = p0 - p;
908 		if (size >= sizeof(s))
909 			return -1;
910 
911 		snprintf(s, sizeof(s), "%.*s", size, p);
912 		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
913 				_NUM_FLD)
914 			return -1;
915 		for (i = 0; i < _NUM_FLD; i++) {
916 			errno = 0;
917 			int_fld[i] = strtoul(str_fld[i], &end, 0);
918 			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
919 				return -1;
920 		}
921 		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
922 			printf("exceeded max number of lcore params: %hu\n",
923 				nb_lcore_params);
924 			return -1;
925 		}
926 		lcore_params_array[nb_lcore_params].port_id =
927 			(uint8_t)int_fld[FLD_PORT];
928 		lcore_params_array[nb_lcore_params].queue_id =
929 			(uint8_t)int_fld[FLD_QUEUE];
930 		lcore_params_array[nb_lcore_params].lcore_id =
931 			(uint8_t)int_fld[FLD_LCORE];
932 		++nb_lcore_params;
933 	}
934 	lcore_params = lcore_params_array;
935 	return 0;
936 }
937 
938 #define __STRNCMP(name, opt) (!strncmp(name, opt, sizeof(opt)))
939 static int32_t
940 parse_args_long_options(struct option *lgopts, int32_t option_index)
941 {
942 	int32_t ret = -1;
943 	const char *optname = lgopts[option_index].name;
944 
945 	if (__STRNCMP(optname, OPTION_CONFIG)) {
946 		ret = parse_config(optarg);
947 		if (ret)
948 			printf("invalid config\n");
949 	}
950 
951 	if (__STRNCMP(optname, OPTION_SINGLE_SA)) {
952 		ret = parse_decimal(optarg);
953 		if (ret != -1) {
954 			single_sa = 1;
955 			single_sa_idx = ret;
956 			printf("Configured with single SA index %u\n",
957 					single_sa_idx);
958 			ret = 0;
959 		}
960 	}
961 
962 	return ret;
963 }
964 #undef __STRNCMP
965 
966 static int32_t
967 parse_args(int32_t argc, char **argv)
968 {
969 	int32_t opt, ret;
970 	char **argvopt;
971 	int32_t option_index;
972 	char *prgname = argv[0];
973 	static struct option lgopts[] = {
974 		{OPTION_CONFIG, 1, 0, 0},
975 		{OPTION_SINGLE_SA, 1, 0, 0},
976 		{NULL, 0, 0, 0}
977 	};
978 	int32_t f_present = 0;
979 
980 	argvopt = argv;
981 
982 	while ((opt = getopt_long(argc, argvopt, "p:Pu:f:",
983 				lgopts, &option_index)) != EOF) {
984 
985 		switch (opt) {
986 		case 'p':
987 			enabled_port_mask = parse_portmask(optarg);
988 			if (enabled_port_mask == 0) {
989 				printf("invalid portmask\n");
990 				print_usage(prgname);
991 				return -1;
992 			}
993 			break;
994 		case 'P':
995 			printf("Promiscuous mode selected\n");
996 			promiscuous_on = 1;
997 			break;
998 		case 'u':
999 			unprotected_port_mask = parse_portmask(optarg);
1000 			if (unprotected_port_mask == 0) {
1001 				printf("invalid unprotected portmask\n");
1002 				print_usage(prgname);
1003 				return -1;
1004 			}
1005 			break;
1006 		case 'f':
1007 			if (f_present == 1) {
1008 				printf("\"-f\" option present more than "
1009 					"once!\n");
1010 				print_usage(prgname);
1011 				return -1;
1012 			}
1013 			if (parse_cfg_file(optarg) < 0) {
1014 				printf("parsing file \"%s\" failed\n",
1015 					optarg);
1016 				print_usage(prgname);
1017 				return -1;
1018 			}
1019 			f_present = 1;
1020 			break;
1021 		case 0:
1022 			if (parse_args_long_options(lgopts, option_index)) {
1023 				print_usage(prgname);
1024 				return -1;
1025 			}
1026 			break;
1027 		default:
1028 			print_usage(prgname);
1029 			return -1;
1030 		}
1031 	}
1032 
1033 	if (f_present == 0) {
1034 		printf("Mandatory option \"-f\" not present\n");
1035 		return -1;
1036 	}
1037 
1038 	if (optind >= 0)
1039 		argv[optind-1] = prgname;
1040 
1041 	ret = optind-1;
1042 	optind = 0; /* reset getopt lib */
1043 	return ret;
1044 }
1045 
1046 static void
1047 print_ethaddr(const char *name, const struct ether_addr *eth_addr)
1048 {
1049 	char buf[ETHER_ADDR_FMT_SIZE];
1050 	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
1051 	printf("%s%s", name, buf);
1052 }
1053 
1054 /* Check the link status of all ports for up to 9s, then print the final status */
1055 static void
1056 check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
1057 {
1058 #define CHECK_INTERVAL 100 /* 100ms */
1059 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1060 	uint8_t portid, count, all_ports_up, print_flag = 0;
1061 	struct rte_eth_link link;
1062 
1063 	printf("\nChecking link status");
1064 	fflush(stdout);
1065 	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1066 		all_ports_up = 1;
1067 		for (portid = 0; portid < port_num; portid++) {
1068 			if ((port_mask & (1 << portid)) == 0)
1069 				continue;
1070 			memset(&link, 0, sizeof(link));
1071 			rte_eth_link_get_nowait(portid, &link);
1072 			/* print link status if flag set */
1073 			if (print_flag == 1) {
1074 				if (link.link_status)
1075 					printf("Port %d Link Up - speed %u "
1076 						"Mbps - %s\n", (uint8_t)portid,
1077 						(uint32_t)link.link_speed,
1078 				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1079 					("full-duplex") : ("half-duplex"));
1080 				else
1081 					printf("Port %d Link Down\n",
1082 						(uint8_t)portid);
1083 				continue;
1084 			}
1085 			/* clear all_ports_up flag if any link down */
1086 			if (link.link_status == ETH_LINK_DOWN) {
1087 				all_ports_up = 0;
1088 				break;
1089 			}
1090 		}
1091 		/* after finally printing all link status, get out */
1092 		if (print_flag == 1)
1093 			break;
1094 
1095 		if (all_ports_up == 0) {
1096 			printf(".");
1097 			fflush(stdout);
1098 			rte_delay_ms(CHECK_INTERVAL);
1099 		}
1100 
1101 		/* set the print_flag if all ports up or timeout */
1102 		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1103 			print_flag = 1;
1104 			printf("done\n");
1105 		}
1106 	}
1107 }
1108 
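/*
 * Insert a (lcore, cipher algo, auth algo) -> (cdev, qp) entry into the
 * given map, assigning the queue pair to the lcore if it is not already
 * using this crypto device; returns 1 when a new mapping is added.
 */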
1109 static int32_t
1110 add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
1111 		uint16_t qp, struct lcore_params *params,
1112 		struct ipsec_ctx *ipsec_ctx,
1113 		const struct rte_cryptodev_capabilities *cipher,
1114 		const struct rte_cryptodev_capabilities *auth)
1115 {
1116 	int32_t ret = 0;
1117 	unsigned long i;
1118 	struct cdev_key key = { 0 };
1119 
1120 	key.lcore_id = params->lcore_id;
1121 	if (cipher)
1122 		key.cipher_algo = cipher->sym.cipher.algo;
1123 	if (auth)
1124 		key.auth_algo = auth->sym.auth.algo;
1125 
1126 	ret = rte_hash_lookup(map, &key);
1127 	if (ret != -ENOENT)
1128 		return 0;
1129 
1130 	for (i = 0; i < ipsec_ctx->nb_qps; i++)
1131 		if (ipsec_ctx->tbl[i].id == cdev_id)
1132 			break;
1133 
1134 	if (i == ipsec_ctx->nb_qps) {
1135 		if (ipsec_ctx->nb_qps == MAX_QP_PER_LCORE) {
1136 			printf("Maximum number of crypto devices assigned to "
1137 				"a core, increase MAX_QP_PER_LCORE value\n");
1138 			return 0;
1139 		}
1140 		ipsec_ctx->tbl[i].id = cdev_id;
1141 		ipsec_ctx->tbl[i].qp = qp;
1142 		ipsec_ctx->nb_qps++;
1143 		printf("%s cdev mapping: lcore %u using cdev %u qp %u "
1144 				"(cdev_id_qp %lu)\n", str, key.lcore_id,
1145 				cdev_id, qp, i);
1146 	}
1147 
1148 	ret = rte_hash_add_key_data(map, &key, (void *)i);
1149 	if (ret < 0) {
1150 		printf("Failed to insert cdev mapping for (lcore %u, "
1151 				"cdev %u, qp %u), errno %d\n",
1152 				key.lcore_id, ipsec_ctx->tbl[i].id,
1153 				ipsec_ctx->tbl[i].qp, ret);
1154 		return 0;
1155 	}
1156 
1157 	return 1;
1158 }
1159 
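/*
 * Assign a crypto device queue pair to the lcore that handles this port:
 * inbound map for unprotected ports, outbound map otherwise, with one entry
 * per supported cipher/auth capability pair.
 */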
1160 static int32_t
1161 add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
1162 		uint16_t qp, struct lcore_params *params)
1163 {
1164 	int32_t ret = 0;
1165 	const struct rte_cryptodev_capabilities *i, *j;
1166 	struct rte_hash *map;
1167 	struct lcore_conf *qconf;
1168 	struct ipsec_ctx *ipsec_ctx;
1169 	const char *str;
1170 
1171 	qconf = &lcore_conf[params->lcore_id];
1172 
1173 	if ((unprotected_port_mask & (1 << params->port_id)) == 0) {
1174 		map = cdev_map_out;
1175 		ipsec_ctx = &qconf->outbound;
1176 		str = "Outbound";
1177 	} else {
1178 		map = cdev_map_in;
1179 		ipsec_ctx = &qconf->inbound;
1180 		str = "Inbound";
1181 	}
1182 
1183 	/* Require cryptodevs with symmetric operation chaining */
1184 	if (!(dev_info->feature_flags &
1185 				RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
1186 		return ret;
1187 
1188 	for (i = dev_info->capabilities;
1189 			i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) {
1190 		if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1191 			continue;
1192 
1193 		if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
1194 			continue;
1195 
1196 		for (j = dev_info->capabilities;
1197 				j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; j++) {
1198 			if (j->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
1199 				continue;
1200 
1201 			if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
1202 				continue;
1203 
1204 			ret |= add_mapping(map, str, cdev_id, qp, params,
1205 					ipsec_ctx, i, j);
1206 		}
1207 	}
1208 
1209 	return ret;
1210 }
1211 
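/*
 * Create the inbound/outbound cdev maps, assign crypto device queue pairs
 * to lcores (starting from the highest cdev id to favour hardware devices)
 * and configure/start every device that got at least one queue pair.
 */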
1212 static int32_t
1213 cryptodevs_init(void)
1214 {
1215 	struct rte_cryptodev_config dev_conf;
1216 	struct rte_cryptodev_qp_conf qp_conf;
1217 	uint16_t idx, max_nb_qps, qp, i;
1218 	int16_t cdev_id;
1219 	struct rte_hash_parameters params = { 0 };
1220 
1221 	params.entries = CDEV_MAP_ENTRIES;
1222 	params.key_len = sizeof(struct cdev_key);
1223 	params.hash_func = rte_jhash;
1224 	params.hash_func_init_val = 0;
1225 	params.socket_id = rte_socket_id();
1226 
1227 	params.name = "cdev_map_in";
1228 	cdev_map_in = rte_hash_create(&params);
1229 	if (cdev_map_in == NULL)
1230 		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1231 				rte_errno);
1232 
1233 	params.name = "cdev_map_out";
1234 	cdev_map_out = rte_hash_create(&params);
1235 	if (cdev_map_out == NULL)
1236 		rte_panic("Failed to create cdev_map hash table, errno = %d\n",
1237 				rte_errno);
1238 
1239 	printf("lcore/cryptodev/qp mappings:\n");
1240 
1241 	idx = 0;
1242 	/* Start from last cdev id to give HW priority */
1243 	for (cdev_id = rte_cryptodev_count() - 1; cdev_id >= 0; cdev_id--) {
1244 		struct rte_cryptodev_info cdev_info;
1245 
1246 		rte_cryptodev_info_get(cdev_id, &cdev_info);
1247 
1248 		if (nb_lcore_params > cdev_info.max_nb_queue_pairs)
1249 			max_nb_qps = cdev_info.max_nb_queue_pairs;
1250 		else
1251 			max_nb_qps = nb_lcore_params;
1252 
1253 		qp = 0;
1254 		i = 0;
1255 		while (qp < max_nb_qps && i < nb_lcore_params) {
1256 			if (add_cdev_mapping(&cdev_info, cdev_id, qp,
1257 						&lcore_params[idx]))
1258 				qp++;
1259 			idx++;
1260 			idx = idx % nb_lcore_params;
1261 			i++;
1262 		}
1263 
1264 		if (qp == 0)
1265 			continue;
1266 
1267 		dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
1268 		dev_conf.nb_queue_pairs = qp;
1269 		dev_conf.session_mp.nb_objs = CDEV_MP_NB_OBJS;
1270 		dev_conf.session_mp.cache_size = CDEV_MP_CACHE_SZ;
1271 
1272 		if (rte_cryptodev_configure(cdev_id, &dev_conf))
1273 			rte_panic("Failed to initialize cryptodev %u\n",
1274 					cdev_id);
1275 
1276 		qp_conf.nb_descriptors = CDEV_QUEUE_DESC;
1277 		for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
1278 			if (rte_cryptodev_queue_pair_setup(cdev_id, qp,
1279 						&qp_conf, dev_conf.socket_id))
1280 				rte_panic("Failed to setup queue %u for "
1281 						"cdev_id %u\n", qp, cdev_id);
1282 
1283 		if (rte_cryptodev_start(cdev_id))
1284 			rte_panic("Failed to start cryptodev %u\n",
1285 					cdev_id);
1286 	}
1287 
1288 	printf("\n");
1289 
1290 	return 0;
1291 }
1292 
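/*
 * Configure an Ethernet port: one RX queue per (port, queue, lcore) mapping
 * and one TX queue per enabled lcore.
 */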
1293 static void
1294 port_init(uint8_t portid)
1295 {
1296 	struct rte_eth_dev_info dev_info;
1297 	struct rte_eth_txconf *txconf;
1298 	uint16_t nb_tx_queue, nb_rx_queue;
1299 	uint16_t tx_queueid, rx_queueid, queue, lcore_id;
1300 	int32_t ret, socket_id;
1301 	struct lcore_conf *qconf;
1302 	struct ether_addr ethaddr;
1303 
1304 	rte_eth_dev_info_get(portid, &dev_info);
1305 
1306 	printf("Configuring device port %u:\n", portid);
1307 
1308 	rte_eth_macaddr_get(portid, &ethaddr);
1309 	ethaddr_tbl[portid].src = ETHADDR_TO_UINT64(ethaddr);
1310 	print_ethaddr("Address: ", &ethaddr);
1311 	printf("\n");
1312 
1313 	nb_rx_queue = get_port_nb_rx_queues(portid);
1314 	nb_tx_queue = nb_lcores;
1315 
1316 	if (nb_rx_queue > dev_info.max_rx_queues)
1317 		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
1318 				"(max rx queue is %u)\n",
1319 				nb_rx_queue, dev_info.max_rx_queues);
1320 
1321 	if (nb_tx_queue > dev_info.max_tx_queues)
1322 		rte_exit(EXIT_FAILURE, "Error: queue %u not available "
1323 				"(max tx queue is %u)\n",
1324 				nb_tx_queue, dev_info.max_tx_queues);
1325 
1326 	printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
1327 			nb_rx_queue, nb_tx_queue);
1328 
1329 	ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
1330 			&port_conf);
1331 	if (ret < 0)
1332 		rte_exit(EXIT_FAILURE, "Cannot configure device: "
1333 				"err=%d, port=%d\n", ret, portid);
1334 
1335 	/* init one TX queue per lcore */
1336 	tx_queueid = 0;
1337 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1338 		if (rte_lcore_is_enabled(lcore_id) == 0)
1339 			continue;
1340 
1341 		if (numa_on)
1342 			socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
1343 		else
1344 			socket_id = 0;
1345 
1346 		/* init TX queue */
1347 		printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);
1348 
1349 		txconf = &dev_info.default_txconf;
1350 		txconf->txq_flags = 0;
1351 
1352 		ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
1353 				socket_id, txconf);
1354 		if (ret < 0)
1355 			rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
1356 					"err=%d, port=%d\n", ret, portid);
1357 
1358 		qconf = &lcore_conf[lcore_id];
1359 		qconf->tx_queue_id[portid] = tx_queueid;
1360 		tx_queueid++;
1361 
1362 		/* init RX queues */
1363 		for (queue = 0; queue < qconf->nb_rx_queue; ++queue) {
1364 			if (portid != qconf->rx_queue_list[queue].port_id)
1365 				continue;
1366 
1367 			rx_queueid = qconf->rx_queue_list[queue].queue_id;
1368 
1369 			printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid,
1370 					socket_id);
1371 
1372 			ret = rte_eth_rx_queue_setup(portid, rx_queueid,
1373 					nb_rxd,	socket_id, NULL,
1374 					socket_ctx[socket_id].mbuf_pool);
1375 			if (ret < 0)
1376 				rte_exit(EXIT_FAILURE,
1377 					"rte_eth_rx_queue_setup: err=%d, "
1378 					"port=%d\n", ret, portid);
1379 		}
1380 	}
1381 	printf("\n");
1382 }
1383 
1384 static void
1385 pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
1386 {
1387 	char s[64];
1388 
1389 	snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
1390 	ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
1391 			MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
1392 			RTE_MBUF_DEFAULT_BUF_SIZE,
1393 			socket_id);
1394 	if (ctx->mbuf_pool == NULL)
1395 		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
1396 				socket_id);
1397 	else
1398 		printf("Allocated mbuf pool on socket %d\n", socket_id);
1399 }
1400 
1401 int32_t
1402 main(int32_t argc, char **argv)
1403 {
1404 	int32_t ret;
1405 	uint32_t lcore_id, nb_ports;
1406 	uint8_t portid, socket_id;
1407 
1408 	/* init EAL */
1409 	ret = rte_eal_init(argc, argv);
1410 	if (ret < 0)
1411 		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
1412 	argc -= ret;
1413 	argv += ret;
1414 
1415 	/* parse application arguments (after the EAL ones) */
1416 	ret = parse_args(argc, argv);
1417 	if (ret < 0)
1418 		rte_exit(EXIT_FAILURE, "Invalid parameters\n");
1419 
1420 	if ((unprotected_port_mask & enabled_port_mask) !=
1421 			unprotected_port_mask)
1422 		rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
1423 				unprotected_port_mask);
1424 
1425 	nb_ports = rte_eth_dev_count();
1426 
1427 	if (check_params() < 0)
1428 		rte_exit(EXIT_FAILURE, "check_params failed\n");
1429 
1430 	ret = init_lcore_rx_queues();
1431 	if (ret < 0)
1432 		rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
1433 
1434 	nb_lcores = rte_lcore_count();
1435 
1436 	/* Replicate each context per socket */
1437 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1438 		if (rte_lcore_is_enabled(lcore_id) == 0)
1439 			continue;
1440 
1441 		if (numa_on)
1442 			socket_id = (uint8_t)rte_lcore_to_socket_id(lcore_id);
1443 		else
1444 			socket_id = 0;
1445 
1446 		if (socket_ctx[socket_id].mbuf_pool)
1447 			continue;
1448 
1449 		sa_init(&socket_ctx[socket_id], socket_id);
1450 
1451 		sp4_init(&socket_ctx[socket_id], socket_id);
1452 
1453 		sp6_init(&socket_ctx[socket_id], socket_id);
1454 
1455 		rt_init(&socket_ctx[socket_id], socket_id);
1456 
1457 		pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
1458 	}
1459 
1460 	for (portid = 0; portid < nb_ports; portid++) {
1461 		if ((enabled_port_mask & (1 << portid)) == 0)
1462 			continue;
1463 
1464 		port_init(portid);
1465 	}
1466 
1467 	cryptodevs_init();
1468 
1469 	/* start ports */
1470 	for (portid = 0; portid < nb_ports; portid++) {
1471 		if ((enabled_port_mask & (1 << portid)) == 0)
1472 			continue;
1473 
1474 		/* Start device */
1475 		ret = rte_eth_dev_start(portid);
1476 		if (ret < 0)
1477 			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
1478 					"err=%d, port=%d\n", ret, portid);
1479 		/*
1480 		 * If enabled, put device in promiscuous mode.
1481 		 * This allows IO forwarding mode to forward packets
1482 		 * to itself through 2 cross-connected ports of the
1483 		 * target machine.
1484 		 */
1485 		if (promiscuous_on)
1486 			rte_eth_promiscuous_enable(portid);
1487 	}
1488 
1489 	check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
1490 
1491 	/* launch per-lcore init on every lcore */
1492 	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
1493 	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
1494 		if (rte_eal_wait_lcore(lcore_id) < 0)
1495 			return -1;
1496 	}
1497 
1498 	return 0;
1499 }
1500