1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
3 * Copyright (C) 2020 Marvell International Ltd.
4 */
5 #include <rte_acl.h>
6 #include <rte_event_eth_tx_adapter.h>
7 #include <rte_lpm.h>
8 #include <rte_lpm6.h>
9
10 #include "event_helper.h"
11 #include "ipsec.h"
12 #include "ipsec-secgw.h"
13 #include "ipsec_worker.h"
14
15 struct port_drv_mode_data {
16 struct rte_security_session *sess;
17 struct rte_security_ctx *ctx;
18 };
19
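/*
 * Classify a packet from its mbuf ptype as plain or IPsec (ESP) IPv4/IPv6
 * and point *nlp at the next-protocol field of the L3 header, which is
 * later fed to the SP (ACL) lookup.
 */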
20 static inline enum pkt_type
21 process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
22 {
23 struct rte_ether_hdr *eth;
24 uint32_t ptype = pkt->packet_type;
25
26 eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
27 rte_prefetch0(eth);
28
29 if (RTE_ETH_IS_IPV4_HDR(ptype)) {
30 *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
31 offsetof(struct ip, ip_p));
32 if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
33 return PKT_TYPE_IPSEC_IPV4;
34 else
35 return PKT_TYPE_PLAIN_IPV4;
36 } else if (RTE_ETH_IS_IPV6_HDR(ptype)) {
37 *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
38 offsetof(struct ip6_hdr, ip6_nxt));
39 if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
40 return PKT_TYPE_IPSEC_IPV6;
41 else
42 return PKT_TYPE_PLAIN_IPV6;
43 }
44
45 /* Unknown/Unsupported type */
46 return PKT_TYPE_INVALID;
47 }
48
49 static inline void
50 update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
51 {
52 struct rte_ether_hdr *ethhdr;
53
54 ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
55 memcpy(&ethhdr->src_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
56 memcpy(&ethhdr->dst_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
57 }
58
59 static inline void
60 ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
61 {
62 /* Save the destination port in the mbuf */
63 m->port = port_id;
64
65 /* Save eth queue for Tx */
66 rte_event_eth_tx_adapter_txq_set(m, 0);
67 }
68
69 static inline void
70 ev_vector_attr_init(struct rte_event_vector *vec)
71 {
72 vec->attr_valid = 1;
73 vec->port = 0xFFFF;
74 vec->queue = 0;
75 }
76
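/*
 * Keep the vector's port attribute valid only while every mbuf added so far
 * came from the same port; clear attr_valid on the first mismatch.
 */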
77 static inline void
78 ev_vector_attr_update(struct rte_event_vector *vec, struct rte_mbuf *pkt)
79 {
80 if (vec->port == 0xFFFF) {
81 vec->port = pkt->port;
82 return;
83 }
84 if (vec->attr_valid && (vec->port != pkt->port))
85 vec->attr_valid = 0;
86 }
87
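/*
 * Build a per-port table of the first outbound inline-protocol session
 * configured for each port. The driver mode worker uses this table to
 * attach session metadata to packets received on protected ports.
 */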
88 static inline void
89 prepare_out_sessions_tbl(struct sa_ctx *sa_out,
90 struct port_drv_mode_data *data,
91 uint16_t size)
92 {
93 struct rte_ipsec_session *pri_sess;
94 struct ipsec_sa *sa;
95 uint32_t i;
96
97 if (!sa_out)
98 return;
99
100 for (i = 0; i < sa_out->nb_sa; i++) {
101
102 sa = &sa_out->sa[i];
103 if (!sa)
104 continue;
105
106 pri_sess = ipsec_get_primary_session(sa);
107 if (!pri_sess)
108 continue;
109
110 if (pri_sess->type !=
111 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
112
113 RTE_LOG(ERR, IPSEC, "Invalid session type %d\n",
114 pri_sess->type);
115 continue;
116 }
117
118 if (sa->portid >= size) {
119 RTE_LOG(ERR, IPSEC,
120 "Port id >= than table size %d, %d\n",
121 sa->portid, size);
122 continue;
123 }
124
125 /* Use only first inline session found for a given port */
126 if (data[sa->portid].sess)
127 continue;
128 data[sa->portid].sess = pri_sess->security.ses;
129 data[sa->portid].ctx = pri_sess->security.ctx;
130 }
131 }
132
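/*
 * Run the SP (ACL) lookup for a single packet. Returns 0 when the packet
 * must be dropped (no SP configured or DISCARD), 1 otherwise; *sa_idx is
 * set to the matched SA index, or to -1 for BYPASS.
 */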
133 static inline int
134 check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
135 {
136 uint32_t res;
137
138 if (unlikely(sp == NULL))
139 return 0;
140
141 rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
142 DEFAULT_MAX_CATEGORIES);
143
144 if (unlikely(res == DISCARD))
145 return 0;
146 else if (res == BYPASS) {
147 *sa_idx = -1;
148 return 1;
149 }
150
151 *sa_idx = res - 1;
152 return 1;
153 }
154
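/*
 * Bulk SP (ACL) lookup: DISCARD packets are freed, BYPASS packets are
 * compacted in place in 'ip', and packets that matched a protect rule are
 * moved to 'ipsec' with their SA index stored in res[].
 */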
155 static inline void
156 check_sp_bulk(struct sp_ctx *sp, struct traffic_type *ip,
157 struct traffic_type *ipsec)
158 {
159 uint32_t i, j, res;
160 struct rte_mbuf *m;
161
162 if (unlikely(sp == NULL || ip->num == 0))
163 return;
164
165 rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
166 DEFAULT_MAX_CATEGORIES);
167
168 j = 0;
169 for (i = 0; i < ip->num; i++) {
170 m = ip->pkts[i];
171 res = ip->res[i];
172 if (unlikely(res == DISCARD))
173 free_pkts(&m, 1);
174 else if (res == BYPASS)
175 ip->pkts[j++] = m;
176 else {
177 ipsec->res[ipsec->num] = res - 1;
178 ipsec->pkts[ipsec->num++] = m;
179 }
180 }
181 ip->num = j;
182 }
183
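/*
 * Bulk SP (ACL) lookup for inline-processed inbound traffic: DISCARD
 * packets are freed, BYPASS packets are kept, and for protected packets
 * the SA pointer stored in the security dynfield must be present and its
 * SPI must match the SP result, otherwise the packet is dropped.
 */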
184 static inline void
185 check_sp_sa_bulk(struct sp_ctx *sp, struct sa_ctx *sa_ctx,
186 struct traffic_type *ip)
187 {
188 struct ipsec_sa *sa;
189 uint32_t i, j, res;
190 struct rte_mbuf *m;
191
192 if (unlikely(sp == NULL || ip->num == 0))
193 return;
194
195 rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
196 DEFAULT_MAX_CATEGORIES);
197
198 j = 0;
199 for (i = 0; i < ip->num; i++) {
200 m = ip->pkts[i];
201 res = ip->res[i];
202 if (unlikely(res == DISCARD))
203 free_pkts(&m, 1);
204 else if (res == BYPASS)
205 ip->pkts[j++] = m;
206 else {
207 sa = *(struct ipsec_sa **)rte_security_dynfield(m);
208 if (sa == NULL) {
209 free_pkts(&m, 1);
210 continue;
211 }
212
213 /* SPI on the packet should match with the one in SA */
214 if (unlikely(sa->spi != sa_ctx->sa[res - 1].spi)) {
215 free_pkts(&m, 1);
216 continue;
217 }
218
219 ip->pkts[j++] = m;
220 }
221 }
222 ip->num = j;
223 }
224
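/*
 * Look up the destination IPv4 address in the LPM table and return the
 * output port, or RTE_MAX_ETHPORTS when no route is found.
 */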
225 static inline uint16_t
226 route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
227 {
228 uint32_t dst_ip;
229 uint16_t offset;
230 uint32_t hop;
231 int ret;
232
233 offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
234 dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
235 dst_ip = rte_be_to_cpu_32(dst_ip);
236
237 ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);
238
239 if (ret == 0) {
240 /* We have a hit */
241 return hop;
242 }
243
244 /* else */
245 return RTE_MAX_ETHPORTS;
246 }
247
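/*
 * Look up the destination IPv6 address in the LPM6 table and return the
 * output port, or RTE_MAX_ETHPORTS when no route is found.
 */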
248 /* TODO: To be tested */
249 static inline uint16_t
250 route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
251 {
252 uint8_t dst_ip[16];
253 uint8_t *ip6_dst;
254 uint16_t offset;
255 uint32_t hop;
256 int ret;
257
258 offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
259 ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
260 memcpy(&dst_ip[0], ip6_dst, 16);
261
262 ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);
263
264 if (ret == 0) {
265 /* We have a hit */
266 return hop;
267 }
268
269 /* else */
270 return RTE_MAX_ETHPORTS;
271 }
272
273 static inline uint16_t
274 get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
275 {
276 if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
277 return route4_pkt(pkt, rt->rt4_ctx);
278 else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
279 return route6_pkt(pkt, rt->rt6_ctx);
280
281 return RTE_MAX_ETHPORTS;
282 }
283
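/*
 * Process one event received on an unprotected port: run the inbound SP
 * lookup, verify that inline-processed packets carry an SA whose SPI
 * matches the SP result, then route the packet and prepare it for Tx.
 */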
284 static inline int
285 process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
286 struct rte_event *ev)
287 {
288 struct ipsec_sa *sa = NULL;
289 struct rte_mbuf *pkt;
290 uint16_t port_id = 0;
291 enum pkt_type type;
292 uint32_t sa_idx;
293 uint8_t *nlp;
294
295 /* Get pkt from event */
296 pkt = ev->mbuf;
297
298 /* Check the packet type */
299 type = process_ipsec_get_pkt_type(pkt, &nlp);
300
301 switch (type) {
302 case PKT_TYPE_PLAIN_IPV4:
303 if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
304 if (unlikely(pkt->ol_flags &
305 RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
306 RTE_LOG(ERR, IPSEC,
307 "Inbound security offload failed\n");
308 goto drop_pkt_and_exit;
309 }
310 sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
311 }
312
313 /* Check if we have a match */
314 if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
315 /* No valid match */
316 goto drop_pkt_and_exit;
317 }
318 break;
319
320 case PKT_TYPE_PLAIN_IPV6:
321 if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
322 if (unlikely(pkt->ol_flags &
323 RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
324 RTE_LOG(ERR, IPSEC,
325 "Inbound security offload failed\n");
326 goto drop_pkt_and_exit;
327 }
328 sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
329 }
330
331 /* Check if we have a match */
332 if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
333 /* No valid match */
334 goto drop_pkt_and_exit;
335 }
336 break;
337
338 default:
339 RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
340 type);
341 goto drop_pkt_and_exit;
342 }
343
344 /* Check if the packet has to be bypassed */
345 if (sa_idx == BYPASS)
346 goto route_and_send_pkt;
347
348 /* Validate sa_idx */
349 if (sa_idx >= ctx->sa_ctx->nb_sa)
350 goto drop_pkt_and_exit;
351
352 /* Else the packet has to be protected with SA */
353
354 /* If the packet was IPsec processed, then SA pointer should be set */
355 if (sa == NULL)
356 goto drop_pkt_and_exit;
357
358 /* SPI on the packet should match with the one in SA */
359 if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
360 goto drop_pkt_and_exit;
361
362 route_and_send_pkt:
363 port_id = get_route(pkt, rt, type);
364 if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
365 /* no match */
366 goto drop_pkt_and_exit;
367 }
368 /* else, we have a matching route */
369
370 /* Update mac addresses */
371 update_mac_addrs(pkt, port_id);
372
373 /* Update the event with the dest port */
374 ipsec_event_pre_forward(pkt, port_id);
375 return PKT_FORWARDED;
376
377 drop_pkt_and_exit:
378 RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
379 rte_pktmbuf_free(pkt);
380 ev->mbuf = NULL;
381 return PKT_DROPPED;
382 }
383
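/*
 * Process one event received on a protected port: run the outbound SP
 * lookup and either route the packet directly (BYPASS) or attach the
 * inline-protocol session metadata and forward it to the SA's port.
 */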
384 static inline int
385 process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
386 struct rte_event *ev)
387 {
388 struct rte_ipsec_session *sess;
389 struct sa_ctx *sa_ctx;
390 struct rte_mbuf *pkt;
391 uint16_t port_id = 0;
392 struct ipsec_sa *sa;
393 enum pkt_type type;
394 uint32_t sa_idx;
395 uint8_t *nlp;
396
397 /* Get pkt from event */
398 pkt = ev->mbuf;
399
400 /* Check the packet type */
401 type = process_ipsec_get_pkt_type(pkt, &nlp);
402
403 switch (type) {
404 case PKT_TYPE_PLAIN_IPV4:
405 /* Check if we have a match */
406 if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
407 /* No valid match */
408 goto drop_pkt_and_exit;
409 }
410 break;
411 case PKT_TYPE_PLAIN_IPV6:
412 /* Check if we have a match */
413 if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
414 /* No valid match */
415 goto drop_pkt_and_exit;
416 }
417 break;
418 default:
419 /*
420 * Only plain IPv4 & IPv6 packets are allowed
421 * on protected port. Drop the rest.
422 */
423 RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
424 goto drop_pkt_and_exit;
425 }
426
427 /* Check if the packet has to be bypassed */
428 if (sa_idx == BYPASS) {
429 port_id = get_route(pkt, rt, type);
430 if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
431 /* no match */
432 goto drop_pkt_and_exit;
433 }
434 /* else, we have a matching route */
435 goto send_pkt;
436 }
437
438 /* Validate sa_idx */
439 if (unlikely(sa_idx >= ctx->sa_ctx->nb_sa))
440 goto drop_pkt_and_exit;
441
442 /* Else the packet has to be protected */
443
444 /* Get SA ctx */
445 sa_ctx = ctx->sa_ctx;
446
447 /* Get SA */
448 sa = &(sa_ctx->sa[sa_idx]);
449
450 /* Get IPsec session */
451 sess = ipsec_get_primary_session(sa);
452
453 /* Allow only inline protocol for now */
454 if (unlikely(sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
455 RTE_LOG(ERR, IPSEC, "SA type not supported\n");
456 goto drop_pkt_and_exit;
457 }
458
459 rte_security_set_pkt_metadata(sess->security.ctx,
460 sess->security.ses, pkt, NULL);
461
462 /* Mark the packet for Tx security offload */
463 pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
464
465 /* Get the port to which this pkt needs to be submitted */
466 port_id = sa->portid;
467
468 send_pkt:
469 /* Provide L2 len for Outbound processing */
470 pkt->l2_len = RTE_ETHER_HDR_LEN;
471
472 /* Update mac addresses */
473 update_mac_addrs(pkt, port_id);
474
475 /* Update the event with the dest port */
476 ipsec_event_pre_forward(pkt, port_id);
477 return PKT_FORWARDED;
478
479 drop_pkt_and_exit:
480 RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
481 rte_pktmbuf_free(pkt);
482 ev->mbuf = NULL;
483 return PKT_DROPPED;
484 }
485
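/*
 * Route the classified traffic of an event vector: plain IPv4/IPv6 packets
 * are routed through LPM/LPM6, while packets that matched a protect rule
 * get their inline-protocol session metadata attached and are sent to the
 * SA's port. Returns the number of mbufs kept in the vector.
 */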
486 static inline int
487 ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
488 struct ipsec_traffic *t, struct sa_ctx *sa_ctx)
489 {
490 struct rte_ipsec_session *sess;
491 uint32_t sa_idx, i, j = 0;
492 uint16_t port_id = 0;
493 struct rte_mbuf *pkt;
494 struct ipsec_sa *sa;
495
496 /* Route IPv4 packets */
497 for (i = 0; i < t->ip4.num; i++) {
498 pkt = t->ip4.pkts[i];
499 port_id = route4_pkt(pkt, rt->rt4_ctx);
500 if (port_id != RTE_MAX_ETHPORTS) {
501 /* Update mac addresses */
502 update_mac_addrs(pkt, port_id);
503 /* Update the event with the dest port */
504 ipsec_event_pre_forward(pkt, port_id);
505 ev_vector_attr_update(vec, pkt);
506 vec->mbufs[j++] = pkt;
507 } else
508 free_pkts(&pkt, 1);
509 }
510
511 /* Route IPv6 packets */
512 for (i = 0; i < t->ip6.num; i++) {
513 pkt = t->ip6.pkts[i];
514 port_id = route6_pkt(pkt, rt->rt6_ctx);
515 if (port_id != RTE_MAX_ETHPORTS) {
516 /* Update mac addresses */
517 update_mac_addrs(pkt, port_id);
518 /* Update the event with the dest port */
519 ipsec_event_pre_forward(pkt, port_id);
520 ev_vector_attr_update(vec, pkt);
521 vec->mbufs[j++] = pkt;
522 } else
523 free_pkts(&pkt, 1);
524 }
525
526 /* Route ESP packets */
527 for (i = 0; i < t->ipsec.num; i++) {
528 /* Validate sa_idx */
529 sa_idx = t->ipsec.res[i];
530 pkt = t->ipsec.pkts[i];
531 if (unlikely(sa_idx >= sa_ctx->nb_sa))
532 free_pkts(&pkt, 1);
533 else {
534 /* Else the packet has to be protected */
535 sa = &(sa_ctx->sa[sa_idx]);
536 /* Get IPsec session */
537 sess = ipsec_get_primary_session(sa);
538 /* Allow only inline protocol for now */
539 if (unlikely(sess->type !=
540 RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
541 RTE_LOG(ERR, IPSEC, "SA type not supported\n");
542 free_pkts(&pkt, 1);
543 continue;
544 }
545 rte_security_set_pkt_metadata(sess->security.ctx,
546 sess->security.ses, pkt, NULL);
547
548 pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
549 port_id = sa->portid;
550 update_mac_addrs(pkt, port_id);
551 ipsec_event_pre_forward(pkt, port_id);
552 ev_vector_attr_update(vec, pkt);
553 vec->mbufs[j++] = pkt;
554 }
555 }
556
557 return j;
558 }
559
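/*
 * Sort a packet into the plain IPv4 or IPv6 bin of 'ipsec_traffic' based
 * on its ptype; packets of any other type are freed.
 */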
560 static inline void
561 classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t)
562 {
563 enum pkt_type type;
564 uint8_t *nlp;
565
566 /* Check the packet type */
567 type = process_ipsec_get_pkt_type(pkt, &nlp);
568
569 switch (type) {
570 case PKT_TYPE_PLAIN_IPV4:
571 t->ip4.data[t->ip4.num] = nlp;
572 t->ip4.pkts[(t->ip4.num)++] = pkt;
573 break;
574 case PKT_TYPE_PLAIN_IPV6:
575 t->ip6.data[t->ip6.num] = nlp;
576 t->ip6.pkts[(t->ip6.num)++] = pkt;
577 break;
578 default:
579 RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
580 type);
581 free_pkts(&pkt, 1);
582 break;
583 }
584 }
585
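/*
 * Inbound vector path: drop packets whose inline security processing
 * failed, classify the rest and validate them against the inbound SP/SA
 * before routing.
 */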
586 static inline int
587 process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
588 struct rte_event_vector *vec)
589 {
590 struct ipsec_traffic t;
591 struct rte_mbuf *pkt;
592 uint32_t i;
593
594 t.ip4.num = 0;
595 t.ip6.num = 0;
596 t.ipsec.num = 0;
597
598 for (i = 0; i < vec->nb_elem; i++) {
599 /* Get pkt from event */
600 pkt = vec->mbufs[i];
601
602 if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
603 if (unlikely(pkt->ol_flags &
604 RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
605 RTE_LOG(ERR, IPSEC,
606 "Inbound security offload failed\n");
607 free_pkts(&pkt, 1);
608 continue;
609 }
610 }
611
612 classify_pkt(pkt, &t);
613 }
614
615 check_sp_sa_bulk(ctx->sp4_ctx, ctx->sa_ctx, &t.ip4);
616 check_sp_sa_bulk(ctx->sp6_ctx, ctx->sa_ctx, &t.ip6);
617
618 return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
619 }
620
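/*
 * Outbound vector path: classify packets, run the outbound SP lookup and
 * route or protect them accordingly.
 */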
621 static inline int
622 process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
623 struct rte_event_vector *vec)
624 {
625 struct ipsec_traffic t;
626 struct rte_mbuf *pkt;
627 uint32_t i;
628
629 t.ip4.num = 0;
630 t.ip6.num = 0;
631 t.ipsec.num = 0;
632
633 for (i = 0; i < vec->nb_elem; i++) {
634 /* Get pkt from event */
635 pkt = vec->mbufs[i];
636
637 classify_pkt(pkt, &t);
638
639 /* Provide L2 len for Outbound processing */
640 pkt->l2_len = RTE_ETHER_HDR_LEN;
641 }
642
643 check_sp_bulk(ctx->sp4_ctx, &t.ip4, &t.ipsec);
644 check_sp_bulk(ctx->sp6_ctx, &t.ip6, &t.ipsec);
645
646 return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
647 }
648
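/*
 * Driver mode outbound vector path: attach the port's pre-selected inline
 * session to every packet and mark it for Tx security offload; packets on
 * ports without a session are freed. Returns the number of mbufs kept.
 */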
649 static inline int
650 process_ipsec_ev_drv_mode_outbound_vector(struct rte_event_vector *vec,
651 struct port_drv_mode_data *data)
652 {
653 struct rte_mbuf *pkt;
654 int16_t port_id;
655 uint32_t i;
656 int j = 0;
657
658 for (i = 0; i < vec->nb_elem; i++) {
659 pkt = vec->mbufs[i];
660 port_id = pkt->port;
661
662 if (unlikely(!data[port_id].sess)) {
663 free_pkts(&pkt, 1);
664 continue;
665 }
666 ipsec_event_pre_forward(pkt, port_id);
667 /* Save security session */
668 rte_security_set_pkt_metadata(data[port_id].ctx,
669 data[port_id].sess, pkt,
670 NULL);
671
672 /* Mark the packet for Tx security offload */
673 pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
674
675 /* Provide L2 len for Outbound processing */
676 pkt->l2_len = RTE_ETHER_HDR_LEN;
677
678 vec->mbufs[j++] = pkt;
679 }
680
681 return j;
682 }
683
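/*
 * App mode vector handler: pick the inbound or outbound path based on the
 * port of the first mbuf, then enqueue the surviving packets to the Tx
 * adapter or return the vector to its mempool when nothing is left.
 */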
684 static inline void
685 ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
686 struct eh_event_link_info *links,
687 struct rte_event *ev)
688 {
689 struct rte_event_vector *vec = ev->vec;
690 struct rte_mbuf *pkt;
691 int ret;
692
693 pkt = vec->mbufs[0];
694
695 ev_vector_attr_init(vec);
696 if (is_unprotected_port(pkt->port))
697 ret = process_ipsec_ev_inbound_vector(&lconf->inbound,
698 &lconf->rt, vec);
699 else
700 ret = process_ipsec_ev_outbound_vector(&lconf->outbound,
701 &lconf->rt, vec);
702
703 if (likely(ret > 0)) {
704 vec->nb_elem = ret;
705 rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
706 links[0].event_port_id,
707 ev, 1, 0);
708 } else {
709 rte_mempool_put(rte_mempool_from_obj(vec), vec);
710 }
711 }
712
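/*
 * Driver mode vector handler: vectors from protected ports are prepared
 * for outbound inline processing; vectors from unprotected ports are
 * forwarded unchanged.
 */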
713 static inline void
714 ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
715 struct rte_event *ev,
716 struct port_drv_mode_data *data)
717 {
718 struct rte_event_vector *vec = ev->vec;
719 struct rte_mbuf *pkt;
720
721 pkt = vec->mbufs[0];
722
723 if (!is_unprotected_port(pkt->port))
724 vec->nb_elem = process_ipsec_ev_drv_mode_outbound_vector(vec,
725 data);
726 if (vec->nb_elem > 0)
727 rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
728 links[0].event_port_id,
729 ev, 1, 0);
730 else
731 rte_mempool_put(rte_mempool_from_obj(vec), vec);
732 }
733
734 /*
735 * Event mode exposes various operating modes depending on the
736 * capabilities of the event device and the operating mode
737 * selected.
738 */
739
740 static void
741 ipsec_event_port_flush(uint8_t eventdev_id __rte_unused, struct rte_event ev,
742 void *args __rte_unused)
743 {
744 rte_pktmbuf_free(ev.mbuf);
745 }
746
747 /* Workers registered */
748 #define IPSEC_EVENTMODE_WORKERS 2
749
750 /*
751 * Event mode worker
752 * Operating parameters : non-burst - Tx internal port - driver mode
753 */
754 static void
755 ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
756 uint8_t nb_links)
757 {
758 struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
759 unsigned int nb_rx = 0, nb_tx;
760 struct rte_mbuf *pkt;
761 struct rte_event ev;
762 uint32_t lcore_id;
763 int32_t socket_id;
764 int16_t port_id;
765
766 /* Check if we have links registered for this lcore */
767 if (nb_links == 0) {
768 /* No links registered - exit */
769 return;
770 }
771
772 memset(data, 0, sizeof(data));
773
774 /* Get core ID */
775 lcore_id = rte_lcore_id();
776
777 /* Get socket ID */
778 socket_id = rte_lcore_to_socket_id(lcore_id);
779
780 /*
781 * Prepare security sessions table. In outbound driver mode
782 * we always use first session configured for a given port
783 */
784 prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, data,
785 RTE_MAX_ETHPORTS);
786
787 RTE_LOG(INFO, IPSEC,
788 "Launching event mode worker (non-burst - Tx internal port - "
789 "driver mode) on lcore %d\n", lcore_id);
790
791 /* We have valid links */
792
793 /* Check if it's single link */
794 if (nb_links != 1) {
795 RTE_LOG(INFO, IPSEC,
796 "Multiple links not supported. Using first link\n");
797 }
798
799 RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
800 links[0].event_port_id);
801 while (!force_quit) {
802 /* Read packet from event queues */
803 nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
804 links[0].event_port_id,
805 &ev, /* events */
806 1, /* nb_events */
807 0 /* timeout_ticks */);
808
809 if (nb_rx == 0)
810 continue;
811
812 switch (ev.event_type) {
813 case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
814 case RTE_EVENT_TYPE_ETHDEV_VECTOR:
815 ipsec_ev_vector_drv_mode_process(links, &ev, data);
816 continue;
817 case RTE_EVENT_TYPE_ETHDEV:
818 break;
819 default:
820 RTE_LOG(ERR, IPSEC, "Invalid event type %u\n",
821 ev.event_type);
822 continue;
823 }
824
825 pkt = ev.mbuf;
826 port_id = pkt->port;
827
828 rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));
829
830 /* Process packet */
831 ipsec_event_pre_forward(pkt, port_id);
832
833 if (!is_unprotected_port(port_id)) {
834
835 if (unlikely(!data[port_id].sess)) {
836 rte_pktmbuf_free(pkt);
837 continue;
838 }
839
840 /* Save security session */
841 rte_security_set_pkt_metadata(data[port_id].ctx,
842 data[port_id].sess, pkt,
843 NULL);
844
845 /* Mark the packet for Tx security offload */
846 pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
847
848 /* Provide L2 len for Outbound processing */
849 pkt->l2_len = RTE_ETHER_HDR_LEN;
850 }
851
852 /*
853 * Since tx internal port is available, events can be
854 * directly enqueued to the adapter and it would be
855 * internally submitted to the eth device.
856 */
857 nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
858 links[0].event_port_id,
859 &ev, /* events */
860 1, /* nb_events */
861 0 /* flags */);
862 if (!nb_tx)
863 rte_pktmbuf_free(ev.mbuf);
864 }
865
866 if (ev.u64) {
867 ev.op = RTE_EVENT_OP_RELEASE;
868 rte_event_enqueue_burst(links[0].eventdev_id,
869 links[0].event_port_id, &ev, 1);
870 }
871
872 rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
873 ipsec_event_port_flush, NULL);
874 }
875
876 /*
877 * Event mode worker
878 * Operating parameters : non-burst - Tx internal port - app mode
879 */
880 static void
881 ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
882 uint8_t nb_links)
883 {
884 struct lcore_conf_ev_tx_int_port_wrkr lconf;
885 unsigned int nb_rx = 0, nb_tx;
886 struct rte_event ev;
887 uint32_t lcore_id;
888 int32_t socket_id;
889 int ret;
890
891 /* Check if we have links registered for this lcore */
892 if (nb_links == 0) {
893 /* No links registered - exit */
894 return;
895 }
896
897 /* We have valid links */
898
899 /* Get core ID */
900 lcore_id = rte_lcore_id();
901
902 /* Get socket ID */
903 socket_id = rte_lcore_to_socket_id(lcore_id);
904
905 /* Save routing table */
906 lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
907 lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
908 lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
909 lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
910 lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
911 lconf.inbound.session_pool = socket_ctx[socket_id].session_pool;
912 lconf.inbound.session_priv_pool =
913 socket_ctx[socket_id].session_priv_pool;
914 lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
915 lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
916 lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
917 lconf.outbound.session_pool = socket_ctx[socket_id].session_pool;
918 lconf.outbound.session_priv_pool =
919 socket_ctx[socket_id].session_priv_pool;
920
921 RTE_LOG(INFO, IPSEC,
922 "Launching event mode worker (non-burst - Tx internal port - "
923 "app mode) on lcore %d\n", lcore_id);
924
925 /* Check if it's single link */
926 if (nb_links != 1) {
927 RTE_LOG(INFO, IPSEC,
928 "Multiple links not supported. Using first link\n");
929 }
930
931 RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
932 links[0].event_port_id);
933
934 while (!force_quit) {
935 /* Read packet from event queues */
936 nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
937 links[0].event_port_id,
938 &ev, /* events */
939 1, /* nb_events */
940 0 /* timeout_ticks */);
941
942 if (nb_rx == 0)
943 continue;
944
945 switch (ev.event_type) {
946 case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
947 case RTE_EVENT_TYPE_ETHDEV_VECTOR:
948 ipsec_ev_vector_process(&lconf, links, &ev);
949 continue;
950 case RTE_EVENT_TYPE_ETHDEV:
951 break;
952 default:
953 RTE_LOG(ERR, IPSEC, "Invalid event type %u\n",
954 ev.event_type);
955 continue;
956 }
957
958 if (is_unprotected_port(ev.mbuf->port))
959 ret = process_ipsec_ev_inbound(&lconf.inbound,
960 &lconf.rt, &ev);
961 else
962 ret = process_ipsec_ev_outbound(&lconf.outbound,
963 &lconf.rt, &ev);
964 if (ret != 1)
965 /* The pkt has been dropped */
966 continue;
967
968 /*
969 * Since tx internal port is available, events can be
970 * directly enqueued to the adapter and it would be
971 * internally submitted to the eth device.
972 */
973 nb_tx = rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
974 links[0].event_port_id,
975 &ev, /* events */
976 1, /* nb_events */
977 0 /* flags */);
978 if (!nb_tx)
979 rte_pktmbuf_free(ev.mbuf);
980 }
981
982 if (ev.u64) {
983 ev.op = RTE_EVENT_OP_RELEASE;
984 rte_event_enqueue_burst(links[0].eventdev_id,
985 links[0].event_port_id, &ev, 1);
986 }
987
988 rte_event_port_quiesce(links[0].eventdev_id, links[0].event_port_id,
989 ipsec_event_port_flush, NULL);
990 }
991
992 static uint8_t
993 ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
994 {
995 struct eh_app_worker_params *wrkr;
996 uint8_t nb_wrkr_param = 0;
997
998 /* Save workers */
999 wrkr = wrkrs;
1000
1001 /* Non-burst - Tx internal port - driver mode */
1002 wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
1003 wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
1004 wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
1005 wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
1006 wrkr++;
1007 nb_wrkr_param++;
1008
1009 /* Non-burst - Tx internal port - app mode */
1010 wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
1011 wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
1012 wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
1013 wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
1014 nb_wrkr_param++;
1015
1016 return nb_wrkr_param;
1017 }
1018
1019 static void
1020 ipsec_eventmode_worker(struct eh_conf *conf)
1021 {
1022 struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
1023 {{{0} }, NULL } };
1024 uint8_t nb_wrkr_param;
1025
1026 /* Populate ipsec_wrkr params */
1027 nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);
1028
1029 /*
1030 * Launch correct worker after checking
1031 * the event device's capabilities.
1032 */
1033 eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
1034 }
1035
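/*
 * Per-lcore entry point: run either the poll mode or the event mode worker
 * depending on the configured packet transfer mode.
 */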
1036 int ipsec_launch_one_lcore(void *args)
1037 {
1038 struct eh_conf *conf;
1039
1040 conf = (struct eh_conf *)args;
1041
1042 if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
1043 /* Run in poll mode */
1044 ipsec_poll_mode_worker();
1045 } else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
1046 /* Run in event mode */
1047 ipsec_eventmode_worker(conf);
1048 }
1049 return 0;
1050 }
1051