/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 * Copyright (C) 2020 Marvell International Ltd.
 */
#include <rte_acl.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>

#include "event_helper.h"
#include "ipsec.h"
#include "ipsec-secgw.h"
#include "ipsec_worker.h"

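/*
 * Classify a packet by Ethernet type and next-layer protocol.
 * On return, *nlp points at the L3 protocol field (ip_p/ip6_nxt) inside
 * the packet; callers feed it to the SP (ACL) lookup.
 */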
static inline enum pkt_type
process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
{
	struct rte_ether_hdr *eth;

	eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip, ip_p));
		if (**nlp == IPPROTO_ESP)
			return PKT_TYPE_IPSEC_IPV4;
		else
			return PKT_TYPE_PLAIN_IPV4;
	} else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip6_hdr, ip6_nxt));
		if (**nlp == IPPROTO_ESP)
			return PKT_TYPE_IPSEC_IPV6;
		else
			return PKT_TYPE_PLAIN_IPV6;
	}

	/* Unknown/Unsupported type */
	return PKT_TYPE_INVALID;
}

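/*
 * Rewrite the Ethernet source/destination addresses with the ones
 * configured for the egress port in ethaddr_tbl.
 */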
41*2d9fd380Sjfb8856606 static inline void
update_mac_addrs(struct rte_mbuf * pkt,uint16_t portid)42*2d9fd380Sjfb8856606 update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
43*2d9fd380Sjfb8856606 {
44*2d9fd380Sjfb8856606 struct rte_ether_hdr *ethhdr;
45*2d9fd380Sjfb8856606
46*2d9fd380Sjfb8856606 ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
47*2d9fd380Sjfb8856606 memcpy(ðhdr->s_addr, ðaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
48*2d9fd380Sjfb8856606 memcpy(ðhdr->d_addr, ðaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
49*2d9fd380Sjfb8856606 }
50*2d9fd380Sjfb8856606
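/*
 * Prepare an mbuf for forwarding through the event eth Tx adapter:
 * record the egress port and pin the Tx queue to 0.
 */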
static inline void
ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
{
	/* Save the destination port in the mbuf */
	m->port = port_id;

	/* Save eth queue for Tx */
	rte_event_eth_tx_adapter_txq_set(m, 0);
}

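/*
 * Build a per-port table of outbound inline-protocol security sessions.
 * Only the first usable session found for a port is kept; SAs of any
 * other action type are skipped with an error log.
 */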
static inline void
prepare_out_sessions_tbl(struct sa_ctx *sa_out,
		struct rte_security_session **sess_tbl, uint16_t size)
{
	struct rte_ipsec_session *pri_sess;
	struct ipsec_sa *sa;
	uint32_t i;

	if (!sa_out)
		return;

	for (i = 0; i < sa_out->nb_sa; i++) {

		sa = &sa_out->sa[i];
		if (!sa)
			continue;

		pri_sess = ipsec_get_primary_session(sa);
		if (!pri_sess)
			continue;

		if (pri_sess->type !=
			RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {

			RTE_LOG(ERR, IPSEC, "Invalid session type %d\n",
				pri_sess->type);
			continue;
		}

		if (sa->portid >= size) {
			RTE_LOG(ERR, IPSEC,
				"Port id %d exceeds session table size %d\n",
				sa->portid, size);
			continue;
		}

		/* Use only first inline session found for a given port */
		if (sess_tbl[sa->portid])
			continue;
		sess_tbl[sa->portid] = pri_sess->security.ses;
	}
}

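/*
 * Run the security policy (ACL) lookup on the L3 protocol field.
 * Returns 0 when the packet must be discarded; otherwise returns 1 with
 * *sa_idx set to -1 for BYPASS or to the matching SA index (ACL result - 1).
 */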
static inline int
check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
{
	uint32_t res;

	if (unlikely(sp == NULL))
		return 0;

	rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
			DEFAULT_MAX_CATEGORIES);

	if (unlikely(res == DISCARD))
		return 0;
	else if (res == BYPASS) {
		*sa_idx = -1;
		return 1;
	}

	*sa_idx = res - 1;
	return 1;
}

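/*
 * LPM lookup on the IPv4 destination address. Returns the next-hop
 * port id on a hit, RTE_MAX_ETHPORTS otherwise.
 */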
static inline uint16_t
route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint32_t dst_ip;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
	dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
	dst_ip = rte_be_to_cpu_32(dst_ip);

	ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* else */
	return RTE_MAX_ETHPORTS;
}

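/*
 * LPM6 lookup on the IPv6 destination address. Returns the next-hop
 * port id on a hit, RTE_MAX_ETHPORTS otherwise.
 */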
/* TODO: To be tested */
static inline uint16_t
route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint8_t dst_ip[16];
	uint8_t *ip6_dst;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
	ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
	memcpy(&dst_ip[0], ip6_dst, 16);

	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* else */
	return RTE_MAX_ETHPORTS;
}

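/* Dispatch to the IPv4 or IPv6 routing table based on the packet type. */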
static inline uint16_t
get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
{
	if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
		return route4_pkt(pkt, rt->rt4_ctx);
	else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
		return route6_pkt(pkt, rt->rt6_ctx);

	return RTE_MAX_ETHPORTS;
}

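/*
 * Inbound event processing (unprotected port): check the inline Rx offload
 * status, run the SP lookup, verify that IPsec-processed packets carry an
 * SA whose SPI matches the policy, then route and forward.
 * Returns PKT_FORWARDED or PKT_DROPPED (the mbuf is freed on drop).
 */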
static inline int
process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
		struct rte_event *ev)
{
	struct ipsec_sa *sa = NULL;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     PKT_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;

	case PKT_TYPE_PLAIN_IPV6:
		if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     PKT_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;

	default:
		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
		goto drop_pkt_and_exit;
	}

	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS)
		goto route_and_send_pkt;

	/* Validate sa_idx */
	if (sa_idx >= ctx->sa_ctx->nb_sa)
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected with SA */

	/* If the packet was IPsec processed, then SA pointer should be set */
	if (sa == NULL)
		goto drop_pkt_and_exit;

	/* SPI on the packet should match with the one in SA */
	if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
		goto drop_pkt_and_exit;

route_and_send_pkt:
	port_id = get_route(pkt, rt, type);
	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
		/* no match */
		goto drop_pkt_and_exit;
	}
	/* else, we have a matching route */

	/* Update mac addresses */
	update_mac_addrs(pkt, port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

drop_pkt_and_exit:
	RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
	rte_pktmbuf_free(pkt);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}

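/*
 * Outbound event processing (protected port): run the SP lookup, and for
 * packets that need protection attach the SA's inline-protocol session,
 * mark the mbuf for Tx security offload and send it out the SA's port.
 * Returns PKT_FORWARDED or PKT_DROPPED (the mbuf is freed on drop).
 */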
static inline int
process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
		struct rte_event *ev)
{
	struct rte_ipsec_session *sess;
	struct sa_ctx *sa_ctx;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	struct ipsec_sa *sa;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	case PKT_TYPE_PLAIN_IPV6:
		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	default:
		/*
		 * Only plain IPv4 & IPv6 packets are allowed
		 * on protected port. Drop the rest.
		 */
		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
		goto drop_pkt_and_exit;
	}

	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS) {
		port_id = get_route(pkt, rt, type);
		if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
			/* no match */
			goto drop_pkt_and_exit;
		}
		/* else, we have a matching route */
		goto send_pkt;
	}

	/* Validate sa_idx */
	if (sa_idx >= ctx->sa_ctx->nb_sa)
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected */

	/* Get SA ctx */
	sa_ctx = ctx->sa_ctx;

	/* Get SA */
	sa = &(sa_ctx->sa[sa_idx]);

	/* Get IPsec session */
	sess = ipsec_get_primary_session(sa);

	/* Allow only inline protocol for now */
	if (sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
		RTE_LOG(ERR, IPSEC, "SA type not supported\n");
		goto drop_pkt_and_exit;
	}

	if (sess->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
		*(struct rte_security_session **)rte_security_dynfield(pkt) =
				sess->security.ses;

	/* Mark the packet for Tx security offload */
	pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;

	/* Get the port to which this pkt needs to be submitted */
	port_id = sa->portid;

send_pkt:
	/* Update mac addresses */
	update_mac_addrs(pkt, port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

drop_pkt_and_exit:
	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
	rte_pktmbuf_free(pkt);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}

/*
 * Event mode exposes various operating modes depending on the
 * capabilities of the event device and the operating mode
 * selected.
 */

/* Workers registered */
#define IPSEC_EVENTMODE_WORKERS		2

/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - driver mode
 */
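/*
 * In driver mode the worker performs no per-packet SP/SA lookup: packets
 * arriving on protected ports simply get the first inline-protocol session
 * configured for the egress port attached (see prepare_out_sessions_tbl())
 * and are marked for Tx security offload before being handed to the
 * Tx adapter.
 */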
397*2d9fd380Sjfb8856606 static void
ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info * links,uint8_t nb_links)398*2d9fd380Sjfb8856606 ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
399*2d9fd380Sjfb8856606 uint8_t nb_links)
400*2d9fd380Sjfb8856606 {
401*2d9fd380Sjfb8856606 struct rte_security_session *sess_tbl[RTE_MAX_ETHPORTS] = { NULL };
402*2d9fd380Sjfb8856606 unsigned int nb_rx = 0;
403*2d9fd380Sjfb8856606 struct rte_mbuf *pkt;
404*2d9fd380Sjfb8856606 struct rte_event ev;
405*2d9fd380Sjfb8856606 uint32_t lcore_id;
406*2d9fd380Sjfb8856606 int32_t socket_id;
407*2d9fd380Sjfb8856606 int16_t port_id;
408*2d9fd380Sjfb8856606
409*2d9fd380Sjfb8856606 /* Check if we have links registered for this lcore */
410*2d9fd380Sjfb8856606 if (nb_links == 0) {
411*2d9fd380Sjfb8856606 /* No links registered - exit */
412*2d9fd380Sjfb8856606 return;
413*2d9fd380Sjfb8856606 }
414*2d9fd380Sjfb8856606
415*2d9fd380Sjfb8856606 /* Get core ID */
416*2d9fd380Sjfb8856606 lcore_id = rte_lcore_id();
417*2d9fd380Sjfb8856606
418*2d9fd380Sjfb8856606 /* Get socket ID */
419*2d9fd380Sjfb8856606 socket_id = rte_lcore_to_socket_id(lcore_id);
420*2d9fd380Sjfb8856606
421*2d9fd380Sjfb8856606 /*
422*2d9fd380Sjfb8856606 * Prepare security sessions table. In outbound driver mode
423*2d9fd380Sjfb8856606 * we always use first session configured for a given port
424*2d9fd380Sjfb8856606 */
425*2d9fd380Sjfb8856606 prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, sess_tbl,
426*2d9fd380Sjfb8856606 RTE_MAX_ETHPORTS);
427*2d9fd380Sjfb8856606
428*2d9fd380Sjfb8856606 RTE_LOG(INFO, IPSEC,
429*2d9fd380Sjfb8856606 "Launching event mode worker (non-burst - Tx internal port - "
430*2d9fd380Sjfb8856606 "driver mode) on lcore %d\n", lcore_id);
431*2d9fd380Sjfb8856606
432*2d9fd380Sjfb8856606 /* We have valid links */
433*2d9fd380Sjfb8856606
434*2d9fd380Sjfb8856606 /* Check if it's single link */
435*2d9fd380Sjfb8856606 if (nb_links != 1) {
436*2d9fd380Sjfb8856606 RTE_LOG(INFO, IPSEC,
437*2d9fd380Sjfb8856606 "Multiple links not supported. Using first link\n");
438*2d9fd380Sjfb8856606 }
439*2d9fd380Sjfb8856606
440*2d9fd380Sjfb8856606 RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
441*2d9fd380Sjfb8856606 links[0].event_port_id);
442*2d9fd380Sjfb8856606 while (!force_quit) {
443*2d9fd380Sjfb8856606 /* Read packet from event queues */
444*2d9fd380Sjfb8856606 nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
445*2d9fd380Sjfb8856606 links[0].event_port_id,
446*2d9fd380Sjfb8856606 &ev, /* events */
447*2d9fd380Sjfb8856606 1, /* nb_events */
448*2d9fd380Sjfb8856606 0 /* timeout_ticks */);
449*2d9fd380Sjfb8856606
450*2d9fd380Sjfb8856606 if (nb_rx == 0)
451*2d9fd380Sjfb8856606 continue;
452*2d9fd380Sjfb8856606
453*2d9fd380Sjfb8856606 pkt = ev.mbuf;
454*2d9fd380Sjfb8856606 port_id = pkt->port;
455*2d9fd380Sjfb8856606
456*2d9fd380Sjfb8856606 rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));
457*2d9fd380Sjfb8856606
458*2d9fd380Sjfb8856606 /* Process packet */
459*2d9fd380Sjfb8856606 ipsec_event_pre_forward(pkt, port_id);
460*2d9fd380Sjfb8856606
461*2d9fd380Sjfb8856606 if (!is_unprotected_port(port_id)) {
462*2d9fd380Sjfb8856606
463*2d9fd380Sjfb8856606 if (unlikely(!sess_tbl[port_id])) {
464*2d9fd380Sjfb8856606 rte_pktmbuf_free(pkt);
465*2d9fd380Sjfb8856606 continue;
466*2d9fd380Sjfb8856606 }
467*2d9fd380Sjfb8856606
468*2d9fd380Sjfb8856606 /* Save security session */
469*2d9fd380Sjfb8856606 if (rte_security_dynfield_is_registered())
470*2d9fd380Sjfb8856606 *(struct rte_security_session **)
471*2d9fd380Sjfb8856606 rte_security_dynfield(pkt) =
472*2d9fd380Sjfb8856606 sess_tbl[port_id];
473*2d9fd380Sjfb8856606
474*2d9fd380Sjfb8856606 /* Mark the packet for Tx security offload */
475*2d9fd380Sjfb8856606 pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
476*2d9fd380Sjfb8856606 }
477*2d9fd380Sjfb8856606
478*2d9fd380Sjfb8856606 /*
479*2d9fd380Sjfb8856606 * Since tx internal port is available, events can be
480*2d9fd380Sjfb8856606 * directly enqueued to the adapter and it would be
481*2d9fd380Sjfb8856606 * internally submitted to the eth device.
482*2d9fd380Sjfb8856606 */
483*2d9fd380Sjfb8856606 rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
484*2d9fd380Sjfb8856606 links[0].event_port_id,
485*2d9fd380Sjfb8856606 &ev, /* events */
486*2d9fd380Sjfb8856606 1, /* nb_events */
487*2d9fd380Sjfb8856606 0 /* flags */);
488*2d9fd380Sjfb8856606 }
489*2d9fd380Sjfb8856606 }
490*2d9fd380Sjfb8856606
/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - app mode
 */
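/*
 * In app mode the worker runs the full lookup path in software: packets
 * from unprotected ports take the inbound path and packets from protected
 * ports take the outbound path before being handed to the Tx adapter.
 */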
495*2d9fd380Sjfb8856606 static void
ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info * links,uint8_t nb_links)496*2d9fd380Sjfb8856606 ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
497*2d9fd380Sjfb8856606 uint8_t nb_links)
498*2d9fd380Sjfb8856606 {
499*2d9fd380Sjfb8856606 struct lcore_conf_ev_tx_int_port_wrkr lconf;
500*2d9fd380Sjfb8856606 unsigned int nb_rx = 0;
501*2d9fd380Sjfb8856606 struct rte_event ev;
502*2d9fd380Sjfb8856606 uint32_t lcore_id;
503*2d9fd380Sjfb8856606 int32_t socket_id;
504*2d9fd380Sjfb8856606 int ret;
505*2d9fd380Sjfb8856606
506*2d9fd380Sjfb8856606 /* Check if we have links registered for this lcore */
507*2d9fd380Sjfb8856606 if (nb_links == 0) {
508*2d9fd380Sjfb8856606 /* No links registered - exit */
509*2d9fd380Sjfb8856606 return;
510*2d9fd380Sjfb8856606 }
511*2d9fd380Sjfb8856606
512*2d9fd380Sjfb8856606 /* We have valid links */
513*2d9fd380Sjfb8856606
514*2d9fd380Sjfb8856606 /* Get core ID */
515*2d9fd380Sjfb8856606 lcore_id = rte_lcore_id();
516*2d9fd380Sjfb8856606
517*2d9fd380Sjfb8856606 /* Get socket ID */
518*2d9fd380Sjfb8856606 socket_id = rte_lcore_to_socket_id(lcore_id);
519*2d9fd380Sjfb8856606
520*2d9fd380Sjfb8856606 /* Save routing table */
521*2d9fd380Sjfb8856606 lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
522*2d9fd380Sjfb8856606 lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
523*2d9fd380Sjfb8856606 lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
524*2d9fd380Sjfb8856606 lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
525*2d9fd380Sjfb8856606 lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
526*2d9fd380Sjfb8856606 lconf.inbound.session_pool = socket_ctx[socket_id].session_pool;
527*2d9fd380Sjfb8856606 lconf.inbound.session_priv_pool =
528*2d9fd380Sjfb8856606 socket_ctx[socket_id].session_priv_pool;
529*2d9fd380Sjfb8856606 lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
530*2d9fd380Sjfb8856606 lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
531*2d9fd380Sjfb8856606 lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
532*2d9fd380Sjfb8856606 lconf.outbound.session_pool = socket_ctx[socket_id].session_pool;
533*2d9fd380Sjfb8856606 lconf.outbound.session_priv_pool =
534*2d9fd380Sjfb8856606 socket_ctx[socket_id].session_priv_pool;
535*2d9fd380Sjfb8856606
536*2d9fd380Sjfb8856606 RTE_LOG(INFO, IPSEC,
537*2d9fd380Sjfb8856606 "Launching event mode worker (non-burst - Tx internal port - "
538*2d9fd380Sjfb8856606 "app mode) on lcore %d\n", lcore_id);
539*2d9fd380Sjfb8856606
540*2d9fd380Sjfb8856606 /* Check if it's single link */
541*2d9fd380Sjfb8856606 if (nb_links != 1) {
542*2d9fd380Sjfb8856606 RTE_LOG(INFO, IPSEC,
543*2d9fd380Sjfb8856606 "Multiple links not supported. Using first link\n");
544*2d9fd380Sjfb8856606 }
545*2d9fd380Sjfb8856606
546*2d9fd380Sjfb8856606 RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
547*2d9fd380Sjfb8856606 links[0].event_port_id);
548*2d9fd380Sjfb8856606
549*2d9fd380Sjfb8856606 while (!force_quit) {
550*2d9fd380Sjfb8856606 /* Read packet from event queues */
551*2d9fd380Sjfb8856606 nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
552*2d9fd380Sjfb8856606 links[0].event_port_id,
553*2d9fd380Sjfb8856606 &ev, /* events */
554*2d9fd380Sjfb8856606 1, /* nb_events */
555*2d9fd380Sjfb8856606 0 /* timeout_ticks */);
556*2d9fd380Sjfb8856606
557*2d9fd380Sjfb8856606 if (nb_rx == 0)
558*2d9fd380Sjfb8856606 continue;
559*2d9fd380Sjfb8856606
560*2d9fd380Sjfb8856606 if (unlikely(ev.event_type != RTE_EVENT_TYPE_ETHDEV)) {
561*2d9fd380Sjfb8856606 RTE_LOG(ERR, IPSEC, "Invalid event type %u",
562*2d9fd380Sjfb8856606 ev.event_type);
563*2d9fd380Sjfb8856606
564*2d9fd380Sjfb8856606 continue;
565*2d9fd380Sjfb8856606 }
566*2d9fd380Sjfb8856606
567*2d9fd380Sjfb8856606 if (is_unprotected_port(ev.mbuf->port))
568*2d9fd380Sjfb8856606 ret = process_ipsec_ev_inbound(&lconf.inbound,
569*2d9fd380Sjfb8856606 &lconf.rt, &ev);
570*2d9fd380Sjfb8856606 else
571*2d9fd380Sjfb8856606 ret = process_ipsec_ev_outbound(&lconf.outbound,
572*2d9fd380Sjfb8856606 &lconf.rt, &ev);
573*2d9fd380Sjfb8856606 if (ret != 1)
574*2d9fd380Sjfb8856606 /* The pkt has been dropped */
575*2d9fd380Sjfb8856606 continue;
576*2d9fd380Sjfb8856606
577*2d9fd380Sjfb8856606 /*
578*2d9fd380Sjfb8856606 * Since tx internal port is available, events can be
579*2d9fd380Sjfb8856606 * directly enqueued to the adapter and it would be
580*2d9fd380Sjfb8856606 * internally submitted to the eth device.
581*2d9fd380Sjfb8856606 */
582*2d9fd380Sjfb8856606 rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
583*2d9fd380Sjfb8856606 links[0].event_port_id,
584*2d9fd380Sjfb8856606 &ev, /* events */
585*2d9fd380Sjfb8856606 1, /* nb_events */
586*2d9fd380Sjfb8856606 0 /* flags */);
587*2d9fd380Sjfb8856606 }
588*2d9fd380Sjfb8856606 }
589*2d9fd380Sjfb8856606
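/*
 * Register the supported worker variants (capability to worker thread
 * mapping) that the event helper uses to pick a matching worker.
 */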
static uint8_t
ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
{
	struct eh_app_worker_params *wrkr;
	uint8_t nb_wrkr_param = 0;

	/* Save workers */
	wrkr = wrkrs;

	/* Non-burst - Tx internal port - driver mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
	wrkr++;
	nb_wrkr_param++;

	/* Non-burst - Tx internal port - app mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
	nb_wrkr_param++;

	return nb_wrkr_param;
}

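/*
 * Entry point for event mode: populate the worker capability table and
 * let the event helper launch the worker that matches the event device.
 */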
static void
ipsec_eventmode_worker(struct eh_conf *conf)
{
	struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
					{{{0} }, NULL } };
	uint8_t nb_wrkr_param;

	/* Populate ipsec_wrkr params */
	nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);

	/*
	 * Launch correct worker after checking
	 * the event device's capabilities.
	 */
	eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
}

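/* Per-lcore entry point: run either the poll mode or event mode worker. */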
int ipsec_launch_one_lcore(void *args)
{
	struct eh_conf *conf;

	conf = (struct eh_conf *)args;

	if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
		/* Run in poll mode */
		ipsec_poll_mode_worker();
	} else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
		/* Run in event mode */
		ipsec_eventmode_worker(conf);
	}
	return 0;
}