/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */
#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/ip.h>

#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_crypto.h>
#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_ipsec.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_hash.h>

#include "ipsec.h"
#include "esp.h"

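/*
 * Fill the rte_security IPsec transform from the application SA:
 * for tunnel-mode SAs copy the outer IPv4/IPv6 endpoints and the
 * default TTL/hop-limit, then apply the ESN and replay-window
 * settings taken from the global application parameters.
 */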
static inline void
set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
{
	if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		struct rte_security_ipsec_tunnel_param *tunnel =
			&ipsec->tunnel;
		if (IS_IP4_TUNNEL(sa->flags)) {
			tunnel->type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV4;
			tunnel->ipv4.ttl = IPDEFTTL;

			memcpy((uint8_t *)&tunnel->ipv4.src_ip,
				(uint8_t *)&sa->src.ip.ip4, 4);

			memcpy((uint8_t *)&tunnel->ipv4.dst_ip,
				(uint8_t *)&sa->dst.ip.ip4, 4);
		} else if (IS_IP6_TUNNEL(sa->flags)) {
			tunnel->type =
				RTE_SECURITY_IPSEC_TUNNEL_IPV6;
			tunnel->ipv6.hlimit = IPDEFTTL;
			tunnel->ipv6.dscp = 0;
			tunnel->ipv6.flabel = 0;

			memcpy((uint8_t *)&tunnel->ipv6.src_addr,
				(uint8_t *)&sa->src.ip.ip6.ip6_b, 16);

			memcpy((uint8_t *)&tunnel->ipv6.dst_addr,
				(uint8_t *)&sa->dst.ip.ip6.ip6_b, 16);
		}
		/* TODO support for Transport */
	}
	ipsec->esn_soft_limit = IPSEC_OFFLOAD_ESN_SOFTLIMIT;
	ipsec->replay_win_sz = app_sa_prm.window_size;
	ipsec->options.esn = app_sa_prm.enable_esn;
}

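/*
 * Create a lookaside session for the SA. The cryptodev queue pair is
 * resolved through the per-lcore cdev_map hash, keyed by lcore id and
 * the SA's cipher/auth/AEAD algorithms; lookaside-protocol SAs get an
 * rte_security session, while plain crypto and CPU-crypto SAs get a
 * symmetric cryptodev session.
 */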
int
create_lookaside_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa,
		struct rte_ipsec_session *ips)
{
	struct rte_cryptodev_info cdev_info;
	unsigned long cdev_id_qp = 0;
	int32_t ret = 0;
	struct cdev_key key = { 0 };

	key.lcore_id = (uint8_t)rte_lcore_id();

	key.cipher_algo = (uint8_t)sa->cipher_algo;
	key.auth_algo = (uint8_t)sa->auth_algo;
	key.aead_algo = (uint8_t)sa->aead_algo;

	ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
			(void **)&cdev_id_qp);
	if (ret < 0) {
		RTE_LOG(ERR, IPSEC,
			"No cryptodev: core %u, cipher_algo %u, "
			"auth_algo %u, aead_algo %u\n",
			key.lcore_id,
			key.cipher_algo,
			key.auth_algo,
			key.aead_algo);
		return -1;
	}

	RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
			"%u qp %u\n", sa->spi,
			ipsec_ctx->tbl[cdev_id_qp].id,
			ipsec_ctx->tbl[cdev_id_qp].qp);

	if (ips->type != RTE_SECURITY_ACTION_TYPE_NONE &&
		ips->type != RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
		struct rte_security_session_conf sess_conf = {
			.action_type = ips->type,
			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
			{.ipsec = {
				.spi = sa->spi,
				.salt = sa->salt,
				.options = { 0 },
				.replay_win_sz = 0,
				.direction = sa->direction,
				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
				.mode = (IS_TUNNEL(sa->flags)) ?
					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
			} },
			.crypto_xform = sa->xforms,
			.userdata = NULL,
		};

		if (ips->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
			struct rte_security_ctx *ctx = (struct rte_security_ctx *)
					rte_cryptodev_get_sec_ctx(
					ipsec_ctx->tbl[cdev_id_qp].id);

			/* Set IPsec parameters in conf */
			set_ipsec_conf(sa, &(sess_conf.ipsec));

			ips->security.ses = rte_security_session_create(ctx,
					&sess_conf, ipsec_ctx->session_pool,
					ipsec_ctx->session_priv_pool);
			if (ips->security.ses == NULL) {
				RTE_LOG(ERR, IPSEC,
				"SEC Session init failed: err: %d\n", ret);
				return -1;
			}
		} else {
			RTE_LOG(ERR, IPSEC, "Inline not supported\n");
			return -1;
		}
	} else {
		if (ips->type == RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO) {
			struct rte_cryptodev_info info;
			uint16_t cdev_id;

			cdev_id = ipsec_ctx->tbl[cdev_id_qp].id;
			rte_cryptodev_info_get(cdev_id, &info);
			if (!(info.feature_flags &
				RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO))
				return -ENOTSUP;

			ips->crypto.dev_id = cdev_id;
		}
		ips->crypto.ses = rte_cryptodev_sym_session_create(
				ipsec_ctx->session_pool);
		rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
				ips->crypto.ses, sa->xforms,
				ipsec_ctx->session_priv_pool);

		rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id,
				&cdev_info);
	}

	sa->cdev_id_qp = cdev_id_qp;

	return 0;
}

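/*
 * Create an inline (NIC-offloaded) session for the SA on its port.
 * For inline-crypto SAs this also programs an rte_flow rule matching
 * ETH/IP/ESP and steering matched packets into the security session;
 * for inline-protocol SAs only the security session is created, with
 * the SA pointer saved as session userdata.
 */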
int
create_inline_session(struct socket_ctx *skt_ctx, struct ipsec_sa *sa,
		struct rte_ipsec_session *ips)
{
	int32_t ret = 0;
	struct rte_security_ctx *sec_ctx;
	struct rte_security_session_conf sess_conf = {
		.action_type = ips->type,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		{.ipsec = {
			.spi = sa->spi,
			.salt = sa->salt,
			.options = { 0 },
			.replay_win_sz = 0,
			.direction = sa->direction,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = (sa->flags == IP4_TUNNEL ||
					sa->flags == IP6_TUNNEL) ?
				RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
		} },
		.crypto_xform = sa->xforms,
		.userdata = NULL,
	};

	RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on port %u\n",
		sa->spi, sa->portid);

	if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		struct rte_flow_error err;
		const struct rte_security_capability *sec_cap;
		int ret = 0;

		sec_ctx = (struct rte_security_ctx *)
				rte_eth_dev_get_sec_ctx(sa->portid);
		if (sec_ctx == NULL) {
			RTE_LOG(ERR, IPSEC,
				"rte_eth_dev_get_sec_ctx failed\n");
			return -1;
		}

		ips->security.ses = rte_security_session_create(sec_ctx,
				&sess_conf, skt_ctx->session_pool,
				skt_ctx->session_priv_pool);
		if (ips->security.ses == NULL) {
			RTE_LOG(ERR, IPSEC,
				"SEC Session init failed: err: %d\n", ret);
			return -1;
		}

		sec_cap = rte_security_capabilities_get(sec_ctx);

		/* iterate until ESP tunnel */
		while (sec_cap->action != RTE_SECURITY_ACTION_TYPE_NONE) {
			if (sec_cap->action == ips->type &&
					sec_cap->protocol ==
						RTE_SECURITY_PROTOCOL_IPSEC &&
					sec_cap->ipsec.mode ==
						RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
					sec_cap->ipsec.direction == sa->direction)
				break;
			sec_cap++;
		}

		if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
			RTE_LOG(ERR, IPSEC,
				"No suitable security capability found\n");
			return -1;
		}

		ips->security.ol_flags = sec_cap->ol_flags;
		ips->security.ctx = sec_ctx;
		sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;

		if (IS_IP6(sa->flags)) {
			sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
			sa->pattern[1].spec = &sa->ipv6_spec;

			memcpy(sa->ipv6_spec.hdr.dst_addr,
				sa->dst.ip.ip6.ip6_b, 16);
			memcpy(sa->ipv6_spec.hdr.src_addr,
				sa->src.ip.ip6.ip6_b, 16);
		} else if (IS_IP4(sa->flags)) {
			sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
			sa->pattern[1].spec = &sa->ipv4_spec;

			sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
			sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
		}

		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);

		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;

		sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
		sa->action[0].conf = ips->security.ses;

		sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;

		sa->attr.egress = (sa->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_EGRESS);
		sa->attr.ingress = (sa->direction ==
				RTE_SECURITY_IPSEC_SA_DIR_INGRESS);
		if (sa->attr.ingress) {
			uint8_t rss_key[40];
			struct rte_eth_rss_conf rss_conf = {
				.rss_key = rss_key,
				.rss_key_len = 40,
			};
			struct rte_eth_dev_info dev_info;
			uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
			struct rte_flow_action_rss action_rss;
			unsigned int i;
			unsigned int j;

			/* Don't create flow if default flow is created */
			if (flow_info_tbl[sa->portid].rx_def_flow)
				return 0;

			ret = rte_eth_dev_info_get(sa->portid, &dev_info);
			if (ret != 0) {
				RTE_LOG(ERR, IPSEC,
					"Error during getting device (port %u) info: %s\n",
					sa->portid, strerror(-ret));
				return ret;
			}

			sa->action[2].type = RTE_FLOW_ACTION_TYPE_END;
			/* Try RSS. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
			sa->action[1].conf = &action_rss;
			ret = rte_eth_dev_rss_hash_conf_get(sa->portid,
					&rss_conf);
			if (ret != 0) {
				RTE_LOG(ERR, IPSEC,
					"rte_eth_dev_rss_hash_conf_get:ret=%d\n",
					ret);
				return -1;
			}
			for (i = 0, j = 0; i < dev_info.nb_rx_queues; ++i)
				queue[j++] = i;

			action_rss = (struct rte_flow_action_rss){
					.types = rss_conf.rss_hf,
					.key_len = rss_conf.rss_key_len,
					.queue_num = j,
					.key = rss_key,
					.queue = queue,
			};
			ret = rte_flow_validate(sa->portid, &sa->attr,
					sa->pattern, sa->action,
					&err);
			if (!ret)
				goto flow_create;
			/* Try Queue. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_QUEUE;
			sa->action[1].conf =
				&(struct rte_flow_action_queue){
				.index = 0,
			};
			ret = rte_flow_validate(sa->portid, &sa->attr,
					sa->pattern, sa->action,
					&err);
			/* Try End. */
			sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
			sa->action[1].conf = NULL;
			ret = rte_flow_validate(sa->portid, &sa->attr,
					sa->pattern, sa->action,
					&err);
			if (ret)
				goto flow_create_failure;
		} else if (sa->attr.egress &&
				(ips->security.ol_flags &
					RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
			sa->action[1].type =
					RTE_FLOW_ACTION_TYPE_PASSTHRU;
			sa->action[2].type =
					RTE_FLOW_ACTION_TYPE_END;
		}
flow_create:
		sa->flow = rte_flow_create(sa->portid,
				&sa->attr, sa->pattern, sa->action, &err);
		if (sa->flow == NULL) {
flow_create_failure:
			RTE_LOG(ERR, IPSEC,
				"Failed to create ipsec flow msg: %s\n",
				err.message);
			return -1;
		}
	} else if (ips->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
		const struct rte_security_capability *sec_cap;

		sec_ctx = (struct rte_security_ctx *)
				rte_eth_dev_get_sec_ctx(sa->portid);

		if (sec_ctx == NULL) {
			RTE_LOG(ERR, IPSEC,
				"Ethernet device doesn't have security features registered\n");
			return -1;
		}

		/* Set IPsec parameters in conf */
		set_ipsec_conf(sa, &(sess_conf.ipsec));

		/* Save SA as userdata for the security session. When
		 * the packet is received, this userdata will be
		 * retrieved using the metadata from the packet.
		 *
		 * The PMD is expected to set similar metadata for other
		 * operations, like rte_eth_event, which are tied to
		 * security session. In such cases, the userdata could
		 * be obtained to uniquely identify the security
		 * parameters denoted.
		 */
		sess_conf.userdata = (void *) sa;

		ips->security.ses = rte_security_session_create(sec_ctx,
				&sess_conf, skt_ctx->session_pool,
				skt_ctx->session_priv_pool);
		if (ips->security.ses == NULL) {
			RTE_LOG(ERR, IPSEC,
				"SEC Session init failed: err: %d\n", ret);
			return -1;
		}

		sec_cap = rte_security_capabilities_get(sec_ctx);
		if (sec_cap == NULL) {
			RTE_LOG(ERR, IPSEC,
				"No capabilities registered\n");
			return -1;
		}

		/* iterate until ESP tunnel */
		while (sec_cap->action !=
				RTE_SECURITY_ACTION_TYPE_NONE) {
			if (sec_cap->action == ips->type &&
					sec_cap->protocol ==
						RTE_SECURITY_PROTOCOL_IPSEC &&
					sec_cap->ipsec.mode ==
						sess_conf.ipsec.mode &&
					sec_cap->ipsec.direction == sa->direction)
				break;
			sec_cap++;
		}

		if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
			RTE_LOG(ERR, IPSEC,
				"No suitable security capability found\n");
			return -1;
		}

		ips->security.ol_flags = sec_cap->ol_flags;
		ips->security.ctx = sec_ctx;
	}

	return 0;
}

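/*
 * Install a flow-director style rte_flow rule that steers inbound ESP
 * packets of this tunnel SA to its dedicated Rx queue (sa->fdir_qid).
 * Only ingress tunnel-mode SAs are supported.
 */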
int
create_ipsec_esp_flow(struct ipsec_sa *sa)
{
	int ret = 0;
	struct rte_flow_error err;

	if (sa->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		RTE_LOG(ERR, IPSEC,
			"No Flow director rule for Egress traffic\n");
		return -1;
	}
	if (sa->flags == TRANSPORT) {
		RTE_LOG(ERR, IPSEC,
			"No Flow director rule for transport mode\n");
		return -1;
	}
	sa->action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
	sa->action[0].conf = &(struct rte_flow_action_queue) {
			.index = sa->fdir_qid,
	};
	sa->attr.egress = 0;
	sa->attr.ingress = 1;
	if (IS_IP6(sa->flags)) {
		sa->pattern[1].mask = &rte_flow_item_ipv6_mask;
		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV6;
		sa->pattern[1].spec = &sa->ipv6_spec;
		memcpy(sa->ipv6_spec.hdr.dst_addr,
			sa->dst.ip.ip6.ip6_b, sizeof(sa->dst.ip.ip6.ip6_b));
		memcpy(sa->ipv6_spec.hdr.src_addr,
			sa->src.ip.ip6.ip6_b, sizeof(sa->src.ip.ip6.ip6_b));
		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
	} else if (IS_IP4(sa->flags)) {
		sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
		sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
		sa->pattern[1].spec = &sa->ipv4_spec;
		sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
		sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
		sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
		sa->pattern[2].spec = &sa->esp_spec;
		sa->pattern[2].mask = &rte_flow_item_esp_mask;
		sa->esp_spec.hdr.spi = rte_cpu_to_be_32(sa->spi);
		sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
	}
	sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;

	ret = rte_flow_validate(sa->portid, &sa->attr, sa->pattern, sa->action,
			&err);
	if (ret < 0) {
		RTE_LOG(ERR, IPSEC, "Flow validation failed %s\n", err.message);
		return ret;
	}

	sa->flow = rte_flow_create(sa->portid, &sa->attr, sa->pattern,
			sa->action, &err);
	if (!sa->flow) {
		RTE_LOG(ERR, IPSEC, "Flow creation failed %s\n", err.message);
		return -1;
	}

	return 0;
}

/*
 * Flush the buffered crypto-ops into the cryptodev PMD queue pair.
 * Source mbufs of ops that could not be enqueued are dropped.
 */
void
enqueue_cop_burst(struct cdev_qp *cqp)
{
	uint32_t i, len, ret;

	len = cqp->len;
	ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cqp->buf, len);
	if (ret < len) {
		RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
			" enqueued %u crypto ops out of %u\n",
			cqp->id, cqp->qp, ret, len);
		/* drop packets that we fail to enqueue */
		for (i = ret; i < len; i++)
			free_pkts(&cqp->buf[i]->sym->m_src, 1);
	}
	cqp->in_flight += ret;
	cqp->len = 0;
}

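/*
 * Buffer a single crypto-op for the queue pair, flushing the burst
 * once MAX_PKT_BURST ops have accumulated.
 */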
static inline void
enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
{
	cqp->buf[cqp->len++] = cop;

	if (cqp->len == MAX_PKT_BURST)
		enqueue_cop_burst(cqp);
}

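/*
 * Prepare one crypto-op per packet according to the SA's session type
 * and hand it to the matching backend: lookaside ops are buffered for
 * the cryptodev queue pair, inline packets are staged in ol_pkts for
 * direct transmission, and packets without a valid SA are dropped.
 * Sessions are created lazily on first use.
 */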
static inline void
ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], void *sas[],
		uint16_t nb_pkts)
{
	int32_t ret = 0, i;
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_sym_op *sym_cop;
	struct ipsec_sa *sa;
	struct rte_ipsec_session *ips;

	for (i = 0; i < nb_pkts; i++) {
		if (unlikely(sas[i] == NULL)) {
			free_pkts(&pkts[i], 1);
			continue;
		}

		rte_prefetch0(sas[i]);
		rte_prefetch0(pkts[i]);

		priv = get_priv(pkts[i]);
		sa = ipsec_mask_saptr(sas[i]);
		priv->sa = sa;
		ips = ipsec_get_primary_session(sa);

		switch (ips->type) {
		case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);

			if ((unlikely(ips->security.ses == NULL)) &&
				create_lookaside_session(ipsec_ctx, sa, ips)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			sym_cop = get_sym_cop(&priv->cop);
			sym_cop->m_src = pkts[i];

			rte_security_attach_session(&priv->cop,
				ips->security.ses);
			break;

		case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
			RTE_LOG(ERR, IPSEC, "CPU crypto is not supported by the"
					" legacy mode.\n");
			free_pkts(&pkts[i], 1);
			continue;

		case RTE_SECURITY_ACTION_TYPE_NONE:
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);

			if ((unlikely(ips->crypto.ses == NULL)) &&
				create_lookaside_session(ipsec_ctx, sa, ips)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			rte_crypto_op_attach_sym_session(&priv->cop,
					ips->crypto.ses);

			ret = xform_func(pkts[i], sa, &priv->cop);
			if (unlikely(ret)) {
				free_pkts(&pkts[i], 1);
				continue;
			}
			break;
		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
			RTE_ASSERT(ips->security.ses != NULL);
			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
			if (ips->security.ol_flags &
				RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(
					ips->security.ctx, ips->security.ses,
					pkts[i], NULL);
			continue;
		case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
			RTE_ASSERT(ips->security.ses != NULL);
			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

			rte_prefetch0(&priv->sym_cop);
			rte_security_attach_session(&priv->cop,
					ips->security.ses);

			ret = xform_func(pkts[i], sa, &priv->cop);
			if (unlikely(ret)) {
				free_pkts(&pkts[i], 1);
				continue;
			}

			ipsec_ctx->ol_pkts[ipsec_ctx->ol_pkts_cnt++] = pkts[i];
			if (ips->security.ol_flags &
				RTE_SECURITY_TX_OLOAD_NEED_MDATA)
				rte_security_set_pkt_metadata(
					ips->security.ctx, ips->security.ses,
					pkts[i], NULL);
			continue;
		}

		RTE_ASSERT(sa->cdev_id_qp < ipsec_ctx->nb_qps);
		enqueue_cop(&ipsec_ctx->tbl[sa->cdev_id_qp], &priv->cop);
	}
}

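/*
 * Drain packets staged by inline sessions (ol_pkts), applying the
 * post-processing transform to each; packets that fail the transform
 * are dropped.
 */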
static inline int32_t
ipsec_inline_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], uint16_t max_pkts)
{
	int32_t nb_pkts, ret;
	struct ipsec_mbuf_metadata *priv;
	struct ipsec_sa *sa;
	struct rte_mbuf *pkt;

	nb_pkts = 0;
	while (ipsec_ctx->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
		pkt = ipsec_ctx->ol_pkts[--ipsec_ctx->ol_pkts_cnt];
		rte_prefetch0(pkt);
		priv = get_priv(pkt);
		sa = priv->sa;
		ret = xform_func(pkt, sa, &priv->cop);
		if (unlikely(ret)) {
			free_pkts(&pkt, 1);
			continue;
		}
		pkts[nb_pkts++] = pkt;
	}

	return nb_pkts;
}

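/*
 * Poll the cryptodev queue pairs round-robin and collect completed
 * crypto-ops, applying the post-processing transform where needed;
 * packets whose ops failed are dropped.
 */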
static inline int
ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
		struct rte_mbuf *pkts[], uint16_t max_pkts)
{
	int32_t nb_pkts = 0, ret = 0, i, j, nb_cops;
	struct ipsec_mbuf_metadata *priv;
	struct rte_crypto_op *cops[max_pkts];
	struct ipsec_sa *sa;
	struct rte_mbuf *pkt;

	for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts; i++) {
		struct cdev_qp *cqp;

		cqp = &ipsec_ctx->tbl[ipsec_ctx->last_qp++];
		if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
			ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;

		if (cqp->in_flight == 0)
			continue;

		nb_cops = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp,
				cops, max_pkts - nb_pkts);

		cqp->in_flight -= nb_cops;

		for (j = 0; j < nb_cops; j++) {
			pkt = cops[j]->sym->m_src;
			rte_prefetch0(pkt);

			priv = get_priv(pkt);
			sa = priv->sa;

			RTE_ASSERT(sa != NULL);

			if (ipsec_get_action_type(sa) ==
				RTE_SECURITY_ACTION_TYPE_NONE) {
				ret = xform_func(pkt, sa, cops[j]);
				if (unlikely(ret)) {
					free_pkts(&pkt, 1);
					continue;
				}
			} else if (ipsec_get_action_type(sa) ==
				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
				if (cops[j]->status) {
					free_pkts(&pkt, 1);
					continue;
				}
			}
			pkts[nb_pkts++] = pkt;
		}
	}

	/* return packets */
	return nb_pkts;
}

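/*
 * Inbound processing entry point: look up SAs for the received ESP
 * packets, enqueue them for decryption, and return any packets that
 * inline sessions have already completed.
 */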
uint16_t
ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t nb_pkts, uint16_t len)
{
	void *sas[nb_pkts];

	inbound_sa_lookup(ctx->sa_ctx, pkts, sas, nb_pkts);

	ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts);

	return ipsec_inline_dequeue(esp_inbound_post, ctx, pkts, len);
}

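/*
 * Collect inbound packets whose lookaside crypto-ops have completed.
 */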
uint16_t
ipsec_inbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t len)
{
	return ipsec_dequeue(esp_inbound_post, ctx, pkts, len);
}

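/*
 * Outbound processing entry point: resolve the SAs selected by the
 * caller's sa_idx array (typically produced by the SP lookup), enqueue
 * the packets for encryption, and return any packets that inline
 * sessions have already completed.
 */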
uint16_t
ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len)
{
	void *sas[nb_pkts];

	outbound_sa_lookup(ctx->sa_ctx, sa_idx, sas, nb_pkts);

	ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts);

	return ipsec_inline_dequeue(esp_outbound_post, ctx, pkts, len);
}

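/*
 * Collect outbound packets whose lookaside crypto-ops have completed.
 */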
uint16_t
ipsec_outbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
		uint16_t len)
{
	return ipsec_dequeue(esp_outbound_post, ctx, pkts, len);
}