/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef RTE_EXEC_ENV_WINDOWS

#include <rte_common.h>
#include <rte_cryptodev.h>
#include <rte_esp.h>
#include <rte_ip.h>
#include <rte_security.h>
#include <rte_tcp.h>
#include <rte_udp.h>

#include "test.h"
#include "test_cryptodev_security_ipsec.h"

#define IV_LEN_MAX 16

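/*
 * Algorithm combination tables exercised by the IPsec test cases:
 * alg_list holds AEAD entries plus every cipher/auth pairing, while
 * ah_alg_list holds auth-only entries and NULL-cipher/auth pairings
 * used for AH. Both are filled by the populate helpers below.
 */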
struct crypto_param_comb alg_list[RTE_DIM(aead_list) +
				  (RTE_DIM(cipher_list) *
				   RTE_DIM(auth_list))];

struct crypto_param_comb ah_alg_list[2 * (RTE_DIM(auth_list) - 1)];

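/*
 * Minimal sanity checks on an IPv4 header: version, IHL and total length
 * must be consistent with a well-formed datagram.
 */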
static bool
is_valid_ipv4_pkt(const struct rte_ipv4_hdr *pkt)
{
	/* The IP version number must be 4 */
	if (((pkt->version_ihl) >> 4) != 4)
		return false;
	/*
	 * The IP header length field must be large enough to hold the
	 * minimum length legal IP datagram (20 bytes = 5 words).
	 */
	if ((pkt->version_ihl & 0xf) < 5)
		return false;

	/*
	 * The IP total length field must be large enough to hold the IP
	 * datagram header, whose length is specified in the IP header length
	 * field.
	 */
	if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct rte_ipv4_hdr))
		return false;

	return true;
}

static bool
is_valid_ipv6_pkt(const struct rte_ipv6_hdr *pkt)
{
	/* The IP version number must be 6 */
	if ((rte_be_to_cpu_32((pkt->vtc_flow)) >> 28) != 6)
		return false;

	return true;
}

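/*
 * Fill alg_list with every supported combination: one entry per AEAD
 * algorithm followed by one entry per cipher/auth pair.
 */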
void
test_ipsec_alg_list_populate(void)
{
	unsigned long i, j, index = 0;

	for (i = 0; i < RTE_DIM(aead_list); i++) {
		alg_list[index].param1 = &aead_list[i];
		alg_list[index].param2 = NULL;
		index++;
	}

	for (i = 0; i < RTE_DIM(cipher_list); i++) {
		for (j = 0; j < RTE_DIM(auth_list); j++) {
			alg_list[index].param1 = &cipher_list[i];
			alg_list[index].param2 = &auth_list[j];
			index++;
		}
	}
}

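/*
 * Fill ah_alg_list for AH tests: auth-only entries first, then the same
 * auth algorithms paired with the NULL cipher (cipher_list[0]). Index 0
 * of auth_list is skipped in both passes.
 */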
void
test_ipsec_ah_alg_list_populate(void)
{
	unsigned long i, index = 0;

	for (i = 1; i < RTE_DIM(auth_list); i++) {
		ah_alg_list[index].param1 = &auth_list[i];
		ah_alg_list[index].param2 = NULL;
		index++;
	}

	for (i = 1; i < RTE_DIM(auth_list); i++) {
		/* NULL cipher */
		ah_alg_list[index].param1 = &cipher_list[0];

		ah_alg_list[index].param2 = &auth_list[i];
		index++;
	}
}

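/*
 * Check the requested IPsec options and replay window size against the
 * security capabilities reported by the PMD. Returns -ENOTSUP (optionally
 * logging the reason) when a requested feature is not supported.
 */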
int
test_ipsec_sec_caps_verify(struct rte_security_ipsec_xform *ipsec_xform,
			   const struct rte_security_capability *sec_cap,
			   bool silent)
{
	/* Verify security capabilities */

	if (ipsec_xform->options.esn == 1 && sec_cap->ipsec.options.esn == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "ESN is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.udp_encap == 1 &&
	    sec_cap->ipsec.options.udp_encap == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "UDP encapsulation is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.udp_ports_verify == 1 &&
	    sec_cap->ipsec.options.udp_ports_verify == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "UDP encapsulation ports "
				"verification is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_dscp == 1 &&
	    sec_cap->ipsec.options.copy_dscp == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Copy DSCP is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_flabel == 1 &&
	    sec_cap->ipsec.options.copy_flabel == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Copy Flow Label is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.copy_df == 1 &&
	    sec_cap->ipsec.options.copy_df == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Copy DF bit is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.dec_ttl == 1 &&
	    sec_cap->ipsec.options.dec_ttl == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Decrement TTL is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.ecn == 1 && sec_cap->ipsec.options.ecn == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "ECN is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.stats == 1 &&
	    sec_cap->ipsec.options.stats == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1, "Stats is not supported\n");
		return -ENOTSUP;
	}

	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
	    (ipsec_xform->options.iv_gen_disable == 1) &&
	    (sec_cap->ipsec.options.iv_gen_disable != 1)) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Application provided IV is not supported\n");
		return -ENOTSUP;
	}

	if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    (ipsec_xform->options.tunnel_hdr_verify >
	     sec_cap->ipsec.options.tunnel_hdr_verify)) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Tunnel header verify is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.ip_csum_enable == 1 &&
	    sec_cap->ipsec.options.ip_csum_enable == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Inner IP checksum is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->options.l4_csum_enable == 1 &&
	    sec_cap->ipsec.options.l4_csum_enable == 0) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Inner L4 checksum is not supported\n");
		return -ENOTSUP;
	}

	if (ipsec_xform->replay_win_sz > sec_cap->ipsec.replay_win_sz_max) {
		if (!silent)
			RTE_LOG(INFO, USER1,
				"Replay window size is not supported\n");
		return -ENOTSUP;
	}

	return 0;
}

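/*
 * The next three helpers walk the crypto capabilities attached to the
 * security capability and check that the AEAD, cipher or auth transform
 * (algorithm, key, digest, AAD and IV sizes) is supported.
 */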
int
test_ipsec_crypto_caps_aead_verify(
		const struct rte_security_capability *sec_cap,
		struct rte_crypto_sym_xform *aead)
{
	const struct rte_cryptodev_symmetric_capability *sym_cap;
	const struct rte_cryptodev_capabilities *crypto_cap;
	int j = 0;

	while ((crypto_cap = &sec_cap->crypto_capabilities[j++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (crypto_cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
		    crypto_cap->sym.xform_type == aead->type &&
		    crypto_cap->sym.aead.algo == aead->aead.algo) {
			sym_cap = &crypto_cap->sym;
			if (rte_cryptodev_sym_capability_check_aead(sym_cap,
					aead->aead.key.length,
					aead->aead.digest_length,
					aead->aead.aad_length,
					aead->aead.iv.length) == 0)
				return 0;
		}
	}

	return -ENOTSUP;
}

int
test_ipsec_crypto_caps_cipher_verify(
		const struct rte_security_capability *sec_cap,
		struct rte_crypto_sym_xform *cipher)
{
	const struct rte_cryptodev_symmetric_capability *sym_cap;
	const struct rte_cryptodev_capabilities *cap;
	int j = 0;

	while ((cap = &sec_cap->crypto_capabilities[j++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
		    cap->sym.xform_type == cipher->type &&
		    cap->sym.cipher.algo == cipher->cipher.algo) {
			sym_cap = &cap->sym;
			if (rte_cryptodev_sym_capability_check_cipher(sym_cap,
					cipher->cipher.key.length,
					cipher->cipher.iv.length) == 0)
				return 0;
		}
	}

	return -ENOTSUP;
}

int
test_ipsec_crypto_caps_auth_verify(
		const struct rte_security_capability *sec_cap,
		struct rte_crypto_sym_xform *auth)
{
	const struct rte_cryptodev_symmetric_capability *sym_cap;
	const struct rte_cryptodev_capabilities *cap;
	int j = 0;

	while ((cap = &sec_cap->crypto_capabilities[j++])->op !=
			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
		    cap->sym.xform_type == auth->type &&
		    cap->sym.auth.algo == auth->auth.algo) {
			sym_cap = &cap->sym;
			if (rte_cryptodev_sym_capability_check_auth(sym_cap,
					auth->auth.key.length,
					auth->auth.digest_length,
					auth->auth.iv.length) == 0)
				return 0;
		}
	}

	return -ENOTSUP;
}

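/*
 * Derive an inbound test vector from an outbound one: swap input and
 * output text, flip the SA direction and switch the crypto operations
 * to decrypt/verify.
 */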
void
test_ipsec_td_in_from_out(const struct ipsec_test_data *td_out,
			  struct ipsec_test_data *td_in)
{
	memcpy(td_in, td_out, sizeof(*td_in));

	/* Populate output text of td_in with input text of td_out */
	memcpy(td_in->output_text.data, td_out->input_text.data,
	       td_out->input_text.len);
	td_in->output_text.len = td_out->input_text.len;

	/* Populate input text of td_in with output text of td_out */
	memcpy(td_in->input_text.data, td_out->output_text.data,
	       td_out->output_text.len);
	td_in->input_text.len = td_out->output_text.len;

	td_in->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;

	if (td_in->aead) {
		td_in->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
	} else {
		td_in->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
		td_in->xform.chain.cipher.cipher.op =
				RTE_CRYPTO_CIPHER_OP_DECRYPT;
	}
}

static bool
is_ipv4(void *ip)
{
	struct rte_ipv4_hdr *ipv4 = ip;
	uint8_t ip_ver;

	ip_ver = (ipv4->version_ihl & 0xf0) >> RTE_IPV4_IHL_MULTIPLIER;
	if (ip_ver == IPVERSION)
		return true;
	else
		return false;
}

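/*
 * Zero the L3 and/or L4 checksum fields of a plain text packet so that
 * checksum offload results can be verified after IPsec processing.
 */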
static void
test_ipsec_csum_init(void *ip, bool l3, bool l4)
{
	struct rte_ipv4_hdr *ipv4;
	struct rte_tcp_hdr *tcp;
	struct rte_udp_hdr *udp;
	uint8_t next_proto;
	uint8_t size;

	if (is_ipv4(ip)) {
		ipv4 = ip;
		size = sizeof(struct rte_ipv4_hdr);
		next_proto = ipv4->next_proto_id;

		if (l3)
			ipv4->hdr_checksum = 0;
	} else {
		size = sizeof(struct rte_ipv6_hdr);
		next_proto = ((struct rte_ipv6_hdr *)ip)->proto;
	}

	if (l4) {
		switch (next_proto) {
		case IPPROTO_TCP:
			tcp = (struct rte_tcp_hdr *)RTE_PTR_ADD(ip, size);
			tcp->cksum = 0;
			break;
		case IPPROTO_UDP:
			udp = (struct rte_udp_hdr *)RTE_PTR_ADD(ip, size);
			udp->dgram_cksum = 0;
			break;
		default:
			return;
		}
	}
}

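/*
 * Build an array of test vectors from an algorithm combination and the
 * requested test flags: a known-good template vector is copied and its
 * crypto transforms and IPsec options are then adjusted per flag.
 *
 * Illustrative combined-mode flow (a sketch only; the actual call sites
 * live in test_cryptodev.c and may differ):
 *
 *	test_ipsec_td_prepare(param1, param2, &flags, td_outb, nb_td);
 *	...outbound (egress) processing of td_outb...
 *	test_ipsec_td_update(td_inb, td_outb, nb_td, &flags);
 *	...inbound (ingress) processing of td_inb...
 */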
void
test_ipsec_td_prepare(const struct crypto_param *param1,
		      const struct crypto_param *param2,
		      const struct ipsec_test_flags *flags,
		      struct ipsec_test_data *td_array,
		      int nb_td)
{
	struct ipsec_test_data *td;
	int i;

	memset(td_array, 0, nb_td * sizeof(*td));

	for (i = 0; i < nb_td; i++) {
		td = &td_array[i];

		/* Prepare fields based on param */

		if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
			/* Copy template for packet & key fields */
			if (flags->ipv6)
				memcpy(td, &pkt_aes_256_gcm_v6, sizeof(*td));
			else
				memcpy(td, &pkt_aes_256_gcm, sizeof(*td));

			td->aead = true;
			td->xform.aead.aead.algo = param1->alg.aead;
			td->xform.aead.aead.key.length = param1->key_length;
		} else {
			/* Copy template for packet & key fields */
			if (flags->ipv6)
				memcpy(td, &pkt_aes_128_cbc_hmac_sha256_v6,
					sizeof(*td));
			else
				memcpy(td, &pkt_aes_128_cbc_hmac_sha256,
					sizeof(*td));

			td->aead = false;

			if (param1->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
				td->xform.chain.auth.auth.algo =
						param1->alg.auth;
				td->xform.chain.auth.auth.key.length =
						param1->key_length;
				td->xform.chain.auth.auth.digest_length =
						param1->digest_length;
				td->auth_only = true;

				if (td->xform.chain.auth.auth.algo ==
						RTE_CRYPTO_AUTH_AES_GMAC) {
					td->xform.chain.auth.auth.iv.length =
							param1->iv_length;
					td->aes_gmac = true;
				}
			} else {
				td->xform.chain.cipher.cipher.algo =
						param1->alg.cipher;
				td->xform.chain.cipher.cipher.key.length =
						param1->key_length;
				td->xform.chain.cipher.cipher.iv.length =
						param1->iv_length;
				td->xform.chain.auth.auth.algo =
						param2->alg.auth;
				td->xform.chain.auth.auth.key.length =
						param2->key_length;
				td->xform.chain.auth.auth.digest_length =
						param2->digest_length;

				if (td->xform.chain.auth.auth.algo ==
						RTE_CRYPTO_AUTH_AES_GMAC) {
					td->xform.chain.auth.auth.iv.length =
							param2->iv_length;
					td->aes_gmac = true;
				}
			}
		}

		if (flags->ah) {
			td->ipsec_xform.proto =
					RTE_SECURITY_IPSEC_SA_PROTO_AH;
		}

		if (flags->iv_gen)
			td->ipsec_xform.options.iv_gen_disable = 0;

		if (flags->sa_expiry_pkts_soft)
			td->ipsec_xform.life.packets_soft_limit =
					IPSEC_TEST_PACKETS_MAX - 1;

		if (flags->ip_csum) {
			td->ipsec_xform.options.ip_csum_enable = 1;
			test_ipsec_csum_init(&td->input_text.data, true, false);
		}

		if (flags->l4_csum) {
			td->ipsec_xform.options.l4_csum_enable = 1;
			test_ipsec_csum_init(&td->input_text.data, false, true);
		}

		if (flags->transport) {
			td->ipsec_xform.mode =
					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT;
		} else {
			td->ipsec_xform.mode =
					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL;

			if (flags->tunnel_ipv6)
				td->ipsec_xform.tunnel.type =
						RTE_SECURITY_IPSEC_TUNNEL_IPV6;
			else
				td->ipsec_xform.tunnel.type =
						RTE_SECURITY_IPSEC_TUNNEL_IPV4;
		}

		if (flags->stats_success)
			td->ipsec_xform.options.stats = 1;

		if (flags->fragment) {
			struct rte_ipv4_hdr *ip;
			ip = (struct rte_ipv4_hdr *)&td->input_text.data;
			ip->fragment_offset = 4;
			ip->hdr_checksum = rte_ipv4_cksum(ip);
		}

		if (flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
		    flags->df == TEST_IPSEC_COPY_DF_INNER_1)
			td->ipsec_xform.options.copy_df = 1;

		if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
		    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1)
			td->ipsec_xform.options.copy_dscp = 1;

		if (flags->dec_ttl_or_hop_limit)
			td->ipsec_xform.options.dec_ttl = 1;
	}
}

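/*
 * Adjust inbound test vectors after the outbound pass of a combined-mode
 * test: copy the expected plain text and apply the inbound-only options
 * requested by the flags.
 */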
void
test_ipsec_td_update(struct ipsec_test_data td_inb[],
		     const struct ipsec_test_data td_outb[],
		     int nb_td,
		     const struct ipsec_test_flags *flags)
{
	int i;

	for (i = 0; i < nb_td; i++) {
		memcpy(td_inb[i].output_text.data, td_outb[i].input_text.data,
		       td_outb[i].input_text.len);
		td_inb[i].output_text.len = td_outb[i].input_text.len;

		if (flags->icv_corrupt) {
			int icv_pos = td_inb[i].input_text.len - 4;
			td_inb[i].input_text.data[icv_pos] += 1;
		}

		if (flags->sa_expiry_pkts_hard)
			td_inb[i].ipsec_xform.life.packets_hard_limit =
					IPSEC_TEST_PACKETS_MAX - 1;

		if (flags->udp_encap)
			td_inb[i].ipsec_xform.options.udp_encap = 1;

		if (flags->udp_ports_verify)
			td_inb[i].ipsec_xform.options.udp_ports_verify = 1;

		td_inb[i].ipsec_xform.options.tunnel_hdr_verify =
				flags->tunnel_hdr_verify;

		if (flags->ip_csum)
			td_inb[i].ipsec_xform.options.ip_csum_enable = 1;

		if (flags->l4_csum)
			td_inb[i].ipsec_xform.options.l4_csum_enable = 1;

		/* Clear outbound specific flags */
		td_inb[i].ipsec_xform.options.iv_gen_disable = 0;
	}
}

void
test_ipsec_display_alg(const struct crypto_param *param1,
		       const struct crypto_param *param2)
{
	if (param1->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		printf("\t%s [%d]",
		       rte_crypto_aead_algorithm_strings[param1->alg.aead],
		       param1->key_length * 8);
	} else if (param1->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		printf("\t%s",
		       rte_crypto_auth_algorithm_strings[param1->alg.auth]);
		if (param1->alg.auth != RTE_CRYPTO_AUTH_NULL)
			printf(" [%dB ICV]", param1->digest_length);
	} else {
		printf("\t%s",
		       rte_crypto_cipher_algorithm_strings[param1->alg.cipher]);
		if (param1->alg.cipher != RTE_CRYPTO_CIPHER_NULL)
			printf(" [%d]", param1->key_length * 8);
		printf(" %s",
		       rte_crypto_auth_algorithm_strings[param2->alg.auth]);
		if (param2->alg.auth != RTE_CRYPTO_AUTH_NULL)
			printf(" [%dB ICV]", param2->digest_length);
	}
	printf("\n");
}

static int
test_ipsec_tunnel_hdr_len_get(const struct ipsec_test_data *td)
{
	int len = 0;

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (td->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
			if (td->ipsec_xform.tunnel.type ==
					RTE_SECURITY_IPSEC_TUNNEL_IPV4)
				len += sizeof(struct rte_ipv4_hdr);
			else
				len += sizeof(struct rte_ipv6_hdr);
		}
	}

	return len;
}

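/*
 * For egress packets with IV generation enabled, extract the ESP IV from
 * the packet and check that it has not been repeated within the last
 * IPSEC_TEST_PACKETS_MAX packets.
 */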
static int
test_ipsec_iv_verify_push(struct rte_mbuf *m, const struct ipsec_test_data *td)
{
	static uint8_t iv_queue[IV_LEN_MAX * IPSEC_TEST_PACKETS_MAX];
	uint8_t *iv_tmp, *output_text = rte_pktmbuf_mtod(m, uint8_t *);
	int i, iv_pos, iv_len;
	static int index;

	if (td->aead)
		iv_len = td->xform.aead.aead.iv.length - td->salt.len;
	else
		iv_len = td->xform.chain.cipher.cipher.iv.length;

	iv_pos = test_ipsec_tunnel_hdr_len_get(td) + sizeof(struct rte_esp_hdr);
	output_text += iv_pos;

	TEST_ASSERT(iv_len <= IV_LEN_MAX, "IV length greater than supported");

	/* Compare against previous values */
	for (i = 0; i < index; i++) {
		iv_tmp = &iv_queue[i * IV_LEN_MAX];

		if (memcmp(output_text, iv_tmp, iv_len) == 0) {
			printf("IV repeated");
			return TEST_FAILED;
		}
	}

	/* Save IV for future comparisons */
	iv_tmp = &iv_queue[index * IV_LEN_MAX];
	memcpy(iv_tmp, output_text, iv_len);
	index++;

	if (index == IPSEC_TEST_PACKETS_MAX)
		index = 0;

	return TEST_SUCCESS;
}

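/*
 * The next two helpers recompute the inner IPv4 header checksum and the
 * inner TCP/UDP checksum of a decrypted packet and compare them against
 * the values carried in the packet.
 */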
static int
test_ipsec_l3_csum_verify(struct rte_mbuf *m)
{
	uint16_t actual_cksum, expected_cksum;
	struct rte_ipv4_hdr *ip;

	ip = rte_pktmbuf_mtod(m, struct rte_ipv4_hdr *);

	if (!is_ipv4((void *)ip))
		return TEST_SKIPPED;

	actual_cksum = ip->hdr_checksum;

	ip->hdr_checksum = 0;

	expected_cksum = rte_ipv4_cksum(ip);

	if (actual_cksum != expected_cksum)
		return TEST_FAILED;

	return TEST_SUCCESS;
}

static int
test_ipsec_l4_csum_verify(struct rte_mbuf *m)
{
	uint16_t actual_cksum = 0, expected_cksum = 0;
	struct rte_ipv4_hdr *ipv4;
	struct rte_ipv6_hdr *ipv6;
	struct rte_tcp_hdr *tcp;
	struct rte_udp_hdr *udp;
	void *ip, *l4;

	ip = rte_pktmbuf_mtod(m, void *);

	if (is_ipv4(ip)) {
		ipv4 = ip;
		l4 = RTE_PTR_ADD(ipv4, sizeof(struct rte_ipv4_hdr));

		switch (ipv4->next_proto_id) {
		case IPPROTO_TCP:
			tcp = (struct rte_tcp_hdr *)l4;
			actual_cksum = tcp->cksum;
			tcp->cksum = 0;
			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
			break;
		case IPPROTO_UDP:
			udp = (struct rte_udp_hdr *)l4;
			actual_cksum = udp->dgram_cksum;
			udp->dgram_cksum = 0;
			expected_cksum = rte_ipv4_udptcp_cksum(ipv4, l4);
			break;
		default:
			break;
		}
	} else {
		ipv6 = ip;
		l4 = RTE_PTR_ADD(ipv6, sizeof(struct rte_ipv6_hdr));

		switch (ipv6->proto) {
		case IPPROTO_TCP:
			tcp = (struct rte_tcp_hdr *)l4;
			actual_cksum = tcp->cksum;
			tcp->cksum = 0;
			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
			break;
		case IPPROTO_UDP:
			udp = (struct rte_udp_hdr *)l4;
			actual_cksum = udp->dgram_cksum;
			udp->dgram_cksum = 0;
			expected_cksum = rte_ipv6_udptcp_cksum(ipv6, l4);
			break;
		default:
			break;
		}
	}

	if (actual_cksum != expected_cksum)
		return TEST_FAILED;

	return TEST_SUCCESS;
}

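/*
 * Check that the inner TTL (IPv4) or hop limit (IPv6) was decremented by
 * exactly one relative to the expected packet.
 */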
static int
test_ipsec_ttl_or_hop_decrement_verify(void *received, void *expected)
{
	struct rte_ipv4_hdr *iph4_ex, *iph4_re;
	struct rte_ipv6_hdr *iph6_ex, *iph6_re;

	if (is_ipv4(received) && is_ipv4(expected)) {
		iph4_ex = expected;
		iph4_re = received;
		iph4_ex->time_to_live -= 1;
		if (iph4_re->time_to_live != iph4_ex->time_to_live)
			return TEST_FAILED;
	} else if (!is_ipv4(received) && !is_ipv4(expected)) {
		iph6_ex = expected;
		iph6_re = received;
		iph6_ex->hop_limits -= 1;
		if (iph6_re->hop_limits != iph6_ex->hop_limits)
			return TEST_FAILED;
	} else {
		printf("IP header version mismatch\n");
		return TEST_FAILED;
	}

	return TEST_SUCCESS;
}

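/*
 * Compare the processed packet against the expected output of the test
 * vector, after stripping any UDP encapsulation and tunnel header and
 * accounting for checksum, fragmentation and TTL related flags.
 */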
static int
test_ipsec_td_verify(struct rte_mbuf *m, const struct ipsec_test_data *td,
		     bool silent, const struct ipsec_test_flags *flags)
{
	uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
	uint32_t skip, len = rte_pktmbuf_pkt_len(m);
	uint8_t td_output_text[4096];
	int ret;

	/* For tests with status as error for test success, skip verification */
	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
	    (flags->icv_corrupt ||
	     flags->sa_expiry_pkts_hard ||
	     flags->tunnel_hdr_verify ||
	     td->ar_packet))
		return TEST_SUCCESS;

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
	    flags->udp_encap) {
		const struct rte_ipv4_hdr *iph4;
		const struct rte_ipv6_hdr *iph6;

		if (td->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			iph4 = (const struct rte_ipv4_hdr *)output_text;
			if (iph4->next_proto_id != IPPROTO_UDP) {
				printf("UDP header is not found\n");
				return TEST_FAILED;
			}
		} else {
			iph6 = (const struct rte_ipv6_hdr *)output_text;
			if (iph6->proto != IPPROTO_UDP) {
				printf("UDP header is not found\n");
				return TEST_FAILED;
			}
		}

		len -= sizeof(struct rte_udp_hdr);
		output_text += sizeof(struct rte_udp_hdr);
	}

	if (len != td->output_text.len) {
		printf("Output length (%d) not matching with expected (%d)\n",
		       len, td->output_text.len);
		return TEST_FAILED;
	}

	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) &&
	    flags->fragment) {
		const struct rte_ipv4_hdr *iph4;
		iph4 = (const struct rte_ipv4_hdr *)output_text;
		if (iph4->fragment_offset) {
			printf("Output packet is fragmented");
			return TEST_FAILED;
		}
	}

	skip = test_ipsec_tunnel_hdr_len_get(td);

	len -= skip;
	output_text += skip;

	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    flags->ip_csum) {
		if (m->ol_flags & RTE_MBUF_F_RX_IP_CKSUM_GOOD)
			ret = test_ipsec_l3_csum_verify(m);
		else
			ret = TEST_FAILED;

		if (ret == TEST_FAILED)
			printf("Inner IP checksum test failed\n");

		return ret;
	}

	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    flags->l4_csum) {
		if (m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_GOOD)
			ret = test_ipsec_l4_csum_verify(m);
		else
			ret = TEST_FAILED;

		if (ret == TEST_FAILED)
			printf("Inner L4 checksum test failed\n");

		return ret;
	}

	memcpy(td_output_text, td->output_text.data + skip, len);

	if ((td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    flags->dec_ttl_or_hop_limit) {
		if (test_ipsec_ttl_or_hop_decrement_verify(output_text,
							   td_output_text)) {
			printf("Inner TTL/hop limit decrement test failed\n");
			return TEST_FAILED;
		}
	}

	if (test_ipsec_pkt_update(td_output_text, flags)) {
		printf("Could not update expected vector");
		return TEST_FAILED;
	}

	if (memcmp(output_text, td_output_text, len)) {
		if (silent)
			return TEST_FAILED;

		printf("TestCase %s line %d: %s\n", __func__, __LINE__,
		       "output text not as expected\n");

		rte_hexdump(stdout, "expected", td_output_text, len);
		rte_hexdump(stdout, "actual", output_text, len);
		return TEST_FAILED;
	}

	return TEST_SUCCESS;
}

static int
test_ipsec_res_d_prepare(struct rte_mbuf *m, const struct ipsec_test_data *td,
			 struct ipsec_test_data *res_d)
{
	uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
	uint32_t len = rte_pktmbuf_pkt_len(m);

	memcpy(res_d, td, sizeof(*res_d));
	memcpy(res_d->input_text.data, output_text, len);
	res_d->input_text.len = len;

	res_d->ipsec_xform.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS;
	if (res_d->aead) {
		res_d->xform.aead.aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
	} else {
		res_d->xform.chain.cipher.cipher.op =
				RTE_CRYPTO_CIPHER_OP_DECRYPT;
		res_d->xform.chain.auth.auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
	}

	return TEST_SUCCESS;
}

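/*
 * Validate the outer IPv4/IPv6 header of a tunnel mode egress packet:
 * next protocol for AH, DF bit and DSCP value according to the copy/set
 * flags used by the test.
 */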
static int
test_ipsec_iph4_hdr_validate(const struct rte_ipv4_hdr *iph4,
			     const struct ipsec_test_flags *flags)
{
	uint8_t tos, dscp;
	uint16_t f_off;

	if (!is_valid_ipv4_pkt(iph4)) {
		printf("Tunnel outer header is not IPv4\n");
		return -1;
	}

	if (flags->ah && iph4->next_proto_id != IPPROTO_AH) {
		printf("Tunnel outer header proto is not AH\n");
		return -1;
	}

	f_off = rte_be_to_cpu_16(iph4->fragment_offset);
	if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
	    flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
		if (!(f_off & RTE_IPV4_HDR_DF_FLAG)) {
			printf("DF bit is not set\n");
			return -1;
		}
	} else {
		if (f_off & RTE_IPV4_HDR_DF_FLAG) {
			printf("DF bit is set\n");
			return -1;
		}
	}

	tos = iph4->type_of_service;
	dscp = (tos & RTE_IPV4_HDR_DSCP_MASK) >> 2;

	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
		if (dscp != TEST_IPSEC_DSCP_VAL) {
			printf("DSCP value is not matching [exp: %x, actual: %x]\n",
			       TEST_IPSEC_DSCP_VAL, dscp);
			return -1;
		}
	} else {
		if (dscp != 0) {
			printf("DSCP value is set [exp: 0, actual: %x]\n",
			       dscp);
			return -1;
		}
	}

	return 0;
}

static int
test_ipsec_iph6_hdr_validate(const struct rte_ipv6_hdr *iph6,
			     const struct ipsec_test_flags *flags)
{
	uint32_t vtc_flow;
	uint8_t dscp;

	if (!is_valid_ipv6_pkt(iph6)) {
		printf("Tunnel outer header is not IPv6\n");
		return -1;
	}

	vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
	dscp = (vtc_flow & RTE_IPV6_HDR_DSCP_MASK) >>
	       (RTE_IPV6_HDR_TC_SHIFT + 2);

	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {
		if (dscp != TEST_IPSEC_DSCP_VAL) {
			printf("DSCP value is not matching [exp: %x, actual: %x]\n",
			       TEST_IPSEC_DSCP_VAL, dscp);
			return -1;
		}
	} else {
		if (dscp != 0) {
			printf("DSCP value is set [exp: 0, actual: %x]\n",
			       dscp);
			return -1;
		}
	}

	return 0;
}

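/*
 * Entry point for result verification: validate the outer headers of
 * egress packets and then either compare against the known vector (when
 * res_d is NULL) or stash the result for the inbound pass of a
 * combined-mode test.
 */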
int
test_ipsec_post_process(struct rte_mbuf *m, const struct ipsec_test_data *td,
			struct ipsec_test_data *res_d, bool silent,
			const struct ipsec_test_flags *flags)
{
	uint8_t *output_text = rte_pktmbuf_mtod(m, uint8_t *);
	int ret;

	if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		const struct rte_ipv4_hdr *iph4;
		const struct rte_ipv6_hdr *iph6;

		if (flags->iv_gen) {
			ret = test_ipsec_iv_verify_push(m, td);
			if (ret != TEST_SUCCESS)
				return ret;
		}

		iph4 = (const struct rte_ipv4_hdr *)output_text;

		if (td->ipsec_xform.mode ==
				RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
			if (flags->ipv6) {
				iph6 = (const struct rte_ipv6_hdr *)output_text;
				if (is_valid_ipv6_pkt(iph6) == false) {
					printf("Transport packet is not IPv6\n");
					return TEST_FAILED;
				}
			} else {
				if (is_valid_ipv4_pkt(iph4) == false) {
					printf("Transport packet is not IPv4\n");
					return TEST_FAILED;
				}

				if (flags->ah &&
				    iph4->next_proto_id != IPPROTO_AH) {
					printf("Transport IPv4 header proto is not AH\n");
					return -1;
				}
			}
		} else {
			if (td->ipsec_xform.tunnel.type ==
					RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
				if (test_ipsec_iph4_hdr_validate(iph4, flags))
					return TEST_FAILED;
			} else {
				iph6 = (const struct rte_ipv6_hdr *)output_text;
				if (test_ipsec_iph6_hdr_validate(iph6, flags))
					return TEST_FAILED;
			}
		}
	}

	/*
	 * In case of known vector tests & all inbound tests, res_d provided
	 * would be NULL and the output data needs to be validated against the
	 * expected vector. For inbound, output_text would be the plain packet
	 * and for outbound, output_text would be the IPsec packet. Validate by
	 * comparing against known vectors.
	 *
	 * In case of combined mode tests, the output_text from the outbound
	 * operation (ie, the IPsec packet) would need to be inbound processed
	 * to obtain the plain text. Copy output_text to the result data,
	 * 'res_d', so that inbound processing can be done.
	 */

	if (res_d == NULL)
		return test_ipsec_td_verify(m, td, silent, flags);
	else
		return test_ipsec_res_d_prepare(m, td, res_d);
}

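/*
 * Check the crypto op status against the expectation for the test case:
 * negative tests (anti-replay, hard expiry, tunnel header verify, ICV
 * corruption) must fail, everything else must succeed, and soft expiry
 * must be flagged via the op aux_flags.
 */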
int
test_ipsec_status_check(const struct ipsec_test_data *td,
			struct rte_crypto_op *op,
			const struct ipsec_test_flags *flags,
			enum rte_security_ipsec_sa_direction dir,
			int pkt_num)
{
	int ret = TEST_SUCCESS;

	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    td->ar_packet) {
		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
			printf("Anti replay test case failed\n");
			return TEST_FAILED;
		} else {
			return TEST_SUCCESS;
		}
	}

	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
	    flags->sa_expiry_pkts_hard &&
	    pkt_num == IPSEC_TEST_PACKETS_MAX) {
		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
			printf("SA hard expiry (pkts) test failed\n");
			return TEST_FAILED;
		} else {
			return TEST_SUCCESS;
		}
	}

	if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
	    flags->tunnel_hdr_verify) {
		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
			printf("Tunnel header verify test case failed\n");
			return TEST_FAILED;
		} else {
			return TEST_SUCCESS;
		}
	}

	if (dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS && flags->icv_corrupt) {
		if (op->status != RTE_CRYPTO_OP_STATUS_ERROR) {
			printf("ICV corruption test case failed\n");
			ret = TEST_FAILED;
		}
	} else {
		if (op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			printf("Security op processing failed [pkt_num: %d]\n",
			       pkt_num);
			ret = TEST_FAILED;
		}
	}

	if (flags->sa_expiry_pkts_soft && pkt_num == IPSEC_TEST_PACKETS_MAX) {
		if (!(op->aux_flags &
		      RTE_CRYPTO_OP_AUX_FLAGS_IPSEC_SOFT_EXPIRY)) {
			printf("SA soft expiry (pkts) test failed\n");
			ret = TEST_FAILED;
		}
	}

	return ret;
}

int
test_ipsec_stats_verify(struct rte_security_ctx *ctx,
			struct rte_security_session *sess,
			const struct ipsec_test_flags *flags,
			enum rte_security_ipsec_sa_direction dir)
{
	struct rte_security_stats stats = {0};
	int ret = TEST_SUCCESS;

	if (flags->stats_success) {
		if (rte_security_session_stats_get(ctx, sess, &stats) < 0)
			return TEST_FAILED;

		if (dir == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
			if (stats.ipsec.opackets != 1 ||
			    stats.ipsec.oerrors != 0)
				ret = TEST_FAILED;
		} else {
			if (stats.ipsec.ipackets != 1 ||
			    stats.ipsec.ierrors != 0)
				ret = TEST_FAILED;
		}
	}

	return ret;
}

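/*
 * Rewrite the expected plain text packet to account for DF and DSCP
 * copy/set flags before it is compared against the decrypted packet,
 * regenerating the IPv4 header checksum when needed.
 */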
int
test_ipsec_pkt_update(uint8_t *pkt, const struct ipsec_test_flags *flags)
{
	struct rte_ipv4_hdr *iph4;
	struct rte_ipv6_hdr *iph6;
	bool cksum_dirty = false;

	iph4 = (struct rte_ipv4_hdr *)pkt;

	if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
	    flags->df == TEST_IPSEC_SET_DF_0_INNER_1 ||
	    flags->df == TEST_IPSEC_COPY_DF_INNER_0 ||
	    flags->df == TEST_IPSEC_SET_DF_1_INNER_0) {
		uint16_t frag_off;

		if (!is_ipv4(iph4)) {
			printf("Invalid packet type\n");
			return -1;
		}

		frag_off = rte_be_to_cpu_16(iph4->fragment_offset);

		if (flags->df == TEST_IPSEC_COPY_DF_INNER_1 ||
		    flags->df == TEST_IPSEC_SET_DF_0_INNER_1)
			frag_off |= RTE_IPV4_HDR_DF_FLAG;
		else
			frag_off &= ~RTE_IPV4_HDR_DF_FLAG;

		iph4->fragment_offset = rte_cpu_to_be_16(frag_off);
		cksum_dirty = true;
	}

	if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
	    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1 ||
	    flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_0 ||
	    flags->dscp == TEST_IPSEC_SET_DSCP_1_INNER_0) {

		if (is_ipv4(iph4)) {
			uint8_t tos;

			tos = iph4->type_of_service;
			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
				tos |= (RTE_IPV4_HDR_DSCP_MASK &
					(TEST_IPSEC_DSCP_VAL << 2));
			else
				tos &= ~RTE_IPV4_HDR_DSCP_MASK;

			iph4->type_of_service = tos;
			cksum_dirty = true;
		} else {
			uint32_t vtc_flow;

			iph6 = (struct rte_ipv6_hdr *)pkt;

			vtc_flow = rte_be_to_cpu_32(iph6->vtc_flow);
			if (flags->dscp == TEST_IPSEC_COPY_DSCP_INNER_1 ||
			    flags->dscp == TEST_IPSEC_SET_DSCP_0_INNER_1)
				vtc_flow |= (RTE_IPV6_HDR_DSCP_MASK &
					     (TEST_IPSEC_DSCP_VAL <<
					      (RTE_IPV6_HDR_TC_SHIFT + 2)));
			else
				vtc_flow &= ~RTE_IPV6_HDR_DSCP_MASK;

			iph6->vtc_flow = rte_cpu_to_be_32(vtc_flow);
		}
	}

	if (cksum_dirty && is_ipv4(iph4)) {
		iph4->hdr_checksum = 0;
		iph4->hdr_checksum = rte_ipv4_cksum(iph4);
	}

	return 0;
}

#endif /* !RTE_EXEC_ENV_WINDOWS */