xref: /f-stack/dpdk/examples/ipsec-secgw/esp.c (revision fa64a7ff)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_random.h>

#include "ipsec.h"
#include "esp.h"
#include "ipip.h"

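/*
 * Inbound lookaside path: parse the outer IP header, validate the ESP
 * payload length against the SA's cipher block size, and populate the
 * symmetric crypto op (AEAD, or cipher + auth) for the crypto device.
 * Inline-crypto SAs return early: the NIC has already handled them.
 */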
int
esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct rte_crypto_sym_op *sym_cop;
	int32_t payload_len, ip_hdr_len;

	RTE_ASSERT(sa != NULL);
	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
		return 0;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(cop != NULL);

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION))
		ip_hdr_len = ip4->ip_hl * 4;
	else if (ip4->ip_v == IP6_VERSION)
		/* XXX No option headers supported */
		ip_hdr_len = sizeof(struct ip6_hdr);
	else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}

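	/*
	 * ESP payload = packet length minus outer IP header, ESP header,
	 * IV and ICV. It must be positive and a multiple of the cipher
	 * block size; the mask test assumes sa->block_size is a power
	 * of two.
	 */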
	payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
		sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len;

	if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
		RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
				payload_len, sa->block_size);
		return -EINVAL;
	}

	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;

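	/*
	 * AES-GCM: one AEAD operation covers both decryption and
	 * authentication. Per RFC 4106 the counter block is salt (from
	 * the SA) + 8-byte IV (from the packet) + a 32-bit block counter
	 * starting at 1.
	 */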
	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		sym_cop->aead.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
			sa->iv_len;
		sym_cop->aead.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *aad;
		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));

		icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		memcpy(&icb->iv, iv, 8);
		icb->cnt = rte_cpu_to_be_32(1);

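		/*
		 * The AAD is the 8-byte ESP header (SPI + sequence
		 * number) sitting immediately before the IV.
		 */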
		aad = get_aad(m);
		memcpy(aad, iv - sizeof(struct esp_hdr), 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
		sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
			sa->iv_len;
		sym_cop->cipher.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
		uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
					uint8_t *, IV_OFFSET);

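		/*
		 * CBC-style ciphers take the packet IV verbatim at
		 * IV_OFFSET inside the crypto op; AES-CTR instead builds
		 * a salt + IV + counter block, as GCM does.
		 */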
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			/* Copy IV at the end of crypto operation */
			rte_memcpy(iv_ptr, iv, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			icb = get_cnt_blk(m);
			icb->salt = sa->salt;
			memcpy(&icb->iv, iv, 8);
			icb->cnt = rte_cpu_to_be_32(1);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}

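		/*
		 * The authenticated region spans the ESP header, the IV
		 * and the encrypted payload; the ICV itself is excluded.
		 */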
		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct esp_hdr) +
				sa->iv_len + payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}

		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

	return 0;
}

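/*
 * Post-decryption fixups: derive the crypto op status from the mbuf
 * flags for inline SAs, verify and strip the ESP trailer (padding,
 * pad length, next header) plus ICV, then restore the inner packet
 * (transport mode) or drop the outer header (tunnel mode).
 */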
int
esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4, *ip;
	struct ip6_hdr *ip6;
	uint8_t *nexthdr, *pad_len;
	uint8_t *padding;
	uint16_t i;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);

	if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
			(sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
		if (m->ol_flags & PKT_RX_SEC_OFFLOAD) {
			if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
			else
				cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else
			cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	}

	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
		RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n", __func__);
		return -1;
	}

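	/*
	 * Unless the NIC already stripped the trailer (inline crypto
	 * with RX HW trailer offload), locate the next-header and
	 * pad-length bytes just before the ICV, check the RFC 4303
	 * sequential padding (1, 2, 3, ...) and trim padding, the two
	 * trailer bytes and the ICV.
	 */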
	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
	    sa->ol_flags & RTE_SECURITY_RX_HW_TRAILER_OFFLOAD) {
		nexthdr = &m->inner_esp_next_proto;
	} else {
		nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
		pad_len = nexthdr - 1;

		padding = pad_len - *pad_len;
		for (i = 0; i < *pad_len; i++) {
			if (padding[i] != i + 1) {
				RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
				return -EINVAL;
			}
		}

		if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"failed to remove pad_len + digest\n");
			return -EINVAL;
		}
	}

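	/*
	 * Transport mode: slide the original IP header forward over the
	 * ESP header + IV and patch its protocol and length fields.
	 * Tunnel mode: strip the outer IP header together with ESP + IV.
	 */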
	if (unlikely(IS_TRANSPORT(sa->flags))) {
		ip = rte_pktmbuf_mtod(m, struct ip *);
		ip4 = (struct ip *)rte_pktmbuf_adj(m,
				sizeof(struct esp_hdr) + sa->iv_len);
		if (likely(ip->ip_v == IPVERSION)) {
			memmove(ip4, ip, ip->ip_hl * 4);
			ip4->ip_p = *nexthdr;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)ip4;
			/* XXX No option headers supported */
			memmove(ip6, ip, sizeof(struct ip6_hdr));
			ip6->ip6_nxt = *nexthdr;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					      sizeof(struct ip6_hdr));
		}
	} else
		ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);

	return 0;
}

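/*
 * Outbound path: compute the block-aligned payload length, reserve
 * room for the ESP trailer and ICV, build the outer/shifted headers
 * and the ESP header, set the IV and, for lookaside SAs, populate the
 * crypto op.
 */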
int
esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct ip6_hdr *ip6;
	struct esp_hdr *esp = NULL;
	uint8_t *padding = NULL, *new_ip, nlp;
	struct rte_crypto_sym_op *sym_cop;
	int32_t i;
	uint16_t pad_payload_len, pad_len, ip_hdr_len;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);

	ip_hdr_len = 0;

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION)) {
		if (unlikely(IS_TRANSPORT(sa->flags))) {
			ip_hdr_len = ip4->ip_hl * 4;
			nlp = ip4->ip_p;
		} else
			nlp = IPPROTO_IPIP;
	} else if (ip4->ip_v == IP6_VERSION) {
		if (unlikely(IS_TRANSPORT(sa->flags))) {
			/* XXX No option headers supported */
			ip_hdr_len = sizeof(struct ip6_hdr);
			ip6 = (struct ip6_hdr *)ip4;
			nlp = ip6->ip6_nxt;
		} else
			nlp = IPPROTO_IPV6;
	} else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}

	/*
	 * Padded payload length: payload plus the two trailer bytes
	 * (pad length, next header), rounded up to the cipher block size.
	 */
	pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) -
			ip_hdr_len + 2, sa->block_size);
	pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);

	RTE_ASSERT(IS_TUNNEL(sa->flags) || IS_TRANSPORT(sa->flags));

	if (likely(IS_IP4_TUNNEL(sa->flags)))
		ip_hdr_len = sizeof(struct ip);
	else if (IS_IP6_TUNNEL(sa->flags))
		ip_hdr_len = sizeof(struct ip6_hdr);
	else if (!IS_TRANSPORT(sa->flags)) {
		RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n",
				sa->flags);
		return -EINVAL;
	}

	/* Check maximum packet size */
	if (unlikely(ip_hdr_len + sizeof(struct esp_hdr) + sa->iv_len +
			pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
		RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
		return -EINVAL;
	}

	/* Add trailer padding if it is not constructed by HW */
	if (sa->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
	    !(sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD)) {
		padding = (uint8_t *)rte_pktmbuf_append(m, pad_len +
							sa->digest_len);
		if (unlikely(padding == NULL)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"not enough mbuf trailing space\n");
			return -ENOSPC;
		}
		rte_prefetch0(padding);
	}

	switch (WITHOUT_TRANSPORT_VERSION(sa->flags)) {
	case IP4_TUNNEL:
		ip4 = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct esp_hdr *)(ip4 + 1);
		break;
	case IP6_TUNNEL:
		ip6 = ip6ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct esp_hdr *)(ip6 + 1);
		break;
	case TRANSPORT:
		new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
				sizeof(struct esp_hdr) + sa->iv_len);
		if (unlikely(new_ip == NULL)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"not enough mbuf headroom\n");
			return -ENOSPC;
		}
		memmove(new_ip, ip4, ip_hdr_len);
		esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
		ip4 = (struct ip *)new_ip;
		if (likely(ip4->ip_v == IPVERSION)) {
			ip4->ip_p = IPPROTO_ESP;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)new_ip;
			ip6->ip6_nxt = IPPROTO_ESP;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					      sizeof(struct ip6_hdr));
		}
	}

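	/*
	 * The sequence number is incremented before use, so the first
	 * packet goes out with seq 1. For AES-GCM/CTR the on-wire IV is
	 * derived from this 64-bit counter; CBC modes zero the IV field,
	 * which is then covered by the encryption below.
	 */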
	sa->seq++;
	esp->spi = rte_cpu_to_be_32(sa->spi);
	esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);

	/* set iv */
	uint64_t *iv = (uint64_t *)(esp + 1);
	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		*iv = rte_cpu_to_be_64(sa->seq);
	} else {
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			memset(iv, 0, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			*iv = rte_cpu_to_be_64(sa->seq);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP,
				"unsupported cipher algorithm %u\n",
				sa->cipher_algo);
			return -EINVAL;
		}
	}

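	/*
	 * Inline crypto needs no lookaside op: the NIC either builds the
	 * trailer itself (TX HW trailer offload) or we fill in the
	 * pad-length and next-header bytes here.
	 */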
	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		if (sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD) {
			/* Set the inner esp next protocol for HW trailer */
			m->inner_esp_next_proto = nlp;
			m->packet_type |= RTE_PTYPE_TUNNEL_ESP;
		} else {
			padding[pad_len - 2] = pad_len - 2;
			padding[pad_len - 1] = nlp;
		}
		goto done;
	}

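	/* Lookaside path: hand the packet to the crypto device. */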
	RTE_ASSERT(cop != NULL);
	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;

	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		uint8_t *aad;

		sym_cop->aead.data.offset = ip_hdr_len +
			sizeof(struct esp_hdr) + sa->iv_len;
		sym_cop->aead.data.length = pad_payload_len;

		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;

		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);

		aad = get_aad(m);
		memcpy(aad, esp, 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
			rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
			rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_3DES_CBC:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			sym_cop->cipher.data.offset = ip_hdr_len +
				sizeof(struct esp_hdr);
			sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			sym_cop->cipher.data.offset = ip_hdr_len +
				sizeof(struct esp_hdr) + sa->iv_len;
			sym_cop->cipher.data.length = pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}

		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;

		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);

		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct esp_hdr) +
				sa->iv_len + pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}

		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

done:
	return 0;
}

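/*
 * Outbound post-processing: inline SAs are flagged for HW security
 * offload on TX; for lookaside SAs the crypto op status is checked
 * instead.
 */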
int
esp_outbound_post(struct rte_mbuf *m,
		  struct ipsec_sa *sa,
		  struct rte_crypto_op *cop)
{
	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);

	if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) ||
			(sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) {
		m->ol_flags |= PKT_TX_SEC_OFFLOAD;
	} else {
		RTE_ASSERT(cop != NULL);
		if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			RTE_LOG(ERR, IPSEC_ESP, "%s() failed crypto op\n",
				__func__);
			return -1;
		}
	}

	return 0;
}
465