/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_random.h>

#include "ipsec.h"
#include "esp.h"
#include "ipip.h"
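
/*
 * Prepare the crypto operation for an inbound ESP packet: validate the outer
 * IP version and the padded payload length, then point the symmetric
 * operation at the ciphertext, counter block/IV, AAD and digest inside the
 * mbuf. Packets on inline-crypto SAs need no lookaside processing and are
 * returned untouched.
 */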
int
esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct rte_crypto_sym_op *sym_cop;
	int32_t payload_len, ip_hdr_len;

	RTE_ASSERT(sa != NULL);
	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
		return 0;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(cop != NULL);

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION))
		ip_hdr_len = ip4->ip_hl * 4;
	else if (ip4->ip_v == IP6_VERSION)
		/* XXX No option headers supported */
		ip_hdr_len = sizeof(struct ip6_hdr);
	else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}

	payload_len = rte_pktmbuf_pkt_len(m) - ip_hdr_len -
		sizeof(struct esp_hdr) - sa->iv_len - sa->digest_len;

	if ((payload_len & (sa->block_size - 1)) || (payload_len <= 0)) {
		RTE_LOG_DP(DEBUG, IPSEC_ESP, "payload %d not multiple of %u\n",
				payload_len, sa->block_size);
		return -EINVAL;
	}

	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;

	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		sym_cop->aead.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
			sa->iv_len;
		sym_cop->aead.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *aad;
		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));

		icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		memcpy(&icb->iv, iv, 8);
		icb->cnt = rte_cpu_to_be_32(1);

		aad = get_aad(m);
		memcpy(aad, iv - sizeof(struct esp_hdr), 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
		sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
			sa->iv_len;
		sym_cop->cipher.data.length = payload_len;

		struct cnt_blk *icb;
		uint8_t *iv = RTE_PTR_ADD(ip4, ip_hdr_len + sizeof(struct esp_hdr));
		uint8_t *iv_ptr = rte_crypto_op_ctod_offset(cop,
				uint8_t *, IV_OFFSET);

		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			/* Copy IV at the end of crypto operation */
			rte_memcpy(iv_ptr, iv, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			icb = get_cnt_blk(m);
			icb->salt = sa->salt;
			memcpy(&icb->iv, iv, 8);
			icb->cnt = rte_cpu_to_be_32(1);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}

		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct esp_hdr) +
				sa->iv_len + payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}

		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

	return 0;
}
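
/*
 * Post-decryption handling for an inbound packet: derive the crypto status
 * from the mbuf offload flags for inline-crypto SAs, verify the ESP trailer
 * padding, trim the padding plus ICV, and either rebuild the transport-mode
 * IP header in place or strip the outer tunnel header via ipip_inbound().
 */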
int
esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4, *ip;
	struct ip6_hdr *ip6;
	uint8_t *nexthdr, *pad_len;
	uint8_t *padding;
	uint16_t i;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);
	RTE_ASSERT(cop != NULL);

	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		if (m->ol_flags & PKT_RX_SEC_OFFLOAD) {
			if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
			else
				cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else
			cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
	}

	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
		RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
		return -1;
	}

	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
			sa->ol_flags & RTE_SECURITY_RX_HW_TRAILER_OFFLOAD) {
		nexthdr = &m->inner_esp_next_proto;
	} else {
		nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
				rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
		pad_len = nexthdr - 1;

		padding = pad_len - *pad_len;
		for (i = 0; i < *pad_len; i++) {
			if (padding[i] != i + 1) {
				RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
				return -EINVAL;
			}
		}

		if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"failed to remove pad_len + digest\n");
			return -EINVAL;
		}
	}

	if (unlikely(sa->flags == TRANSPORT)) {
		ip = rte_pktmbuf_mtod(m, struct ip *);
		ip4 = (struct ip *)rte_pktmbuf_adj(m,
				sizeof(struct esp_hdr) + sa->iv_len);
		if (likely(ip->ip_v == IPVERSION)) {
			memmove(ip4, ip, ip->ip_hl * 4);
			ip4->ip_p = *nexthdr;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)ip4;
			/* XXX No option headers supported */
			memmove(ip6, ip, sizeof(struct ip6_hdr));
			ip6->ip6_nxt = *nexthdr;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					sizeof(struct ip6_hdr));
		}
	} else
		ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);

	return 0;
}
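
/*
 * Build an outbound ESP packet: compute the padded payload length, append
 * the ESP trailer unless the hardware constructs it, add the tunnel or
 * transport headers plus ESP header, fill in SPI, sequence number and IV,
 * and prepare the symmetric crypto operation for lookaside SAs.
 */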
int
esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
		struct rte_crypto_op *cop)
{
	struct ip *ip4;
	struct ip6_hdr *ip6;
	struct esp_hdr *esp = NULL;
	uint8_t *padding = NULL, *new_ip, nlp;
	struct rte_crypto_sym_op *sym_cop;
	int32_t i;
	uint16_t pad_payload_len, pad_len, ip_hdr_len;

	RTE_ASSERT(m != NULL);
	RTE_ASSERT(sa != NULL);

	ip_hdr_len = 0;

	ip4 = rte_pktmbuf_mtod(m, struct ip *);
	if (likely(ip4->ip_v == IPVERSION)) {
		if (unlikely(sa->flags == TRANSPORT)) {
			ip_hdr_len = ip4->ip_hl * 4;
			nlp = ip4->ip_p;
		} else
			nlp = IPPROTO_IPIP;
	} else if (ip4->ip_v == IP6_VERSION) {
		if (unlikely(sa->flags == TRANSPORT)) {
			/* XXX No option headers supported */
			ip_hdr_len = sizeof(struct ip6_hdr);
			ip6 = (struct ip6_hdr *)ip4;
			nlp = ip6->ip6_nxt;
		} else
			nlp = IPPROTO_IPV6;
	} else {
		RTE_LOG(ERR, IPSEC_ESP, "invalid IP packet type %d\n",
				ip4->ip_v);
		return -EINVAL;
	}

	/* Padded payload length */
	pad_payload_len = RTE_ALIGN_CEIL(rte_pktmbuf_pkt_len(m) -
			ip_hdr_len + 2, sa->block_size);
	pad_len = pad_payload_len + ip_hdr_len - rte_pktmbuf_pkt_len(m);

	RTE_ASSERT(sa->flags == IP4_TUNNEL || sa->flags == IP6_TUNNEL ||
			sa->flags == TRANSPORT);

	if (likely(sa->flags == IP4_TUNNEL))
		ip_hdr_len = sizeof(struct ip);
	else if (sa->flags == IP6_TUNNEL)
		ip_hdr_len = sizeof(struct ip6_hdr);
	else if (sa->flags != TRANSPORT) {
		RTE_LOG(ERR, IPSEC_ESP, "Unsupported SA flags: 0x%x\n",
				sa->flags);
		return -EINVAL;
	}

	/* Check maximum packet size */
	if (unlikely(ip_hdr_len + sizeof(struct esp_hdr) + sa->iv_len +
			pad_payload_len + sa->digest_len > IP_MAXPACKET)) {
		RTE_LOG(ERR, IPSEC_ESP, "ipsec packet is too big\n");
		return -EINVAL;
	}

	/* Add trailer padding if it is not constructed by HW */
	if (sa->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
			(sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
			 !(sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD))) {
		padding = (uint8_t *)rte_pktmbuf_append(m, pad_len +
				sa->digest_len);
		if (unlikely(padding == NULL)) {
			RTE_LOG(ERR, IPSEC_ESP,
					"not enough mbuf trailing space\n");
			return -ENOSPC;
		}
		rte_prefetch0(padding);
	}

	switch (sa->flags) {
	case IP4_TUNNEL:
		ip4 = ip4ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct esp_hdr *)(ip4 + 1);
		break;
	case IP6_TUNNEL:
		ip6 = ip6ip_outbound(m, sizeof(struct esp_hdr) + sa->iv_len,
				&sa->src, &sa->dst);
		esp = (struct esp_hdr *)(ip6 + 1);
		break;
	case TRANSPORT:
		new_ip = (uint8_t *)rte_pktmbuf_prepend(m,
				sizeof(struct esp_hdr) + sa->iv_len);
		memmove(new_ip, ip4, ip_hdr_len);
		esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
		ip4 = (struct ip *)new_ip;
		if (likely(ip4->ip_v == IPVERSION)) {
			ip4->ip_p = IPPROTO_ESP;
			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
		} else {
			ip6 = (struct ip6_hdr *)new_ip;
			ip6->ip6_nxt = IPPROTO_ESP;
			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
					sizeof(struct ip6_hdr));
		}
	}

	sa->seq++;
	esp->spi = rte_cpu_to_be_32(sa->spi);
	esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);

	/* set iv */
	uint64_t *iv = (uint64_t *)(esp + 1);
	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		*iv = rte_cpu_to_be_64(sa->seq);
	} else {
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			memset(iv, 0, sa->iv_len);
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			*iv = rte_cpu_to_be_64(sa->seq);
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP,
				"unsupported cipher algorithm %u\n",
				sa->cipher_algo);
			return -EINVAL;
		}
	}

	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
		if (sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD) {
			/* Set the inner esp next protocol for HW trailer */
			m->inner_esp_next_proto = nlp;
			m->packet_type |= RTE_PTYPE_TUNNEL_ESP;
		} else {
			padding[pad_len - 2] = pad_len - 2;
			padding[pad_len - 1] = nlp;
		}
		goto done;
	}

	RTE_ASSERT(cop != NULL);
	sym_cop = get_sym_cop(cop);
	sym_cop->m_src = m;

	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
		uint8_t *aad;

		sym_cop->aead.data.offset = ip_hdr_len +
			sizeof(struct esp_hdr) + sa->iv_len;
		sym_cop->aead.data.length = pad_payload_len;

		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;

		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);

		aad = get_aad(m);
		memcpy(aad, esp, 8);
		sym_cop->aead.aad.data = aad;
		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
				aad - rte_pktmbuf_mtod(m, uint8_t *));

		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	} else {
		switch (sa->cipher_algo) {
		case RTE_CRYPTO_CIPHER_NULL:
		case RTE_CRYPTO_CIPHER_AES_CBC:
			sym_cop->cipher.data.offset = ip_hdr_len +
				sizeof(struct esp_hdr);
			sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			sym_cop->cipher.data.offset = ip_hdr_len +
				sizeof(struct esp_hdr) + sa->iv_len;
			sym_cop->cipher.data.length = pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported cipher algorithm %u\n",
					sa->cipher_algo);
			return -EINVAL;
		}

		/* Fill pad_len using default sequential scheme */
		for (i = 0; i < pad_len - 2; i++)
			padding[i] = i + 1;
		padding[pad_len - 2] = pad_len - 2;
		padding[pad_len - 1] = nlp;

		struct cnt_blk *icb = get_cnt_blk(m);
		icb->salt = sa->salt;
		icb->iv = rte_cpu_to_be_64(sa->seq);
		icb->cnt = rte_cpu_to_be_32(1);

		switch (sa->auth_algo) {
		case RTE_CRYPTO_AUTH_NULL:
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
		case RTE_CRYPTO_AUTH_SHA256_HMAC:
			sym_cop->auth.data.offset = ip_hdr_len;
			sym_cop->auth.data.length = sizeof(struct esp_hdr) +
				sa->iv_len + pad_payload_len;
			break;
		default:
			RTE_LOG(ERR, IPSEC_ESP, "unsupported auth algorithm %u\n",
					sa->auth_algo);
			return -EINVAL;
		}

		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
				rte_pktmbuf_pkt_len(m) - sa->digest_len);
	}

done:
	return 0;
}
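
/*
 * Final outbound step: mark the mbuf for inline TX security offload or, for
 * lookaside SAs, check that the crypto operation completed successfully.
 */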
%u\n", 455 sa->auth_algo); 456 return -EINVAL; 457 } 458 459 sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *, 460 rte_pktmbuf_pkt_len(m) - sa->digest_len); 461 sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m, 462 rte_pktmbuf_pkt_len(m) - sa->digest_len); 463 } 464 465 done: 466 return 0; 467 } 468 469 int 470 esp_outbound_post(struct rte_mbuf *m, 471 struct ipsec_sa *sa, 472 struct rte_crypto_op *cop) 473 { 474 RTE_ASSERT(m != NULL); 475 RTE_ASSERT(sa != NULL); 476 477 if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) { 478 m->ol_flags |= PKT_TX_SEC_OFFLOAD; 479 } else { 480 RTE_ASSERT(cop != NULL); 481 if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) { 482 RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n"); 483 return -1; 484 } 485 } 486 487 return 0; 488 } 489