xref: /xnu-11215/bsd/netinet6/esp_output.c (revision 8d741a5d)
1 /*
2  * Copyright (c) 2008-2017, 2022-2023 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 /*	$FreeBSD: src/sys/netinet6/esp_output.c,v 1.1.2.3 2002/04/28 05:40:26 suz Exp $	*/
30 /*	$KAME: esp_output.c,v 1.44 2001/07/26 06:53:15 jinmei Exp $	*/
31 
32 /*
33  * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34  * All rights reserved.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  * 1. Redistributions of source code must retain the above copyright
40  *    notice, this list of conditions and the following disclaimer.
41  * 2. Redistributions in binary form must reproduce the above copyright
42  *    notice, this list of conditions and the following disclaimer in the
43  *    documentation and/or other materials provided with the distribution.
44  * 3. Neither the name of the project nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  */
60 
61 #define _IP_VHL
62 
63 /*
64  * RFC1827/2406 Encapsulated Security Payload.
65  */
66 
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/malloc.h>
70 #include <sys/mbuf.h>
71 #include <sys/domain.h>
72 #include <sys/protosw.h>
73 #include <sys/socket.h>
74 #include <sys/socketvar.h>
75 #include <sys/errno.h>
76 #include <sys/time.h>
77 #include <sys/kernel.h>
78 #include <sys/syslog.h>
79 
80 #include <net/if.h>
81 #include <net/route.h>
82 #include <net/multi_layer_pkt_log.h>
83 
84 #include <netinet/in.h>
85 #include <netinet/in_systm.h>
86 #include <netinet/ip.h>
87 #include <netinet/in_var.h>
88 #include <netinet/udp.h> /* for nat traversal */
89 #include <netinet/tcp.h>
90 #include <netinet/in_tclass.h>
91 
92 #include <netinet/ip6.h>
93 #include <netinet6/ip6_var.h>
94 #include <netinet/icmp6.h>
95 
96 #include <netinet6/ipsec.h>
97 #include <netinet6/ipsec6.h>
98 #include <netinet6/ah.h>
99 #include <netinet6/ah6.h>
100 #include <netinet6/esp.h>
101 #include <netinet6/esp6.h>
102 #include <netkey/key.h>
103 #include <netkey/keydb.h>
104 
105 #include <net/net_osdep.h>
106 
107 #if SKYWALK
108 #include <skywalk/os_skywalk_private.h>
109 #endif // SKYWALK
110 
111 #include <sys/kdebug.h>
112 #define DBG_LAYER_BEG           NETDBG_CODE(DBG_NETIPSEC, 1)
113 #define DBG_LAYER_END           NETDBG_CODE(DBG_NETIPSEC, 3)
114 #define DBG_FNC_ESPOUT          NETDBG_CODE(DBG_NETIPSEC, (4 << 8))
115 #define DBG_FNC_ENCRYPT         NETDBG_CODE(DBG_NETIPSEC, (5 << 8))
116 
117 static int esp_output(struct mbuf *, u_char *, struct mbuf *,
118     int, struct secasvar *sav);
119 
120 extern int      esp_udp_encap_port;
121 extern u_int64_t natt_now;
122 
123 /*
124  * compute ESP header size.
125  */
size_t
esp_hdrsiz(__unused struct ipsecrequest *isr)
{
/*
 * The per-SA computation below has been disabled (note the %%%% marker:
 * ipsecrequest no longer carries a sav pointer).  The function instead
 * returns a conservative worst-case estimate; kept for reference only.
 */
#if 0
	/* sanity check */
	if (isr == NULL) {
		panic("esp_hdrsiz: NULL was passed.");
	}


	lck_mtx_lock(sadb_mutex);
	{
		struct secasvar *sav;
		const struct esp_algorithm *algo;
		const struct ah_algorithm *aalgo;
		size_t ivlen;
		size_t authlen;
		size_t hdrsiz;
		size_t maxpad;

		/*%%%% this needs to change - no sav in ipsecrequest any more */
		sav = isr->sav;

		if (isr->saidx.proto != IPPROTO_ESP) {
			panic("unsupported mode passed to esp_hdrsiz");
		}

		if (sav == NULL) {
			goto estimate;
		}
		if (sav->state != SADB_SASTATE_MATURE
		    && sav->state != SADB_SASTATE_DYING) {
			goto estimate;
		}

		/* we need transport mode ESP. */
		algo = esp_algorithm_lookup(sav->alg_enc);
		if (!algo) {
			goto estimate;
		}
		ivlen = sav->ivlen;
		if (ivlen < 0) {
			goto estimate;
		}

		if (algo->padbound) {
			maxpad = algo->padbound;
		} else {
			maxpad = 4;
		}
		maxpad += 1; /* maximum 'extendsiz' is padbound + 1, see esp_output */

		if (sav->flags & SADB_X_EXT_OLD) {
			/* RFC 1827 */
			hdrsiz = sizeof(struct esp) + ivlen + maxpad;
		} else {
			/* RFC 2406 */
			aalgo = ah_algorithm_lookup(sav->alg_auth);
			if (aalgo && sav->replay[0] != NULL && sav->key_auth) {
				authlen = (aalgo->sumsiz)(sav);
			} else {
				authlen = 0;
			}
			hdrsiz = sizeof(struct newesp) + ivlen + maxpad + authlen;
		}

		/*
		 * If the security association indicates that NATT is required,
		 * add the size of the NATT encapsulation header:
		 */
		if ((sav->flags & SADB_X_EXT_NATT) != 0) {
			hdrsiz += sizeof(struct udphdr) + 4;
		}

		lck_mtx_unlock(sadb_mutex);
		return hdrsiz;
	}
estimate:
	lck_mtx_unlock(sadb_mutex);
#endif
	/*
	 * ASSUMING:
	 *	sizeof(struct newesp) > sizeof(struct esp). (8)
	 *	esp_max_ivlen() = max ivlen for CBC mode
	 *	17 = (maximum padding length without random padding length)
	 *	   + (Pad Length field) + (Next Header field).
	 *	64 = maximum ICV we support.
	 *  sizeof(struct udphdr) in case NAT traversal is used
	 */
	return sizeof(struct newesp) + esp_max_ivlen() + 17 + AH_MAXSUMSIZE + sizeof(struct udphdr);
}
217 
218 /*
219  * Modify the packet so that the payload is encrypted.
220  * The mbuf (m) must start with IPv4 or IPv6 header.
221  * On failure, free the given mbuf and return NULL.
222  *
223  * on invocation:
224  *	m   nexthdrp md
225  *	v   v        v
226  *	IP ......... payload
227  * during the encryption:
228  *	m   nexthdrp mprev md
229  *	v   v        v     v
230  *	IP ............... esp iv payload pad padlen nxthdr
231  *	                   <--><-><------><--------------->
232  *	                   esplen plen    extendsiz
233  *	                       ivlen
234  *	                   <-----> esphlen
235  *	<-> hlen
236  *	<-----------------> espoff
237  */
static int
esp_output(
	struct mbuf *m,
	u_char *nexthdrp,
	struct mbuf *md,
	int af,
	struct secasvar *sav)
{
	struct mbuf *n;
	struct mbuf *mprev;
	struct esp *esp;
	struct esptail *esptail;
	const struct esp_algorithm *algo;
	struct tcphdr th = {};
	u_int32_t spi;
	/*
	 * NOTE(review): 'seq' is only assigned on the new-ESP
	 * (non-SADB_X_EXT_OLD) path below; on an old-ESP SA with MPK
	 * logging enabled, the MPKL_ESP_OUTPUT_TCP call at the end would
	 * read it uninitialized — confirm old-ESP SAs never have
	 * ipsec_if/MPK logging.
	 */
	u_int32_t seq;
	size_t inner_payload_len = 0;
	u_int8_t inner_protocol = 0;
	u_int8_t nxt = 0;
	size_t plen;    /*payload length to be encrypted*/
	size_t espoff;
	size_t esphlen; /* sizeof(struct esp/newesp) + ivlen */
	int ivlen;
	int afnumber;
	size_t extendsiz;
	int error = 0;
	struct ipsecstat *stat;
	struct udphdr *udp = NULL;
	/*
	 * UDP-encapsulate (NAT-T, RFC 3948) when the SA requests NAT
	 * traversal and either a global encap port or a per-SA translated
	 * source port is configured.
	 */
	int     udp_encapsulate = (sav->flags & SADB_X_EXT_NATT && (af == AF_INET || af == AF_INET6) &&
	    ((esp_udp_encap_port & 0xFFFF) != 0 || sav->natt_encapsulated_src_port != 0));

	KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_START, sav->ivlen, 0, 0, 0, 0);
	switch (af) {
	case AF_INET:
		afnumber = 4;
		stat = &ipsecstat;
		break;
	case AF_INET6:
		afnumber = 6;
		stat = &ipsec6stat;
		break;
	default:
		ipseclog((LOG_ERR, "esp_output: unsupported af %d\n", af));
		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 1, 0, 0, 0, 0);
		return 0;       /* no change at all */
	}

	/*
	 * When the SA keeps one sequence-number space per traffic class,
	 * derive the class from the packet's DSCP so the right replay
	 * window is used below.
	 */
	mbuf_traffic_class_t traffic_class = 0;
	if ((sav->flags2 & SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) ==
	    SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) {
		u_int8_t dscp = 0;
		switch (af) {
		case AF_INET:
		{
			struct ip *ip = mtod(m, struct ip *);
			dscp = ip->ip_tos >> IPTOS_DSCP_SHIFT;
			break;
		}
		case AF_INET6:
		{
			struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
			dscp = (ntohl(ip6->ip6_flow) & IP6FLOW_DSCP_MASK) >> IP6FLOW_DSCP_SHIFT;
			break;
		}
		default:
			panic("esp_output: should not reach here");
		}
		traffic_class = rfc4594_dscp_to_tc(dscp);
	}

	/* some sanity check */
	/* New-format ESP always needs a replay state; bail out if missing. */
	if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] == NULL) {
		switch (af) {
		case AF_INET:
		{
			struct ip *ip;

			ip = mtod(m, struct ip *);
			ipseclog((LOG_DEBUG, "esp4_output: internal error: "
			    "sav->replay is null: %x->%x, SPI=%u\n",
			    (u_int32_t)ntohl(ip->ip_src.s_addr),
			    (u_int32_t)ntohl(ip->ip_dst.s_addr),
			    (u_int32_t)ntohl(sav->spi)));
			IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
			break;
		}
		case AF_INET6:
			ipseclog((LOG_DEBUG, "esp6_output: internal error: "
			    "sav->replay is null: SPI=%u\n",
			    (u_int32_t)ntohl(sav->spi)));
			IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);
			break;
		default:
			panic("esp_output: should not reach here");
		}
		m_freem(m);
		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 2, 0, 0, 0, 0);
		return EINVAL;
	}

	algo = esp_algorithm_lookup(sav->alg_enc);
	if (!algo) {
		ipseclog((LOG_ERR, "esp_output: unsupported algorithm: "
		    "SPI=%u\n", (u_int32_t)ntohl(sav->spi)));
		m_freem(m);
		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 3, 0, 0, 0, 0);
		return EINVAL;
	}
	spi = sav->spi;
	ivlen = sav->ivlen;
	/* should be okey */
	if (ivlen < 0) {
		panic("invalid ivlen");
	}

	{
		/*
		 * insert ESP header.
		 * XXX inserts ESP header right after IPv4 header.  should
		 * chase the header chain.
		 * XXX sequential number
		 */
		struct ip *ip = NULL;
		struct ip6_hdr *ip6 = NULL;
		size_t esplen; /* sizeof(struct esp/newesp) */
		size_t hlen = 0; /* ip header len */

		if (sav->flags & SADB_X_EXT_OLD) {
			/* RFC 1827 */
			esplen = sizeof(struct esp);
		} else {
			/* RFC 2406 */
			if (sav->flags & SADB_X_EXT_DERIV) {
				esplen = sizeof(struct esp);
			} else {
				esplen = sizeof(struct newesp);
			}
		}
		esphlen = esplen + ivlen;

		/* Find mprev, the mbuf immediately preceding the payload (md). */
		for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next) {
			;
		}
		if (mprev == NULL || mprev->m_next != md) {
			ipseclog((LOG_DEBUG, "esp%d_output: md is not in chain\n",
			    afnumber));
			m_freem(m);
			KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 4, 0, 0, 0, 0);
			return EINVAL;
		}

		/* plen = total length of the to-be-encrypted payload chain */
		plen = 0;
		for (n = md; n; n = n->m_next) {
			plen += n->m_len;
		}

		switch (af) {
		case AF_INET:
			ip = mtod(m, struct ip *);
#ifdef _IP_VHL
			hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else
			hlen = ip->ip_hl << 2;
#endif
			break;
		case AF_INET6:
			ip6 = mtod(m, struct ip6_hdr *);
			hlen = sizeof(*ip6);
			break;
		}

		/* grab info for packet logging */
		struct secashead *sah = sav->sah;
		if (net_mpklog_enabled &&
		    sah != NULL && sah->ipsec_if != NULL) {
			ifnet_t ifp = sah->ipsec_if;

			if ((ifp->if_xflags & IFXF_MPK_LOG) == IFXF_MPK_LOG) {
				size_t iphlen = 0;

				if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
					/* Tunnel mode: the inner IP header lives at the start of md. */
					struct ip *inner_ip = mtod(md, struct ip *);
					if (IP_VHL_V(inner_ip->ip_vhl) == IPVERSION) {
#ifdef _IP_VHL
						iphlen = IP_VHL_HL(inner_ip->ip_vhl) << 2;
#else
						iphlen = inner_ip->ip_hl << 2;
#endif
						inner_protocol = inner_ip->ip_p;
					} else if (IP_VHL_V(inner_ip->ip_vhl) == 6) {
						struct ip6_hdr *inner_ip6 = mtod(md, struct ip6_hdr *);
						iphlen = sizeof(struct ip6_hdr);
						inner_protocol = inner_ip6->ip6_nxt;
					}

					if (inner_protocol == IPPROTO_TCP) {
						if ((int)(iphlen + sizeof(th)) <=
						    (m->m_pkthdr.len - m->m_len)) {
							m_copydata(md, (int)iphlen, sizeof(th), (u_int8_t *)&th);
						}

						inner_payload_len = m->m_pkthdr.len - m->m_len - iphlen - (th.th_off << 2);
					}
				} else {
					/* Transport mode: outer header is the only IP header. */
					iphlen = hlen;
					if (af == AF_INET) {
						inner_protocol = ip->ip_p;
					} else if (af == AF_INET6) {
						inner_protocol = ip6->ip6_nxt;
					}

					if (inner_protocol == IPPROTO_TCP) {
						if ((int)(iphlen + sizeof(th)) <=
						    m->m_pkthdr.len) {
							m_copydata(m, (int)iphlen, sizeof(th), (u_int8_t *)&th);
						}

						inner_payload_len = m->m_pkthdr.len - iphlen - (th.th_off << 2);
					}
				}
			}
		}

		/* make the packet over-writable */
		mprev->m_next = NULL;
		if ((md = ipsec_copypkt(md)) == NULL) {
			m_freem(m);
			error = ENOBUFS;
			goto fail;
		}
		mprev->m_next = md;

		/*
		 * Translate UDP source port back to its original value.
		 * SADB_X_EXT_NATT_MULTIPLEUSERS is only set for transport mode.
		 *
		 * NOTE(review): 'ip' is only non-NULL for AF_INET (set in the
		 * switch above); if this flag could ever be set on an AF_INET6
		 * SA, ip->ip_p below would dereference NULL — confirm the flag
		 * is IPv4-transport only.
		 */
		if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
			/* if not UDP - drop it */
			if (ip->ip_p != IPPROTO_UDP) {
				IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
				m_freem(m);
				error = EINVAL;
				goto fail;
			}

			/* transport mode: md begins with the UDP header */
			udp = mtod(md, struct udphdr *);

			/* if src port not set in sav - find it */
			if (sav->natt_encapsulated_src_port == 0) {
				if (key_natt_get_translated_port(sav) == 0) {
					m_freem(m);
					error = EINVAL;
					goto fail;
				}
			}
			if (sav->remote_ike_port == htons(udp->uh_dport)) {
				/* translate UDP port */
				udp->uh_dport = sav->natt_encapsulated_src_port;
				udp->uh_sum = 0; /* don't need checksum with ESP auth */
			} else {
				/* drop the packet - can't translate the port */
				IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
				m_freem(m);
				error = EINVAL;
				goto fail;
			}
		}


		/* espoff = byte offset of the ESP header within the packet */
		espoff = m->m_pkthdr.len - plen;

		if (udp_encapsulate) {
			esphlen += sizeof(struct udphdr);
			espoff += sizeof(struct udphdr);
		}

		/*
		 * grow the mbuf to accomodate ESP header.
		 * before: IP ... payload
		 * after:  IP ... [UDP] ESP IV payload
		 */
		if (M_LEADINGSPACE(md) < esphlen || (md->m_flags & M_EXT) != 0) {
			/* No usable leading space: insert a fresh mbuf before md. */
			MGET(n, M_DONTWAIT, MT_DATA);
			if (!n) {
				m_freem(m);
				error = ENOBUFS;
				goto fail;
			}
			VERIFY(esphlen <= INT32_MAX);
			n->m_len = (int)esphlen;
			mprev->m_next = n;
			n->m_next = md;
			m->m_pkthdr.len += esphlen;
			if (udp_encapsulate) {
				udp = mtod(n, struct udphdr *);
				esp = (struct esp *)(void *)((caddr_t)udp + sizeof(struct udphdr));
			} else {
				esp = mtod(n, struct esp *);
			}
		} else {
			/* Enough leading space in md itself: extend it backwards. */
			md->m_len += esphlen;
			md->m_data -= esphlen;
			m->m_pkthdr.len += esphlen;
			/* NOTE(review): this assignment is redundant; the if/else below reassigns esp. */
			esp = mtod(md, struct esp *);
			if (udp_encapsulate) {
				udp = mtod(md, struct udphdr *);
				esp = (struct esp *)(void *)((caddr_t)udp + sizeof(struct udphdr));
			} else {
				esp = mtod(md, struct esp *);
			}
		}

		switch (af) {
		case AF_INET:
			if (esphlen < (IP_MAXPACKET - ntohs(ip->ip_len))) {
				ip->ip_len = htons(ntohs(ip->ip_len) + (u_short)esphlen);
			} else {
				ipseclog((LOG_ERR,
				    "IPv4 ESP output: size exceeds limit\n"));
				IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
				m_freem(m);
				error = EMSGSIZE;
				goto fail;
			}
			break;
		case AF_INET6:
			/* total packet length will be computed in ip6_output() */
			break;
		}
	}

	/* initialize esp header. */
	esp->esp_spi = spi;
	if ((sav->flags & SADB_X_EXT_OLD) == 0) {
		struct newesp *nesp;
		nesp = (struct newesp *)esp;
		/* Refuse to wrap the sequence number unless the SA allows cycling. */
		if (sav->replay[traffic_class]->seq == sav->replay[traffic_class]->lastseq) {
			if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) {
				/* XXX Is it noisy ? */
				ipseclog((LOG_WARNING,
				    "replay counter overflowed. %s\n",
				    ipsec_logsastr(sav)));
				IPSEC_STAT_INCREMENT(stat->out_inval);
				m_freem(m);
				KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 5, 0, 0, 0, 0);
				return EINVAL;
			}
		}
		lck_mtx_lock(sadb_mutex);
		sav->replay[traffic_class]->count++;
		sav->replay[traffic_class]->seq++;
		lck_mtx_unlock(sadb_mutex);
		/*
		 * XXX sequence number must not be cycled, if the SA is
		 * installed by IKE daemon.
		 */
		nesp->esp_seq = htonl(sav->replay[traffic_class]->seq);
		seq = sav->replay[traffic_class]->seq;
	}

	{
		/*
		 * find the last mbuf. make some room for ESP trailer.
		 */
		struct ip *ip = NULL;
		size_t padbound;
		u_char *extend;
		int i;
		int randpadmax;

		if (algo->padbound) {
			padbound = algo->padbound;
		} else {
			padbound = 4;
		}
		/* ESP packet, including nxthdr field, must be length of 4n */
		if (padbound < 4) {
			padbound = 4;
		}

		/*
		 * extendsiz = padding + 2-byte ESP trailer (pad length +
		 * next header), rounded so the encrypted part is a multiple
		 * of padbound.
		 */
		extendsiz = padbound - (plen % padbound);
		if (extendsiz == 1) {
			extendsiz = padbound + 1;
		}

		/* random padding */
		switch (af) {
		case AF_INET:
			randpadmax = ip4_esp_randpad;
			break;
		case AF_INET6:
			randpadmax = ip6_esp_randpad;
			break;
		default:
			randpadmax = -1;
			break;
		}
		if (randpadmax < 0 || plen + extendsiz >= randpadmax) {
			;
		} else {
			size_t pad;

			/* round */
			randpadmax = (int)((randpadmax / padbound) * padbound);
			/*
			 * NOTE(review): by C precedence this computes
			 * (randpadmax - plen) + extendsiz, not
			 * randpadmax - (plen + extendsiz); inherited from the
			 * KAME original — confirm intent before changing, the
			 * MLEN/256 clamp below bounds the result either way.
			 */
			pad = (randpadmax - plen + extendsiz) / padbound;

			if (pad > 0) {
				pad = (random() % pad) * padbound;
			} else {
				pad = 0;
			}

			/*
			 * make sure we do not pad too much.
			 * MLEN limitation comes from the trailer attachment
			 * code below.
			 * 256 limitation comes from sequential padding.
			 * also, the 1-octet length field in ESP trailer imposes
			 * limitation (but is less strict than sequential padding
			 * as length field do not count the last 2 octets).
			 */
			if (extendsiz + pad <= MLEN && extendsiz + pad < 256) {
				extendsiz += pad;
			}
		}

		n = m;
		while (n->m_next) {
			n = n->m_next;
		}

		/*
		 * if M_EXT, the external mbuf data may be shared among
		 * two consequtive TCP packets, and it may be unsafe to use the
		 * trailing space.
		 */
		if (!(n->m_flags & M_EXT) && extendsiz < M_TRAILINGSPACE(n)) {
			extend = mtod(n, u_char *) + n->m_len;
			n->m_len += (int)extendsiz;
			m->m_pkthdr.len += extendsiz;
		} else {
			struct mbuf *nn;

			MGET(nn, M_DONTWAIT, MT_DATA);
			if (!nn) {
				ipseclog((LOG_DEBUG, "esp%d_output: can't alloc mbuf",
				    afnumber));
				m_freem(m);
				error = ENOBUFS;
				goto fail;
			}
			extend = mtod(nn, u_char *);
			VERIFY(extendsiz <= INT_MAX);
			nn->m_len = (int)extendsiz;
			nn->m_next = NULL;
			n->m_next = nn;
			n = nn;
			m->m_pkthdr.len += extendsiz;
		}
		/* Fill the pad bytes per the SA's padding policy. */
		switch (sav->flags & SADB_X_EXT_PMASK) {
		case SADB_X_EXT_PRAND:
			key_randomfill(extend, extendsiz);
			break;
		case SADB_X_EXT_PZERO:
			bzero(extend, extendsiz);
			break;
		case SADB_X_EXT_PSEQ:
			/* RFC 4303 default self-describing padding: 1, 2, 3, ... */
			for (i = 0; i < extendsiz; i++) {
				extend[i] = (i + 1) & 0xff;
			}
			break;
		}

		/* Save the original next-header; it moves into the ESP trailer. */
		nxt = *nexthdrp;
		if (udp_encapsulate) {
			*nexthdrp = IPPROTO_UDP;

			/* Fill out the UDP header */
			if (sav->natt_encapsulated_src_port != 0) {
				udp->uh_sport = (u_short)sav->natt_encapsulated_src_port;
			} else {
				udp->uh_sport = htons((u_short)esp_udp_encap_port);
			}
			udp->uh_dport = htons(sav->remote_ike_port);
			// udp->uh_len set later, after all length tweaks are complete
			udp->uh_sum = 0;

			/* Update last sent so we know if we need to send keepalive */
			sav->natt_last_activity = natt_now;
		} else {
			*nexthdrp = IPPROTO_ESP;
		}

		/* initialize esp trailer. */
		esptail = (struct esptail *)
		    (mtod(n, u_int8_t *) + n->m_len - sizeof(struct esptail));
		esptail->esp_nxt = nxt;
		VERIFY((extendsiz - 2) <= UINT8_MAX);
		/* Pad Length excludes the 2-byte trailer itself (RFC 4303). */
		esptail->esp_padlen = (u_int8_t)(extendsiz - 2);

		/* modify IP header (for ESP header part only) */
		switch (af) {
		case AF_INET:
			ip = mtod(m, struct ip *);
			if (extendsiz < (IP_MAXPACKET - ntohs(ip->ip_len))) {
				ip->ip_len = htons(ntohs(ip->ip_len) + (u_short)extendsiz);
			} else {
				ipseclog((LOG_ERR,
				    "IPv4 ESP output: size exceeds limit\n"));
				IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
				m_freem(m);
				error = EMSGSIZE;
				goto fail;
			}
			break;
		case AF_INET6:
			/* total packet length will be computed in ip6_output() */
			break;
		}
	}

	/*
	 * pre-compute and cache intermediate key
	 */
	error = esp_schedule(algo, sav);
	if (error) {
		m_freem(m);
		IPSEC_STAT_INCREMENT(stat->out_inval);
		goto fail;
	}

	/*
	 * encrypt the packet, based on security association
	 * and the algorithm specified.
	 */
	if (!algo->encrypt) {
		panic("internal error: no encrypt function");
	}
	KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_START, 0, 0, 0, 0, 0);
	if ((*algo->encrypt)(m, espoff, plen + extendsiz, sav, algo, ivlen)) {
		/* m is already freed */
		ipseclog((LOG_ERR, "packet encryption failure\n"));
		IPSEC_STAT_INCREMENT(stat->out_inval);
		error = EINVAL;
		KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 1, error, 0, 0, 0);
		goto fail;
	}
	KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 2, 0, 0, 0, 0);

	/*
	 * calculate ICV if required.
	 */
	size_t siz = 0;
	u_char authbuf[AH_MAXSUMSIZE] __attribute__((aligned(4)));

	/* Combined-mode (AEAD) ciphers produce the ICV during encryption. */
	if (algo->finalizeencrypt) {
		siz = algo->icvlen;
		if ((*algo->finalizeencrypt)(sav, authbuf, siz)) {
			ipseclog((LOG_ERR, "packet encryption ICV failure\n"));
			IPSEC_STAT_INCREMENT(stat->out_inval);
			error = EINVAL;
			KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 1, error, 0, 0, 0);
			goto fail;
		}
		goto fill_icv;
	}

	/*
	 * NOTE(review): these gotos also skip the udp_encapsulate uh_ulen
	 * fix-up below, leaving uh_ulen == 0 for a NAT-T SA with no auth —
	 * confirm such SA configurations are rejected elsewhere.
	 */
	if (!sav->replay[traffic_class]) {
		goto noantireplay;
	}
	if (!sav->key_auth) {
		goto noantireplay;
	}
	/*
	 * NOTE(review): key_auth is a key-blob pointer, so comparing it to
	 * SADB_AALG_NONE (0) just repeats the NULL check above; this likely
	 * intended sav->alg_auth == SADB_AALG_NONE (cf. KAME) — confirm.
	 */
	if (sav->key_auth == SADB_AALG_NONE) {
		goto noantireplay;
	}

	{
		const struct ah_algorithm *aalgo;

		aalgo = ah_algorithm_lookup(sav->alg_auth);
		if (!aalgo) {
			goto noantireplay;
		}
		/* Round the ICV size up to a 4-byte multiple; must fit authbuf. */
		siz = ((aalgo->sumsiz)(sav) + 3) & ~(4 - 1);
		if (AH_MAXSUMSIZE < siz) {
			panic("assertion failed for AH_MAXSUMSIZE");
		}

		if (esp_auth(m, espoff, m->m_pkthdr.len - espoff, sav, authbuf)) {
			ipseclog((LOG_ERR, "ESP checksum generation failure\n"));
			m_freem(m);
			error = EINVAL;
			IPSEC_STAT_INCREMENT(stat->out_inval);
			goto fail;
		}
	}

fill_icv:
	/* Append the ICV (authbuf, siz bytes) to the end of the chain. */
	{
		struct ip *ip;
		u_char *p;

		n = m;
		while (n->m_next) {
			n = n->m_next;
		}

		if (!(n->m_flags & M_EXT) && siz < M_TRAILINGSPACE(n)) { /* XXX */
			n->m_len += siz;
			m->m_pkthdr.len += siz;
			p = mtod(n, u_char *) + n->m_len - siz;
		} else {
			struct mbuf *nn;

			MGET(nn, M_DONTWAIT, MT_DATA);
			if (!nn) {
				ipseclog((LOG_DEBUG, "can't alloc mbuf in esp%d_output",
				    afnumber));
				m_freem(m);
				error = ENOBUFS;
				goto fail;
			}
			nn->m_len = (int)siz;
			nn->m_next = NULL;
			n->m_next = nn;
			n = nn;
			m->m_pkthdr.len += siz;
			p = mtod(nn, u_char *);
		}
		bcopy(authbuf, p, siz);

		/* modify IP header (for ESP header part only) */
		switch (af) {
		case AF_INET:
			ip = mtod(m, struct ip *);
			if (siz < (IP_MAXPACKET - ntohs(ip->ip_len))) {
				ip->ip_len = htons(ntohs(ip->ip_len) + (u_short)siz);
			} else {
				ipseclog((LOG_ERR,
				    "IPv4 ESP output: size exceeds limit\n"));
				IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
				m_freem(m);
				error = EMSGSIZE;
				goto fail;
			}
			break;
		case AF_INET6:
			/* total packet length will be computed in ip6_output() */
			break;
		}
	}

	/* All lengths final: now fill in the deferred UDP length/checksum. */
	if (udp_encapsulate) {
		struct ip *ip;
		struct ip6_hdr *ip6;

		switch (af) {
		case AF_INET:
			ip = mtod(m, struct ip *);
			udp->uh_ulen = htons((u_int16_t)(ntohs(ip->ip_len) - (IP_VHL_HL(ip->ip_vhl) << 2)));
			break;
		case AF_INET6:
			ip6 = mtod(m, struct ip6_hdr *);
			VERIFY((plen + siz + extendsiz + esphlen) <= UINT16_MAX);
			udp->uh_ulen = htons((u_int16_t)(plen + siz + extendsiz + esphlen));
			/* IPv6 UDP checksum is mandatory; offload finishes it. */
			udp->uh_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst, htonl(ntohs(udp->uh_ulen) + IPPROTO_UDP));
			m->m_pkthdr.csum_flags = (CSUM_UDPIPV6 | CSUM_ZERO_INVERT);
			m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
			break;
		}
	}

noantireplay:
	if (net_mpklog_enabled && sav->sah != NULL &&
	    sav->sah->ipsec_if != NULL &&
	    (sav->sah->ipsec_if->if_xflags & IFXF_MPK_LOG) &&
	    inner_protocol == IPPROTO_TCP) {
		MPKL_ESP_OUTPUT_TCP(esp_mpkl_log_object,
		    ntohl(spi), seq,
		    ntohs(th.th_sport), ntohs(th.th_dport),
		    ntohl(th.th_seq), ntohl(th.th_ack),
		    inner_payload_len, th.th_flags);
	}

	lck_mtx_lock(sadb_mutex);
	/*
	 * NOTE(review): if m were actually NULL here, the
	 * key_sa_recordxfer(sav, m->m_pkthdr.len) call below would
	 * dereference it anyway; the NULL branch appears vestigial.
	 */
	if (!m) {
		ipseclog((LOG_ERR,
		    "NULL mbuf after encryption in esp%d_output", afnumber));
	} else {
		IPSEC_STAT_INCREMENT(stat->out_success);
	}
	IPSEC_STAT_INCREMENT(stat->out_esphist[sav->alg_enc]);
	lck_mtx_unlock(sadb_mutex);
	key_sa_recordxfer(sav, m->m_pkthdr.len);
	KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 6, 0, 0, 0, 0);
	return 0;

fail:
	KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 7, error, 0, 0, 0);
	return error;
}
940 
941 int
esp4_output(struct mbuf * m,struct secasvar * sav)942 esp4_output(
943 	struct mbuf *m,
944 	struct secasvar *sav)
945 {
946 	struct ip *ip;
947 	if (m->m_len < sizeof(struct ip)) {
948 		ipseclog((LOG_DEBUG, "esp4_output: first mbuf too short\n"));
949 		m_freem(m);
950 		return EINVAL;
951 	}
952 	ip = mtod(m, struct ip *);
953 	/* XXX assumes that m->m_next points to payload */
954 	return esp_output(m, &ip->ip_p, m->m_next, AF_INET, sav);
955 }
956 
957 int
esp6_output(struct mbuf * m,u_char * nexthdrp,struct mbuf * md,struct secasvar * sav)958 esp6_output(
959 	struct mbuf *m,
960 	u_char *nexthdrp,
961 	struct mbuf *md,
962 	struct secasvar *sav)
963 {
964 	if (m->m_len < sizeof(struct ip6_hdr)) {
965 		ipseclog((LOG_DEBUG, "esp6_output: first mbuf too short\n"));
966 		m_freem(m);
967 		return EINVAL;
968 	}
969 	return esp_output(m, nexthdrp, md, AF_INET6, sav);
970 }
971 
972 int
esp_kpipe_output(struct secasvar * sav,kern_packet_t sph,kern_packet_t dph)973 esp_kpipe_output(struct secasvar *sav, kern_packet_t sph, kern_packet_t dph)
974 {
975 	struct newesp *esp = NULL;
976 	struct esptail *esptail = NULL;
977 	struct ipsecstat *stat = NULL;
978 	uint8_t *sbaddr = NULL, *dbaddr = NULL;
979 	uint8_t *src_payload = NULL, *dst_payload = NULL;
980 	uint8_t *iv = NULL;
981 	uint8_t *auth_buf = NULL;
982 	const struct esp_algorithm *e_algo = NULL;
983 	const struct ah_algorithm *a_algo = NULL;
984 	mbuf_traffic_class_t traffic_class = 0;
985 	size_t iphlen = 0, esphlen = 0, padbound = 0, extendsiz = 0, plen = 0;
986 	size_t auth_size = 0, add_ip_len = 0;
987 	int af = 0, ivlen = 0;
988 	uint32_t slim = 0, slen = 0;
989 	uint32_t dlim = 0, dlen = 0;
990 	uint8_t dscp = 0, nxt_proto = 0;
991 	int err = 0;
992 
993 	KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_START, sav->ivlen, 0, 0, 0, 0);
994 
995 	VERIFY(sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT);
996 	VERIFY((sav->flags & (SADB_X_EXT_OLD | SADB_X_EXT_DERIV |
997 	    SADB_X_EXT_NATT | SADB_X_EXT_NATT_MULTIPLEUSERS |
998 	    SADB_X_EXT_CYCSEQ | SADB_X_EXT_PMASK)) == 0);
999 
1000 	kern_buflet_t __single sbuf = __packet_get_next_buflet(sph, NULL);
1001 	VERIFY(sbuf != NULL);
1002 	slen = __buflet_get_data_length(sbuf);
1003 	sbaddr = ipsec_kern_buflet_to_buffer(sbuf);
1004 	slim = __buflet_get_data_limit(sbuf);
1005 	slim -= __buflet_get_data_offset(sbuf);
1006 
1007 	kern_buflet_t __single dbuf = __packet_get_next_buflet(dph, NULL);
1008 	VERIFY(dbuf != NULL);
1009 	dlen = __buflet_get_data_length(dbuf);
1010 	dbaddr = ipsec_kern_buflet_to_buffer(dbuf);
1011 	dlim = __buflet_get_data_limit(dbuf);
1012 	dlim -= __buflet_get_data_offset(dbuf);
1013 
1014 	struct ip *ip_hdr = (struct ip *)(void *)sbaddr;
1015 	ASSERT(IP_HDR_ALIGNED_P(ip_hdr));
1016 
1017 	u_int ip_vers = IP_VHL_V(ip_hdr->ip_vhl);
1018 	switch (ip_vers) {
1019 	case IPVERSION: {
1020 #ifdef _IP_VHL
1021 		iphlen = IP_VHL_HL(ip_hdr->ip_vhl) << 2;
1022 #else /* _IP_VHL */
1023 		iphlen = ip_hdr->ip_hl << 2;
1024 #endif /* _IP_VHL */
1025 		dscp = ip_hdr->ip_tos >> IPTOS_DSCP_SHIFT;
1026 		nxt_proto = ip_hdr->ip_p;
1027 		stat = &ipsecstat;
1028 		af = AF_INET;
1029 		break;
1030 	}
1031 	case 6: {
1032 		struct ip6_hdr *ip6 = (struct ip6_hdr *)sbaddr;
1033 		iphlen = sizeof(struct ip6_hdr);
1034 		dscp = (ntohl(ip6->ip6_flow) & IP6FLOW_DSCP_MASK) >> IP6FLOW_DSCP_SHIFT;
1035 		nxt_proto = ip6->ip6_nxt;
1036 		stat = &ipsec6stat;
1037 		af = AF_INET6;
1038 		break;
1039 	}
1040 	default:
1041 		panic("esp kpipe output, ipversion %u, SPI=%x",
1042 		    ip_vers, ntohl(sav->spi));
1043 		/* NOTREACHED */
1044 		__builtin_unreachable();
1045 	}
1046 
1047 	if (__improbable(slen <= iphlen)) {
1048 		esp_log_info("esp kpipe output, slen(%u) <= iphlen(%zu) "
1049 		    "SPI=%x\n", slen, iphlen, ntohl(sav->spi));
1050 		IPSEC_STAT_INCREMENT(stat->out_inval);
1051 		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 1, EINVAL, 0, 0, 0);
1052 		return EINVAL;
1053 	}
1054 
1055 	if ((sav->flags2 & SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) ==
1056 	    SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) {
1057 		traffic_class = rfc4594_dscp_to_tc(dscp);
1058 	}
1059 	if (__improbable(sav->replay[traffic_class] == NULL)) {
1060 		esp_log_info("esp kpipe output, missing "
1061 		    "replay window, SPI=%x\n", ntohl(sav->spi));
1062 		IPSEC_STAT_INCREMENT(stat->out_inval);
1063 		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 2, EINVAL, 0, 0, 0);
1064 		return EINVAL;
1065 	}
1066 
1067 	e_algo = esp_algorithm_lookup(sav->alg_enc);
1068 	if (__improbable(e_algo == NULL)) {
1069 		esp_log_info("esp kpipe output: unsupported algorithm, SPI=%x\n",
1070 		    ntohl(sav->spi));
1071 		IPSEC_STAT_INCREMENT(stat->out_inval);
1072 		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 3, EINVAL, 0, 0, 0);
1073 		return EINVAL;
1074 	}
1075 
1076 	if ((sav->flags & SADB_X_EXT_IIV) == 0) {
1077 		ivlen = sav->ivlen;
1078 		if (__improbable(ivlen < 0)) {
1079 			panic("invalid ivlen(%d) SPI=%x", ivlen, ntohl(sav->spi));
1080 			/* NOTREACHED */
1081 			__builtin_unreachable();
1082 		}
1083 
1084 		iv = dbaddr + iphlen + sizeof(struct newesp);
1085 	}
1086 
1087 	esphlen = sizeof(struct newesp) + ivlen;
1088 	if (e_algo->padbound) {
1089 		padbound = e_algo->padbound;
1090 		/*ESP packet, including nxthdr field, must be length of 4n */
1091 		if (padbound < 4) {
1092 			padbound = 4;
1093 		}
1094 	} else {
1095 		padbound = 4;
1096 	}
1097 	plen = slen - iphlen;
1098 	extendsiz = padbound - (plen % padbound);
1099 	if (extendsiz == 1) {
1100 		extendsiz = padbound + 1;
1101 	}
1102 	VERIFY(extendsiz <= UINT8_MAX);
1103 	if (e_algo->finalizeencrypt) {
1104 		auth_size = e_algo->icvlen;
1105 	} else {
1106 		a_algo = ah_algorithm_lookup(sav->alg_auth);
1107 		if (a_algo != NULL) {
1108 			auth_size = ((a_algo->sumsiz)(sav) + 3) & ~(4 - 1);
1109 			if (__improbable(auth_size > AH_MAXSUMSIZE)) {
1110 				panic("auth size %zu greater than AH_MAXSUMSIZE",
1111 				    auth_size);
1112 				/* NOTREACHED */
1113 				__builtin_unreachable();
1114 			}
1115 		}
1116 	}
1117 
1118 	/*
1119 	 * Validate destination buffer has sufficient space -
1120 	 * {IP header + ESP header + Payload + Padding + ESP trailer + ESP Auth}
1121 	 */
1122 	size_t total_len = iphlen + esphlen + plen + extendsiz + auth_size;
1123 	if (__improbable(total_len > dlim)) {
1124 		esp_log_info("esp kpipe output: destination buffer too short");
1125 		IPSEC_STAT_INCREMENT(stat->out_nomem);
1126 		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 4, EMSGSIZE, 0, 0, 0);
1127 		return EMSGSIZE;
1128 	}
1129 
1130 	/*
1131 	 * Validate source buffer has sufficient space to including padding and
1132 	 * ESP trailer. This is done so that source buffer can be passed as
1133 	 * input to encrypt cipher.
1134 	 */
1135 	if (__improbable((slen + extendsiz) > slim)) {
1136 		esp_log_info("esp kpipe output: source buffer too short");
1137 		IPSEC_STAT_INCREMENT(stat->out_nomem);
1138 		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 5, EMSGSIZE, 0, 0, 0);
1139 		return EMSGSIZE;
1140 	}
1141 
1142 	/*
1143 	 * Increment IP payload length to include ESP header length +
1144 	 * Padding + ESP trailer + ESP Auth
1145 	 */
1146 	add_ip_len = esphlen + extendsiz + auth_size;
1147 	switch (af) {
1148 	case AF_INET: {
1149 		struct ip *ip = (struct ip *)(void *)dbaddr;
1150 		ASSERT(IP_HDR_ALIGNED_P(ip));
1151 		if (__probable(ntohs(ip->ip_len) + add_ip_len <= IP_MAXPACKET)) {
1152 			ip->ip_len = htons(ntohs(ip->ip_len) + (u_short)add_ip_len);
1153 			ip->ip_p = IPPROTO_ESP;
1154 			ip->ip_sum = 0; // Recalculate checksum
1155 			ip->ip_sum = in_cksum_hdr_opt(ip);
1156 		} else {
1157 			esp_log_info("esp kpipe output: ipv4 packet "
1158 			    "size exceeded, ip payload len %u, SPI=%x\n",
1159 			    ntohs(ip->ip_len), ntohl(sav->spi));
1160 			IPSEC_STAT_INCREMENT(stat->out_nomem);
1161 			KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 6, EMSGSIZE, 0, 0, 0);
1162 			return EMSGSIZE;
1163 		}
1164 		break;
1165 	}
1166 	case AF_INET6: {
1167 		struct ip6_hdr *ip6 = (struct ip6_hdr *)dbaddr;
1168 		if (__probable(ntohs(ip6->ip6_plen) + add_ip_len <= IP_MAXPACKET)) {
1169 			ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) + (u_short)add_ip_len);
1170 			ip6->ip6_nxt = IPPROTO_ESP;
1171 		} else {
1172 			esp_log_info("esp kpipe output: ipv6 packet "
1173 			    "size exceeded, ip payload len %u, SPI=%x\n",
1174 			    ntohs(ip6->ip6_plen), ntohl(sav->spi));
1175 			IPSEC_STAT_INCREMENT(stat->out_nomem);
1176 			KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 7, EMSGSIZE, 0, 0, 0);
1177 			return EMSGSIZE;
1178 		}
1179 		break;
1180 	}
1181 	}
1182 
1183 	if (__improbable(sav->replay[traffic_class]->seq >=
1184 	    sav->replay[traffic_class]->lastseq)) {
1185 		esp_log_info("replay counter overflowed, SPI=%x\n", ntohl(sav->spi));
1186 		IPSEC_STAT_INCREMENT(stat->out_inval);
1187 		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 8, EINVAL, 0, 0, 0);
1188 		return EINVAL;
1189 	}
1190 
1191 	os_atomic_inc(&sav->replay[traffic_class]->count, relaxed);
1192 
1193 	esp = (struct newesp *)(void *)(dbaddr + iphlen);
1194 	ASSERT(IS_P2ALIGNED(esp, sizeof(uint32_t)));
1195 	esp->esp_spi = sav->spi;
1196 	esp->esp_seq = htonl(os_atomic_inc(&sav->replay[traffic_class]->seq, relaxed));
1197 
1198 	esptail = (struct esptail *)(sbaddr + slen + extendsiz - sizeof(struct esptail));
1199 	esptail->esp_nxt = nxt_proto;
1200 	esptail->esp_padlen = (u_int8_t)(extendsiz - 2);
1201 
1202 	/*
1203 	 * pre-compute and cache intermediate key
1204 	 */
1205 	err = esp_schedule(e_algo, sav);
1206 	if (__improbable(err != 0)) {
1207 		esp_log_info("esp schedule failed %d, SPI=%x\n", err, ntohl(sav->spi));
1208 		IPSEC_STAT_INCREMENT(stat->out_inval);
1209 		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 9, err, 0, 0, 0);
1210 		return err;
1211 	}
1212 
1213 	if (__improbable(!e_algo->encrypt_pkt)) {
1214 		panic("esp kpipe output: missing algo encrypt pkt");
1215 		/* NOTREACHED */
1216 		__builtin_unreachable();
1217 	}
1218 
1219 	KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_START, 0, 0, 0, 0, 0);
1220 	src_payload = sbaddr + iphlen;
1221 	dst_payload = dbaddr + iphlen + esphlen;
1222 	if (__improbable((err = (*e_algo->encrypt_pkt)(sav, src_payload, plen + extendsiz,
1223 	    esp, iv, ivlen, dst_payload, plen + extendsiz)) != 0)) {
1224 		esp_log_info("esp encrypt failed %d, SPI=%x\n", err, ntohl(sav->spi));
1225 		IPSEC_STAT_INCREMENT(stat->out_inval);
1226 		KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 1, err, 0, 0, 0);
1227 		KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 10, err, 0, 0, 0);
1228 		return err;
1229 	}
1230 	KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 2, 0, 0, 0, 0);
1231 
1232 	auth_buf = dst_payload + plen + extendsiz;
1233 	if (e_algo->finalizeencrypt) {
1234 		if (__improbable((err = (*e_algo->finalizeencrypt)(sav, auth_buf,
1235 		    auth_size)) != 0)) {
1236 			esp_log_info("esp finalize encrypt failed %d, SPI=%x\n",
1237 			    err, ntohl(sav->spi));
1238 			IPSEC_STAT_INCREMENT(stat->out_inval);
1239 			KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 11, err, 0, 0, 0);
1240 			return err;
1241 		}
1242 	} else if (sav->key_auth != NULL && auth_size > 0) {
1243 		if (__improbable((err = esp_auth_data(sav, (uint8_t *)esp,
1244 		    esphlen + plen + extendsiz, auth_buf, auth_size)) != 0)) {
1245 			esp_log_info("esp auth data failed %d, SPI=%x\n",
1246 			    err, ntohl(sav->spi));
1247 			IPSEC_STAT_INCREMENT(stat->out_inval);
1248 			KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 12, err, 0, 0, 0);
1249 			return err;
1250 		}
1251 	}
1252 
1253 	__buflet_set_data_length(dbuf, (uint16_t)total_len);
1254 
1255 	IPSEC_STAT_INCREMENT(stat->out_success);
1256 	IPSEC_STAT_INCREMENT(stat->out_esphist[sav->alg_enc]);
1257 	key_sa_recordxfer(sav, total_len);
1258 	KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 13, 0, 0, 0, 0);
1259 	return 0;
1260 }
1261