xref: /xnu-11215/bsd/netinet6/esp_input.c (revision 8d741a5d)
1 /*
2  * Copyright (c) 2008-2016, 2022-2023 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 /*	$FreeBSD: src/sys/netinet6/esp_input.c,v 1.1.2.3 2001/07/03 11:01:50 ume Exp $	*/
30 /*	$KAME: esp_input.c,v 1.55 2001/03/23 08:08:47 itojun Exp $	*/
31 
32 /*
33  * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34  * All rights reserved.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  * 1. Redistributions of source code must retain the above copyright
40  *    notice, this list of conditions and the following disclaimer.
41  * 2. Redistributions in binary form must reproduce the above copyright
42  *    notice, this list of conditions and the following disclaimer in the
43  *    documentation and/or other materials provided with the distribution.
44  * 3. Neither the name of the project nor the names of its contributors
45  *    may be used to endorse or promote products derived from this software
46  *    without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  */
60 
61 #define _IP_VHL
62 
63 /*
64  * RFC1827/2406 Encapsulated Security Payload.
65  */
66 
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/malloc.h>
70 #include <sys/mbuf.h>
71 #include <sys/mcache.h>
72 #include <sys/domain.h>
73 #include <sys/protosw.h>
74 #include <sys/socket.h>
75 #include <sys/errno.h>
76 #include <sys/time.h>
77 #include <sys/kernel.h>
78 #include <sys/syslog.h>
79 
80 #include <net/if.h>
81 #include <net/if_ipsec.h>
82 #include <net/multi_layer_pkt_log.h>
83 #include <net/route.h>
84 #include <net/if_ports_used.h>
85 #include <kern/cpu_number.h>
86 #include <kern/locks.h>
87 
88 #include <netinet/in.h>
89 #include <netinet/in_systm.h>
90 #include <netinet/ip.h>
91 #include <netinet/ip_var.h>
92 #include <netinet/in_var.h>
93 #include <netinet/ip_ecn.h>
94 #include <netinet/in_pcb.h>
95 #include <netinet/udp.h>
96 #include <netinet/tcp.h>
97 #include <netinet/in_tclass.h>
98 #include <netinet6/ip6_ecn.h>
99 
100 #include <netinet/ip6.h>
101 #include <netinet6/in6_pcb.h>
102 #include <netinet6/ip6_var.h>
103 #include <netinet/icmp6.h>
104 #include <netinet6/ip6protosw.h>
105 
106 #include <netinet6/ipsec.h>
107 #include <netinet6/ipsec6.h>
108 #include <netinet6/ah.h>
109 #include <netinet6/ah6.h>
110 #include <netinet6/esp.h>
111 #include <netinet6/esp6.h>
112 #include <netkey/key.h>
113 #include <netkey/keydb.h>
114 #include <netkey/key_debug.h>
115 
116 #include <net/kpi_protocol.h>
117 #include <netinet/kpi_ipfilter_var.h>
118 
119 #include <net/net_osdep.h>
120 #include <mach/sdt.h>
121 #include <corecrypto/cc.h>
122 
123 #if SKYWALK
124 #include <skywalk/os_skywalk_private.h>
125 #endif // SKYWALK
126 
127 #include <sys/kdebug.h>
128 #define DBG_LAYER_BEG           NETDBG_CODE(DBG_NETIPSEC, 1)
129 #define DBG_LAYER_END           NETDBG_CODE(DBG_NETIPSEC, 3)
130 #define DBG_FNC_ESPIN           NETDBG_CODE(DBG_NETIPSEC, (6 << 8))
131 #define DBG_FNC_DECRYPT         NETDBG_CODE(DBG_NETIPSEC, (7 << 8))
132 #define IPLEN_FLIPPED
133 
134 #define ESPMAXLEN \
135 	(sizeof(struct esp) < sizeof(struct newesp) \
136 	        ? sizeof(struct newesp) : sizeof(struct esp))
137 
138 static struct ip *
esp4_input_strip_udp_encap(struct mbuf * m,int iphlen)139 esp4_input_strip_udp_encap(struct mbuf *m, int iphlen)
140 {
141 	// strip the udp header that's encapsulating ESP
142 	struct ip *ip;
143 	u_int8_t stripsiz = (u_int8_t)sizeof(struct udphdr);
144 
145 	ip = mtod(m, __typeof__(ip));
146 	ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), iphlen);
147 	m->m_data += stripsiz;
148 	m->m_len -= stripsiz;
149 	m->m_pkthdr.len -= stripsiz;
150 	ip = mtod(m, __typeof__(ip));
151 	ip->ip_len = ip->ip_len - stripsiz;
152 	ip->ip_p = IPPROTO_ESP;
153 	return ip;
154 }
155 
156 static struct ip6_hdr *
esp6_input_strip_udp_encap(struct mbuf * m,int ip6hlen)157 esp6_input_strip_udp_encap(struct mbuf *m, int ip6hlen)
158 {
159 	// strip the udp header that's encapsulating ESP
160 	struct ip6_hdr *ip6;
161 	u_int8_t stripsiz = (u_int8_t)sizeof(struct udphdr);
162 
163 	ip6 = mtod(m, __typeof__(ip6));
164 	ovbcopy((caddr_t)ip6, (caddr_t)(((u_char *)ip6) + stripsiz), ip6hlen);
165 	m->m_data += stripsiz;
166 	m->m_len -= stripsiz;
167 	m->m_pkthdr.len -= stripsiz;
168 	ip6 = mtod(m, __typeof__(ip6));
169 	ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
170 	ip6->ip6_nxt = IPPROTO_ESP;
171 	return ip6;
172 }
173 
/*
 * Emit a multi-layer packet log (MPKL) record for an inbound ESP packet
 * whose SA is bound to an IPsec interface with IFXF_MPK_LOG set.  The
 * inner (decapsulated) packet is inspected: its IP version is read from
 * the version nibble at the start of the mbuf, and only TCP payloads are
 * logged.  'spi' is in network byte order; 'seq' is in host byte order
 * as passed by the callers in this file.
 */
static void
esp_input_log(struct mbuf *m, struct secasvar *sav, u_int32_t spi, u_int32_t seq)
{
	if (net_mpklog_enabled &&
	    (sav->sah->ipsec_if->if_xflags & IFXF_MPK_LOG) == IFXF_MPK_LOG) {
		struct tcphdr th = {};
		u_int32_t proto_len = 0;
		u_int8_t iphlen = 0;
		u_int8_t proto = 0;

		/*
		 * The version nibble is at the same offset for IPv4 and
		 * IPv6, so an IPv4-typed read is used to dispatch on it.
		 */
		struct ip *inner_ip = mtod(m, struct ip *);
		if (IP_VHL_V(inner_ip->ip_vhl) == 4) {
			iphlen = (u_int8_t)(IP_VHL_HL(inner_ip->ip_vhl) << 2);
			proto = inner_ip->ip_p;
		} else if (IP_VHL_V(inner_ip->ip_vhl) == 6) {
			struct ip6_hdr *inner_ip6 = mtod(m, struct ip6_hdr *);
			iphlen = sizeof(struct ip6_hdr);
			proto = inner_ip6->ip6_nxt;
		}

		if (proto == IPPROTO_TCP) {
			if ((int)(iphlen + sizeof(th)) <= m->m_pkthdr.len) {
				m_copydata(m, iphlen, sizeof(th), (u_int8_t *)&th);
			}

			/*
			 * NOTE(review): if the packet was too short for the
			 * copy above, 'th' stays zeroed (th_off == 0) and
			 * proto_len covers the whole TCP header + payload.
			 */
			proto_len = m->m_pkthdr.len - iphlen - (th.th_off << 2);
			MPKL_ESP_INPUT_TCP(esp_mpkl_log_object,
			    ntohl(spi), seq,
			    ntohs(th.th_sport), ntohs(th.th_dport),
			    ntohl(th.th_seq), proto_len);
		}
	}
}
207 
/*
 * Legacy protocol-switch entry point for inbound IPv4 ESP.  Delegates to
 * esp4_input_extended() with no receiving interface filter; with a NULL
 * interface the extended routine never hands the mbuf back (it either
 * consumes or frees it), so the return value is safely discarded.
 */
void
esp4_input(struct mbuf *m, int off)
{
	(void)esp4_input_extended(m, off, NULL);
}
213 
/*
 * Inbound IPv4 ESP processing (RFC 2406/4303 "new" ESP, RFC 1827 "old"
 * ESP, and UDP-encapsulated ESP for NAT traversal).
 *
 * The packet is processed in phases:
 *   1. sanity checks and pullup of the ESP header;
 *   2. SA lookup by (src, dst, SPI) and SA state/algorithm validation;
 *   3. anti-replay check against the SA's replay window (possibly one
 *      window per traffic class when SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS
 *      is set);
 *   4. ICV handling: for combined-mode algorithms the ICV is saved and
 *      verified after decryption via algo->finalizedecrypt; otherwise it
 *      is verified immediately with the separate auth algorithm and
 *      stripped;
 *   5. decryption, then trailer (pad length / next header) removal;
 *   6. optional UDP-encap header stripping with NAT-T source port
 *      tracking;
 *   7. delivery: tunnel mode re-dispatches the inner IPv4/IPv6 packet,
 *      transport mode strips ESP header+IV and hands the payload to the
 *      next protocol; either way the packet may instead be diverted to
 *      the SA's IPsec interface.
 *
 * If 'interface' is non-NULL and matches the SA's IPsec interface, the
 * decrypted mbuf is returned to the caller; otherwise NULL is returned
 * and the mbuf is consumed (injected, dispatched, or freed).
 */
struct mbuf *
esp4_input_extended(struct mbuf *m, int off, ifnet_t interface)
{
	union sockaddr_in_4_6 src = {};
	union sockaddr_in_4_6 dst = {};
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct esp *esp;
	struct esptail esptail;
	u_int32_t spi;
	u_int32_t seq;
	u_int32_t replay_index = 0;
	struct secasvar *sav = NULL;
	size_t taillen;
	u_int16_t nxt;
	const struct esp_algorithm *algo;
	int ivlen;
	size_t esplen;
	u_int8_t hlen;
	sa_family_t     ifamily;
	struct mbuf *out_m = NULL;

	KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_START, 0, 0, 0, 0, 0);
	/* sanity check for alignment. */
	if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
		ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem "
		    "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	/* Make the largest possible ESP header contiguous in the first mbuf. */
	if (m->m_len < off + ESPMAXLEN) {
		m = m_pullup(m, off + ESPMAXLEN);
		if (!m) {
			ipseclog((LOG_DEBUG,
			    "IPv4 ESP input: can't pullup in esp4_input\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto bad;
		}
	}

	m->m_pkthdr.csum_flags &= ~CSUM_RX_FLAGS;

	/* Expect 32-bit aligned data pointer on strict-align platforms */
	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

	ip = mtod(m, struct ip *);
	// expect udp-encap and esp packets only
	if (ip->ip_p != IPPROTO_ESP &&
	    !(ip->ip_p == IPPROTO_UDP && off >= sizeof(struct udphdr))) {
		ipseclog((LOG_DEBUG,
		    "IPv4 ESP input: invalid protocol type\n"));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}
	esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
#ifdef _IP_VHL
	hlen = (u_int8_t)(IP_VHL_HL(ip->ip_vhl) << 2);
#else
	hlen = ip->ip_hl << 2;
#endif

	/* find the sassoc. */
	spi = esp->esp_spi;

	ipsec_fill_ip_sockaddr_4_6(&src, ip->ip_src, 0);
	ipsec_fill_ip_sockaddr_4_6(&dst, ip->ip_dst, 0);

	if ((sav = key_allocsa(&src, &dst, IPPROTO_ESP, spi, interface)) == 0) {
		ipseclog((LOG_WARNING,
		    "IPv4 ESP input: no key association found for spi %u (0x%08x)\n",
		    (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_nosa);
		goto bad;
	}
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
	    printf("DP esp4_input called to allocate SA:0x%llx\n",
	    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
	if (sav->state != SADB_SASTATE_MATURE
	    && sav->state != SADB_SASTATE_DYING) {
		ipseclog((LOG_DEBUG,
		    "IPv4 ESP input: non-mature/dying SA found for spi %u (0x%08x)\n",
		    (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
		goto bad;
	}
	algo = esp_algorithm_lookup(sav->alg_enc);
	if (!algo) {
		ipseclog((LOG_DEBUG, "IPv4 ESP input: "
		    "unsupported encryption algorithm for spi %u (0x%08x)\n",
		    (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
		goto bad;
	}

	/* check if we have proper ivlen information */
	ivlen = sav->ivlen;
	if (ivlen < 0) {
		ipseclog((LOG_ERR, "inproper ivlen in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	/* Sequence number only exists in the RFC 2406 "new" ESP header. */
	seq = ntohl(((struct newesp *)esp)->esp_seq);

	/*
	 * With per-traffic-class sequence numbering, the top bits of the
	 * sequence number select which replay window to use.
	 */
	if ((sav->flags2 & SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) ==
	    SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) {
		replay_index = seq >> PER_TC_REPLAY_WINDOW_SN_SHIFT;
	}

	/*
	 * Replay checking requires new-style ESP, an allocated replay
	 * window, and either a separate auth algorithm with a key or a
	 * combined-mode cipher that authenticates during decryption.
	 */
	if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[replay_index] != NULL &&
	    ((sav->alg_auth && sav->key_auth) || algo->finalizedecrypt))) {
		goto noreplaycheck;
	}

	if ((sav->alg_auth == SADB_X_AALG_NULL || sav->alg_auth == SADB_AALG_NONE) &&
	    !algo->finalizedecrypt) {
		goto noreplaycheck;
	}

	/*
	 * check for sequence number.
	 */
	_CASSERT(MBUF_TC_MAX <= UINT8_MAX);
	if (ipsec_chkreplay(seq, sav, (u_int8_t)replay_index)) {
		; /*okey*/
	} else {
		IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
		ipseclog((LOG_WARNING,
		    "replay packet in IPv4 ESP input: seq(%u) idx(%u) %s %s\n",
		    seq, (u_int8_t)replay_index, ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		goto bad;
	}

	/* Save ICV from packet for verification later */
	size_t siz = 0;
	unsigned char saved_icv[AH_MAXSUMSIZE] __attribute__((aligned(4)));
	if (algo->finalizedecrypt) {
		/*
		 * Combined-mode cipher: the ICV can only be verified after
		 * decryption, so save the trailing ICV bytes now.
		 */
		siz = algo->icvlen;
		VERIFY(siz <= USHRT_MAX);
		if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
			ipseclog((LOG_DEBUG,
			    "invalid ESP packet length %u, missing ICV\n",
			    m->m_pkthdr.len));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto bad;
		}
		m_copydata(m, m->m_pkthdr.len - (u_short)siz, (u_short)siz, (caddr_t) saved_icv);
	} else {
		/* check ICV immediately */
		u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
		u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
		const struct ah_algorithm *sumalgo;

		sumalgo = ah_algorithm_lookup(sav->alg_auth);
		if (!sumalgo) {
			goto noreplaycheck;
		}
		/* ICV length rounded up to a 4-byte boundary. */
		siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
		if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
			ipseclog((LOG_DEBUG,
			    "invalid ESP packet length %u, missing ICV\n",
			    m->m_pkthdr.len));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto bad;
		}
		if (AH_MAXSUMSIZE < siz) {
			ipseclog((LOG_DEBUG,
			    "internal error: AH_MAXSUMSIZE must be larger than %u\n",
			    (u_int32_t)siz));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto bad;
		}

		m_copydata(m, m->m_pkthdr.len - (int)siz, (int)siz, (caddr_t) &sum0[0]);

		if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
			ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
			    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
			IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
			goto bad;
		}

		/* Constant-time compare to avoid a timing side channel. */
		if (cc_cmp_safe(siz, sum0, sum)) {
			ipseclog((LOG_WARNING, "cc_cmp fail in IPv4 ESP input: %s %s\n",
			    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
			IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
			goto bad;
		}

		m->m_flags |= M_AUTHIPDGM;
		IPSEC_STAT_INCREMENT(ipsecstat.in_espauthsucc);

		/*
		 * update replay window.
		 */
		if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[replay_index] != NULL) {
			if (ipsec_updatereplay(seq, sav, (u_int8_t)replay_index)) {
				IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
				goto bad;
			}
		}
	}


	/* strip off the authentication data */
	m_adj(m, (int)-siz);
	ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
	ip->ip_len = ip->ip_len - (u_short)siz;
#else
	ip->ip_len = htons(ntohs(ip->ip_len) - siz);
#endif

noreplaycheck:

	/* process main esp header. */
	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		esplen = sizeof(struct esp);
	} else {
		/* RFC 2406 */
		if (sav->flags & SADB_X_EXT_DERIV) {
			esplen = sizeof(struct esp);
		} else {
			esplen = sizeof(struct newesp);
		}
	}

	if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
		ipseclog((LOG_WARNING,
		    "IPv4 ESP input: packet too short\n"));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	if (m->m_len < off + esplen + ivlen) {
		m = m_pullup(m, (int)(off + esplen + ivlen));
		if (!m) {
			ipseclog((LOG_DEBUG,
			    "IPv4 ESP input: can't pullup in esp4_input\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto bad;
		}
		ip = mtod(m, struct ip *);
	}

	/*
	 * pre-compute and cache intermediate key
	 */
	if (esp_schedule(algo, sav) != 0) {
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	/*
	 * decrypt the packet.
	 */
	if (!algo->decrypt) {
		panic("internal error: no decrypt function");
	}
	KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_START, 0, 0, 0, 0, 0);
	if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
		/* m is already freed */
		m = NULL;
		ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n",
		    ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
		goto bad;
	}
	KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 2, 0, 0, 0, 0);
	IPSEC_STAT_INCREMENT(ipsecstat.in_esphist[sav->alg_enc]);

	m->m_flags |= M_DECRYPTED;

	if (algo->finalizedecrypt) {
		/* Combined-mode: verify the ICV saved before decryption. */
		if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) {
			ipseclog((LOG_ERR, "esp4 packet decryption ICV failure: %s\n",
			    ipsec_logsastr(sav)));
			IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
			KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
			goto bad;
		} else {
			m->m_flags |= M_AUTHIPDGM;
			IPSEC_STAT_INCREMENT(ipsecstat.in_espauthsucc);

			/*
			 * update replay window.
			 */
			if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[replay_index] != NULL) {
				if (ipsec_updatereplay(seq, sav, (u_int8_t)replay_index)) {
					IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
					goto bad;
				}
			}
		}
	}

	/*
	 * find the trailer of the ESP.
	 */
	m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
	    (caddr_t)&esptail);
	nxt = esptail.esp_nxt;
	taillen = esptail.esp_padlen + sizeof(esptail);

	if (m->m_pkthdr.len < taillen
	    || m->m_pkthdr.len - taillen < hlen) { /*?*/
		ipseclog((LOG_WARNING,
		    "bad pad length in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	/* strip off the trailing pad area. */
	m_adj(m, (int)-taillen);
	ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
	ip->ip_len = ip->ip_len - (u_short)taillen;
#else
	ip->ip_len = htons(ntohs(ip->ip_len) - taillen);
#endif
	if (ip->ip_p == IPPROTO_UDP) {
		// offset includes the outer ip and udp header lengths.
		if (m->m_len < off) {
			m = m_pullup(m, off);
			if (!m) {
				ipseclog((LOG_DEBUG,
				    "IPv4 ESP input: invalid udp encapsulated ESP packet length \n"));
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}
			ip = mtod(m, struct ip *);
		}

		// check the UDP encap header to detect changes in the source port, and then strip the header
		off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
		// if peer is behind nat and this is the latest esp packet
		if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
		    (sav->flags & SADB_X_EXT_OLD) == 0 &&
		    seq && sav->replay[replay_index] &&
		    seq >= sav->replay[replay_index]->lastseq) {
			struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip + off);
			/* Track NAT rebinding: remember the latest IKE source port. */
			if (encap_uh->uh_sport &&
			    ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
				sav->remote_ike_port = ntohs(encap_uh->uh_sport);
			}
		}
		ip = esp4_input_strip_udp_encap(m, off);
		esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
	}

	/* was it transmitted over the IPsec tunnel SA? */
	if (ipsec4_tunnel_validate(m, (int)(off + esplen + ivlen), nxt, sav, &ifamily)) {
		ifaddr_t ifa;
		struct sockaddr_storage addr;

		/*
		 * strip off all the headers that precedes ESP header.
		 *	IP4 xx ESP IP4' payload -> IP4' payload
		 *
		 * XXX more sanity checks
		 * XXX relationship with gif?
		 */
		u_int8_t tos, otos;
		u_int8_t inner_ip_proto = 0;
		int sum;

		tos = ip->ip_tos;
		m_adj(m, (int)(off + esplen + ivlen));
		if (ifamily == AF_INET) {
			struct sockaddr_in *ipaddr;

			if (m->m_len < sizeof(*ip)) {
				m = m_pullup(m, sizeof(*ip));
				if (!m) {
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					goto bad;
				}
			}
			ip = mtod(m, struct ip *);
			/* ECN consideration. */

			otos = ip->ip_tos;
			if (ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos) == 0) {
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			/* Incrementally patch the header checksum if TOS changed. */
			if (otos != ip->ip_tos) {
				sum = ~ntohs(ip->ip_sum) & 0xffff;
				sum += (~otos & 0xffff) + ip->ip_tos;
				sum = (sum >> 16) + (sum & 0xffff);
				sum += (sum >> 16); /* add carry */
				ip->ip_sum = htons(~sum & 0xffff);
			}

			if (!key_checktunnelsanity(sav, AF_INET,
			    (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
				ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
				    "in ESP input: %s %s\n",
				    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			inner_ip_proto = ip->ip_p;

			bzero(&addr, sizeof(addr));
			ipaddr = (__typeof__(ipaddr)) & addr;
			ipaddr->sin_family = AF_INET;
			ipaddr->sin_len = sizeof(*ipaddr);
			ipaddr->sin_addr = ip->ip_dst;
		} else if (ifamily == AF_INET6) {
			struct sockaddr_in6 *ip6addr;

			/*
			 * m_pullup is prohibited in KAME IPv6 input processing
			 * but there's no other way!
			 */
			if (m->m_len < sizeof(*ip6)) {
				m = m_pullup(m, sizeof(*ip6));
				if (!m) {
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					goto bad;
				}
			}

			/*
			 * Expect 32-bit aligned data pointer on strict-align
			 * platforms.
			 */
			MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

			ip6 = mtod(m, struct ip6_hdr *);

			/* ECN consideration. */
			if (ip64_ecn_egress(ip4_ipsec_ecn, &tos, &ip6->ip6_flow) == 0) {
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			if (!key_checktunnelsanity(sav, AF_INET6,
			    (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
				ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
				    "in ESP input: %s %s\n",
				    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			inner_ip_proto = ip6->ip6_nxt;

			bzero(&addr, sizeof(addr));
			ip6addr = (__typeof__(ip6addr)) & addr;
			ip6addr->sin6_family = AF_INET6;
			ip6addr->sin6_len = sizeof(*ip6addr);
			ip6addr->sin6_addr = ip6->ip6_dst;
		} else {
			ipseclog((LOG_ERR, "ipsec tunnel unsupported address family "
			    "in ESP input\n"));
			goto bad;
		}

		key_sa_recordxfer(sav, m->m_pkthdr.len);
		if (ipsec_incr_history_count(m, IPPROTO_ESP, spi) != 0 ||
		    ipsec_incr_history_count(m, IPPROTO_IPV4, 0) != 0) {
			IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
			goto bad;
		}

		// update the receiving interface address based on the inner address
		ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
		if (ifa) {
			m->m_pkthdr.rcvif = ifa->ifa_ifp;
			ifa_remref(ifa);
		}

		/* Clear the csum flags, they can't be valid for the inner headers */
		m->m_pkthdr.csum_flags = 0;

		// Input via IPsec interface
		lck_mtx_lock(sadb_mutex);
		ifnet_t ipsec_if = sav->sah->ipsec_if;
		if (ipsec_if != NULL) {
			// If an interface is found, add a reference count before dropping the lock
			ifnet_reference(ipsec_if);
		}
		lck_mtx_unlock(sadb_mutex);

		if ((m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) == PKTF_WAKE_PKT) {
			if (m->m_pkthdr.rcvif != NULL) {
				if_ports_used_match_mbuf(m->m_pkthdr.rcvif, ifamily, m);
			} else {
				ipseclog((LOG_ERR, "no input interface for ipsec wake packet\n"));
			}
		}

		if (ipsec_if != NULL) {
			esp_input_log(m, sav, spi, seq);

			// Return mbuf
			if (interface != NULL &&
			    interface == ipsec_if) {
				out_m = m;
				ifnet_release(ipsec_if);
				goto done;
			}

			errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
			ifnet_release(ipsec_if);

			if (inject_error == 0) {
				m = NULL;
				goto done;
			} else {
				goto bad;
			}
		}

		/* No IPsec interface: re-dispatch the inner packet directly. */
		if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0) {
			goto bad;
		}

		nxt = IPPROTO_DONE;
		KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 2, 0, 0, 0, 0);
	} else {
		/*
		 * strip off ESP header and IV.
		 * even in m_pulldown case, we need to strip off ESP so that
		 * we can always compute checksum for AH correctly.
		 */
		size_t stripsiz;

		stripsiz = esplen + ivlen;

		ip = mtod(m, struct ip *);
		ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), off);
		m->m_data += stripsiz;
		m->m_len -= stripsiz;
		m->m_pkthdr.len -= stripsiz;

		ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
		ip->ip_len = ip->ip_len - (u_short)stripsiz;
#else
		ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz);
#endif
		ip->ip_p = (u_int8_t)nxt;

		key_sa_recordxfer(sav, m->m_pkthdr.len);
		if (ipsec_incr_history_count(m, IPPROTO_ESP, spi) != 0) {
			IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
			goto bad;
		}

		/*
		 * Set the csum valid flag, if we authenticated the
		 * packet, the payload shouldn't be corrupt unless
		 * it was corrupted before being signed on the other
		 * side.
		 */
		if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
			m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xFFFF;
			_CASSERT(offsetof(struct pkthdr, csum_data) == offsetof(struct pkthdr, csum_rx_val));
		}

		if (nxt != IPPROTO_DONE) {
			if ((ip_protox[nxt]->pr_flags & PR_LASTHDR) != 0 &&
			    ipsec4_in_reject(m, NULL)) {
				IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
				goto bad;
			}
			KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 3, 0, 0, 0, 0);

			/* translate encapsulated UDP port ? */
			if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
				struct udphdr   *udp;

				if (nxt != IPPROTO_UDP) {       /* not UPD packet - drop it */
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					goto bad;
				}

				if (m->m_len < off + sizeof(struct udphdr)) {
					m = m_pullup(m, off + sizeof(struct udphdr));
					if (!m) {
						ipseclog((LOG_DEBUG,
						    "IPv4 ESP input: can't pullup UDP header in esp4_input\n"));
						IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
						goto bad;
					}
					ip = mtod(m, struct ip *);
				}
				udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + off);

				lck_mtx_lock(sadb_mutex);
				if (sav->natt_encapsulated_src_port == 0) {
					sav->natt_encapsulated_src_port = udp->uh_sport;
				} else if (sav->natt_encapsulated_src_port != udp->uh_sport) {  /* something wrong */
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					lck_mtx_unlock(sadb_mutex);
					goto bad;
				}
				lck_mtx_unlock(sadb_mutex);
				udp->uh_sport = htons(sav->remote_ike_port);
				udp->uh_sum = 0;
			}

			DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
			    struct ip *, ip, struct ifnet *, m->m_pkthdr.rcvif,
			    struct ip *, ip, struct ip6_hdr *, NULL);

			// Input via IPsec interface legacy path
			lck_mtx_lock(sadb_mutex);
			ifnet_t ipsec_if = sav->sah->ipsec_if;
			if (ipsec_if != NULL) {
				// If an interface is found, add a reference count before dropping the lock
				ifnet_reference(ipsec_if);
			}
			lck_mtx_unlock(sadb_mutex);
			if (ipsec_if != NULL) {
				int mlen;
				if ((mlen = m_length2(m, NULL)) < hlen) {
					ipseclog((LOG_DEBUG,
					    "IPv4 ESP input: decrypted packet too short %d < %u\n",
					    mlen, hlen));
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					ifnet_release(ipsec_if);
					goto bad;
				}
				/* Re-injection expects a wire-format header: restore
				 * network byte order and recompute the checksum. */
				ip->ip_len = htons(ip->ip_len + hlen);
				ip->ip_off = htons(ip->ip_off);
				ip->ip_sum = 0;
				ip->ip_sum = ip_cksum_hdr_in(m, hlen);

				esp_input_log(m, sav, spi, seq);

				if ((m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) == PKTF_WAKE_PKT) {
					if_ports_used_match_mbuf(ipsec_if, PF_INET, m);
				}

				// Return mbuf
				if (interface != NULL &&
				    interface == ipsec_if) {
					out_m = m;
					ifnet_release(ipsec_if);
					goto done;
				}

				errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
				ifnet_release(ipsec_if);

				if (inject_error == 0) {
					m = NULL;
					goto done;
				} else {
					goto bad;
				}
			}

			if ((m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) == PKTF_WAKE_PKT) {
				if_ports_used_match_mbuf(m->m_pkthdr.rcvif, PF_INET, m);
				if (m->m_pkthdr.rcvif == NULL) {
					ipseclog((LOG_ERR, "no input interface for ipsec wake packet\n"));
				}
			}

			ip_proto_dispatch_in(m, off, (u_int8_t)nxt, 0);
		} else {
			m_freem(m);
		}
		m = NULL;
	}

done:
	if (sav) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP esp4_input call free SA:0x%llx\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	IPSEC_STAT_INCREMENT(ipsecstat.in_success);
	return out_m;
bad:
	if (sav) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP esp4_input call free SA:0x%llx\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	if (m) {
		m_freem(m);
	}
	KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 4, 0, 0, 0, 0);
	return out_m;
}
915 
/*
 * Protocol-switch entry point for inbound IPv6 ESP.  Delegates to
 * esp6_input_extended() with no receiving interface filter and returns
 * its next-header result.
 */
int
esp6_input(struct mbuf **mp, int *offp, int proto)
{
	return esp6_input_extended(mp, offp, proto, NULL);
}
921 
922 int
esp6_input_extended(struct mbuf ** mp,int * offp,int proto,ifnet_t interface)923 esp6_input_extended(struct mbuf **mp, int *offp, int proto, ifnet_t interface)
924 {
925 	union sockaddr_in_4_6 src = {};
926 	union sockaddr_in_4_6 dst = {};
927 	struct mbuf *m = *mp;
928 	int off = *offp;
929 	struct ip *ip;
930 	struct ip6_hdr *ip6;
931 	struct esp *esp;
932 	struct esptail esptail;
933 	u_int32_t spi;
934 	u_int32_t seq;
935 	u_int32_t replay_index = 0;
936 	struct secasvar *sav = NULL;
937 	u_int16_t nxt;
938 	const struct esp_algorithm *algo;
939 	int ivlen;
940 	size_t esplen;
941 	u_int16_t taillen;
942 	sa_family_t ifamily;
943 
944 	/* sanity check for alignment. */
945 	if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
946 		ipseclog((LOG_ERR, "IPv6 ESP input: packet alignment problem "
947 		    "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
948 		IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
949 		goto bad;
950 	}
951 
952 #ifndef PULLDOWN_TEST
953 	IP6_EXTHDR_CHECK(m, off, ESPMAXLEN, {return IPPROTO_DONE;});
954 	esp = (struct esp *)(void *)(mtod(m, caddr_t) + off);
955 #else
956 	IP6_EXTHDR_GET(esp, struct esp *, m, off, ESPMAXLEN);
957 	if (esp == NULL) {
958 		IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
959 		return IPPROTO_DONE;
960 	}
961 #endif
962 	m->m_pkthdr.csum_flags &= ~CSUM_RX_FLAGS;
963 
964 	/* Expect 32-bit data aligned pointer on strict-align platforms */
965 	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
966 
967 	ip6 = mtod(m, struct ip6_hdr *);
968 	if (__improbable(ntohs(ip6->ip6_plen) == 0)) {
969 		ipseclog((LOG_ERR, "IPv6 ESP input: "
970 		    "ESP with IPv6 jumbogram is not supported.\n"));
971 		IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
972 		goto bad;
973 	}
974 
975 	if (__improbable(proto != IPPROTO_ESP &&
976 	    !(proto == IPPROTO_UDP &&
977 	    off >= (sizeof(struct udphdr) + sizeof(struct ip6_hdr))))) {
978 		ipseclog((LOG_DEBUG, "IPv6 ESP input: invalid protocol type\n"));
979 		IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
980 		goto bad;
981 	}
982 
983 	/* find the sassoc. */
984 	spi = esp->esp_spi;
985 
986 	ipsec_fill_ip6_sockaddr_4_6(&src, &ip6->ip6_src, 0);
987 	ipsec_fill_ip6_sockaddr_4_6_with_ifscope(&dst, &ip6->ip6_dst, 0,
988 	    interface != NULL ? interface->if_index : IFSCOPE_UNKNOWN);
989 
990 	if ((sav = key_allocsa(&src, &dst, IPPROTO_ESP, spi, interface)) == 0) {
991 		ipseclog((LOG_WARNING,
992 		    "IPv6 ESP input: no key association found for spi %u (0x%08x) seq %u"
993 		    " src %s dst %s if %s\n",
994 		    (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi), ntohl(((struct newesp *)esp)->esp_seq),
995 		    ip6_sprintf(&ip6->ip6_src), ip6_sprintf(&ip6->ip6_dst),
996 		    ((interface != NULL) ? if_name(interface) : "NONE")));
997 		IPSEC_STAT_INCREMENT(ipsec6stat.in_nosa);
998 		goto bad;
999 	}
1000 	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1001 	    printf("DP esp6_input called to allocate SA:0x%llx\n",
1002 	    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
1003 
1004 	if (sav->state != SADB_SASTATE_MATURE
1005 	    && sav->state != SADB_SASTATE_DYING) {
1006 		ipseclog((LOG_DEBUG,
1007 		    "IPv6 ESP input: non-mature/dying SA found for spi %u (0x%08x)\n",
1008 		    (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
1009 		IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
1010 		goto bad;
1011 	}
1012 
1013 	algo = esp_algorithm_lookup(sav->alg_enc);
1014 	if (!algo) {
1015 		ipseclog((LOG_DEBUG, "IPv6 ESP input: "
1016 		    "unsupported encryption algorithm for spi %u (0x%08x)\n",
1017 		    (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
1018 		IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
1019 		goto bad;
1020 	}
1021 
1022 	/* check if we have proper ivlen information */
1023 	ivlen = sav->ivlen;
1024 	if (__improbable(ivlen < 0)) {
1025 		ipseclog((LOG_ERR, "improper ivlen in IPv6 ESP input: %s %s\n",
1026 		    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
1027 		IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
1028 		goto bad;
1029 	}
1030 
1031 	seq = ntohl(((struct newesp *)esp)->esp_seq);
1032 
1033 	if ((sav->flags2 & SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) ==
1034 	    SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) {
1035 		replay_index = seq >> PER_TC_REPLAY_WINDOW_SN_SHIFT;
1036 	}
1037 
1038 	if (!((sav->flags & SADB_X_EXT_OLD) == 0 &&
1039 	    sav->replay[replay_index] != NULL &&
1040 	    ((sav->alg_auth && sav->key_auth) || algo->finalizedecrypt))) {
1041 		goto noreplaycheck;
1042 	}
1043 
1044 	if ((sav->alg_auth == SADB_X_AALG_NULL || sav->alg_auth == SADB_AALG_NONE) &&
1045 	    !algo->finalizedecrypt) {
1046 		goto noreplaycheck;
1047 	}
1048 
1049 	/*
1050 	 * check for sequence number.
1051 	 */
1052 	if (ipsec_chkreplay(seq, sav, (u_int8_t)replay_index)) {
1053 		; /*okey*/
1054 	} else {
1055 		IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
1056 		ipseclog((LOG_WARNING,
1057 		    "replay packet in IPv6 ESP input: seq(%u) idx(%u) %s %s\n",
1058 		    seq, (u_int8_t)replay_index, ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
1059 		goto bad;
1060 	}
1061 
1062 	/* Save ICV from packet for verification later */
1063 	size_t siz = 0;
1064 	unsigned char saved_icv[AH_MAXSUMSIZE] __attribute__((aligned(4)));
1065 	if (algo->finalizedecrypt) {
1066 		siz = algo->icvlen;
1067 		VERIFY(siz <= UINT16_MAX);
1068 		if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
1069 			ipseclog((LOG_DEBUG,
1070 			    "invalid ESP packet length %u, missing ICV\n",
1071 			    m->m_pkthdr.len));
1072 			IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1073 			goto bad;
1074 		}
1075 		m_copydata(m, m->m_pkthdr.len - (int)siz, (int)siz, (caddr_t) saved_icv);
1076 	} else {
1077 		/* check ICV immediately */
1078 		u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
1079 		u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
1080 		const struct ah_algorithm *sumalgo;
1081 
1082 		sumalgo = ah_algorithm_lookup(sav->alg_auth);
1083 		if (!sumalgo) {
1084 			goto noreplaycheck;
1085 		}
1086 		siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
1087 		if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
1088 			ipseclog((LOG_DEBUG,
1089 			    "invalid ESP packet length %u, missing ICV\n",
1090 			    m->m_pkthdr.len));
1091 			IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1092 			goto bad;
1093 		}
1094 		if (__improbable(AH_MAXSUMSIZE < siz)) {
1095 			ipseclog((LOG_DEBUG,
1096 			    "internal error: AH_MAXSUMSIZE must be larger than %u\n",
1097 			    (u_int32_t)siz));
1098 			IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1099 			goto bad;
1100 		}
1101 
1102 		m_copydata(m, m->m_pkthdr.len - (int)siz, (int)siz, (caddr_t) &sum0[0]);
1103 
1104 		if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
1105 			ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
1106 			    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
1107 			IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
1108 			goto bad;
1109 		}
1110 
1111 		if (cc_cmp_safe(siz, sum0, sum)) {
1112 			ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
1113 			    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
1114 			IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
1115 			goto bad;
1116 		}
1117 
1118 		m->m_flags |= M_AUTHIPDGM;
1119 		IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthsucc);
1120 
1121 		/*
1122 		 * update replay window.
1123 		 */
1124 		if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[replay_index] != NULL) {
1125 			if (ipsec_updatereplay(seq, sav, (u_int8_t)replay_index)) {
1126 				IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
1127 				goto bad;
1128 			}
1129 		}
1130 	}
1131 
1132 	/* strip off the authentication data */
1133 	m_adj(m, (int)-siz);
1134 	ip6 = mtod(m, struct ip6_hdr *);
1135 	ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - (u_int16_t)siz);
1136 
1137 noreplaycheck:
1138 
1139 	/* process main esp header. */
1140 	if (sav->flags & SADB_X_EXT_OLD) {
1141 		/* RFC 1827 */
1142 		esplen = sizeof(struct esp);
1143 	} else {
1144 		/* RFC 2406 */
1145 		if (sav->flags & SADB_X_EXT_DERIV) {
1146 			esplen = sizeof(struct esp);
1147 		} else {
1148 			esplen = sizeof(struct newesp);
1149 		}
1150 	}
1151 
1152 	if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
1153 		ipseclog((LOG_WARNING,
1154 		    "IPv6 ESP input: packet too short\n"));
1155 		IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1156 		goto bad;
1157 	}
1158 
1159 #ifndef PULLDOWN_TEST
1160 	IP6_EXTHDR_CHECK(m, off, (int)(esplen + ivlen), goto bad);  /*XXX*/
1161 #else
1162 	IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen);
1163 	if (esp == NULL) {
1164 		IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1165 		m = NULL;
1166 		goto bad;
1167 	}
1168 #endif
1169 	ip6 = mtod(m, struct ip6_hdr *);        /*set it again just in case*/
1170 
1171 	/*
1172 	 * pre-compute and cache intermediate key
1173 	 */
1174 	if (esp_schedule(algo, sav) != 0) {
1175 		IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1176 		goto bad;
1177 	}
1178 
1179 	/*
1180 	 * decrypt the packet.
1181 	 */
1182 	if (!algo->decrypt) {
1183 		panic("internal error: no decrypt function");
1184 	}
1185 	KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_START, 0, 0, 0, 0, 0);
1186 	if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
1187 		/* m is already freed */
1188 		m = NULL;
1189 		ipseclog((LOG_ERR, "decrypt fail in IPv6 ESP input: %s\n",
1190 		    ipsec_logsastr(sav)));
1191 		IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1192 		KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
1193 		goto bad;
1194 	}
1195 	KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 2, 0, 0, 0, 0);
1196 	IPSEC_STAT_INCREMENT(ipsec6stat.in_esphist[sav->alg_enc]);
1197 
1198 	m->m_flags |= M_DECRYPTED;
1199 
1200 	if (algo->finalizedecrypt) {
1201 		if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) {
1202 			ipseclog((LOG_ERR, "esp6 packet decryption ICV failure: %s\n",
1203 			    ipsec_logsastr(sav)));
1204 			IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
1205 			KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
1206 			goto bad;
1207 		} else {
1208 			m->m_flags |= M_AUTHIPDGM;
1209 			IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthsucc);
1210 
1211 			/*
1212 			 * update replay window.
1213 			 */
1214 			if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[replay_index] != NULL) {
1215 				if (ipsec_updatereplay(seq, sav, (u_int8_t)replay_index)) {
1216 					IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
1217 					goto bad;
1218 				}
1219 			}
1220 		}
1221 	}
1222 
1223 	/*
1224 	 * find the trailer of the ESP.
1225 	 */
1226 	m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
1227 	    (caddr_t)&esptail);
1228 	nxt = esptail.esp_nxt;
1229 	taillen = esptail.esp_padlen + sizeof(esptail);
1230 
1231 	if (m->m_pkthdr.len < taillen
1232 	    || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) {    /*?*/
1233 		ipseclog((LOG_WARNING,
1234 		    "bad pad length in IPv6 ESP input: %s %s\n",
1235 		    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
1236 		IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1237 		goto bad;
1238 	}
1239 
1240 	/* strip off the trailing pad area. */
1241 	m_adj(m, -taillen);
1242 	ip6 = mtod(m, struct ip6_hdr *);
1243 	ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - taillen);
1244 
1245 	if (proto == IPPROTO_UDP) {
1246 		// offset includes the outer ip and udp header lengths.
1247 		if (m->m_len < off) {
1248 			m = m_pullup(m, off);
1249 			if (!m) {
1250 				ipseclog((LOG_DEBUG,
1251 				    "IPv6 ESP input: invalid udp encapsulated ESP packet length\n"));
1252 				IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1253 				goto bad;
1254 			}
1255 			ip6 = mtod(m, struct ip6_hdr *);
1256 		}
1257 
1258 		// check the UDP encap header to detect changes in the source port, and then strip the header
1259 		off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
1260 		// if peer is behind nat and this is the latest esp packet
1261 		if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
1262 		    (sav->flags & SADB_X_EXT_OLD) == 0 &&
1263 		    seq && sav->replay[replay_index] &&
1264 		    seq >= sav->replay[replay_index]->lastseq) {
1265 			struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip6 + off);
1266 			if (encap_uh->uh_sport &&
1267 			    ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
1268 				sav->remote_ike_port = ntohs(encap_uh->uh_sport);
1269 			}
1270 		}
1271 		ip6 = esp6_input_strip_udp_encap(m, off);
1272 		esp = (struct esp *)(void *)(((u_int8_t *)ip6) + off);
1273 	}
1274 
1275 
1276 	/* was it transmitted over the IPsec tunnel SA? */
1277 	if (ipsec6_tunnel_validate(m, (int)(off + esplen + ivlen), nxt, sav, &ifamily)) {
1278 		ifaddr_t ifa;
1279 		struct sockaddr_storage addr;
1280 		u_int8_t inner_ip_proto = 0;
1281 
1282 		/*
1283 		 * strip off all the headers that precedes ESP header.
1284 		 *	IP6 xx ESP IP6' payload -> IP6' payload
1285 		 *
1286 		 * XXX more sanity checks
1287 		 * XXX relationship with gif?
1288 		 */
1289 		u_int32_t flowinfo;     /*net endian*/
1290 		flowinfo = ip6->ip6_flow;
1291 		m_adj(m, (int)(off + esplen + ivlen));
1292 		if (ifamily == AF_INET6) {
1293 			struct sockaddr_in6 *ip6addr;
1294 
1295 			if (m->m_len < sizeof(*ip6)) {
1296 #ifndef PULLDOWN_TEST
1297 				/*
1298 				 * m_pullup is prohibited in KAME IPv6 input processing
1299 				 * but there's no other way!
1300 				 */
1301 #else
1302 				/* okay to pullup in m_pulldown style */
1303 #endif
1304 				m = m_pullup(m, sizeof(*ip6));
1305 				if (!m) {
1306 					IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1307 					goto bad;
1308 				}
1309 			}
1310 			ip6 = mtod(m, struct ip6_hdr *);
1311 			/* ECN consideration. */
1312 			if (ip6_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip6->ip6_flow) == 0) {
1313 				IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1314 				goto bad;
1315 			}
1316 			if (!key_checktunnelsanity(sav, AF_INET6,
1317 			    (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
1318 				ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
1319 				    "in IPv6 ESP input: %s %s\n",
1320 				    ipsec6_logpacketstr(ip6, spi),
1321 				    ipsec_logsastr(sav)));
1322 				IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1323 				goto bad;
1324 			}
1325 
1326 			inner_ip_proto = ip6->ip6_nxt;
1327 
1328 			bzero(&addr, sizeof(addr));
1329 			ip6addr = (__typeof__(ip6addr)) & addr;
1330 			ip6addr->sin6_family = AF_INET6;
1331 			ip6addr->sin6_len = sizeof(*ip6addr);
1332 			ip6addr->sin6_addr = ip6->ip6_dst;
1333 		} else if (ifamily == AF_INET) {
1334 			struct sockaddr_in *ipaddr;
1335 
1336 			if (m->m_len < sizeof(*ip)) {
1337 				m = m_pullup(m, sizeof(*ip));
1338 				if (!m) {
1339 					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1340 					goto bad;
1341 				}
1342 			}
1343 
1344 			u_int8_t otos;
1345 			int sum;
1346 
1347 			ip = mtod(m, struct ip *);
1348 			otos = ip->ip_tos;
1349 			/* ECN consideration. */
1350 			if (ip46_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip->ip_tos) == 0) {
1351 				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1352 				goto bad;
1353 			}
1354 
1355 			if (otos != ip->ip_tos) {
1356 				sum = ~ntohs(ip->ip_sum) & 0xffff;
1357 				sum += (~otos & 0xffff) + ip->ip_tos;
1358 				sum = (sum >> 16) + (sum & 0xffff);
1359 				sum += (sum >> 16); /* add carry */
1360 				ip->ip_sum = htons(~sum & 0xffff);
1361 			}
1362 
1363 			if (!key_checktunnelsanity(sav, AF_INET,
1364 			    (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
1365 				ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
1366 				    "in ESP input: %s %s\n",
1367 				    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
1368 				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1369 				goto bad;
1370 			}
1371 
1372 			inner_ip_proto = ip->ip_p;
1373 
1374 			bzero(&addr, sizeof(addr));
1375 			ipaddr = (__typeof__(ipaddr)) & addr;
1376 			ipaddr->sin_family = AF_INET;
1377 			ipaddr->sin_len = sizeof(*ipaddr);
1378 			ipaddr->sin_addr = ip->ip_dst;
1379 		}
1380 
1381 		key_sa_recordxfer(sav, m->m_pkthdr.len);
1382 		if (ipsec_incr_history_count(m, IPPROTO_ESP, spi) != 0 ||
1383 		    ipsec_incr_history_count(m, IPPROTO_IPV6, 0) != 0) {
1384 			IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
1385 			goto bad;
1386 		}
1387 
1388 		// update the receiving interface address based on the inner address
1389 		ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
1390 		if (ifa) {
1391 			m->m_pkthdr.rcvif = ifa->ifa_ifp;
1392 			ifa_remref(ifa);
1393 		}
1394 
1395 		// Input via IPsec interface
1396 		lck_mtx_lock(sadb_mutex);
1397 		ifnet_t ipsec_if = sav->sah->ipsec_if;
1398 		if (ipsec_if != NULL) {
1399 			// If an interface is found, add a reference count before dropping the lock
1400 			ifnet_reference(ipsec_if);
1401 		}
1402 		lck_mtx_unlock(sadb_mutex);
1403 
1404 		if ((m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) == PKTF_WAKE_PKT) {
1405 			if_ports_used_match_mbuf(m->m_pkthdr.rcvif, ifamily, m);
1406 			if (m->m_pkthdr.rcvif == NULL) {
1407 				ipseclog((LOG_ERR, "no input interface for ipsec wake packet\n"));
1408 			}
1409 		}
1410 
1411 		if (ipsec_if != NULL) {
1412 			esp_input_log(m, sav, spi, seq);
1413 
1414 			// Return mbuf
1415 			if (interface != NULL &&
1416 			    interface == ipsec_if) {
1417 				ifnet_release(ipsec_if);
1418 				goto done;
1419 			}
1420 
1421 			errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
1422 			ifnet_release(ipsec_if);
1423 
1424 			if (inject_error == 0) {
1425 				m = NULL;
1426 				nxt = IPPROTO_DONE;
1427 				goto done;
1428 			} else {
1429 				goto bad;
1430 			}
1431 		}
1432 
1433 		if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0) {
1434 			goto bad;
1435 		}
1436 		nxt = IPPROTO_DONE;
1437 	} else {
1438 		/*
1439 		 * strip off ESP header and IV.
1440 		 * even in m_pulldown case, we need to strip off ESP so that
1441 		 * we can always compute checksum for AH correctly.
1442 		 */
1443 		u_int16_t stripsiz;
1444 		char *prvnxtp;
1445 
1446 		/*
1447 		 * Set the next header field of the previous header correctly.
1448 		 */
1449 		prvnxtp = ip6_get_prevhdr(m, off); /* XXX */
1450 		*prvnxtp = (u_int8_t)nxt;
1451 
1452 		VERIFY(esplen + ivlen <= UINT16_MAX);
1453 		stripsiz = (u_int16_t)(esplen + ivlen);
1454 
1455 		ip6 = mtod(m, struct ip6_hdr *);
1456 		if (m->m_len >= stripsiz + off) {
1457 			ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off);
1458 			m->m_data += stripsiz;
1459 			m->m_len -= stripsiz;
1460 			m->m_pkthdr.len -= stripsiz;
1461 		} else {
1462 			/*
1463 			 * this comes with no copy if the boundary is on
1464 			 * cluster
1465 			 */
1466 			struct mbuf *n;
1467 
1468 			n = m_split(m, off, M_DONTWAIT);
1469 			if (n == NULL) {
1470 				/* m is retained by m_split */
1471 				goto bad;
1472 			}
1473 			m_adj(n, stripsiz);
1474 			/* m_cat does not update m_pkthdr.len */
1475 			m->m_pkthdr.len += n->m_pkthdr.len;
1476 			m_cat(m, n);
1477 		}
1478 
1479 #ifndef PULLDOWN_TEST
1480 		/*
1481 		 * KAME requires that the packet to be contiguous on the
1482 		 * mbuf.  We need to make that sure.
1483 		 * this kind of code should be avoided.
1484 		 * XXX other conditions to avoid running this part?
1485 		 */
1486 		if (m->m_len != m->m_pkthdr.len) {
1487 			struct mbuf *n = NULL;
1488 			int maxlen;
1489 
1490 			MGETHDR(n, M_DONTWAIT, MT_HEADER);      /* MAC-OK */
1491 			maxlen = MHLEN;
1492 			if (n) {
1493 				M_COPY_PKTHDR(n, m);
1494 			}
1495 			if (n && m->m_pkthdr.len > maxlen) {
1496 				MCLGET(n, M_DONTWAIT);
1497 				maxlen = MCLBYTES;
1498 				if ((n->m_flags & M_EXT) == 0) {
1499 					m_free(n);
1500 					n = NULL;
1501 				}
1502 			}
1503 			if (!n) {
1504 				printf("esp6_input: mbuf allocation failed\n");
1505 				goto bad;
1506 			}
1507 
1508 			if (m->m_pkthdr.len <= maxlen) {
1509 				m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t));
1510 				n->m_len = m->m_pkthdr.len;
1511 				n->m_pkthdr.len = m->m_pkthdr.len;
1512 				n->m_next = NULL;
1513 				m_freem(m);
1514 			} else {
1515 				m_copydata(m, 0, maxlen, mtod(n, caddr_t));
1516 				n->m_len = maxlen;
1517 				n->m_pkthdr.len = m->m_pkthdr.len;
1518 				n->m_next = m;
1519 				m_adj(m, maxlen);
1520 				m->m_flags &= ~M_PKTHDR;
1521 			}
1522 			m = n;
1523 		}
1524 #endif
1525 		ip6 = mtod(m, struct ip6_hdr *);
1526 		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
1527 
1528 		key_sa_recordxfer(sav, m->m_pkthdr.len);
1529 		if (ipsec_incr_history_count(m, IPPROTO_ESP, spi) != 0) {
1530 			IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
1531 			goto bad;
1532 		}
1533 
1534 		/*
1535 		 * Set the csum valid flag, if we authenticated the
1536 		 * packet, the payload shouldn't be corrupt unless
1537 		 * it was corrupted before being signed on the other
1538 		 * side.
1539 		 */
1540 		if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
1541 			m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1542 			m->m_pkthdr.csum_data = 0xFFFF;
1543 			_CASSERT(offsetof(struct pkthdr, csum_data) == offsetof(struct pkthdr, csum_rx_val));
1544 		}
1545 
1546 		// Input via IPsec interface
1547 		lck_mtx_lock(sadb_mutex);
1548 		ifnet_t ipsec_if = sav->sah->ipsec_if;
1549 		if (ipsec_if != NULL) {
1550 			// If an interface is found, add a reference count before dropping the lock
1551 			ifnet_reference(ipsec_if);
1552 		}
1553 		lck_mtx_unlock(sadb_mutex);
1554 		if (ipsec_if != NULL) {
1555 			esp_input_log(m, sav, spi, seq);
1556 
1557 			if ((m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) == PKTF_WAKE_PKT) {
1558 				if_ports_used_match_mbuf(ipsec_if, PF_INET6, m);
1559 			}
1560 
1561 			// Return mbuf
1562 			if (interface != NULL &&
1563 			    interface == ipsec_if) {
1564 				ifnet_release(ipsec_if);
1565 				goto done;
1566 			}
1567 
1568 			errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
1569 			ifnet_release(ipsec_if);
1570 
1571 			if (inject_error == 0) {
1572 				m = NULL;
1573 				nxt = IPPROTO_DONE;
1574 				goto done;
1575 			} else {
1576 				goto bad;
1577 			}
1578 		} else {
1579 			if ((m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) == PKTF_WAKE_PKT) {
1580 				if_ports_used_match_mbuf(m->m_pkthdr.rcvif, PF_INET, m);
1581 				if (m->m_pkthdr.rcvif == NULL) {
1582 					ipseclog((LOG_ERR, "no input interface for ipsec wake packet\n"));
1583 				}
1584 			}
1585 		}
1586 	}
1587 
1588 done:
1589 	*offp = off;
1590 	*mp = m;
1591 	if (sav) {
1592 		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1593 		    printf("DP esp6_input call free SA:0x%llx\n",
1594 		    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
1595 		key_freesav(sav, KEY_SADB_UNLOCKED);
1596 	}
1597 	IPSEC_STAT_INCREMENT(ipsec6stat.in_success);
1598 	return nxt;
1599 
1600 bad:
1601 	if (sav) {
1602 		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1603 		    printf("DP esp6_input call free SA:0x%llx\n",
1604 		    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
1605 		key_freesav(sav, KEY_SADB_UNLOCKED);
1606 	}
1607 	if (m) {
1608 		m_freem(m);
1609 	}
1610 	if (interface != NULL) {
1611 		*mp = NULL;
1612 	}
1613 	return IPPROTO_DONE;
1614 }
1615 
1616 void
esp6_ctlinput(int cmd,struct sockaddr * sa,void * d,__unused struct ifnet * ifp)1617 esp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp)
1618 {
1619 	union sockaddr_in_4_6 src = {};
1620 	union sockaddr_in_4_6 dst = {};
1621 	const struct newesp *espp;
1622 	struct newesp esp;
1623 	struct ip6ctlparam *ip6cp = NULL, ip6cp1;
1624 	struct secasvar *sav;
1625 	struct ip6_hdr *ip6;
1626 	struct mbuf *m;
1627 	struct sockaddr_in6 *sa6_src, *sa6_dst;
1628 	int off = 0;
1629 
1630 	if (sa->sa_family != AF_INET6 ||
1631 	    sa->sa_len != sizeof(struct sockaddr_in6)) {
1632 		return;
1633 	}
1634 	if ((unsigned)cmd >= PRC_NCMDS) {
1635 		return;
1636 	}
1637 
1638 	/* if the parameter is from icmp6, decode it. */
1639 	if (d != NULL) {
1640 		ip6cp = (struct ip6ctlparam *)d;
1641 		m = ip6cp->ip6c_m;
1642 		ip6 = ip6cp->ip6c_ip6;
1643 		off = ip6cp->ip6c_off;
1644 	} else {
1645 		m = NULL;
1646 		ip6 = NULL;
1647 	}
1648 
1649 	if (ip6) {
1650 		/*
1651 		 * Notify the error to all possible sockets via pfctlinput2.
1652 		 * Since the upper layer information (such as protocol type,
1653 		 * source and destination ports) is embedded in the encrypted
1654 		 * data and might have been cut, we can't directly call
1655 		 * an upper layer ctlinput function. However, the pcbnotify
1656 		 * function will consider source and destination addresses
1657 		 * as well as the flow info value, and may be able to find
1658 		 * some PCB that should be notified.
1659 		 * Although pfctlinput2 will call esp6_ctlinput(), there is
1660 		 * no possibility of an infinite loop of function calls,
1661 		 * because we don't pass the inner IPv6 header.
1662 		 */
1663 		bzero(&ip6cp1, sizeof(ip6cp1));
1664 		ip6cp1.ip6c_src = ip6cp->ip6c_src;
1665 		pfctlinput2(cmd, sa, (void *)&ip6cp1);
1666 
1667 		/*
1668 		 * Then go to special cases that need ESP header information.
1669 		 * XXX: We assume that when ip6 is non NULL,
1670 		 * M and OFF are valid.
1671 		 */
1672 
1673 		/* check if we can safely examine src and dst ports */
1674 		if (m->m_pkthdr.len < off + sizeof(esp)) {
1675 			return;
1676 		}
1677 
1678 		if (m->m_len < off + sizeof(esp)) {
1679 			/*
1680 			 * this should be rare case,
1681 			 * so we compromise on this copy...
1682 			 */
1683 			m_copydata(m, off, sizeof(esp), (caddr_t)&esp);
1684 			espp = &esp;
1685 		} else {
1686 			espp = (struct newesp*)(void *)(mtod(m, caddr_t) + off);
1687 		}
1688 
1689 		if (cmd == PRC_MSGSIZE) {
1690 			int valid = 0;
1691 
1692 			/*
1693 			 * Check to see if we have a valid SA corresponding to
1694 			 * the address in the ICMP message payload.
1695 			 */
1696 			sa6_src = ip6cp->ip6c_src;
1697 			sa6_dst = SIN6(sa);
1698 			ipsec_fill_ip6_sockaddr_4_6(&src, &sa6_src->sin6_addr, 0);
1699 			ipsec_fill_ip6_sockaddr_4_6_with_ifscope(&dst,
1700 			    &sa6_dst->sin6_addr, 0, sa6_dst->sin6_scope_id);
1701 
1702 			sav = key_allocsa(&src, &dst, IPPROTO_ESP, espp->esp_spi, NULL);
1703 			if (sav) {
1704 				if (sav->state == SADB_SASTATE_MATURE ||
1705 				    sav->state == SADB_SASTATE_DYING) {
1706 					valid++;
1707 				}
1708 				key_freesav(sav, KEY_SADB_UNLOCKED);
1709 			}
1710 
1711 			/* XXX Further validation? */
1712 
1713 			/*
1714 			 * Depending on the value of "valid" and routing table
1715 			 * size (mtudisc_{hi,lo}wat), we will:
1716 			 * - recalcurate the new MTU and create the
1717 			 *   corresponding routing entry, or
1718 			 * - ignore the MTU change notification.
1719 			 */
1720 			icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
1721 		}
1722 	} else {
1723 		/* we normally notify any pcb here */
1724 	}
1725 }
1726 
1727 int
esp_kpipe_input(ifnet_t interface,kern_packet_t sph,kern_packet_t dph)1728 esp_kpipe_input(ifnet_t interface, kern_packet_t sph, kern_packet_t dph)
1729 {
1730 	union sockaddr_in_4_6 src = {}, dst = {};
1731 	struct newesp *esp = NULL;
1732 	struct esptail *esptail = NULL;
1733 	struct secasvar *sav = NULL;
1734 	struct ipsecstat *stat = NULL;
1735 	const struct esp_algorithm *e_algo = NULL;
1736 	const struct ah_algorithm *a_algo = NULL;
1737 	uint8_t *sbaddr = NULL, *dbaddr = NULL;
1738 	uint8_t *src_payload = NULL, *dst_payload = NULL;
1739 	uint8_t *iv = NULL;
1740 	size_t iphlen = 0;
1741 	size_t auth_size = 0;
1742 	size_t esphlen = 0;
1743 	u_int32_t replay_index = 0;
1744 	int ivlen = 0;
1745 	int err = 0;
1746 	uint32_t slen = 0;
1747 	uint32_t dlim = 0;
1748 	uint8_t dscp = 0, nxt_proto = 0;
1749 
1750 	KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_START, 0, 0, 0, 0, 0);
1751 
1752 	kern_buflet_t __single sbuf = __packet_get_next_buflet(sph, NULL);
1753 	VERIFY(sbuf != NULL);
1754 	slen = __buflet_get_data_length(sbuf);
1755 	sbaddr = ipsec_kern_buflet_to_buffer(sbuf);
1756 
1757 	kern_buflet_t __single dbuf = __packet_get_next_buflet(dph, NULL);
1758 	VERIFY(dbuf != NULL);
1759 	dbaddr = ipsec_kern_buflet_to_buffer(dbuf);
1760 	dlim = __buflet_get_data_limit(dbuf);
1761 	dlim -= __buflet_get_data_offset(dbuf);
1762 
1763 	struct ip *ip_hdr = (struct ip *)(void *)sbaddr;
1764 	ASSERT(IP_HDR_ALIGNED_P(ip_hdr));
1765 
1766 	u_int ip_vers = IP_VHL_V(ip_hdr->ip_vhl);
1767 	switch (ip_vers) {
1768 	case IPVERSION: {
1769 #ifdef _IP_VHL
1770 		iphlen = IP_VHL_HL(ip_hdr->ip_vhl) << 2;
1771 #else /* _IP_VHL */
1772 		iphlen = ip_hdr->ip_hl << 2;
1773 #endif /* _IP_VHL */
1774 		nxt_proto = ip_hdr->ip_p;
1775 		dscp = ip_hdr->ip_tos >> IPTOS_DSCP_SHIFT;
1776 		ipsec_fill_ip_sockaddr_4_6(&src, ip_hdr->ip_src, 0);
1777 		ipsec_fill_ip_sockaddr_4_6(&dst, ip_hdr->ip_dst, 0);
1778 		stat = &ipsecstat;
1779 		break;
1780 	}
1781 	case 6: {
1782 		struct ip6_hdr *ip6 = (struct ip6_hdr *)sbaddr;
1783 		iphlen = sizeof(struct ip6_hdr);
1784 		nxt_proto = ip6->ip6_nxt;
1785 		dscp = (ntohl(ip6->ip6_flow) & IP6FLOW_DSCP_MASK) >> IP6FLOW_DSCP_SHIFT;
1786 		ipsec_fill_ip6_sockaddr_4_6(&src, &ip6->ip6_src, 0);
1787 		ipsec_fill_ip6_sockaddr_4_6_with_ifscope(&dst, &ip6->ip6_dst, 0,
1788 		    interface != NULL ? interface->if_index : IFSCOPE_UNKNOWN);
1789 		stat = &ipsec6stat;
1790 		if (__improbable(ip6->ip6_plen == 0)) {
1791 			esp_packet_log_err("esp kpipe input, jumbogram not supported");
1792 			IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1793 			goto bad;
1794 		}
1795 
1796 		break;
1797 	}
1798 	default: {
1799 		esp_log_info("esp kpipe input, ipversion %u, SPI=%x",
1800 		    ip_vers, ntohl(sav->spi));
1801 		err = EINVAL;
1802 		goto bad;
1803 	}
1804 	}
1805 
1806 	if (__improbable(dlim < slen)) {
1807 		esp_packet_log_err("esp kpipe input, output buffer is short(%u), "
1808 		    "compared to input buffer(%u) SPI=%x\n", dlim, slen, ntohl(sav->spi));
1809 		IPSEC_STAT_INCREMENT(stat->in_inval);
1810 		err = EINVAL;
1811 		goto bad;
1812 	}
1813 
1814 	if (__improbable(nxt_proto != IPPROTO_ESP)) {
1815 		esp_packet_log_err("esp kpipe input, invalid nxt proto %u", nxt_proto);
1816 		IPSEC_STAT_INCREMENT(stat->in_inval);
1817 		err = EINVAL;
1818 		goto bad;
1819 	}
1820 
1821 	if (__improbable(slen < (iphlen + sizeof(struct newesp)))) {
1822 		esp_packet_log_err("esp kpipe input, slen too short %u", slen);
1823 		IPSEC_STAT_INCREMENT(stat->in_inval);
1824 		err = EINVAL;
1825 		goto bad;
1826 	}
1827 
1828 	esp = (struct newesp *)(void *)(sbaddr + iphlen);
1829 
1830 	sav = key_allocsa(&src, &dst, IPPROTO_ESP, esp->esp_spi, interface);
1831 	if (__improbable(sav == NULL)) {
1832 		if (ipsec_debug) {
1833 			char src_buf[MAX_IPv6_STR_LEN] = {};
1834 			char dst_buf[MAX_IPv6_STR_LEN] = {};
1835 			if (src.sa.sa_family == AF_INET) {
1836 				inet_ntop(AF_INET, &src.sin.sin_addr, src_buf, sizeof(src_buf));
1837 				inet_ntop(AF_INET, &dst.sin.sin_addr, dst_buf, sizeof(src_buf));
1838 			} else {
1839 				inet_ntop(AF_INET6, &src.sin6.sin6_addr, src_buf, sizeof(src_buf));
1840 				inet_ntop(AF_INET6, &dst.sin6.sin6_addr, dst_buf, sizeof(src_buf));
1841 			}
1842 			esp_packet_log_err("esp kpipe input, no SA found for SPI=%x, "
1843 			    "packet %s<->%s", ntohl(esp->esp_spi), src_buf, dst_buf);
1844 		}
1845 		IPSEC_STAT_INCREMENT(stat->in_nosa);
1846 		err = ENOENT;
1847 		goto bad;
1848 	}
1849 
1850 	if (__improbable(sav->sah == NULL)) {
1851 		esp_log_err("esp kpipe input, sah is NULL\n");
1852 		IPSEC_STAT_INCREMENT(stat->in_nosa);
1853 		err = ENOENT;
1854 		goto bad;
1855 	}
1856 	if (__improbable(sav->sah->saidx.mode != IPSEC_MODE_TRANSPORT)) {
1857 		esp_log_err("ipsec tunnel mode not supported "
1858 		    "in kpipe mode, SPI=%x\n", ntohl(sav->spi));
1859 		IPSEC_STAT_INCREMENT(stat->in_nosa);
1860 		err = EINVAL;
1861 		goto bad;
1862 	}
1863 	if (__improbable((sav->flags & (SADB_X_EXT_OLD | SADB_X_EXT_DERIV |
1864 	    SADB_X_EXT_NATT | SADB_X_EXT_NATT_MULTIPLEUSERS |
1865 	    SADB_X_EXT_CYCSEQ | SADB_X_EXT_PMASK)) != 0)) {
1866 		esp_log_err("sadb flag %x not supported in "
1867 		    "kpipe mode, SPI=%x\n", sav->flags, ntohl(sav->spi));
1868 		IPSEC_STAT_INCREMENT(stat->in_nosa);
1869 		err = EINVAL;
1870 		goto bad;
1871 	}
1872 	if (__improbable(sav->state != SADB_SASTATE_MATURE &&
1873 	    sav->state != SADB_SASTATE_DYING)) {
1874 		esp_log_info("esp kpipe input, invalid SA state %u, SPI=%x",
1875 		    sav->state, ntohl(sav->spi));
1876 		IPSEC_STAT_INCREMENT(stat->in_inval);
1877 		err = EINVAL;
1878 		goto bad;
1879 	}
1880 
1881 	if ((sav->flags2 & SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) ==
1882 	    SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) {
1883 		replay_index = ntohl(esp->esp_seq) >> PER_TC_REPLAY_WINDOW_SN_SHIFT;
1884 	}
1885 
1886 	if (__improbable(sav->replay[replay_index] == NULL)) {
1887 		esp_log_err("esp kpipe input, missing replay window, SPI=%x\n",
1888 		    ntohl(sav->spi));
1889 		IPSEC_STAT_INCREMENT(stat->in_inval);
1890 		err = EINVAL;
1891 		goto bad;
1892 	}
1893 
1894 	/*
1895 	 * check for sequence number
1896 	 */
1897 	if (__improbable(!ipsec_chkreplay(ntohl(esp->esp_seq), sav,
1898 	    (uint8_t)replay_index))) {
1899 		esp_packet_log_err("esp kpipe input, replay packet, "
1900 		    "seq(%u), idx(%u), SPI=%x\n", ntohl(esp->esp_seq),
1901 		    replay_index, ntohl(sav->spi));
1902 		IPSEC_STAT_INCREMENT(stat->in_espreplay);
1903 		err = EINVAL;
1904 		goto bad;
1905 	}
1906 
1907 	e_algo = esp_algorithm_lookup(sav->alg_enc);
1908 	if (__improbable(e_algo == NULL)) {
1909 		esp_log_info("esp kpipe input, unsupported algorithm(%d) for, SPI=%x",
1910 		    sav->alg_enc, ntohl(sav->spi));
1911 		IPSEC_STAT_INCREMENT(stat->in_inval);
1912 		err = EINVAL;
1913 		goto bad;
1914 	}
1915 
1916 	if ((sav->flags & SADB_X_EXT_IIV) == 0) {
1917 		ivlen = sav->ivlen;
1918 		if (__improbable(ivlen < 0)) {
1919 			panic("esp kpipe input: invalid ivlen(%d) SPI=%x",
1920 			    ivlen, ntohl(sav->spi));
1921 			/* NOTREACHED */
1922 			__builtin_unreachable();
1923 		}
1924 
1925 		iv = sbaddr + iphlen + sizeof(struct newesp);
1926 	}
1927 
1928 	esphlen = sizeof(struct newesp) + ivlen;
1929 
1930 	if (sav->alg_auth != SADB_X_AALG_NULL &&
1931 	    sav->alg_auth != SADB_AALG_NONE) {
1932 		a_algo = ah_algorithm_lookup(sav->alg_auth);
1933 		if (a_algo != NULL && sav->key_auth != NULL) {
1934 			auth_size = (((*a_algo->sumsiz)(sav) + 3) & ~(4 - 1));
1935 			VERIFY(auth_size < AH_MAXSUMSIZE);
1936 
1937 			if (__improbable(slen < iphlen + esphlen + auth_size)) {
1938 				esp_packet_log_err("esp kpipe input, input buffer "
1939 				    "does not contain auth, SPI=%x\n", ntohl(sav->spi));
1940 				IPSEC_STAT_INCREMENT(stat->in_espauthfail);
1941 				err = EBADMSG;
1942 				goto bad;
1943 			}
1944 
1945 			/*
1946 			 * Use destination buffer to store authentication
1947 			 * tag for comparison.
1948 			 */
1949 			uint8_t *auth_buf = dbaddr + dlim - auth_size;
1950 			if (__improbable((err = esp_auth_data(sav, (uint8_t *)esp,
1951 			    slen - iphlen - auth_size, auth_buf, auth_size)) != 0)) {
1952 				esp_packet_log_err("esp kpipe input, esp auth "
1953 				    "data failed, SPI=%x\n", ntohl(sav->spi));
1954 				IPSEC_STAT_INCREMENT(stat->in_espauthfail);
1955 				err = EBADMSG;
1956 				goto bad;
1957 			}
1958 
1959 			if (__improbable(cc_cmp_safe(auth_size, auth_buf,
1960 			    sbaddr + slen - auth_size) != 0)) {
1961 				esp_packet_log_err("esp kpipe input, auth compare "
1962 				    "failed, SPI=%x\n", ntohl(sav->spi));
1963 				IPSEC_STAT_INCREMENT(stat->in_espauthfail);
1964 				err = EBADMSG;
1965 				goto bad;
1966 			}
1967 
1968 			IPSEC_STAT_INCREMENT(stat->in_espauthsucc);
1969 		}
1970 	} else if (e_algo->finalizedecrypt) {
1971 		auth_size = e_algo->icvlen;
1972 	}
1973 
1974 	if (__improbable(slen <= (iphlen + esphlen + sizeof(struct esptail) +
1975 	    auth_size))) {
1976 		esp_packet_log_err("esp kpipe input, input buffer is short(%u), "
1977 		    "to contain ivlen and esptail SPI=%x\n", slen, ntohl(sav->spi));
1978 		IPSEC_STAT_INCREMENT(stat->in_inval);
1979 		err = EBADMSG;
1980 		goto bad;
1981 	}
1982 
1983 	/*
1984 	 * pre-compute and cache intermediate key
1985 	 */
1986 	if (__improbable((err = esp_schedule(e_algo, sav)) != 0)) {
1987 		esp_log_info("esp schedule failed %d, SPI=%x\n", err, ntohl(sav->spi));
1988 		IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1989 		goto bad;
1990 	}
1991 
1992 	VERIFY(e_algo->decrypt_pkt);
1993 	KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_START, 0, 0, 0, 0, 0);
1994 	src_payload = sbaddr + iphlen + esphlen;
1995 	dst_payload = dbaddr + iphlen;
1996 	uint16_t encrypted_payload_len = (uint16_t)(slen - iphlen - esphlen - auth_size);
1997 	if (__improbable((err = (*e_algo->decrypt_pkt)(sav, src_payload,
1998 	    encrypted_payload_len, esp, iv, ivlen, dst_payload,
1999 	    encrypted_payload_len)) != 0)) {
2000 		esp_packet_log_err("esp kpipe input: decryption failed, SPI=%x\n",
2001 		    ntohl(sav->spi));
2002 		IPSEC_STAT_INCREMENT(stat->in_inval);
2003 		KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
2004 		goto bad;
2005 	}
2006 	KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 2, 0, 0, 0, 0);
2007 	IPSEC_STAT_INCREMENT(stat->in_esphist[sav->alg_enc]);
2008 
2009 	if (e_algo->finalizedecrypt) {
2010 		if (__improbable((err = (*e_algo->finalizedecrypt)(sav,
2011 		    sbaddr + slen - auth_size, e_algo->icvlen)) != 0)) {
2012 			esp_packet_log_err("esp kpipe input: ICV failed, SPI=%x\n",
2013 			    ntohl(sav->spi));
2014 			IPSEC_STAT_INCREMENT(stat->in_espauthfail);
2015 			KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 3, 0, 0, 0, 0);
2016 			goto bad;
2017 		}
2018 
2019 		IPSEC_STAT_INCREMENT(stat->in_espauthsucc);
2020 		KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 4, 0, 0, 0, 0);
2021 	}
2022 
2023 	if (__improbable(ipsec_updatereplay(ntohl(esp->esp_seq), sav,
2024 	    (uint8_t)replay_index))) {
2025 		esp_packet_log_err("esp kpipe input: update replay failed, SPI=%x\n",
2026 		    ntohl(sav->spi));
2027 		IPSEC_STAT_INCREMENT(stat->in_espreplay);
2028 		goto bad;
2029 	}
2030 
2031 	esptail = (struct esptail *)(dst_payload + encrypted_payload_len -
2032 	    sizeof(struct esptail));
2033 	nxt_proto = esptail->esp_nxt;
2034 
2035 	size_t taillen = sizeof(struct esptail) + esptail->esp_padlen;
2036 	if (__improbable(encrypted_payload_len <= taillen)) {
2037 		esp_packet_log_err("esp kpipe input: encrypted payload len %u, "
2038 		    "is invalid, taillen %zu, SPI=%x\n",
2039 		    encrypted_payload_len, taillen, ntohl(sav->spi));
2040 		IPSEC_STAT_INCREMENT(stat->in_inval);
2041 		goto bad;
2042 	}
2043 
2044 	uint16_t decrypted_payload_len = encrypted_payload_len - (uint16_t)taillen;
2045 
2046 	switch (ip_vers) {
2047 	case IPVERSION: {
2048 		struct ip *ip = (struct ip *)(void *)dbaddr;
2049 		ASSERT(IP_HDR_ALIGNED_P(ip));
2050 		ip->ip_p = nxt_proto;
2051 		ip->ip_len = htons((uint16_t)(iphlen + decrypted_payload_len));
2052 		ip->ip_sum = 0; // Recalculate checksum
2053 		ip->ip_sum = in_cksum_hdr_opt(ip);
2054 		break;
2055 	}
2056 	case 6: {
2057 		struct ip6_hdr *ip6 = (struct ip6_hdr *)dbaddr;
2058 		ip6->ip6_plen = htons((uint16_t)decrypted_payload_len);
2059 		ip6->ip6_nxt = nxt_proto;
2060 		break;
2061 	}
2062 	}
2063 
2064 	if (nxt_proto == IPPROTO_TCP || nxt_proto == IPPROTO_UDP) {
2065 		__packet_set_inet_checksum(dph,
2066 		    PACKET_CSUM_DATA_VALID | PACKET_CSUM_PSEUDO_HDR, 0,
2067 		    0xFFFF, FALSE);
2068 	}
2069 
2070 	__buflet_set_data_length(dbuf, (uint16_t)(iphlen + decrypted_payload_len));
2071 
2072 	key_sa_recordxfer(sav, iphlen + decrypted_payload_len);
2073 	IPSEC_STAT_INCREMENT(stat->in_success);
2074 	key_freesav(sav, KEY_SADB_UNLOCKED);
2075 	KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 1, 0, 0, 0, 0);
2076 
2077 	return 0;
2078 bad:
2079 	if (sav != NULL) {
2080 		key_freesav(sav, KEY_SADB_UNLOCKED);
2081 		sav = NULL;
2082 	}
2083 
2084 	KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 2, err, 0, 0, 0);
2085 	return err;
2086 }
2087