/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#ifndef __SAD_H__
#define __SAD_H__

#include <rte_ipsec_sad.h>

/* number of entries in the per-lcore SA cache */
#define SA_CACHE_SZ	128
/* map an SPI to a cache slot; mask is expected to be cache size - 1 */
#define SPI2IDX(spi, mask)	((spi) & (mask))

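/*
 * Per-lcore cache of recently used SAs, one table per address family.
 * An SA with SPI s lives in slot SPI2IDX(s, mask); mask == 0 means
 * the cache is disabled.
 */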
struct ipsec_sad_cache {
	struct ipsec_sa **v4;
	struct ipsec_sa **v6;
	uint32_t mask;
};

RTE_DECLARE_PER_LCORE(struct ipsec_sad_cache, sad_cache);

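/*
 * Create the IPv4 and IPv6 SAD lookup tables on the given socket,
 * sized according to the SA counters.
 */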
int ipsec_sad_create(const char *name, struct ipsec_sad *sad,
	int socket_id, struct ipsec_sa_cnt *sa_cnt);

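/* Add an SA to the SAD table matching its address family. */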
int ipsec_sad_add(struct ipsec_sad *sad, struct ipsec_sa *sa);

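/*
 * Allocate the calling lcore's SA caches, nb_cache_ent entries per
 * address family; 0 disables caching.
 */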
int ipsec_sad_lcore_cache_init(uint32_t nb_cache_ent);

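/*
 * Check that an SA matches the packet's outer addresses.
 * Transport-mode SAs match unconditionally; tunnel-mode SAs must match
 * the source and destination addresses of their address family.
 * Only the header selected by is_v4 is dereferenced, so the other
 * pointer may be NULL.
 */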
static inline int
cmp_sa_key(struct ipsec_sa *sa, int is_v4, struct rte_ipv4_hdr *ipv4,
	struct rte_ipv6_hdr *ipv6)
{
	int sa_type = WITHOUT_TRANSPORT_VERSION(sa->flags);

	if ((sa_type == TRANSPORT) ||
			/* IPv4 check */
			(is_v4 && (sa_type == IP4_TUNNEL) &&
			(sa->src.ip.ip4 == ipv4->src_addr) &&
			(sa->dst.ip.ip4 == ipv4->dst_addr)) ||
			/* IPv6 check */
			(!is_v4 && (sa_type == IP6_TUNNEL) &&
			(!memcmp(sa->src.ip.ip6.ip6, ipv6->src_addr, 16)) &&
			(!memcmp(sa->dst.ip.ip6.ip6, ipv6->dst_addr, 16))))
		return 1;

	return 0;
}

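/* Remember an SA in the cache slot derived from its SPI. */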
static inline void
sa_cache_update(struct ipsec_sa **sa_cache, struct ipsec_sa *sa, uint32_t mask)
{
	uint32_t cache_idx;

	/* SAD cache is disabled */
	if (mask == 0)
		return;

	cache_idx = SPI2IDX(sa->spi, mask);
	sa_cache[cache_idx] = sa;
}

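/*
 * Resolve an SA for each inbound ESP packet in the burst: try the
 * per-lcore cache first, collect the misses and resolve them with one
 * bulk rte_ipsec_sad_lookup() per address family. sa[i] is set to the
 * matching SA, or to NULL if none was found.
 */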
static inline void
sad_lookup(struct ipsec_sad *sad, struct rte_mbuf *pkts[],
	void *sa[], uint16_t nb_pkts)
{
	uint32_t i;
	uint32_t nb_v4 = 0, nb_v6 = 0;
	struct rte_esp_hdr *esp;
	struct rte_ipv4_hdr *ipv4;
	struct rte_ipv6_hdr *ipv6;
	struct rte_ipsec_sadv4_key	v4[nb_pkts];
	struct rte_ipsec_sadv6_key	v6[nb_pkts];
	int v4_idxes[nb_pkts];
	int v6_idxes[nb_pkts];
	const union rte_ipsec_sad_key	*keys_v4[nb_pkts];
	const union rte_ipsec_sad_key	*keys_v6[nb_pkts];
	void *v4_res[nb_pkts];
	void *v6_res[nb_pkts];
	uint32_t spi, cache_idx;
	struct ipsec_sad_cache *cache;
	struct ipsec_sa *cached_sa;
	uint16_t udp_hdr_len = 0;
	int is_ipv4;

	cache = &RTE_PER_LCORE(sad_cache);

	/* split received packets by address family into two arrays */
	for (i = 0; i < nb_pkts; i++) {
		ipv4 = rte_pktmbuf_mtod(pkts[i], struct rte_ipv4_hdr *);
		ipv6 = rte_pktmbuf_mtod(pkts[i], struct rte_ipv6_hdr *);
		/* skip the UDP header for UDP-encapsulated ESP */
		if ((pkts[i]->packet_type &
				(RTE_PTYPE_TUNNEL_MASK | RTE_PTYPE_L4_MASK)) ==
				MBUF_PTYPE_TUNNEL_ESP_IN_UDP)
			udp_hdr_len = sizeof(struct rte_udp_hdr);
		else
			/* reset: a previous packet may have set it */
			udp_hdr_len = 0;
		esp = rte_pktmbuf_mtod_offset(pkts[i], struct rte_esp_hdr *,
				pkts[i]->l3_len + udp_hdr_len);

		is_ipv4 = pkts[i]->packet_type & RTE_PTYPE_L3_IPV4;
		spi = rte_be_to_cpu_32(esp->spi);
		cache_idx = SPI2IDX(spi, cache->mask);

		if (is_ipv4) {
			cached_sa = (cache->mask != 0) ?
				cache->v4[cache_idx] : NULL;
			/* check SAD cache entry */
			if ((cached_sa != NULL) && (cached_sa->spi == spi)) {
				if (cmp_sa_key(cached_sa, 1, ipv4, ipv6)) {
					/* cache hit */
					sa[i] = cached_sa;
					continue;
				}
			}
			/*
			 * cache miss: prepare a SAD key to proceed
			 * with the full SAD lookup
			 */
			v4[nb_v4].spi = esp->spi;
			v4[nb_v4].dip = ipv4->dst_addr;
			v4[nb_v4].sip = ipv4->src_addr;
			keys_v4[nb_v4] = (const union rte_ipsec_sad_key *)
						&v4[nb_v4];
			v4_idxes[nb_v4++] = i;
		} else {
			cached_sa = (cache->mask != 0) ?
				cache->v6[cache_idx] : NULL;
			/* check SAD cache entry */
			if ((cached_sa != NULL) && (cached_sa->spi == spi)) {
				if (cmp_sa_key(cached_sa, 0, ipv4, ipv6)) {
					/* cache hit */
					sa[i] = cached_sa;
					continue;
				}
			}
			/* cache miss: prepare a SAD key for the full lookup */
			v6[nb_v6].spi = esp->spi;
			memcpy(v6[nb_v6].dip, ipv6->dst_addr,
					sizeof(ipv6->dst_addr));
			memcpy(v6[nb_v6].sip, ipv6->src_addr,
					sizeof(ipv6->src_addr));
			keys_v6[nb_v6] = (const union rte_ipsec_sad_key *)
						&v6[nb_v6];
			v6_idxes[nb_v6++] = i;
		}
	}

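	/* bulk lookup for all packets that missed the per-lcore cache */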
	if (nb_v4 != 0)
		rte_ipsec_sad_lookup(sad->sad_v4, keys_v4, v4_res, nb_v4);
	if (nb_v6 != 0)
		rte_ipsec_sad_lookup(sad->sad_v6, keys_v6, v6_res, nb_v6);

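	/*
	 * Check each result against the packet it came from and refresh
	 * the cache on every successful match.
	 */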
	for (i = 0; i < nb_v4; i++) {
		ipv4 = rte_pktmbuf_mtod(pkts[v4_idxes[i]],
			struct rte_ipv4_hdr *);
		if ((v4_res[i] != NULL) &&
				(cmp_sa_key(v4_res[i], 1, ipv4, NULL))) {
			sa[v4_idxes[i]] = v4_res[i];
			sa_cache_update(cache->v4, (struct ipsec_sa *)v4_res[i],
				cache->mask);
		} else
			sa[v4_idxes[i]] = NULL;
	}
	for (i = 0; i < nb_v6; i++) {
		ipv6 = rte_pktmbuf_mtod(pkts[v6_idxes[i]],
			struct rte_ipv6_hdr *);
		if ((v6_res[i] != NULL) &&
				(cmp_sa_key(v6_res[i], 0, NULL, ipv6))) {
			sa[v6_idxes[i]] = v6_res[i];
			sa_cache_update(cache->v6, (struct ipsec_sa *)v6_res[i],
				cache->mask);
		} else
			sa[v6_idxes[i]] = NULL;
	}
}
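
/*
 * Usage sketch (hypothetical, not part of the original file): a typical
 * inbound path resolves SAs for a received burst in one call and drops
 * packets without a match. MAX_PKT_BURST and free_pkts() are assumed
 * to follow the usual ipsec-secgw definitions.
 *
 *	void *sa[MAX_PKT_BURST];
 *	uint16_t i;
 *
 *	sad_lookup(&sad, pkts, sa, nb_rx);
 *	for (i = 0; i < nb_rx; i++)
 *		if (sa[i] == NULL)
 *			free_pkts(&pkts[i], 1);
 */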

#endif /* __SAD_H__ */