/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_udp.h>

#include "gro_vxlan_tcp4.h"

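/*
 * This file implements GRO reassembly for TCP/IPv4 packets encapsulated in
 * VXLAN tunnels. Packets are grouped into flows keyed on both the outer
 * (Ethernet/IPv4/UDP/VXLAN) and inner (Ethernet/IPv4/TCP) headers, and
 * consecutive segments of a flow are merged in place.
 *
 * Illustrative lifecycle (a sketch only, assuming the table is driven
 * directly rather than through the generic rte_gro_reassemble() path;
 * error handling omitted):
 *
 *	void *tbl = gro_vxlan_tcp4_tbl_create(rte_socket_id(), 4, 32);
 *	...
 *	// ret == 1: merged; ret == 0: stored; ret < 0: unprocessable
 *	int32_t ret = gro_vxlan_tcp4_reassemble(pkt, tbl, rte_rdtsc());
 *	...
 *	nb = gro_vxlan_tcp4_tbl_timeout_flush(tbl, flush_ts, out, RTE_DIM(out));
 *	gro_vxlan_tcp4_tbl_destroy(tbl);
 */
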
void *
gro_vxlan_tcp4_tbl_create(uint16_t socket_id,
		uint16_t max_flow_num,
		uint16_t max_item_per_flow)
{
	struct gro_vxlan_tcp4_tbl *tbl;
	size_t size;
	uint32_t entries_num, i;

	entries_num = max_flow_num * max_item_per_flow;
	entries_num = RTE_MIN(entries_num, GRO_VXLAN_TCP4_TBL_MAX_ITEM_NUM);

	if (entries_num == 0)
		return NULL;

	tbl = rte_zmalloc_socket(__func__,
			sizeof(struct gro_vxlan_tcp4_tbl),
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl == NULL)
		return NULL;

	size = sizeof(struct gro_vxlan_tcp4_item) * entries_num;
	tbl->items = rte_zmalloc_socket(__func__,
			size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl->items == NULL) {
		rte_free(tbl);
		return NULL;
	}
	tbl->max_item_num = entries_num;

	size = sizeof(struct gro_vxlan_tcp4_flow) * entries_num;
	tbl->flows = rte_zmalloc_socket(__func__,
			size,
			RTE_CACHE_LINE_SIZE,
			socket_id);
	if (tbl->flows == NULL) {
		rte_free(tbl->items);
		rte_free(tbl);
		return NULL;
	}

	for (i = 0; i < entries_num; i++)
		tbl->flows[i].start_index = INVALID_ARRAY_INDEX;
	tbl->max_flow_num = entries_num;

	return tbl;
}

void
gro_vxlan_tcp4_tbl_destroy(void *tbl)
{
	struct gro_vxlan_tcp4_tbl *vxlan_tbl = tbl;

	if (vxlan_tbl) {
		rte_free(vxlan_tbl->items);
		rte_free(vxlan_tbl->flows);
	}
	rte_free(vxlan_tbl);
}

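/*
 * Return the index of an unused item slot (marked by a NULL firstseg), or
 * INVALID_ARRAY_INDEX if all slots are in use.
 */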
static inline uint32_t
find_an_empty_item(struct gro_vxlan_tcp4_tbl *tbl)
{
	uint32_t max_item_num = tbl->max_item_num, i;

	for (i = 0; i < max_item_num; i++)
		if (tbl->items[i].inner_item.firstseg == NULL)
			return i;
	return INVALID_ARRAY_INDEX;
}

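/*
 * Return the index of an unused flow slot (marked by an invalid
 * start_index), or INVALID_ARRAY_INDEX if all slots are in use.
 */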
static inline uint32_t
find_an_empty_flow(struct gro_vxlan_tcp4_tbl *tbl)
{
	uint32_t max_flow_num = tbl->max_flow_num, i;

	for (i = 0; i < max_flow_num; i++)
		if (tbl->flows[i].start_index == INVALID_ARRAY_INDEX)
			return i;
	return INVALID_ARRAY_INDEX;
}

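/*
 * Store a packet in an empty item slot, recording the TCP/IP state needed
 * for later neighbor checks. If prev_idx is valid, link the new item into
 * the flow's item chain right after it. Return the slot index, or
 * INVALID_ARRAY_INDEX if the table is full.
 */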
static inline uint32_t
insert_new_item(struct gro_vxlan_tcp4_tbl *tbl,
		struct rte_mbuf *pkt,
		uint64_t start_time,
		uint32_t prev_idx,
		uint32_t sent_seq,
		uint16_t outer_ip_id,
		uint16_t ip_id,
		uint8_t outer_is_atomic,
		uint8_t is_atomic)
{
	uint32_t item_idx;

	item_idx = find_an_empty_item(tbl);
	if (unlikely(item_idx == INVALID_ARRAY_INDEX))
		return INVALID_ARRAY_INDEX;

	tbl->items[item_idx].inner_item.firstseg = pkt;
	tbl->items[item_idx].inner_item.lastseg = rte_pktmbuf_lastseg(pkt);
	tbl->items[item_idx].inner_item.start_time = start_time;
	tbl->items[item_idx].inner_item.next_pkt_idx = INVALID_ARRAY_INDEX;
	tbl->items[item_idx].inner_item.sent_seq = sent_seq;
	tbl->items[item_idx].inner_item.ip_id = ip_id;
	tbl->items[item_idx].inner_item.nb_merged = 1;
	tbl->items[item_idx].inner_item.is_atomic = is_atomic;
	tbl->items[item_idx].outer_ip_id = outer_ip_id;
	tbl->items[item_idx].outer_is_atomic = outer_is_atomic;
	tbl->item_num++;

	/* If the previous packet exists, chain the new one with it. */
	if (prev_idx != INVALID_ARRAY_INDEX) {
		tbl->items[item_idx].inner_item.next_pkt_idx =
			tbl->items[prev_idx].inner_item.next_pkt_idx;
		tbl->items[prev_idx].inner_item.next_pkt_idx = item_idx;
	}

	return item_idx;
}

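/*
 * Release an item slot and unlink it from its flow's item chain. Return the
 * index of the next item in the chain.
 */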
static inline uint32_t
delete_item(struct gro_vxlan_tcp4_tbl *tbl,
		uint32_t item_idx,
		uint32_t prev_item_idx)
{
	uint32_t next_idx = tbl->items[item_idx].inner_item.next_pkt_idx;

	/* NULL indicates an empty item. */
	tbl->items[item_idx].inner_item.firstseg = NULL;
	tbl->item_num--;
	if (prev_item_idx != INVALID_ARRAY_INDEX)
		tbl->items[prev_item_idx].inner_item.next_pkt_idx = next_idx;

	return next_idx;
}

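/*
 * Store a flow key in an empty flow slot and point the flow at its first
 * item. Return the slot index, or INVALID_ARRAY_INDEX if the table is full.
 */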
static inline uint32_t
insert_new_flow(struct gro_vxlan_tcp4_tbl *tbl,
		struct vxlan_tcp4_flow_key *src,
		uint32_t item_idx)
{
	struct vxlan_tcp4_flow_key *dst;
	uint32_t flow_idx;

	flow_idx = find_an_empty_flow(tbl);
	if (unlikely(flow_idx == INVALID_ARRAY_INDEX))
		return INVALID_ARRAY_INDEX;

	dst = &(tbl->flows[flow_idx].key);

	rte_ether_addr_copy(&(src->inner_key.eth_saddr),
			&(dst->inner_key.eth_saddr));
	rte_ether_addr_copy(&(src->inner_key.eth_daddr),
			&(dst->inner_key.eth_daddr));
	dst->inner_key.ip_src_addr = src->inner_key.ip_src_addr;
	dst->inner_key.ip_dst_addr = src->inner_key.ip_dst_addr;
	dst->inner_key.recv_ack = src->inner_key.recv_ack;
	dst->inner_key.src_port = src->inner_key.src_port;
	dst->inner_key.dst_port = src->inner_key.dst_port;

	dst->vxlan_hdr.vx_flags = src->vxlan_hdr.vx_flags;
	dst->vxlan_hdr.vx_vni = src->vxlan_hdr.vx_vni;
	rte_ether_addr_copy(&(src->outer_eth_saddr), &(dst->outer_eth_saddr));
	rte_ether_addr_copy(&(src->outer_eth_daddr), &(dst->outer_eth_daddr));
	dst->outer_ip_src_addr = src->outer_ip_src_addr;
	dst->outer_ip_dst_addr = src->outer_ip_dst_addr;
	dst->outer_src_port = src->outer_src_port;
	dst->outer_dst_port = src->outer_dst_port;

	tbl->flows[flow_idx].start_index = item_idx;
	tbl->flow_num++;

	return flow_idx;
}

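/*
 * Two packets belong to the same flow only if the outer Ethernet, IPv4 and
 * UDP fields, the VXLAN flags and VNI, and the inner TCP/IPv4 flow all
 * match.
 */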
static inline int
is_same_vxlan_tcp4_flow(struct vxlan_tcp4_flow_key k1,
		struct vxlan_tcp4_flow_key k2)
{
	return (rte_is_same_ether_addr(&k1.outer_eth_saddr,
					&k2.outer_eth_saddr) &&
			rte_is_same_ether_addr(&k1.outer_eth_daddr,
					&k2.outer_eth_daddr) &&
			(k1.outer_ip_src_addr == k2.outer_ip_src_addr) &&
			(k1.outer_ip_dst_addr == k2.outer_ip_dst_addr) &&
			(k1.outer_src_port == k2.outer_src_port) &&
			(k1.outer_dst_port == k2.outer_dst_port) &&
			(k1.vxlan_hdr.vx_flags == k2.vxlan_hdr.vx_flags) &&
			(k1.vxlan_hdr.vx_vni == k2.vxlan_hdr.vx_vni) &&
			is_same_tcp4_flow(k1.inner_key, k2.inner_key));
}

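/*
 * Check whether the packet is a neighbor of the given item. Besides the
 * inner TCP sequence and header checks, the outer IPv4 ID must be
 * consecutive with the item's, unless the outer DF bit is set. Return 1 to
 * append the packet, -1 to prepend it, and 0 if it is not a neighbor.
 */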
static inline int
check_vxlan_seq_option(struct gro_vxlan_tcp4_item *item,
		struct rte_tcp_hdr *tcp_hdr,
		uint32_t sent_seq,
		uint16_t outer_ip_id,
		uint16_t ip_id,
		uint16_t tcp_hl,
		uint16_t tcp_dl,
		uint8_t outer_is_atomic,
		uint8_t is_atomic)
{
	struct rte_mbuf *pkt = item->inner_item.firstseg;
	int cmp;
	uint16_t l2_offset;

	/* Don't merge packets whose outer DF bits are different. */
	if (unlikely(item->outer_is_atomic ^ outer_is_atomic))
		return 0;

	l2_offset = pkt->outer_l2_len + pkt->outer_l3_len;
	cmp = check_seq_option(&item->inner_item, tcp_hdr, sent_seq, ip_id,
			tcp_hl, tcp_dl, l2_offset, is_atomic);
	if ((cmp > 0) && (outer_is_atomic ||
				(outer_ip_id == item->outer_ip_id + 1)))
		/* Append the new packet. */
		return 1;
	else if ((cmp < 0) && (outer_is_atomic ||
				(outer_ip_id + item->inner_item.nb_merged ==
				 item->outer_ip_id)))
		/* Prepend the new packet. */
		return -1;

	return 0;
}

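/*
 * Try to merge the packet with the item's inner TCP/IPv4 packet. On
 * success, keep the outer IPv4 ID of whichever packet now ends the
 * sequence.
 */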
static inline int
merge_two_vxlan_tcp4_packets(struct gro_vxlan_tcp4_item *item,
		struct rte_mbuf *pkt,
		int cmp,
		uint32_t sent_seq,
		uint16_t outer_ip_id,
		uint16_t ip_id)
{
	if (merge_two_tcp4_packets(&item->inner_item, pkt, cmp, sent_seq,
				ip_id, pkt->outer_l2_len +
				pkt->outer_l3_len)) {
		/* Update the outer IPv4 ID to the larger value. */
		item->outer_ip_id = cmp > 0 ? outer_ip_id : item->outer_ip_id;
		return 1;
	}

	return 0;
}

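/*
 * Rewrite the length fields of a merged packet, walking inward: the outer
 * IPv4 total_length is pkt_len minus the outer Ethernet header, the outer
 * UDP dgram_len additionally drops the outer IPv4 header, and the inner
 * IPv4 total_length additionally drops l2_len (which, per the mbuf
 * convention for tunnel packets, covers the UDP, VXLAN and inner Ethernet
 * headers).
 */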
static inline void
update_vxlan_header(struct gro_vxlan_tcp4_item *item)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_udp_hdr *udp_hdr;
	struct rte_mbuf *pkt = item->inner_item.firstseg;
	uint16_t len;

	/* Update the outer IPv4 header. */
	len = pkt->pkt_len - pkt->outer_l2_len;
	ipv4_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
			pkt->outer_l2_len);
	ipv4_hdr->total_length = rte_cpu_to_be_16(len);

	/* Update the outer UDP header. */
	len -= pkt->outer_l3_len;
	udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr + pkt->outer_l3_len);
	udp_hdr->dgram_len = rte_cpu_to_be_16(len);

	/* Update the inner IPv4 header. */
	len -= pkt->l2_len;
	ipv4_hdr = (struct rte_ipv4_hdr *)((char *)udp_hdr + pkt->l2_len);
	ipv4_hdr->total_length = rte_cpu_to_be_16(len);
}

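/*
 * Process a VXLAN-encapsulated TCP/IPv4 packet: merge it into a stored
 * packet of the same flow if possible, otherwise store it in the table.
 * Return 1 if the packet was merged, 0 if it was stored (the table holds
 * the mbuf until it is flushed), and -1 if the packet can't be processed
 * by GRO and should be returned to the caller immediately.
 */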
int32_t
gro_vxlan_tcp4_reassemble(struct rte_mbuf *pkt,
		struct gro_vxlan_tcp4_tbl *tbl,
		uint64_t start_time)
{
	struct rte_ether_hdr *outer_eth_hdr, *eth_hdr;
	struct rte_ipv4_hdr *outer_ipv4_hdr, *ipv4_hdr;
	struct rte_tcp_hdr *tcp_hdr;
	struct rte_udp_hdr *udp_hdr;
	struct rte_vxlan_hdr *vxlan_hdr;
	uint32_t sent_seq;
	int32_t tcp_dl;
	uint16_t frag_off, outer_ip_id, ip_id;
	uint8_t outer_is_atomic, is_atomic;

	struct vxlan_tcp4_flow_key key;
	uint32_t cur_idx, prev_idx, item_idx;
	uint32_t i, max_flow_num, remaining_flow_num;
	int cmp;
	uint16_t hdr_len;
	uint8_t find;

	/*
	 * Don't process the packet whose TCP header length is greater
	 * than 60 bytes or less than 20 bytes.
	 */
	if (unlikely(INVALID_TCP_HDRLEN(pkt->l4_len)))
		return -1;

	outer_eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	outer_ipv4_hdr = (struct rte_ipv4_hdr *)((char *)outer_eth_hdr +
			pkt->outer_l2_len);
	udp_hdr = (struct rte_udp_hdr *)((char *)outer_ipv4_hdr +
			pkt->outer_l3_len);
	vxlan_hdr = (struct rte_vxlan_hdr *)((char *)udp_hdr +
			sizeof(struct rte_udp_hdr));
	eth_hdr = (struct rte_ether_hdr *)((char *)vxlan_hdr +
			sizeof(struct rte_vxlan_hdr));
	ipv4_hdr = (struct rte_ipv4_hdr *)((char *)udp_hdr + pkt->l2_len);
	tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);

	/*
	 * Don't process the packet which has FIN, SYN, RST, PSH, URG,
	 * ECE or CWR set.
	 */
	if (tcp_hdr->tcp_flags != RTE_TCP_ACK_FLAG)
		return -1;

	hdr_len = pkt->outer_l2_len + pkt->outer_l3_len + pkt->l2_len +
		pkt->l3_len + pkt->l4_len;
	/*
	 * Don't process the packet whose payload length is less than or
	 * equal to 0.
	 */
	tcp_dl = pkt->pkt_len - hdr_len;
	if (tcp_dl <= 0)
		return -1;

	/*
	 * Save the IPv4 ID of a packet whose DF bit is 0. For a packet
	 * whose DF bit is 1, the IPv4 ID is ignored.
	 */
	frag_off = rte_be_to_cpu_16(outer_ipv4_hdr->fragment_offset);
	outer_is_atomic =
		(frag_off & RTE_IPV4_HDR_DF_FLAG) == RTE_IPV4_HDR_DF_FLAG;
	outer_ip_id = outer_is_atomic ? 0 :
		rte_be_to_cpu_16(outer_ipv4_hdr->packet_id);
	frag_off = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
	is_atomic = (frag_off & RTE_IPV4_HDR_DF_FLAG) == RTE_IPV4_HDR_DF_FLAG;
	ip_id = is_atomic ? 0 : rte_be_to_cpu_16(ipv4_hdr->packet_id);

	sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);

	rte_ether_addr_copy(&(eth_hdr->src_addr), &(key.inner_key.eth_saddr));
	rte_ether_addr_copy(&(eth_hdr->dst_addr), &(key.inner_key.eth_daddr));
	key.inner_key.ip_src_addr = ipv4_hdr->src_addr;
	key.inner_key.ip_dst_addr = ipv4_hdr->dst_addr;
	key.inner_key.recv_ack = tcp_hdr->recv_ack;
	key.inner_key.src_port = tcp_hdr->src_port;
	key.inner_key.dst_port = tcp_hdr->dst_port;

	key.vxlan_hdr.vx_flags = vxlan_hdr->vx_flags;
	key.vxlan_hdr.vx_vni = vxlan_hdr->vx_vni;
	rte_ether_addr_copy(&(outer_eth_hdr->src_addr), &(key.outer_eth_saddr));
	rte_ether_addr_copy(&(outer_eth_hdr->dst_addr), &(key.outer_eth_daddr));
	key.outer_ip_src_addr = outer_ipv4_hdr->src_addr;
	key.outer_ip_dst_addr = outer_ipv4_hdr->dst_addr;
	key.outer_src_port = udp_hdr->src_port;
	key.outer_dst_port = udp_hdr->dst_port;

	/* Search for a matched flow. */
	max_flow_num = tbl->max_flow_num;
	remaining_flow_num = tbl->flow_num;
	find = 0;
	for (i = 0; i < max_flow_num && remaining_flow_num; i++) {
		if (tbl->flows[i].start_index != INVALID_ARRAY_INDEX) {
			if (is_same_vxlan_tcp4_flow(tbl->flows[i].key, key)) {
				find = 1;
				break;
			}
			remaining_flow_num--;
		}
	}

	/*
	 * Can't find a matched flow. Insert a new flow and store the
	 * packet into the flow.
	 */
	if (find == 0) {
		item_idx = insert_new_item(tbl, pkt, start_time,
				INVALID_ARRAY_INDEX, sent_seq, outer_ip_id,
				ip_id, outer_is_atomic, is_atomic);
		if (item_idx == INVALID_ARRAY_INDEX)
			return -1;
		if (insert_new_flow(tbl, &key, item_idx) ==
				INVALID_ARRAY_INDEX) {
			/*
			 * Failed to insert a new flow, so
			 * delete the stored packet.
			 */
			delete_item(tbl, item_idx, INVALID_ARRAY_INDEX);
			return -1;
		}
		return 0;
	}

	/* Check all packets in the flow and try to find a neighbor. */
	cur_idx = tbl->flows[i].start_index;
	prev_idx = cur_idx;
	do {
		cmp = check_vxlan_seq_option(&(tbl->items[cur_idx]), tcp_hdr,
				sent_seq, outer_ip_id, ip_id, pkt->l4_len,
				tcp_dl, outer_is_atomic, is_atomic);
		if (cmp) {
			if (merge_two_vxlan_tcp4_packets(&(tbl->items[cur_idx]),
						pkt, cmp, sent_seq,
						outer_ip_id, ip_id))
				return 1;
			/*
			 * Can't merge the two packets, as the merged
			 * packet length would exceed the max value.
			 * Insert the packet into the flow.
			 */
			if (insert_new_item(tbl, pkt, start_time, prev_idx,
						sent_seq, outer_ip_id,
						ip_id, outer_is_atomic,
						is_atomic) ==
					INVALID_ARRAY_INDEX)
				return -1;
			return 0;
		}
		prev_idx = cur_idx;
		cur_idx = tbl->items[cur_idx].inner_item.next_pkt_idx;
	} while (cur_idx != INVALID_ARRAY_INDEX);

	/* Can't find a neighbor. Insert the packet into the flow. */
	if (insert_new_item(tbl, pkt, start_time, prev_idx, sent_seq,
				outer_ip_id, ip_id, outer_is_atomic,
				is_atomic) == INVALID_ARRAY_INDEX)
		return -1;

	return 0;
}

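/*
 * Flush packets whose arrival time is not after flush_timestamp, writing at
 * most nb_out mbufs to the out array. Headers of merged packets are updated
 * before they are handed back. Return the number of packets flushed.
 */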
uint16_t
gro_vxlan_tcp4_tbl_timeout_flush(struct gro_vxlan_tcp4_tbl *tbl,
		uint64_t flush_timestamp,
		struct rte_mbuf **out,
		uint16_t nb_out)
{
	uint16_t k = 0;
	uint32_t i, j;
	uint32_t max_flow_num = tbl->max_flow_num;

	for (i = 0; i < max_flow_num; i++) {
		if (unlikely(tbl->flow_num == 0))
			return k;

		j = tbl->flows[i].start_index;
		while (j != INVALID_ARRAY_INDEX) {
			if (tbl->items[j].inner_item.start_time <=
					flush_timestamp) {
				out[k++] = tbl->items[j].inner_item.firstseg;
				if (tbl->items[j].inner_item.nb_merged > 1)
					update_vxlan_header(&(tbl->items[j]));
				/*
				 * Delete the item and get the next packet
				 * index.
				 */
				j = delete_item(tbl, j, INVALID_ARRAY_INDEX);
				tbl->flows[i].start_index = j;
				if (j == INVALID_ARRAY_INDEX)
					tbl->flow_num--;

				if (unlikely(k == nb_out))
					return k;
			} else
				/*
				 * The remaining packets in the flow won't
				 * time out. Go on to check other flows.
				 */
				break;
		}
	}
	return k;
}

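/* Return the number of packets currently held in the table. */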
uint32_t
gro_vxlan_tcp4_tbl_pkt_count(void *tbl)
{
	struct gro_vxlan_tcp4_tbl *gro_tbl = tbl;

	if (gro_tbl)
		return gro_tbl->item_num;

	return 0;
}