/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>

#include "rte_gro.h"
#include "gro_tcp4.h"
#include "gro_udp4.h"
#include "gro_vxlan_tcp4.h"
#include "gro_vxlan_udp4.h"

typedef void *(*gro_tbl_create_fn)(uint16_t socket_id,
		uint16_t max_flow_num,
		uint16_t max_item_per_flow);
typedef void (*gro_tbl_destroy_fn)(void *tbl);
typedef uint32_t (*gro_tbl_pkt_count_fn)(void *tbl);

static gro_tbl_create_fn tbl_create_fn[RTE_GRO_TYPE_MAX_NUM] = {
		gro_tcp4_tbl_create, gro_vxlan_tcp4_tbl_create,
		gro_udp4_tbl_create, gro_vxlan_udp4_tbl_create, NULL};
static gro_tbl_destroy_fn tbl_destroy_fn[RTE_GRO_TYPE_MAX_NUM] = {
		gro_tcp4_tbl_destroy, gro_vxlan_tcp4_tbl_destroy,
		gro_udp4_tbl_destroy, gro_vxlan_udp4_tbl_destroy,
		NULL};
static gro_tbl_pkt_count_fn tbl_pkt_count_fn[RTE_GRO_TYPE_MAX_NUM] = {
		gro_tcp4_tbl_pkt_count, gro_vxlan_tcp4_tbl_pkt_count,
		gro_udp4_tbl_pkt_count, gro_vxlan_udp4_tbl_pkt_count,
		NULL};

#define IS_IPV4_TCP_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_TCP) == RTE_PTYPE_L4_TCP) && \
		(RTE_ETH_IS_TUNNEL_PKT(ptype) == 0))

#define IS_IPV4_UDP_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) && \
		(RTE_ETH_IS_TUNNEL_PKT(ptype) == 0))

#define IS_IPV4_VXLAN_TCP4_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) && \
		((ptype & RTE_PTYPE_TUNNEL_VXLAN) == \
		 RTE_PTYPE_TUNNEL_VXLAN) && \
		((ptype & RTE_PTYPE_INNER_L4_TCP) == \
		 RTE_PTYPE_INNER_L4_TCP) && \
		(((ptype & RTE_PTYPE_INNER_L3_MASK) == \
		  RTE_PTYPE_INNER_L3_IPV4) || \
		 ((ptype & RTE_PTYPE_INNER_L3_MASK) == \
		  RTE_PTYPE_INNER_L3_IPV4_EXT) || \
		 ((ptype & RTE_PTYPE_INNER_L3_MASK) == \
		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN)))

#define IS_IPV4_VXLAN_UDP4_PKT(ptype) (RTE_ETH_IS_IPV4_HDR(ptype) && \
		((ptype & RTE_PTYPE_L4_UDP) == RTE_PTYPE_L4_UDP) && \
		((ptype & RTE_PTYPE_TUNNEL_VXLAN) == \
		 RTE_PTYPE_TUNNEL_VXLAN) && \
		((ptype & RTE_PTYPE_INNER_L4_UDP) == \
		 RTE_PTYPE_INNER_L4_UDP) && \
		(((ptype & RTE_PTYPE_INNER_L3_MASK) == \
		  RTE_PTYPE_INNER_L3_IPV4) || \
		 ((ptype & RTE_PTYPE_INNER_L3_MASK) == \
		  RTE_PTYPE_INNER_L3_IPV4_EXT) || \
		 ((ptype & RTE_PTYPE_INNER_L3_MASK) == \
		  RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN)))

/*
 * GRO context structure. It keeps the table structures, which are
 * used to merge packets, for different GRO types. Before using
 * rte_gro_reassemble(), applications need to create the GRO context
 * first.
 */
struct gro_ctx {
	/* GRO types to perform */
	uint64_t gro_types;
	/* reassembly tables */
	void *tbls[RTE_GRO_TYPE_MAX_NUM];
};
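/*
 * Illustrative context lifecycle (a sketch, not part of this file;
 * the parameter values below are assumptions): an application fills
 * in an rte_gro_param and creates one context, typically per lcore,
 * before calling rte_gro_reassemble().
 *
 *	struct rte_gro_param param = {
 *		.gro_types = RTE_GRO_TCP_IPV4 | RTE_GRO_UDP_IPV4,
 *		.max_flow_num = 1024,
 *		.max_item_per_flow = 32,
 *		.socket_id = rte_socket_id(),
 *	};
 *	void *gro_ctx = rte_gro_ctx_create(&param);
 *
 *	if (gro_ctx == NULL)
 *		rte_exit(EXIT_FAILURE, "cannot create GRO context\n");
 *	...
 *	rte_gro_ctx_destroy(gro_ctx);
 */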
void *
rte_gro_ctx_create(const struct rte_gro_param *param)
{
	struct gro_ctx *gro_ctx;
	gro_tbl_create_fn create_tbl_fn;
	uint64_t gro_type_flag = 0;
	uint64_t gro_types = 0;
	uint8_t i;

	gro_ctx = rte_zmalloc_socket(__func__,
			sizeof(struct gro_ctx),
			RTE_CACHE_LINE_SIZE,
			param->socket_id);
	if (gro_ctx == NULL)
		return NULL;

	for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
		gro_type_flag = 1ULL << i;
		if ((param->gro_types & gro_type_flag) == 0)
			continue;

		create_tbl_fn = tbl_create_fn[i];
		if (create_tbl_fn == NULL)
			continue;

		gro_ctx->tbls[i] = create_tbl_fn(param->socket_id,
				param->max_flow_num,
				param->max_item_per_flow);
		if (gro_ctx->tbls[i] == NULL) {
			/* destroy all created tables */
			gro_ctx->gro_types = gro_types;
			rte_gro_ctx_destroy(gro_ctx);
			return NULL;
		}
		gro_types |= gro_type_flag;
	}
	gro_ctx->gro_types = param->gro_types;

	return gro_ctx;
}

void
rte_gro_ctx_destroy(void *ctx)
{
	gro_tbl_destroy_fn destroy_tbl_fn;
	struct gro_ctx *gro_ctx = ctx;
	uint64_t gro_type_flag;
	uint8_t i;

	for (i = 0; i < RTE_GRO_TYPE_MAX_NUM; i++) {
		gro_type_flag = 1ULL << i;
		if ((gro_ctx->gro_types & gro_type_flag) == 0)
			continue;
		destroy_tbl_fn = tbl_destroy_fn[i];
		if (destroy_tbl_fn)
			destroy_tbl_fn(gro_ctx->tbls[i]);
	}
	rte_free(gro_ctx);
}
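/*
 * Usage sketch for the lightweight mode implemented by
 * rte_gro_reassemble_burst() below (illustrative only; the port,
 * queue and burst size are assumptions). The reassembly tables live
 * on the stack and all packets are flushed before returning, so no
 * GRO context or timeout handling is needed:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb_rx = rte_eth_rx_burst(0, 0, pkts, 32);
 *	nb_rx = rte_gro_reassemble_burst(pkts, nb_rx, &param);
 *	rte_eth_tx_burst(0, 0, pkts, nb_rx);
 */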
uint16_t
rte_gro_reassemble_burst(struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		const struct rte_gro_param *param)
{
	/* allocate a reassembly table for TCP/IPv4 GRO */
	struct gro_tcp4_tbl tcp_tbl;
	struct gro_tcp4_flow tcp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
	struct gro_tcp4_item tcp_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };

	/* allocate a reassembly table for UDP/IPv4 GRO */
	struct gro_udp4_tbl udp_tbl;
	struct gro_udp4_flow udp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
	struct gro_udp4_item udp_items[RTE_GRO_MAX_BURST_ITEM_NUM] = {{0} };

	/* allocate a reassembly table for VXLAN TCP GRO */
	struct gro_vxlan_tcp4_tbl vxlan_tcp_tbl;
	struct gro_vxlan_tcp4_flow vxlan_tcp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
	struct gro_vxlan_tcp4_item vxlan_tcp_items[RTE_GRO_MAX_BURST_ITEM_NUM]
			= {{{0}, 0, 0} };

	/* allocate a reassembly table for VXLAN UDP GRO */
	struct gro_vxlan_udp4_tbl vxlan_udp_tbl;
	struct gro_vxlan_udp4_flow vxlan_udp_flows[RTE_GRO_MAX_BURST_ITEM_NUM];
	struct gro_vxlan_udp4_item vxlan_udp_items[RTE_GRO_MAX_BURST_ITEM_NUM]
			= {{{0}} };

	struct rte_mbuf *unprocess_pkts[nb_pkts];
	uint32_t item_num;
	int32_t ret;
	uint16_t i, unprocess_num = 0, nb_after_gro = nb_pkts;
	uint8_t do_tcp4_gro = 0, do_vxlan_tcp_gro = 0, do_udp4_gro = 0,
		do_vxlan_udp_gro = 0;

	if (unlikely((param->gro_types & (RTE_GRO_IPV4_VXLAN_TCP_IPV4 |
					RTE_GRO_TCP_IPV4 |
					RTE_GRO_IPV4_VXLAN_UDP_IPV4 |
					RTE_GRO_UDP_IPV4)) == 0))
		return nb_pkts;

	/* Get the maximum number of packets the tables can hold */
	item_num = RTE_MIN(nb_pkts, (param->max_flow_num *
				param->max_item_per_flow));
	item_num = RTE_MIN(item_num, RTE_GRO_MAX_BURST_ITEM_NUM);

	if (param->gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) {
		for (i = 0; i < item_num; i++)
			vxlan_tcp_flows[i].start_index = INVALID_ARRAY_INDEX;

		vxlan_tcp_tbl.flows = vxlan_tcp_flows;
		vxlan_tcp_tbl.items = vxlan_tcp_items;
		vxlan_tcp_tbl.flow_num = 0;
		vxlan_tcp_tbl.item_num = 0;
		vxlan_tcp_tbl.max_flow_num = item_num;
		vxlan_tcp_tbl.max_item_num = item_num;
		do_vxlan_tcp_gro = 1;
	}

	if (param->gro_types & RTE_GRO_IPV4_VXLAN_UDP_IPV4) {
		for (i = 0; i < item_num; i++)
			vxlan_udp_flows[i].start_index = INVALID_ARRAY_INDEX;

		vxlan_udp_tbl.flows = vxlan_udp_flows;
		vxlan_udp_tbl.items = vxlan_udp_items;
		vxlan_udp_tbl.flow_num = 0;
		vxlan_udp_tbl.item_num = 0;
		vxlan_udp_tbl.max_flow_num = item_num;
		vxlan_udp_tbl.max_item_num = item_num;
		do_vxlan_udp_gro = 1;
	}

	if (param->gro_types & RTE_GRO_TCP_IPV4) {
		for (i = 0; i < item_num; i++)
			tcp_flows[i].start_index = INVALID_ARRAY_INDEX;

		tcp_tbl.flows = tcp_flows;
		tcp_tbl.items = tcp_items;
		tcp_tbl.flow_num = 0;
		tcp_tbl.item_num = 0;
		tcp_tbl.max_flow_num = item_num;
		tcp_tbl.max_item_num = item_num;
		do_tcp4_gro = 1;
	}

	if (param->gro_types & RTE_GRO_UDP_IPV4) {
		for (i = 0; i < item_num; i++)
			udp_flows[i].start_index = INVALID_ARRAY_INDEX;

		udp_tbl.flows = udp_flows;
		udp_tbl.items = udp_items;
		udp_tbl.flow_num = 0;
		udp_tbl.item_num = 0;
		udp_tbl.max_flow_num = item_num;
		udp_tbl.max_item_num = item_num;
		do_udp4_gro = 1;
	}

	for (i = 0; i < nb_pkts; i++) {
		/*
		 * The timestamp is ignored, since all packets
		 * will be flushed from the tables.
		 */
		if (IS_IPV4_VXLAN_TCP4_PKT(pkts[i]->packet_type) &&
				do_vxlan_tcp_gro) {
			ret = gro_vxlan_tcp4_reassemble(pkts[i],
							&vxlan_tcp_tbl, 0);
			if (ret > 0)
				/* merged successfully */
				nb_after_gro--;
			else if (ret < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else if (IS_IPV4_VXLAN_UDP4_PKT(pkts[i]->packet_type) &&
				do_vxlan_udp_gro) {
			ret = gro_vxlan_udp4_reassemble(pkts[i],
							&vxlan_udp_tbl, 0);
			if (ret > 0)
				/* merged successfully */
				nb_after_gro--;
			else if (ret < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else if (IS_IPV4_TCP_PKT(pkts[i]->packet_type) &&
				do_tcp4_gro) {
			ret = gro_tcp4_reassemble(pkts[i], &tcp_tbl, 0);
			if (ret > 0)
				/* merged successfully */
				nb_after_gro--;
			else if (ret < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else if (IS_IPV4_UDP_PKT(pkts[i]->packet_type) &&
				do_udp4_gro) {
			ret = gro_udp4_reassemble(pkts[i], &udp_tbl, 0);
			if (ret > 0)
				/* merged successfully */
				nb_after_gro--;
			else if (ret < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else
			unprocess_pkts[unprocess_num++] = pkts[i];
	}

	if ((nb_after_gro < nb_pkts)
		 || (unprocess_num < nb_pkts)) {
		i = 0;
		/* Flush all packets from the tables */
		if (do_vxlan_tcp_gro) {
			i = gro_vxlan_tcp4_tbl_timeout_flush(&vxlan_tcp_tbl,
					0, pkts, nb_pkts);
		}

		if (do_vxlan_udp_gro) {
			i += gro_vxlan_udp4_tbl_timeout_flush(&vxlan_udp_tbl,
					0, &pkts[i], nb_pkts - i);
		}

		if (do_tcp4_gro) {
			i += gro_tcp4_tbl_timeout_flush(&tcp_tbl, 0,
					&pkts[i], nb_pkts - i);
		}

		if (do_udp4_gro) {
			i += gro_udp4_tbl_timeout_flush(&udp_tbl, 0,
					&pkts[i], nb_pkts - i);
		}
		/* Copy unprocessed packets */
		if (unprocess_num > 0) {
			memcpy(&pkts[i], unprocess_pkts,
					sizeof(struct rte_mbuf *) *
					unprocess_num);
		}
		nb_after_gro = i + unprocess_num;
	}

	return nb_after_gro;
}
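/*
 * Usage sketch for the heavyweight mode implemented by
 * rte_gro_reassemble() below (illustrative only; sizes and the flush
 * condition are assumptions). Packets under reassembly stay in the
 * context tables, so the application must drain them periodically
 * with rte_gro_timeout_flush():
 *
 *	uint16_t nb_rx = rte_eth_rx_burst(0, 0, pkts, 32);
 *	uint16_t nb_unproc = rte_gro_reassemble(pkts, nb_rx, gro_ctx);
 *	... transmit pkts[0..nb_unproc-1], which GRO did not keep ...
 *	if (it is time to flush)
 *		nb_out = rte_gro_timeout_flush(gro_ctx, timeout_cycles,
 *				RTE_GRO_TCP_IPV4, out, RTE_DIM(out));
 */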
uint16_t
rte_gro_reassemble(struct rte_mbuf **pkts,
		uint16_t nb_pkts,
		void *ctx)
{
	struct rte_mbuf *unprocess_pkts[nb_pkts];
	struct gro_ctx *gro_ctx = ctx;
	void *tcp_tbl, *udp_tbl, *vxlan_tcp_tbl, *vxlan_udp_tbl;
	uint64_t current_time;
	uint16_t i, unprocess_num = 0;
	uint8_t do_tcp4_gro, do_vxlan_tcp_gro, do_udp4_gro, do_vxlan_udp_gro;

	if (unlikely((gro_ctx->gro_types & (RTE_GRO_IPV4_VXLAN_TCP_IPV4 |
					RTE_GRO_TCP_IPV4 |
					RTE_GRO_IPV4_VXLAN_UDP_IPV4 |
					RTE_GRO_UDP_IPV4)) == 0))
		return nb_pkts;

	tcp_tbl = gro_ctx->tbls[RTE_GRO_TCP_IPV4_INDEX];
	vxlan_tcp_tbl = gro_ctx->tbls[RTE_GRO_IPV4_VXLAN_TCP_IPV4_INDEX];
	udp_tbl = gro_ctx->tbls[RTE_GRO_UDP_IPV4_INDEX];
	vxlan_udp_tbl = gro_ctx->tbls[RTE_GRO_IPV4_VXLAN_UDP_IPV4_INDEX];

	do_tcp4_gro = (gro_ctx->gro_types & RTE_GRO_TCP_IPV4) ==
		RTE_GRO_TCP_IPV4;
	do_vxlan_tcp_gro = (gro_ctx->gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) ==
		RTE_GRO_IPV4_VXLAN_TCP_IPV4;
	do_udp4_gro = (gro_ctx->gro_types & RTE_GRO_UDP_IPV4) ==
		RTE_GRO_UDP_IPV4;
	do_vxlan_udp_gro = (gro_ctx->gro_types & RTE_GRO_IPV4_VXLAN_UDP_IPV4) ==
		RTE_GRO_IPV4_VXLAN_UDP_IPV4;

	current_time = rte_rdtsc();

	for (i = 0; i < nb_pkts; i++) {
		if (IS_IPV4_VXLAN_TCP4_PKT(pkts[i]->packet_type) &&
				do_vxlan_tcp_gro) {
			if (gro_vxlan_tcp4_reassemble(pkts[i], vxlan_tcp_tbl,
						current_time) < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else if (IS_IPV4_VXLAN_UDP4_PKT(pkts[i]->packet_type) &&
				do_vxlan_udp_gro) {
			if (gro_vxlan_udp4_reassemble(pkts[i], vxlan_udp_tbl,
						current_time) < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else if (IS_IPV4_TCP_PKT(pkts[i]->packet_type) &&
				do_tcp4_gro) {
			if (gro_tcp4_reassemble(pkts[i], tcp_tbl,
						current_time) < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else if (IS_IPV4_UDP_PKT(pkts[i]->packet_type) &&
				do_udp4_gro) {
			if (gro_udp4_reassemble(pkts[i], udp_tbl,
						current_time) < 0)
				unprocess_pkts[unprocess_num++] = pkts[i];
		} else
			unprocess_pkts[unprocess_num++] = pkts[i];
	}
	if (unprocess_num > 0) {
		memcpy(pkts, unprocess_pkts, sizeof(struct rte_mbuf *) *
				unprocess_num);
	}

	return unprocess_num;
}
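/*
 * Note on 'timeout_cycles' for rte_gro_timeout_flush() below:
 * packet timestamps are taken with rte_rdtsc(), so a wall-clock
 * timeout must be converted to TSC cycles. A sketch for a 2 ms
 * timeout (the interval is an assumption):
 *
 *	uint64_t timeout_cycles = rte_get_tsc_hz() / 1000 * 2;
 */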
uint16_t
rte_gro_timeout_flush(void *ctx,
		uint64_t timeout_cycles,
		uint64_t gro_types,
		struct rte_mbuf **out,
		uint16_t max_nb_out)
{
	struct gro_ctx *gro_ctx = ctx;
	uint64_t flush_timestamp;
	uint16_t num = 0;
	uint16_t left_nb_out = max_nb_out;

	gro_types = gro_types & gro_ctx->gro_types;
	flush_timestamp = rte_rdtsc() - timeout_cycles;

	if (gro_types & RTE_GRO_IPV4_VXLAN_TCP_IPV4) {
		num = gro_vxlan_tcp4_tbl_timeout_flush(gro_ctx->tbls[
				RTE_GRO_IPV4_VXLAN_TCP_IPV4_INDEX],
				flush_timestamp, out, left_nb_out);
		left_nb_out = max_nb_out - num;
	}

	if ((gro_types & RTE_GRO_IPV4_VXLAN_UDP_IPV4) && left_nb_out > 0) {
		num += gro_vxlan_udp4_tbl_timeout_flush(gro_ctx->tbls[
				RTE_GRO_IPV4_VXLAN_UDP_IPV4_INDEX],
				flush_timestamp, &out[num], left_nb_out);
		left_nb_out = max_nb_out - num;
	}

	/* If there is no available space in 'out', stop flushing. */
	if ((gro_types & RTE_GRO_TCP_IPV4) && left_nb_out > 0) {
		num += gro_tcp4_tbl_timeout_flush(
				gro_ctx->tbls[RTE_GRO_TCP_IPV4_INDEX],
				flush_timestamp,
				&out[num], left_nb_out);
		left_nb_out = max_nb_out - num;
	}

	/* If there is no available space in 'out', stop flushing. */
	if ((gro_types & RTE_GRO_UDP_IPV4) && left_nb_out > 0) {
		num += gro_udp4_tbl_timeout_flush(
				gro_ctx->tbls[RTE_GRO_UDP_IPV4_INDEX],
				flush_timestamp,
				&out[num], left_nb_out);
	}

	return num;
}

uint64_t
rte_gro_get_pkt_count(void *ctx)
{
	struct gro_ctx *gro_ctx = ctx;
	gro_tbl_pkt_count_fn pkt_count_fn;
	uint64_t gro_types = gro_ctx->gro_types, flag;
	uint64_t item_num = 0;
	uint8_t i;

	for (i = 0; i < RTE_GRO_TYPE_MAX_NUM && gro_types; i++) {
		flag = 1ULL << i;
		if ((gro_types & flag) == 0)
			continue;

		gro_types ^= flag;
		pkt_count_fn = tbl_pkt_count_fn[i];
		if (pkt_count_fn)
			item_num += pkt_count_fn(gro_ctx->tbls[i]);
	}

	return item_num;
}
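/*
 * Illustrative use of rte_gro_get_pkt_count() above (a sketch, not
 * part of this library): an application can check the table backlog
 * to decide whether an early flush is worthwhile.
 *
 *	if (rte_gro_get_pkt_count(gro_ctx) > 0)
 *		nb_out = rte_gro_timeout_flush(gro_ctx, 0,
 *				param.gro_types, out, RTE_DIM(out));
 */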