/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"

/* Mbuf dynamic field name for metadata. */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic flag mask for metadata. */
uint64_t rte_flow_dynf_metadata_mask;

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
};

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

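/*
 * Illustrative note (not part of the original source): for a given type,
 * the macro above expands to a designated initializer, e.g.
 * MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)) becomes:
 *
 *	[RTE_FLOW_ITEM_TYPE_ETH] = {
 *		.name = "ETH",
 *		.size = sizeof(struct rte_flow_item_eth),
 *	},
 *
 * so the description tables below can be indexed directly by item or
 * action type.
 */
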
/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PF, 0),
	MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
	MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
		     sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_SET_MPLS_TTL,
		       sizeof(struct rte_flow_action_of_set_mpls_ttl)),
	MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
	MK_FLOW_ACTION(OF_SET_NW_TTL,
		       sizeof(struct rte_flow_action_of_set_nw_ttl)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC, sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST, sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC, sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST, sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC, sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST, sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),

	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
};

int
rte_flow_dynf_metadata_register(void)
{
	int offset;
	int flag;

	static const struct rte_mbuf_dynfield desc_offs = {
		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
		.size = sizeof(uint32_t),
		.align = __alignof__(uint32_t),
	};
	static const struct rte_mbuf_dynflag desc_flag = {
		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
	};

	offset = rte_mbuf_dynfield_register(&desc_offs);
	if (offset < 0)
		goto error;
	flag = rte_mbuf_dynflag_register(&desc_flag);
	if (flag < 0)
		goto error;
	rte_flow_dynf_metadata_offs = offset;
	rte_flow_dynf_metadata_mask = (1ULL << flag);
	return 0;

error:
	rte_flow_dynf_metadata_offs = -1;
	rte_flow_dynf_metadata_mask = 0ULL;
	return -rte_errno;
}

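/*
 * Usage sketch (illustrative, not part of this file): an application
 * registers the metadata dynamic field once, before creating flows that
 * carry metadata, then accesses it through the helpers from rte_flow.h:
 *
 *	if (rte_flow_dynf_metadata_register() < 0)
 *		return -rte_errno; // registration failed, rte_errno is set
 *	...
 *	if (mbuf->ol_flags & rte_flow_dynf_metadata_mask)
 *		meta = *RTE_FLOW_DYNF_METADATA(mbuf);
 */
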
static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/*
 * Deduce the pattern item type implied by a fully-masked item:
 * RTE_FLOW_ITEM_TYPE_END when no expansion is required, or
 * RTE_FLOW_ITEM_TYPE_VOID when nothing can be deduced.
 */
static enum rte_flow_item_type
rte_flow_expand_rss_item_complete(const struct rte_flow_item *item)
{
	enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
	uint16_t ether_type = 0;
	uint16_t ether_type_m;
	uint8_t ip_next_proto = 0;
	uint8_t ip_next_proto_m;

	if (item == NULL || item->spec == NULL)
		return ret;
	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		if (item->mask)
			ether_type_m = ((const struct rte_flow_item_eth *)
					(item->mask))->type;
		else
			ether_type_m = rte_flow_item_eth_mask.type;
		if (ether_type_m != RTE_BE16(0xFFFF))
			break;
		ether_type = ((const struct rte_flow_item_eth *)
			      (item->spec))->type;
		if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
			ret = RTE_FLOW_ITEM_TYPE_VLAN;
		else
			ret = RTE_FLOW_ITEM_TYPE_END;
		break;
	case RTE_FLOW_ITEM_TYPE_VLAN:
		if (item->mask)
			ether_type_m = ((const struct rte_flow_item_vlan *)
					(item->mask))->inner_type;
		else
			ether_type_m = rte_flow_item_vlan_mask.inner_type;
		if (ether_type_m != RTE_BE16(0xFFFF))
			break;
		ether_type = ((const struct rte_flow_item_vlan *)
			      (item->spec))->inner_type;
		if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
			ret = RTE_FLOW_ITEM_TYPE_VLAN;
		else
			ret = RTE_FLOW_ITEM_TYPE_END;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		if (item->mask)
			ip_next_proto_m = ((const struct rte_flow_item_ipv4 *)
					   (item->mask))->hdr.next_proto_id;
		else
			ip_next_proto_m =
				rte_flow_item_ipv4_mask.hdr.next_proto_id;
		if (ip_next_proto_m != 0xFF)
			break;
		ip_next_proto = ((const struct rte_flow_item_ipv4 *)
				 (item->spec))->hdr.next_proto_id;
		if (ip_next_proto == IPPROTO_UDP)
			ret = RTE_FLOW_ITEM_TYPE_UDP;
		else if (ip_next_proto == IPPROTO_TCP)
			ret = RTE_FLOW_ITEM_TYPE_TCP;
		else if (ip_next_proto == IPPROTO_IP)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (ip_next_proto == IPPROTO_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else
			ret = RTE_FLOW_ITEM_TYPE_END;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		if (item->mask)
			ip_next_proto_m = ((const struct rte_flow_item_ipv6 *)
					   (item->mask))->hdr.proto;
		else
			ip_next_proto_m =
				rte_flow_item_ipv6_mask.hdr.proto;
		if (ip_next_proto_m != 0xFF)
			break;
		ip_next_proto = ((const struct rte_flow_item_ipv6 *)
				 (item->spec))->hdr.proto;
		if (ip_next_proto == IPPROTO_UDP)
			ret = RTE_FLOW_ITEM_TYPE_UDP;
		else if (ip_next_proto == IPPROTO_TCP)
			ret = RTE_FLOW_ITEM_TYPE_TCP;
		else if (ip_next_proto == IPPROTO_IP)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (ip_next_proto == IPPROTO_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else
			ret = RTE_FLOW_ITEM_TYPE_END;
		break;
	default:
		ret = RTE_FLOW_ITEM_TYPE_VOID;
		break;
	}
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(!dev->dev_ops->filter_ctrl ||
			  dev->dev_ops->filter_ctrl(dev,
						    RTE_ETH_FILTER_GENERIC,
						    RTE_ETH_FILTER_GET,
						    &ops) ||
			  !ops))
		code = ENOSYS;
	else
		return ops;
	rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(code));
	return NULL;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate))
		return flow_err(port_id, ops->validate(dev, attr, pattern,
						       actions, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		flow = ops->create(dev, attr, pattern, actions, error);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);
		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}

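/*
 * Usage sketch (illustrative, not part of this file): applications
 * typically validate a rule before creating it and inspect the error
 * structure on failure:
 *
 *	struct rte_flow_error err = { 0 };
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions,
 *				       &err);
 *	if (flow == NULL)
 *		printf("flow error: %s\n",
 *		       err.message ? err.message : "(no message)");
 */
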
/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy))
		return flow_err(port_id, ops->destroy(dev, flow, error),
				error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush))
		return flow_err(port_id, ops->flush(dev, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query))
		return flow_err(port_id, ops->query(dev, flow, action, data,
						    error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate))
		return flow_err(port_id, ops->isolate(dev, set, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}

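/*
 * Usage sketch (illustrative, not part of this file): driver callbacks
 * report failures through rte_flow_error_set(), which fills the error
 * structure, sets rte_errno and returns the negated error code in one
 * statement. The callback name below is hypothetical:
 *
 *	static int
 *	pmd_flow_validate(struct rte_eth_dev *dev, ...)
 *	{
 *		if (unsupported)
 *			return rte_flow_error_set(error, ENOTSUP,
 *						  RTE_FLOW_ERROR_TYPE_ATTR,
 *						  NULL,
 *						  "attribute not supported");
 *		return 0;
 *	}
 */
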
/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_desc_item[item->type].size;
		rte_memcpy(buf, data, (size > off ? off : size));
		break;
	}
	return off;
}

/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store the action configuration regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len && src.rss->key) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= off + tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= off + tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_desc_action[action->type].size;
		rte_memcpy(buf, action->conf, (size > off ? off : size));
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	/* First pass: copy item headers and determine the list length. */
	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		if ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		    !rte_flow_desc_item[src->type].name)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	/* Second pass: copy spec/last/mask and link them to each item. */
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	/* First pass: copy action headers and determine the list length. */
	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		if ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		    !rte_flow_desc_action[src->type].name)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	/* Second pass: copy each action configuration. */
	do {
		if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	switch (op) {
		const struct rte_flow_attr *attr;

	case RTE_FLOW_CONV_OP_NONE:
		return 0;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		return sizeof(*attr);
	case RTE_FLOW_CONV_OP_ITEM:
		return rte_flow_conv_pattern(dst, size, src, 1, error);
	case RTE_FLOW_CONV_OP_ACTION:
		return rte_flow_conv_actions(dst, size, src, 1, error);
	case RTE_FLOW_CONV_OP_PATTERN:
		return rte_flow_conv_pattern(dst, size, src, 0, error);
	case RTE_FLOW_CONV_OP_ACTIONS:
		return rte_flow_conv_actions(dst, size, src, 0, error);
	case RTE_FLOW_CONV_OP_RULE:
		return rte_flow_conv_rule(dst, size, src, error);
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		return rte_flow_conv_name(0, 0, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		return rte_flow_conv_name(1, 0, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		return rte_flow_conv_name(0, 1, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		return rte_flow_conv_name(1, 1, dst, size, src, error);
	}
	return rte_flow_error_set
		(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
		 "unknown object conversion operation");
}

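/*
 * Usage sketch (illustrative, not part of this file): rte_flow_conv()
 * supports the usual two-pass pattern, where a first call with a zero
 * size returns the number of bytes needed and a second call performs
 * the conversion into an allocated buffer:
 *
 *	struct rte_flow_error err;
 *	void *buf;
 *	int len;
 *
 *	len = rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, NULL, 0, pattern,
 *			    &err);
 *	if (len < 0)
 *		return len;
 *	buf = malloc(len);
 *	if (buf == NULL)
 *		return -ENOMEM;
 *	len = rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, buf, len, pattern,
 *			    &err);
 */
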
/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in
	 * order to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);
	return ret;
}

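/*
 * Usage sketch (illustrative, not part of this file): like
 * rte_flow_conv(), rte_flow_copy() can be called twice, first with a
 * zero length to compute the required descriptor size; it is a thin
 * wrapper kept on top of the more general RTE_FLOW_CONV_OP_RULE
 * operation:
 *
 *	size_t len = rte_flow_copy(NULL, 0, &attr, pattern, actions);
 *	struct rte_flow_desc *desc = len ? malloc(len) : NULL;
 *
 *	if (desc == NULL ||
 *	    rte_flow_copy(desc, len, &attr, pattern, actions) != len)
 *		...; // conversion error, rte_errno is set
 */
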
/**
 * Expand RSS flows into several possible flows according to the RSS hash
 * fields requested and the driver capabilities.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   expansion entries regardless of @p size on success, a negative errno
 *   value otherwise (-E2BIG when the expansion graph is deeper than the
 *   internal stack) and rte_errno is set.
 */
int
rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
		    const struct rte_flow_item *pattern, uint64_t types,
		    const struct rte_flow_expand_node graph[],
		    int graph_root_index)
{
	const int elt_n = 8;
	const struct rte_flow_item *item;
	const struct rte_flow_expand_node *node = &graph[graph_root_index];
	const int *next_node;
	const int *stack[elt_n];
	int stack_pos = 0;
	struct rte_flow_item flow_items[elt_n];
	unsigned int i;
	size_t lsize;
	size_t user_pattern_size = 0;
	void *addr = NULL;
	const struct rte_flow_expand_node *next = NULL;
	struct rte_flow_item missed_item;
	int missed = 0;
	int elt = 0;
	const struct rte_flow_item *last_item = NULL;

	memset(&missed_item, 0, sizeof(missed_item));
	lsize = offsetof(struct rte_flow_expand_rss, entry) +
		elt_n * sizeof(buf->entry[0]);
	if (lsize <= size) {
		buf->entry[0].priority = 0;
		buf->entry[0].pattern = (void *)&buf->entry[elt_n];
		buf->entries = 0;
		addr = buf->entry[0].pattern;
	}
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
			last_item = item;
		for (i = 0; node->next && node->next[i]; ++i) {
			next = &graph[node->next[i]];
			if (next->type == item->type)
				break;
		}
		if (next)
			node = next;
		user_pattern_size += sizeof(*item);
	}
	user_pattern_size += sizeof(*item); /* Handle END item. */
	lsize += user_pattern_size;
	/* Copy the user pattern into the first entry of the buffer. */
	if (lsize <= size) {
		rte_memcpy(addr, pattern, user_pattern_size);
		addr = (void *)(((uintptr_t)addr) + user_pattern_size);
		buf->entries = 1;
	}
	/* Start expanding. */
	memset(flow_items, 0, sizeof(flow_items));
	user_pattern_size -= sizeof(*item);
	/*
	 * Check whether the last valid item has a spec set, needs to be
	 * completed, and whether the pattern can be used for expansion.
	 */
	missed_item.type = rte_flow_expand_rss_item_complete(last_item);
	if (missed_item.type == RTE_FLOW_ITEM_TYPE_END) {
		/* Item type END indicates expansion is not required. */
		return lsize;
	}
	if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
		next = NULL;
		missed = 1;
		for (i = 0; node->next && node->next[i]; ++i) {
			next = &graph[node->next[i]];
			if (next->type == missed_item.type) {
				flow_items[0].type = missed_item.type;
				flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
				break;
			}
			next = NULL;
		}
	}
	if (next && missed) {
		elt = 2; /* missed item + END item. */
		node = next;
		lsize += elt * sizeof(*item) + user_pattern_size;
		if ((node->rss_types & types) && lsize <= size) {
			buf->entry[buf->entries].priority = 1;
			buf->entry[buf->entries].pattern = addr;
			buf->entries++;
			rte_memcpy(addr, buf->entry[0].pattern,
				   user_pattern_size);
			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
			rte_memcpy(addr, flow_items, elt * sizeof(*item));
			addr = (void *)(((uintptr_t)addr) +
					elt * sizeof(*item));
		}
	}
	memset(flow_items, 0, sizeof(flow_items));
	next_node = node->next;
	stack[stack_pos] = next_node;
	node = next_node ? &graph[*next_node] : NULL;
	while (node) {
		flow_items[stack_pos].type = node->type;
		if (node->rss_types & types) {
			/*
			 * Compute the number of items to copy from the
			 * expansion and copy them. When stack_pos is 0,
			 * there is one element in it, plus the additional
			 * END item.
			 */
			elt = stack_pos + 2;
			flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
			lsize += elt * sizeof(*item) + user_pattern_size;
			if (lsize <= size) {
				size_t n = elt * sizeof(*item);

				buf->entry[buf->entries].priority =
					stack_pos + 1 + missed;
				buf->entry[buf->entries].pattern = addr;
				buf->entries++;
				rte_memcpy(addr, buf->entry[0].pattern,
					   user_pattern_size);
				addr = (void *)(((uintptr_t)addr) +
						user_pattern_size);
				rte_memcpy(addr, &missed_item,
					   missed * sizeof(*item));
				addr = (void *)(((uintptr_t)addr) +
						missed * sizeof(*item));
				rte_memcpy(addr, flow_items, n);
				addr = (void *)(((uintptr_t)addr) + n);
			}
		}
		/* Go deeper. */
		if (node->next) {
			next_node = node->next;
			/*
			 * Reject paths deeper than the stack and the
			 * flow_items[] scratch area can hold; one slot is
			 * reserved for the END item. The original check
			 * against elt_n could never trigger before writing
			 * out of bounds.
			 */
			if (stack_pos++ == elt_n - 2) {
				rte_errno = E2BIG;
				return -rte_errno;
			}
			stack[stack_pos] = next_node;
		} else if (*(next_node + 1)) {
			/* Follow up with the next possibility. */
			++next_node;
		} else {
			/* Move to the next path. */
			if (stack_pos)
				next_node = stack[--stack_pos];
			next_node++;
			stack[stack_pos] = next_node;
		}
		node = *next_node ? &graph[*next_node] : NULL;
	}
	/* No expanded flows but a missed item remains: create one rule for it. */
	if (buf->entries == 1 && missed != 0) {
		elt = 2;
		lsize += elt * sizeof(*item) + user_pattern_size;
		if (lsize <= size) {
			buf->entry[buf->entries].priority = 1;
			buf->entry[buf->entries].pattern = addr;
			buf->entries++;
			flow_items[0].type = missed_item.type;
			flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
			rte_memcpy(addr, buf->entry[0].pattern,
				   user_pattern_size);
			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
			rte_memcpy(addr, flow_items, elt * sizeof(*item));
			addr = (void *)(((uintptr_t)addr) +
					elt * sizeof(*item));
		}
	}
	return lsize;
}
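
/*
 * Usage sketch (illustrative, not part of this file): a PMD supplies an
 * expansion graph describing the protocol layers it can hash on and lets
 * rte_flow_expand_rss() derive the concrete patterns; the graph and root
 * names below are hypothetical:
 *
 *	uint8_t raw[4096];
 *	struct rte_flow_expand_rss *exp = (void *)raw;
 *	int n = rte_flow_expand_rss(exp, sizeof(raw), pattern,
 *				    rss->types, pmd_expand_graph,
 *				    PMD_EXPAND_ROOT);
 *
 *	if (n >= 0 && (size_t)n <= sizeof(raw))
 *		for (i = 0; i < exp->entries; ++i)
 *			create_one_flow(exp->entry[i].pattern, ...);
 */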