/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"

/* Mbuf dynamic field offset for metadata. */
int rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic flag mask for metadata. */
uint64_t rte_flow_dynf_metadata_mask;

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
};

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PF, 0),
	MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
	MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
		     sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_SET_MPLS_TTL,
		       sizeof(struct rte_flow_action_of_set_mpls_ttl)),
	MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
	MK_FLOW_ACTION(OF_SET_NW_TTL,
		       sizeof(struct rte_flow_action_of_set_nw_ttl)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC, sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST, sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC, sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST, sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC, sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST, sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
};
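
/*
 * For reference, each MK_FLOW_ITEM()/MK_FLOW_ACTION() invocation above
 * expands to a designated initializer keyed by the type value, e.g.:
 *
 *	[RTE_FLOW_ITEM_TYPE_ETH] = {
 *		.name = "ETH",
 *		.size = sizeof(struct rte_flow_item_eth),
 *	},
 *
 * which lets the conversion helpers below obtain the storage size of any
 * spec/last/mask or action configuration with a plain lookup such as
 * rte_flow_desc_item[item->type].size, after bounds-checking the type
 * against RTE_DIM(rte_flow_desc_item).
 */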

int
rte_flow_dynf_metadata_register(void)
{
	int offset;
	int flag;

	static const struct rte_mbuf_dynfield desc_offs = {
		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
		.size = sizeof(uint32_t),
		.align = __alignof__(uint32_t),
	};
	static const struct rte_mbuf_dynflag desc_flag = {
		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
	};

	offset = rte_mbuf_dynfield_register(&desc_offs);
	if (offset < 0)
		goto error;
	flag = rte_mbuf_dynflag_register(&desc_flag);
	if (flag < 0)
		goto error;
	rte_flow_dynf_metadata_offs = offset;
	rte_flow_dynf_metadata_mask = (1ULL << flag);
	return 0;

error:
	rte_flow_dynf_metadata_offs = -1;
	rte_flow_dynf_metadata_mask = 0ULL;
	return -rte_errno;
}
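
/*
 * Illustrative usage sketch (not part of this file): an application is
 * expected to call the registration function once at startup, then rely on
 * the accessors declared in rte_flow.h, which build on the offset and mask
 * set above:
 *
 *	if (rte_flow_dynf_metadata_register() < 0)
 *		rte_exit(EXIT_FAILURE, "cannot register metadata field\n");
 *	...
 *	if (rte_flow_dynf_metadata_avail()) {
 *		rte_flow_dynf_metadata_set(m, 0xcafe);
 *		m->ol_flags |= PKT_TX_DYNF_METADATA;
 *	}
 */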

static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

static enum rte_flow_item_type
rte_flow_expand_rss_item_complete(const struct rte_flow_item *item)
{
	enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
	uint16_t ether_type = 0;
	uint16_t ether_type_m;
	uint8_t ip_next_proto = 0;
	uint8_t ip_next_proto_m;

	if (item == NULL || item->spec == NULL)
		return ret;
	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		if (item->mask)
			ether_type_m = ((const struct rte_flow_item_eth *)
					(item->mask))->type;
		else
			ether_type_m = rte_flow_item_eth_mask.type;
		if (ether_type_m != RTE_BE16(0xFFFF))
			break;
		ether_type = ((const struct rte_flow_item_eth *)
			      (item->spec))->type;
		if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
			ret = RTE_FLOW_ITEM_TYPE_VLAN;
		break;
	case RTE_FLOW_ITEM_TYPE_VLAN:
		if (item->mask)
			ether_type_m = ((const struct rte_flow_item_vlan *)
					(item->mask))->inner_type;
		else
			ether_type_m = rte_flow_item_vlan_mask.inner_type;
		if (ether_type_m != RTE_BE16(0xFFFF))
			break;
		ether_type = ((const struct rte_flow_item_vlan *)
			      (item->spec))->inner_type;
		if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
			ret = RTE_FLOW_ITEM_TYPE_VLAN;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		if (item->mask)
			ip_next_proto_m = ((const struct rte_flow_item_ipv4 *)
					   (item->mask))->hdr.next_proto_id;
		else
			ip_next_proto_m =
				rte_flow_item_ipv4_mask.hdr.next_proto_id;
		if (ip_next_proto_m != 0xFF)
			break;
		ip_next_proto = ((const struct rte_flow_item_ipv4 *)
				 (item->spec))->hdr.next_proto_id;
		if (ip_next_proto == IPPROTO_UDP)
			ret = RTE_FLOW_ITEM_TYPE_UDP;
		else if (ip_next_proto == IPPROTO_TCP)
			ret = RTE_FLOW_ITEM_TYPE_TCP;
		else if (ip_next_proto == IPPROTO_IP)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (ip_next_proto == IPPROTO_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		if (item->mask)
			ip_next_proto_m = ((const struct rte_flow_item_ipv6 *)
					   (item->mask))->hdr.proto;
		else
			ip_next_proto_m =
				rte_flow_item_ipv6_mask.hdr.proto;
		if (ip_next_proto_m != 0xFF)
			break;
		ip_next_proto = ((const struct rte_flow_item_ipv6 *)
				 (item->spec))->hdr.proto;
		if (ip_next_proto == IPPROTO_UDP)
			ret = RTE_FLOW_ITEM_TYPE_UDP;
		else if (ip_next_proto == IPPROTO_TCP)
			ret = RTE_FLOW_ITEM_TYPE_TCP;
		else if (ip_next_proto == IPPROTO_IP)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (ip_next_proto == IPPROTO_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		break;
	default:
		ret = RTE_FLOW_ITEM_TYPE_VOID;
		break;
	}
	return ret;
}
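
/*
 * Example of the completion logic above (a sketch): an ETH item whose spec
 * sets the EtherType to IPv4 under an exact-match mask can only be followed
 * by an IPv4 header, so the expansion code may append one:
 *
 *	static const struct rte_flow_item_eth spec = {
 *		.type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *	};
 *	static const struct rte_flow_item_eth mask = {
 *		.type = RTE_BE16(0xFFFF),
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_ETH,
 *		.spec = &spec,
 *		.mask = &mask,
 *	};
 *
 * rte_flow_expand_rss_item_complete(&item) then returns
 * RTE_FLOW_ITEM_TYPE_IPV4; without the exact-match mask it would return
 * RTE_FLOW_ITEM_TYPE_VOID.
 */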

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(!dev->dev_ops->filter_ctrl ||
			  dev->dev_ops->filter_ctrl(dev,
						    RTE_ETH_FILTER_GENERIC,
						    RTE_ETH_FILTER_GET,
						    &ops) ||
			  !ops))
		code = ENOSYS;
	else
		return ops;
	rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(code));
	return NULL;
}
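
/*
 * A PMD exposes its rte_flow_ops through the same filter_ctrl callback;
 * a sketch of the driver side (all "foo" names are hypothetical):
 *
 *	static const struct rte_flow_ops foo_flow_ops = {
 *		.validate = foo_flow_validate,
 *		.create = foo_flow_create,
 *		.destroy = foo_flow_destroy,
 *		.flush = foo_flow_flush,
 *	};
 *
 *	static int
 *	foo_dev_filter_ctrl(struct rte_eth_dev *dev,
 *			    enum rte_filter_type filter_type,
 *			    enum rte_filter_op filter_op, void *arg)
 *	{
 *		if (filter_type != RTE_ETH_FILTER_GENERIC)
 *			return -ENOTSUP;
 *		if (filter_op != RTE_ETH_FILTER_GET)
 *			return -EINVAL;
 *		*(const void **)arg = &foo_flow_ops;
 *		return 0;
 *	}
 */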

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate))
		return flow_err(port_id, ops->validate(dev, attr, pattern,
						       actions, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		flow = ops->create(dev, attr, pattern, actions, error);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);
		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}
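
/*
 * Typical call sequence (a sketch, error handling trimmed): the same
 * attribute/pattern/action arrays are first validated, then turned into a
 * rule handle.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions,
 *				       &err);
 */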

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy))
		return flow_err(port_id, ops->destroy(dev, flow, error),
				error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush))
		return flow_err(port_id, ops->flush(dev, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query))
		return flow_err(port_id, ops->query(dev, flow, action, data,
						    error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate))
		return flow_err(port_id, ops->isolate(dev, set, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}
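
/*
 * Querying is action-specific; reading a counter attached to a rule through
 * RTE_FLOW_ACTION_TYPE_COUNT looks roughly like this (a sketch):
 *
 *	struct rte_flow_query_count count = { .reset = 0 };
 *	const struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *
 *	if (rte_flow_query(port_id, flow, &count_action, &count, &err) == 0 &&
 *	    count.hits_set)
 *		printf("hits: %" PRIu64 "\n", count.hits);
 */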

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}
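
/*
 * PMD callbacks typically use this helper to fill the error object and
 * produce their negative return value in a single statement, e.g.
 * (a sketch):
 *
 *	if (attr->egress)
 *		return rte_flow_error_set(error, ENOTSUP,
 *					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
 *					  NULL, "egress is not supported");
 */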

/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_desc_item[item->type].size;
		rte_memcpy(buf, data, (size > off ? off : size));
		break;
	}
	return off;
}

/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store action configuration regardless of
 *   @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= off + tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= off + tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_desc_action[action->type].size;
		rte_memcpy(buf, action->conf, (size > off ? off : size));
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		if ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		    !rte_flow_desc_item[src->type].name)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		if ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		    !rte_flow_desc_action[src->type].name)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}
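
/*
 * The table lookups above back the *_NAME conversion operations; e.g.
 * retrieving the string "VXLAN" for an item type through rte_flow_conv()
 * (a sketch):
 *
 *	char name[16];
 *
 *	if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME, name, sizeof(name),
 *			  (void *)(uintptr_t)RTE_FLOW_ITEM_TYPE_VXLAN,
 *			  NULL) > 0)
 *		puts(name);
 *
 * which prints "VXLAN". The *_NAME_PTR variants store the string address
 * instead of copying its contents.
 */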

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	switch (op) {
		const struct rte_flow_attr *attr;

	case RTE_FLOW_CONV_OP_NONE:
		return 0;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		return sizeof(*attr);
	case RTE_FLOW_CONV_OP_ITEM:
		return rte_flow_conv_pattern(dst, size, src, 1, error);
	case RTE_FLOW_CONV_OP_ACTION:
		return rte_flow_conv_actions(dst, size, src, 1, error);
	case RTE_FLOW_CONV_OP_PATTERN:
		return rte_flow_conv_pattern(dst, size, src, 0, error);
	case RTE_FLOW_CONV_OP_ACTIONS:
		return rte_flow_conv_actions(dst, size, src, 0, error);
	case RTE_FLOW_CONV_OP_RULE:
		return rte_flow_conv_rule(dst, size, src, error);
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		return rte_flow_conv_name(0, 0, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		return rte_flow_conv_name(1, 0, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		return rte_flow_conv_name(0, 1, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		return rte_flow_conv_name(1, 1, dst, size, src, error);
	}
	return rte_flow_error_set
		(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
		 "unknown object conversion operation");
}

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in
	 * order to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);
	return ret;
}
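
/*
 * Callers usually apply the size-query idiom documented above: call once
 * with a zero size to learn the room required, then call again with an
 * allocated buffer (a sketch; allocation and error handling are
 * illustrative):
 *
 *	struct rte_flow_conv_rule rule = {
 *		.attr_ro = attr,
 *		.pattern_ro = pattern,
 *		.actions_ro = actions,
 *	};
 *	int ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &err);
 *
 *	if (ret > 0) {
 *		struct rte_flow_conv_rule *copy = malloc(ret);
 *
 *		if (copy != NULL)
 *			rte_flow_conv(RTE_FLOW_CONV_OP_RULE, copy, ret,
 *				      &rule, &err);
 *	}
 */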

/**
 * Expand RSS flows into several possible flows according to the RSS hash
 * fields requested and the driver capabilities.
 */
int
rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
		    const struct rte_flow_item *pattern, uint64_t types,
		    const struct rte_flow_expand_node graph[],
		    int graph_root_index)
{
	const int elt_n = 8;
	const struct rte_flow_item *item;
	const struct rte_flow_expand_node *node = &graph[graph_root_index];
	const int *next_node;
	const int *stack[elt_n];
	int stack_pos = 0;
	struct rte_flow_item flow_items[elt_n];
	unsigned int i;
	size_t lsize;
	size_t user_pattern_size = 0;
	void *addr = NULL;
	const struct rte_flow_expand_node *next = NULL;
	struct rte_flow_item missed_item;
	int missed = 0;
	int elt = 0;
	const struct rte_flow_item *last_item = NULL;

	memset(&missed_item, 0, sizeof(missed_item));
	lsize = offsetof(struct rte_flow_expand_rss, entry) +
		elt_n * sizeof(buf->entry[0]);
	if (lsize <= size) {
		buf->entry[0].priority = 0;
		buf->entry[0].pattern = (void *)&buf->entry[elt_n];
		buf->entries = 0;
		addr = buf->entry[0].pattern;
	}
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
			last_item = item;
		for (i = 0; node->next && node->next[i]; ++i) {
			next = &graph[node->next[i]];
			if (next->type == item->type)
				break;
		}
		if (next)
			node = next;
		user_pattern_size += sizeof(*item);
	}
	user_pattern_size += sizeof(*item); /* Handle END item. */
	lsize += user_pattern_size;
	/* Copy the user pattern in the first entry of the buffer. */
	if (lsize <= size) {
		rte_memcpy(addr, pattern, user_pattern_size);
		addr = (void *)(((uintptr_t)addr) + user_pattern_size);
		buf->entries = 1;
	}
	/* Start expanding. */
	memset(flow_items, 0, sizeof(flow_items));
	user_pattern_size -= sizeof(*item);
	/*
	 * Check whether the last valid item has a spec set and needs the
	 * pattern to be completed.
	 */
	missed_item.type = rte_flow_expand_rss_item_complete(last_item);
	if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
		next = NULL;
		missed = 1;
		for (i = 0; node->next && node->next[i]; ++i) {
			next = &graph[node->next[i]];
			if (next->type == missed_item.type) {
				flow_items[0].type = missed_item.type;
				flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
				break;
			}
			next = NULL;
		}
	}
	if (next && missed) {
		elt = 2; /* missed item + item end. */
		node = next;
		lsize += elt * sizeof(*item) + user_pattern_size;
		if ((node->rss_types & types) && lsize <= size) {
			buf->entry[buf->entries].priority = 1;
			buf->entry[buf->entries].pattern = addr;
			buf->entries++;
			rte_memcpy(addr, buf->entry[0].pattern,
				   user_pattern_size);
			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
			rte_memcpy(addr, flow_items, elt * sizeof(*item));
			addr = (void *)(((uintptr_t)addr) +
					elt * sizeof(*item));
		}
	}
	memset(flow_items, 0, sizeof(flow_items));
	next_node = node->next;
	stack[stack_pos] = next_node;
	node = next_node ? &graph[*next_node] : NULL;
	while (node) {
		flow_items[stack_pos].type = node->type;
		if (node->rss_types & types) {
			/*
			 * Compute the number of items to copy from the
			 * expansion and copy them.
			 * When stack_pos is 0, the stack holds one element,
			 * plus the additional END item.
			 */
			elt = stack_pos + 2;
			flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
			lsize += elt * sizeof(*item) + user_pattern_size;
			if (lsize <= size) {
				size_t n = elt * sizeof(*item);

				buf->entry[buf->entries].priority =
					stack_pos + 1 + missed;
				buf->entry[buf->entries].pattern = addr;
				buf->entries++;
				rte_memcpy(addr, buf->entry[0].pattern,
					   user_pattern_size);
				addr = (void *)(((uintptr_t)addr) +
						user_pattern_size);
				rte_memcpy(addr, &missed_item,
					   missed * sizeof(*item));
				addr = (void *)(((uintptr_t)addr) +
						missed * sizeof(*item));
				rte_memcpy(addr, flow_items, n);
				addr = (void *)(((uintptr_t)addr) + n);
			}
		}
		/* Go deeper. */
		if (node->next) {
			next_node = node->next;
			/* Reject paths too deep for the stack to hold. */
			if (stack_pos++ == elt_n - 1) {
				rte_errno = E2BIG;
				return -rte_errno;
			}
			stack[stack_pos] = next_node;
		} else if (*(next_node + 1)) {
			/* Follow up with the next possibility. */
			++next_node;
		} else {
			/* Move to the next path. */
			if (stack_pos)
				next_node = stack[--stack_pos];
			next_node++;
			stack[stack_pos] = next_node;
		}
		node = *next_node ? &graph[*next_node] : NULL;
	}
	/* No expanded flows, but we have a missed item; create one rule for it. */
	if (buf->entries == 1 && missed != 0) {
		elt = 2;
		lsize += elt * sizeof(*item) + user_pattern_size;
		if (lsize <= size) {
			buf->entry[buf->entries].priority = 1;
			buf->entry[buf->entries].pattern = addr;
			buf->entries++;
			flow_items[0].type = missed_item.type;
			flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
			rte_memcpy(addr, buf->entry[0].pattern,
				   user_pattern_size);
			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
			rte_memcpy(addr, flow_items, elt * sizeof(*item));
			addr = (void *)(((uintptr_t)addr) +
					elt * sizeof(*item));
		}
	}
	return lsize;
}
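
/*
 * Sketch of a minimal expansion graph a driver could feed to
 * rte_flow_expand_rss() (node names are hypothetical; this assumes the
 * RTE_FLOW_EXPAND_RSS_NEXT() helper from rte_flow_driver.h):
 *
 *	enum { NODE_ROOT, NODE_ETH, NODE_IPV4 };
 *
 *	static const struct rte_flow_expand_node graph[] = {
 *		[NODE_ROOT] = {
 *			.type = RTE_FLOW_ITEM_TYPE_END,
 *			.next = RTE_FLOW_EXPAND_RSS_NEXT(NODE_ETH),
 *		},
 *		[NODE_ETH] = {
 *			.type = RTE_FLOW_ITEM_TYPE_ETH,
 *			.next = RTE_FLOW_EXPAND_RSS_NEXT(NODE_IPV4),
 *		},
 *		[NODE_IPV4] = {
 *			.type = RTE_FLOW_ITEM_TYPE_IPV4,
 *			.rss_types = ETH_RSS_IPV4,
 *		},
 *	};
 *
 * Expanding an ETH/END pattern with types = ETH_RSS_IPV4 then yields the
 * user pattern itself (priority 0) plus an ETH/IPV4/END variant at a lower
 * priority.
 */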