/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
};

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PF, 0),
	MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
	MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

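/*
 * For reference, both helper macros expand to designated initializers;
 * e.g. MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue))
 * becomes:
 *
 *	[RTE_FLOW_ACTION_TYPE_QUEUE] = {
 *		.name = "QUEUE",
 *		.size = sizeof(struct rte_flow_action_queue),
 *	}
 *
 * which lets both description tables be indexed directly by item or
 * action type.
 */
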
/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_SET_MPLS_TTL,
		       sizeof(struct rte_flow_action_of_set_mpls_ttl)),
	MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
	MK_FLOW_ACTION(OF_SET_NW_TTL,
		       sizeof(struct rte_flow_action_of_set_nw_ttl)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
	MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_nvgre_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC, sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST, sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC, sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST, sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC, sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST, sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
};

/**
 * Refine the return value of a driver callback: if the device has been
 * physically removed since the call, report EIO instead, otherwise pass
 * the original code through.
 */
static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

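/*
 * Drivers expose their rte_flow_ops through the filter_ctrl callback
 * queried below. A minimal sketch of the PMD side (the "pmd_" names are
 * illustrative, not an actual driver):
 *
 *	static const struct rte_flow_ops pmd_flow_ops = {
 *		.validate = pmd_flow_validate,
 *		.create = pmd_flow_create,
 *		...
 *	};
 *
 *	static int
 *	pmd_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
 *			enum rte_filter_type filter_type,
 *			enum rte_filter_op filter_op, void *arg)
 *	{
 *		if (filter_type != RTE_ETH_FILTER_GENERIC ||
 *		    filter_op != RTE_ETH_FILTER_GET)
 *			return -ENOTSUP;
 *		*(const struct rte_flow_ops **)arg = &pmd_flow_ops;
 *		return 0;
 *	}
 */
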
/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(!dev->dev_ops->filter_ctrl ||
			  dev->dev_ops->filter_ctrl(dev,
						    RTE_ETH_FILTER_GENERIC,
						    RTE_ETH_FILTER_GET,
						    &ops) ||
			  !ops))
		code = ENOSYS;
	else
		return ops;
	rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(code));
	return NULL;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate))
		return flow_err(port_id, ops->validate(dev, attr, pattern,
						       actions, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		flow = ops->create(dev, attr, pattern, actions, error);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);
		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy))
		return flow_err(port_id, ops->destroy(dev, flow, error),
				error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush))
		return flow_err(port_id, ops->flush(dev, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

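/*
 * Usage sketch for the control functions above: steer IPv4/UDP traffic
 * to Rx queue 1, assuming an already configured and started port
 * (port_id, the queue index and the error handling are illustrative):
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error error;
 *	struct rte_flow *flow;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &error))
 *		rte_exit(EXIT_FAILURE, "invalid rule: %s\n", error.message);
 *	flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
 *	...
 *	rte_flow_destroy(port_id, flow, &error);
 */
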
/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->query))
		return flow_err(port_id, ops->query(dev, flow, action, data,
						    error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->isolate))
		return flow_err(port_id, ops->isolate(dev, set, error), error);
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}

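/*
 * Driver callbacks typically report failures through the helper above,
 * e.g. (sketch):
 *
 *	if (attr->egress)
 *		return rte_flow_error_set(error, ENOTSUP,
 *					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
 *					  NULL, "egress is not supported");
 */
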
/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_desc_item[item->type].size;
		rte_memcpy(buf, data, (size > off ? off : size));
		break;
	}
	return off;
}

/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store action configuration regardless of
 *   @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= off + tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= off + tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_desc_action[action->type].size;
		rte_memcpy(buf, action->conf, (size > off ? off : size));
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		if ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		    !rte_flow_desc_item[src->type].name)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

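/*
 * The converted pattern produced above is laid out as the fixed-size
 * item array first, followed by each item's spec/last/mask data, all
 * aligned on sizeof(double):
 *
 *	+--------------------+ <- dst
 *	| item[0] .. item[N] |    struct rte_flow_item headers (END last)
 *	+--------------------+
 *	| item[0] spec/mask  | <- pointed to by item[0].spec/.mask
 *	| item[1] spec/mask  |
 *	| ...                |
 *	+--------------------+
 */
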
/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		if ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		    !rte_flow_desc_action[src->type].name)
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	switch (op) {
		const struct rte_flow_attr *attr;

	case RTE_FLOW_CONV_OP_NONE:
		return 0;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		return sizeof(*attr);
	case RTE_FLOW_CONV_OP_ITEM:
		return rte_flow_conv_pattern(dst, size, src, 1, error);
	case RTE_FLOW_CONV_OP_ACTION:
		return rte_flow_conv_actions(dst, size, src, 1, error);
	case RTE_FLOW_CONV_OP_PATTERN:
		return rte_flow_conv_pattern(dst, size, src, 0, error);
	case RTE_FLOW_CONV_OP_ACTIONS:
		return rte_flow_conv_actions(dst, size, src, 0, error);
	case RTE_FLOW_CONV_OP_RULE:
		return rte_flow_conv_rule(dst, size, src, error);
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		return rte_flow_conv_name(0, 0, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		return rte_flow_conv_name(1, 0, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		return rte_flow_conv_name(0, 1, dst, size, src, error);
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		return rte_flow_conv_name(1, 1, dst, size, src, error);
	}
	return rte_flow_error_set
		(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
		 "unknown object conversion operation");
}

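/*
 * Callers usually invoke rte_flow_conv() twice: once with a zero-sized
 * buffer to learn the required storage, then again to perform the
 * conversion (sketch, error handling abbreviated):
 *
 *	int ret = rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, NULL, 0,
 *				pattern, &error);
 *	void *copy = ret < 0 ? NULL : malloc(ret);
 *
 *	if (copy)
 *		ret = rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, copy, ret,
 *				    pattern, &error);
 */
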
/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in
	 * order to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);
	return ret;
}

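/*
 * As with rte_flow_conv(), rte_flow_copy() may be called with len == 0
 * (and desc == NULL) to compute the required storage first (sketch):
 *
 *	size_t len = rte_flow_copy(NULL, 0, &attr, items, actions);
 *	struct rte_flow_desc *desc = len ? malloc(len) : NULL;
 *
 *	if (desc)
 *		rte_flow_copy(desc, len, &attr, items, actions);
 */
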
/**
 * Expand RSS flows into several possible flows according to the RSS hash
 * fields requested and the driver capabilities.
 *
 * @return
 *   A positive value representing the number of bytes required to store
 *   the expansion buffer regardless of @p size on success, a negative
 *   errno value otherwise and rte_errno is set.
 */
int __rte_experimental
rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
		    const struct rte_flow_item *pattern, uint64_t types,
		    const struct rte_flow_expand_node graph[],
		    int graph_root_index)
{
	const int elt_n = 8;
	const struct rte_flow_item *item;
	const struct rte_flow_expand_node *node = &graph[graph_root_index];
	const int *next_node;
	const int *stack[elt_n];
	int stack_pos = 0;
	/* One extra slot for the terminating END item. */
	struct rte_flow_item flow_items[elt_n + 1];
	unsigned int i;
	size_t lsize;
	size_t user_pattern_size = 0;
	void *addr = NULL;

	lsize = offsetof(struct rte_flow_expand_rss, entry) +
		elt_n * sizeof(buf->entry[0]);
	if (lsize <= size) {
		buf->entry[0].priority = 0;
		buf->entry[0].pattern = (void *)&buf->entry[elt_n];
		buf->entries = 0;
		addr = buf->entry[0].pattern;
	}
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		const struct rte_flow_expand_node *next = NULL;

		for (i = 0; node->next && node->next[i]; ++i) {
			next = &graph[node->next[i]];
			if (next->type == item->type)
				break;
		}
		if (next)
			node = next;
		user_pattern_size += sizeof(*item);
	}
	user_pattern_size += sizeof(*item); /* Handle END item. */
	lsize += user_pattern_size;
	/* Copy the user pattern in the first entry of the buffer. */
	if (lsize <= size) {
		rte_memcpy(addr, pattern, user_pattern_size);
		addr = (void *)(((uintptr_t)addr) + user_pattern_size);
		buf->entries = 1;
	}
	/* Start expanding. */
	memset(flow_items, 0, sizeof(flow_items));
	user_pattern_size -= sizeof(*item);
	next_node = node->next;
	stack[stack_pos] = next_node;
	node = next_node ? &graph[*next_node] : NULL;
	while (node) {
		flow_items[stack_pos].type = node->type;
		if (node->rss_types & types) {
			/*
			 * Compute the number of items to copy from the
			 * expansion and copy them. When stack_pos is 0,
			 * there is one element in it, plus the additional
			 * END item.
			 */
			int elt = stack_pos + 2;

			flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
			lsize += elt * sizeof(*item) + user_pattern_size;
			if (lsize <= size) {
				size_t n = elt * sizeof(*item);

				buf->entry[buf->entries].priority =
					stack_pos + 1;
				buf->entry[buf->entries].pattern = addr;
				buf->entries++;
				rte_memcpy(addr, buf->entry[0].pattern,
					   user_pattern_size);
				addr = (void *)(((uintptr_t)addr) +
						user_pattern_size);
				rte_memcpy(addr, flow_items, n);
				addr = (void *)(((uintptr_t)addr) + n);
			}
		}
		/* Go deeper. */
		if (node->next) {
			next_node = node->next;
			/* Reject graphs deeper than the stack can hold. */
			if (stack_pos++ == elt_n - 1) {
				rte_errno = E2BIG;
				return -rte_errno;
			}
			stack[stack_pos] = next_node;
		} else if (*(next_node + 1)) {
			/* Follow up with the next possibility. */
			++next_node;
		} else {
			/* Move to the next path. */
			if (stack_pos)
				next_node = stack[--stack_pos];
			next_node++;
			stack[stack_pos] = next_node;
		}
		node = *next_node ? &graph[*next_node] : NULL;
	}
	return lsize;
}

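/*
 * Example: with a graph where ETH leads to IPV4 and IPV4 leads to UDP
 * and TCP, a user pattern of ETH / IPV4 / END whose UDP and TCP nodes
 * carry rss_types matching @p types expands into:
 *
 *	entry[0]: ETH / IPV4 / END       (user pattern, priority 0)
 *	entry[1]: ETH / IPV4 / UDP / END (priority 1)
 *	entry[2]: ETH / IPV4 / TCP / END (priority 1)
 *
 * where priority reflects the expansion depth below the user pattern.
 */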