| /dpdk/app/test-flow-perf/ |
| items_gen.c |
     22  add_ether(struct rte_flow_item *items,  in add_ether() argument
     30  items[items_counter].spec = &eth_spec;  in add_ether()
     31  items[items_counter].mask = &eth_mask;  in add_ether()
     35  add_vlan(struct rte_flow_item *items,  in add_vlan() argument
     52  add_ipv4(struct rte_flow_item *items,  in add_ipv4() argument
     69  add_ipv6(struct rte_flow_item *items,  in add_ipv6() argument
     91  add_tcp(struct rte_flow_item *items,  in add_tcp() argument
    104  add_udp(struct rte_flow_item *items,  in add_udp() argument
    171  add_gre(struct rte_flow_item *items,  in add_gre() argument
    306  struct rte_flow_item *items,  in fill_items()
    [all …]
|
| flow_gen.c |
    57  struct rte_flow_item items[MAX_ITEMS_NUM];  in generate_flow() local
    61  memset(items, 0, sizeof(items));  in generate_flow()
    72  fill_items(items, flow_items, outer_ip_src, core_idx);  in generate_flow()
    74  flow = rte_flow_create(port_id, &attr, items, actions, error);  in generate_flow()
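
The flow_gen.c hits capture the app's whole fast path: a stack-allocated rte_flow_item array is zeroed, populated by fill_items(), and handed to rte_flow_create(). Below is a minimal, self-contained sketch of that shape using a hand-built ETH/IPv4 pattern and a QUEUE action; the queue index and IPv4 address are placeholders, not values taken from test-flow-perf.

    #include <string.h>
    #include <rte_byteorder.h>
    #include <rte_ip.h>
    #include <rte_flow.h>

    /* Build an ETH/IPV4/END pattern on the stack and create a rule that
     * steers matching packets to one Rx queue. */
    static struct rte_flow *
    create_simple_flow(uint16_t port_id, uint16_t queue_idx,
                       struct rte_flow_error *error)
    {
            struct rte_flow_attr attr = { .ingress = 1 };
            struct rte_flow_item items[3];          /* END-terminated pattern */
            struct rte_flow_action actions[2];
            struct rte_flow_item_ipv4 ip_spec = {
                    .hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(10, 0, 0, 1)),
            };
            struct rte_flow_item_ipv4 ip_mask = {
                    .hdr.src_addr = rte_cpu_to_be_32(UINT32_MAX),
            };
            struct rte_flow_action_queue queue = { .index = queue_idx };

            memset(items, 0, sizeof(items));
            memset(actions, 0, sizeof(actions));

            items[0].type = RTE_FLOW_ITEM_TYPE_ETH;   /* any Ethernet frame */
            items[1].type = RTE_FLOW_ITEM_TYPE_IPV4;  /* match IPv4 source */
            items[1].spec = &ip_spec;
            items[1].mask = &ip_mask;
            items[2].type = RTE_FLOW_ITEM_TYPE_END;   /* pattern terminator */

            actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
            actions[0].conf = &queue;
            actions[1].type = RTE_FLOW_ACTION_TYPE_END;

            return rte_flow_create(port_id, &attr, items, actions, error);
    }

The pattern and action arrays only need to live for the duration of the call, which is why generate_flow() can keep them on the stack.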
|
| actions_gen.c |
    847  static struct rte_flow_item items[5];  in add_vxlan_encap() local
    858  items[0].spec = &item_eth;  in add_vxlan_encap()
    859  items[0].mask = &item_eth;  in add_vxlan_encap()
    860  items[0].type = RTE_FLOW_ITEM_TYPE_ETH;  in add_vxlan_encap()
    865  items[1].spec = &item_ipv4;  in add_vxlan_encap()
    866  items[1].mask = &item_ipv4;  in add_vxlan_encap()
    871  items[2].spec = &item_udp;  in add_vxlan_encap()
    872  items[2].mask = &item_udp;  in add_vxlan_encap()
    873  items[2].type = RTE_FLOW_ITEM_TYPE_UDP;  in add_vxlan_encap()
    877  items[3].spec = &item_vxlan;  in add_vxlan_encap()
    [all …]
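
add_vxlan_encap() builds a five-entry ETH/IPV4/UDP/VXLAN/END item chain that describes the headers to prepend. The sketch below shows the usual way such a chain is attached to a flow action, via the definition pointer of struct rte_flow_action_vxlan_encap; the header fields are deliberately left zeroed, and build_vxlan_encap_action() is an illustrative helper, not a function from actions_gen.c.

    #include <rte_flow.h>

    /* Describe the encapsulation headers as an item chain and hang it off a
     * VXLAN_ENCAP action through its ->definition pointer. */
    static void
    build_vxlan_encap_action(struct rte_flow_action *action)
    {
            static struct rte_flow_item items[5];
            static struct rte_flow_item_eth item_eth;
            static struct rte_flow_item_ipv4 item_ipv4;
            static struct rte_flow_item_udp item_udp;
            static struct rte_flow_item_vxlan item_vxlan;
            static struct rte_flow_action_vxlan_encap encap_conf;

            items[0].type = RTE_FLOW_ITEM_TYPE_ETH;
            items[0].spec = &item_eth;
            items[0].mask = &item_eth;
            items[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
            items[1].spec = &item_ipv4;
            items[1].mask = &item_ipv4;
            items[2].type = RTE_FLOW_ITEM_TYPE_UDP;
            items[2].spec = &item_udp;
            items[2].mask = &item_udp;
            items[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;
            items[3].spec = &item_vxlan;
            items[3].mask = &item_vxlan;
            items[4].type = RTE_FLOW_ITEM_TYPE_END;   /* terminates the chain */

            encap_conf.definition = items;
            action->type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
            action->conf = &encap_conf;
    }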
|
| /dpdk/lib/gro/ |
| gro_udp4.c |
     34  tbl->items = rte_zmalloc_socket(__func__,  in gro_udp4_tbl_create()
     38  if (tbl->items == NULL) {  in gro_udp4_tbl_create()
     50  rte_free(tbl->items);  in gro_udp4_tbl_create()
     68  rte_free(udp_tbl->items);  in gro_udp4_tbl_destroy()
     81  if (tbl->items[i].firstseg == NULL)  in find_an_empty_item()
    112  tbl->items[item_idx].firstseg = pkt;  in insert_new_item()
    118  tbl->items[item_idx].nb_merged = 1;  in insert_new_item()
    123  tbl->items[item_idx].next_pkt_idx =  in insert_new_item()
    124  tbl->items[prev_idx].next_pkt_idx;  in insert_new_item()
    138  tbl->items[item_idx].firstseg = NULL;  in delete_item()
    [all …]
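
All four GRO reassembly tables follow the same lifecycle for their item arrays: a NUMA-aware, zero-initialized allocation with rte_zmalloc_socket(), a NULL check, and rte_free() on the error and destroy paths, with firstseg == NULL marking a free slot. The sketch below reproduces that pattern with simplified, illustrative types; struct my_gro_item and struct my_tbl are not the library's real definitions.

    #include <errno.h>
    #include <stdint.h>
    #include <rte_common.h>
    #include <rte_malloc.h>
    #include <rte_mbuf.h>

    struct my_gro_item {
            struct rte_mbuf *firstseg;      /* NULL marks an empty slot */
            uint16_t nb_merged;
    };

    struct my_tbl {
            struct my_gro_item *items;
            uint32_t max_item_num;
    };

    static int
    my_tbl_init(struct my_tbl *tbl, uint32_t max_items, int socket_id)
    {
            /* zeroed, cache-line aligned, on the requested NUMA node */
            tbl->items = rte_zmalloc_socket(__func__,
                            sizeof(*tbl->items) * max_items,
                            RTE_CACHE_LINE_SIZE, socket_id);
            if (tbl->items == NULL)
                    return -ENOMEM;
            tbl->max_item_num = max_items;
            return 0;
    }

    static void
    my_tbl_fini(struct my_tbl *tbl)
    {
            rte_free(tbl->items);           /* rte_free(NULL) is a no-op */
            tbl->items = NULL;
    }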
|
| gro_tcp4.c |
     38  if (tbl->items == NULL) {  in gro_tcp4_tbl_create()
     50  rte_free(tbl->items);  in gro_tcp4_tbl_create()
     68  rte_free(tcp_tbl->items);  in gro_tcp4_tbl_destroy()
     81  if (tbl->items[i].firstseg == NULL)  in find_an_empty_item()
    113  tbl->items[item_idx].firstseg = pkt;  in insert_new_item()
    118  tbl->items[item_idx].ip_id = ip_id;  in insert_new_item()
    119  tbl->items[item_idx].nb_merged = 1;  in insert_new_item()
    125  tbl->items[item_idx].next_pkt_idx =  in insert_new_item()
    126  tbl->items[prev_idx].next_pkt_idx;  in insert_new_item()
    140  tbl->items[item_idx].firstseg = NULL;  in delete_item()
    [all …]
|
| gro_vxlan_udp4.c |
     35  tbl->items = rte_zmalloc_socket(__func__,  in gro_vxlan_udp4_tbl_create()
     39  if (tbl->items == NULL) {  in gro_vxlan_udp4_tbl_create()
     51  rte_free(tbl->items);  in gro_vxlan_udp4_tbl_create()
     69  rte_free(vxlan_tbl->items);  in gro_vxlan_udp4_tbl_destroy()
     81  if (tbl->items[i].inner_item.firstseg == NULL)  in find_an_empty_item()
    111  tbl->items[item_idx].inner_item.firstseg = pkt;  in insert_new_item()
    117  tbl->items[item_idx].inner_item.nb_merged = 1;  in insert_new_item()
    122  tbl->items[item_idx].inner_item.next_pkt_idx =  in insert_new_item()
    401  &(tbl->items[cur_idx]),  in gro_vxlan_udp4_reassemble()
    472  &(tbl->items[start_idx]),  in gro_vxlan_udp4_merge_items()
    [all …]
|
| gro_vxlan_tcp4.c |
     35  tbl->items = rte_zmalloc_socket(__func__,  in gro_vxlan_tcp4_tbl_create()
     39  if (tbl->items == NULL) {  in gro_vxlan_tcp4_tbl_create()
     51  rte_free(tbl->items);  in gro_vxlan_tcp4_tbl_create()
     69  rte_free(vxlan_tbl->items);  in gro_vxlan_tcp4_tbl_destroy()
     81  if (tbl->items[i].inner_item.firstseg == NULL)  in find_an_empty_item()
    114  tbl->items[item_idx].inner_item.firstseg = pkt;  in insert_new_item()
    119  tbl->items[item_idx].inner_item.ip_id = ip_id;  in insert_new_item()
    120  tbl->items[item_idx].inner_item.nb_merged = 1;  in insert_new_item()
    122  tbl->items[item_idx].outer_ip_id = outer_ip_id;  in insert_new_item()
    467  if (tbl->items[j].inner_item.start_time <=  in gro_vxlan_tcp4_tbl_timeout_flush()
    [all …]
|
| rte_gro.c |
    190  vxlan_tcp_tbl.items = vxlan_tcp_items;  in rte_gro_reassemble_burst()
    203  vxlan_udp_tbl.items = vxlan_udp_items;  in rte_gro_reassemble_burst()
    216  tcp_tbl.items = tcp_items;  in rte_gro_reassemble_burst()
    229  udp_tbl.items = udp_items;  in rte_gro_reassemble_burst()
|
| /dpdk/drivers/net/bnxt/tf_core/ |
| stack.c |
     18  stack_init(int num_entries, uint32_t *items, struct stack *st)  in stack_init() argument
     20  if (items == NULL || st == NULL)  in stack_init()
     25  st->items = items;  in stack_init()
     35  return st->items;  in stack_items()
     72  st->items[++st->top] = x;  in stack_push()
     86  *x = st->items[st->top];  in stack_pop()
    107  printf("item[%d] 0x%08x", i, st->items[i]);  in stack_dump()
    111  printf(" 0x%08x", st->items[i]);  in stack_dump()
|
| stack.h |
    18  uint32_t *items; /**< items in the stack */  member
    36  uint32_t *items,
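
stack.c/stack.h implement a fixed-capacity stack over a caller-provided uint32_t array, tracked by a top index. A self-contained sketch of that shape follows; the names and return conventions are illustrative rather than the bnxt driver's exact API.

    #include <stddef.h>
    #include <stdint.h>

    struct u32_stack {
            int top;            /* index of last pushed element, -1 when empty */
            int max;            /* capacity of the items array */
            uint32_t *items;    /* caller-owned backing storage */
    };

    static int
    u32_stack_init(struct u32_stack *st, uint32_t *items, int num_entries)
    {
            if (st == NULL || items == NULL || num_entries <= 0)
                    return -1;
            st->items = items;
            st->max = num_entries;
            st->top = -1;
            return 0;
    }

    static int
    u32_stack_push(struct u32_stack *st, uint32_t x)
    {
            if (st->top + 1 >= st->max)
                    return -1;                  /* full */
            st->items[++st->top] = x;
            return 0;
    }

    static int
    u32_stack_pop(struct u32_stack *st, uint32_t *x)
    {
            if (st->top < 0)
                    return -1;                  /* empty */
            *x = st->items[st->top--];
            return 0;
    }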
|
| /dpdk/drivers/net/mlx5/ |
| mlx5_flow_verbs.c |
    1249  if (items == NULL)  in flow_verbs_validate()
    1254  for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {  in flow_verbs_validate()
    1258  switch (items->type) {  in flow_verbs_validate()
    1268  if (items->mask != NULL && items->spec != NULL) {  in flow_verbs_validate()
    1271  items->spec)->type;  in flow_verbs_validate()
    1291  if (items->mask != NULL && items->spec != NULL) {  in flow_verbs_validate()
    1567  for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {  in flow_verbs_get_items_size()
    1568  switch (items->type) {  in flow_verbs_get_items_size()
    1773  for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {  in flow_verbs_translate()
    1776  switch (items->type) {  in flow_verbs_translate()
    [all …]
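
The verbs validate/translate paths (and the equivalent dv and hw paths further down) all walk the pattern the same way: advance until RTE_FLOW_ITEM_TYPE_END and switch on items->type, checking spec/mask per item. A stripped-down sketch of that loop; count_l3_items() is an invented example function, not part of mlx5.

    #include <stdint.h>
    #include <rte_flow.h>

    /* Walk an END-terminated pattern and count the L3 items it contains. */
    static uint64_t
    count_l3_items(const struct rte_flow_item items[])
    {
            uint64_t n = 0;

            if (items == NULL)
                    return 0;
            for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                    switch (items->type) {
                    case RTE_FLOW_ITEM_TYPE_IPV4:
                    case RTE_FLOW_ITEM_TYPE_IPV6:
                            n++;
                            break;
                    default:
                            break;
                    }
            }
            return n;
    }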
|
| mlx5_dr.c |
     85  mlx5dr_match_template_create(const struct rte_flow_item items[],  in mlx5dr_match_template_create() argument
     88  (void)items;  in mlx5dr_match_template_create()
    163  const struct rte_flow_item items[] __rte_unused,  in mlx5dr_rule_create()
|
| mlx5_flow.c |
     5367  for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {  in flow_meter_split_prep()
     5374  pid_v = items->spec;  in flow_meter_split_prep()
     5389  items,  in flow_meter_split_prep()
     6050  for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {  in flow_sample_split_prep()
     6545  &sfx_attr, items,  in flow_create_split_meter()
     6562  sfx_items : items,  in flow_create_split_meter()
     7016  items_tx.items,  in flow_list_create()
    10576  *items = &tunnel->item;  in mlx5_flow_tunnel_match()
    10598  else if (ctx->items)  in tunnel_element_release_match()
    10632  .items = pmd_items,  in mlx5_flow_tunnel_item_release()
    [all …]
|
| mlx5_flow_dv.c |
     2811  for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {  in flow_dev_get_vlan_info_from_items()
     4123  if (!items)  in flow_dv_convert_encap_data()
     4127  for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {  in flow_dv_convert_encap_data()
     6948  for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {  in flow_dv_validate()
     6982  if (items->mask != NULL && items->spec != NULL) {  in flow_dv_validate()
     7001  if (items->mask != NULL && items->spec != NULL) {  in flow_dv_validate()
    11242  !items) {  in flow_dv_hashfields_set()
    11266  !items) {  in flow_dv_hashfields_set()
    11277  !items) {  in flow_dv_hashfields_set()
    13599  for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {  in flow_dv_translate()
    [all …]
|
| mlx5_flow_hw.c |
      76  flow_hw_rss_item_flags_get(const struct rte_flow_item items[])  in flow_hw_rss_item_flags_get() argument
      81  for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {  in flow_hw_rss_item_flags_get()
      83  int item_type = items->type;  in flow_hw_rss_item_flags_get()
     496  const struct rte_flow_item *items,  in flow_hw_encap_item_translate() argument
     503  for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++, items_m++, i++) {  in flow_hw_encap_item_translate()
     504  len = flow_dv_get_item_hdr_len(items->type);  in flow_hw_encap_item_translate()
     506  memcmp(items_m->spec, items->spec, len)) &&  in flow_hw_encap_item_translate()
    1062  const struct rte_flow_item items[],  in flow_hw_async_flow_create() argument
    1109  pattern_template_index, items,  in flow_hw_async_flow_create()
    1732  const struct rte_flow_item items[],  in flow_hw_pattern_template_create() argument
    [all …]
|
| /dpdk/drivers/net/tap/ |
| tap_flow.c |
     391  .items[0] = {
     397  .items[1] = {
     408  .items[0] = {
     417  .items[1] = {
     428  .items[0] = {
     437  .items[1] = {
     448  .items[0] = {
     451  .items[1] = {
     462  .items[0] = {
    1088  for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {  in priv_flow_process()
    [all …]
|
| /dpdk/app/test-crypto-perf/ |
| dpdk-graph-crypto-perf.py |
    221  default_params = parse_parameters(test_cases['default']['eal'].items())
    223  default_params += parse_parameters(test_cases['default']['app'].items())
    230  for (test, params) in {k: v for (k, v) in test_cases.items() if
    231  k != "default"}.items():
    232  extra_params = parse_parameters(params.items())
    285  for (suite, test_cases) in {k: v for (k, v) in test_suite_ops.items()
    286  if k in test_suites}.items():
    296  for (suite, test_cases) in test_suite_ops.items():
|
| /dpdk/drivers/net/ipn3ke/ |
| ipn3ke_flow.c |
     65  const enum rte_flow_item_type *const items;  member
    625  .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_ETH),
    630  .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_VLAN,
    636  .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_MPLS,
    642  .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4,
    648  .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4,
    839  if (!items) {  in ipn3ke_flow_convert_items()
    848  filter = ipn3ke_find_filter_func(items, &idx);  in ipn3ke_flow_convert_items()
    854  items,  in ipn3ke_flow_convert_items()
    861  return filter(items, error, parser);  in ipn3ke_flow_convert_items()
    [all …]
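
ipn3ke_flow.c drives parsing from a table in which each supported pattern is an array of expected item types (built with the driver's FLOW_PATTERNS macro). The sketch below shows the same table-lookup idiom with plain arrays; pattern_entry, supported_patterns and find_pattern() are illustrative names, and unlike the driver it assumes VOID items have already been stripped from the input pattern.

    #include <rte_common.h>
    #include <rte_flow.h>

    struct pattern_entry {
            /* expected item-type sequence, END-terminated */
            const enum rte_flow_item_type *items;
    };

    static const enum rte_flow_item_type pat_eth[] = {
            RTE_FLOW_ITEM_TYPE_ETH,
            RTE_FLOW_ITEM_TYPE_END,
    };

    static const enum rte_flow_item_type pat_eth_vlan[] = {
            RTE_FLOW_ITEM_TYPE_ETH,
            RTE_FLOW_ITEM_TYPE_VLAN,
            RTE_FLOW_ITEM_TYPE_END,
    };

    static const struct pattern_entry supported_patterns[] = {
            { .items = pat_eth },
            { .items = pat_eth_vlan },
    };

    /* Return the table entry whose type sequence matches the pattern. */
    static const struct pattern_entry *
    find_pattern(const struct rte_flow_item pattern[])
    {
            unsigned int i, j;

            for (i = 0; i < RTE_DIM(supported_patterns); i++) {
                    const enum rte_flow_item_type *t = supported_patterns[i].items;

                    for (j = 0; pattern[j].type == t[j]; j++) {
                            if (t[j] == RTE_FLOW_ITEM_TYPE_END)
                                    return &supported_patterns[i];
                    }
            }
            return NULL;
    }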
|
| /dpdk/doc/guides/tools/ |
| flow-perf.rst |
    164  Add VLAN item to all flows items,
    170  Add IPv4 item to all flows items,
    186  Add VXLAN item to all flows items,
    192  Add VXLAN-GPE item to all flows items,
    198  Add GRE item to all flows items,
    204  Add GENEVE item to all flows items,
    210  Add GTP item to all flows items,
    216  Add Meta item to all flows items,
    222  Add Tag item to all flows items,
    254  Add queue action to all flows items,
    [all …]
|
| /dpdk/drivers/net/cxgbe/ |
| cxgbe_flow.c |
    660  const struct rte_flow_item items[],  in ch_rte_parse_atype_switch() argument
    723  item_index = cxgbe_get_flow_item_index(items,  in ch_rte_parse_atype_switch()
    736  item_index = cxgbe_get_flow_item_index(items,  in ch_rte_parse_atype_switch()
    749  item_index = cxgbe_get_flow_item_index(items,  in ch_rte_parse_atype_switch()
    762  item_index = cxgbe_get_flow_item_index(items,  in ch_rte_parse_atype_switch()
    775  item_index = cxgbe_get_flow_item_index(items,  in ch_rte_parse_atype_switch()
    779  cxgbe_get_flow_item_index(items,  in ch_rte_parse_atype_switch()
    793  item_index = cxgbe_get_flow_item_index(items,  in ch_rte_parse_atype_switch()
    797  cxgbe_get_flow_item_index(items,  in ch_rte_parse_atype_switch()
    857  const struct rte_flow_item items[],  in cxgbe_rtef_parse_actions() argument
    [all …]
|
| /dpdk/lib/flow_classify/ |
| rte_flow_classify.c |
     95  struct rte_flow_item *items;  in rte_flow_classify_validate() local
    142  items = malloc(item_num * sizeof(struct rte_flow_item));  in rte_flow_classify_validate()
    143  if (!items) {  in rte_flow_classify_validate()
    150  memset(items, 0, item_num * sizeof(struct rte_flow_item));  in rte_flow_classify_validate()
    151  classify_pattern_skip_void_item(items, pattern);  in rte_flow_classify_validate()
    153  parse_filter = classify_find_parse_filter_func(items);  in rte_flow_classify_validate()
    158  free(items);  in rte_flow_classify_validate()
    162  ret = parse_filter(attr, items, actions, &cls->ntuple_filter, error);  in rte_flow_classify_validate()
    163  free(items);  in rte_flow_classify_validate()
|
| rte_flow_classify_parse.c |
      9  enum rte_flow_item_type *items;  member
     79  classify_pattern_skip_void_item(struct rte_flow_item *items,  in classify_pattern_skip_void_item() argument
     97  rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);  in classify_pattern_skip_void_item()
     99  items += cpy_count;  in classify_pattern_skip_void_item()
    107  rte_memcpy(items, pe, sizeof(struct rte_flow_item));  in classify_pattern_skip_void_item()
    135  if (classify_match_pattern(classify_supported_patterns[i].items,  in classify_find_parse_filter_func()
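
classify_pattern_skip_void_item() (and the near-identical iavf_pattern_skip_void_item() further down) compact a pattern by dropping every RTE_FLOW_ITEM_TYPE_VOID entry while keeping the END terminator. The real code copies contiguous runs with rte_memcpy(); the sketch below copies item by item for clarity and assumes the destination array is at least as large as the source.

    #include <rte_flow.h>

    /* Copy 'src' into 'dst', skipping VOID items and preserving END. */
    static void
    pattern_skip_void(struct rte_flow_item *dst, const struct rte_flow_item *src)
    {
            for (; src->type != RTE_FLOW_ITEM_TYPE_END; src++) {
                    if (src->type == RTE_FLOW_ITEM_TYPE_VOID)
                            continue;
                    *dst++ = *src;
            }
            *dst = *src;    /* copy the END item */
    }

The compacted copy lets the pattern-matching tables further up ignore VOID items entirely instead of handling them in every comparison.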
|
| /dpdk/drivers/event/sw/ |
| event_ring.h |
    90  const uint32_t items = write - read;  in rob_ring_dequeue() local
    91  if (items < 1)  in rob_ring_dequeue()
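
rob_ring_dequeue() computes the ring occupancy as the unsigned difference of free-running write/read counters, which stays correct across 32-bit wraparound. A standalone sketch of that calculation; the helper names are illustrative.

    #include <stdint.h>

    /* Number of occupied slots; well-defined modulo 2^32 even after wrap. */
    static inline uint32_t
    ring_count(uint32_t write, uint32_t read)
    {
            return write - read;
    }

    static inline int
    ring_has_items(uint32_t write, uint32_t read)
    {
            return ring_count(write, read) >= 1;
    }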
|
| /dpdk/doc/guides/nics/ |
| sfc_efx.rst |
    151  Supported pattern items (***non-transfer*** rules):
    193  Supported pattern items (***transfer*** rules):
    195  - PORT_REPRESENTOR (cannot repeat; conflicts with other traffic source items)
    197  - REPRESENTED_PORT (cannot repeat; conflicts with other traffic source items)
    199  - PORT_ID (cannot repeat; conflicts with other traffic source items)
    201  - PHY_PORT (cannot repeat; conflicts with other traffic source items)
    203  - PF (cannot repeat; conflicts with other traffic source items)
    205  - VF (cannot repeat; conflicts with other traffic source items)
    290  - Filtering by IPv4 or IPv6 EtherType without pattern items of internet
    295  - Filtering by TCP or UDP IP transport protocol without pattern items of
|
| /dpdk/drivers/net/iavf/ |
| iavf_generic_flow.c |
    1972  iavf_pattern_skip_void_item(struct rte_flow_item *items,  in iavf_pattern_skip_void_item() argument
    1990  rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);  in iavf_pattern_skip_void_item()
    1992  items += cpy_count;  in iavf_pattern_skip_void_item()
    2000  rte_memcpy(items, pe, sizeof(struct rte_flow_item));  in iavf_pattern_skip_void_item()
    2029  struct rte_flow_item *items; /* used for pattern without VOID items */  in iavf_search_pattern_match_item() local
    2040  items = rte_zmalloc("iavf_pattern",  in iavf_search_pattern_match_item()
    2042  if (!items) {  in iavf_search_pattern_match_item()
    2055  iavf_pattern_skip_void_item(items, pattern);  in iavf_search_pattern_match_item()
    2059  items)) {  in iavf_search_pattern_match_item()
    2065  rte_free(items);  in iavf_search_pattern_match_item()
    [all …]
|