| /dpdk/lib/acl/ |
| H A D | acl_run.h |
    134  if (flows->num_packets < flows->total_packets) {  in acl_start_next_trie()
    135  parms[n].data = flows->data[flows->num_packets];  in acl_start_next_trie()
    140  flows->last_cmplt = alloc_completion(flows->cmplt_array,  in acl_start_next_trie()
    142  flows->results +  in acl_start_next_trie()
    143  flows->num_packets * flows->categories);  in acl_start_next_trie()
    156  flows->trie++;  in acl_start_next_trie()
    158  flows->trie = 0;  in acl_start_next_trie()
    163  flows->started++;  in acl_start_next_trie()
    180  flows->started = 0;  in acl_set_flow()
    181  flows->trie = 0;  in acl_set_flow()
    [all …]
|
| H A D | acl_run_neon.h |
    82   struct acl_flow_data *flows, uint64_t transitions[])  in acl_match_check_x4() argument
    86   parms, flows, resolve_priority_neon);  in acl_match_check_x4()
    88   parms, flows, resolve_priority_neon);  in acl_match_check_x4()
    90   parms, flows, resolve_priority_neon);  in acl_match_check_x4()
    92   parms, flows, resolve_priority_neon);  in acl_match_check_x4()
    164  struct acl_flow_data flows;  in search_neon_8() local
    182  while (flows.started > 0) {  in search_neon_8()
    226  struct acl_flow_data flows;  in search_neon_4() local
    243  while (flows.started > 0) {  in search_neon_4()
    251  input = transition4(input, flows.trans, index_array);  in search_neon_4()
    [all …]
|
| H A D | acl_run_sse.h |
    112  parms, flows, resolve_priority_sse);  in acl_process_matches()
    114  parms, flows, resolve_priority_sse);  in acl_process_matches()
    198  struct acl_flow_data flows;  in search_sse_8() local
    227  acl_match_check_x4(0, ctx, parms, &flows,  in search_sse_8()
    229  acl_match_check_x4(4, ctx, parms, &flows,  in search_sse_8()
    232  while (flows.started > 0) {  in search_sse_8()
    249  input0 = transition4(input0, flows.trans,  in search_sse_8()
    251  input1 = transition4(input1, flows.trans,  in search_sse_8()
    287  struct acl_flow_data flows;  in search_sse_4() local
    305  acl_match_check_x4(0, ctx, parms, &flows,  in search_sse_4()
    [all …]
|
| H A D | acl_run_altivec.h |
    92   parms, flows, resolve_priority_altivec);  in acl_match_check_x4()
    94   parms, flows, resolve_priority_altivec);  in acl_match_check_x4()
    96   parms, flows, resolve_priority_altivec);  in acl_match_check_x4()
    98   parms, flows, resolve_priority_altivec);  in acl_match_check_x4()
    191  struct acl_flow_data flows;  in search_altivec_8() local
    209  while (flows.started > 0) {  in search_altivec_8()
    262  struct acl_flow_data flows;  in search_altivec_4() local
    279  while (flows.started > 0) {  in search_altivec_4()
    288  input = transition4(input, flows.trans,  in search_altivec_4()
    290  input = transition4(input, flows.trans,  in search_altivec_4()
    [all …]
|
| H A D | acl_run_avx2.h |
    116  ctx, parms, flows, resolve_priority_sse);  in acl_process_matches_avx2x8()
    118  ctx, parms, flows, resolve_priority_sse);  in acl_process_matches_avx2x8()
    135  struct acl_flow_data *flows, uint32_t slot,  in acl_match_check_avx2x8() argument
    164  struct acl_flow_data flows;  in search_avx2x16() local
    199  while (flows.started > 0) {  in search_avx2x16()
    227  input[0] = transition8(input[0], flows.trans,  in search_avx2x16()
    229  input[1] = transition8(input[1], flows.trans,  in search_avx2x16()
    232  input[0] = transition8(input[0], flows.trans,  in search_avx2x16()
    234  input[1] = transition8(input[1], flows.trans,  in search_avx2x16()
    237  input[0] = transition8(input[0], flows.trans,  in search_avx2x16()
    [all …]
|
| H A D | acl_run_scalar.c |
    116  struct acl_flow_data flows;  in rte_acl_classify_scalar() local
    121  acl_set_flow(&flows, cmplt, RTE_DIM(cmplt), data, results, num,  in rte_acl_classify_scalar()
    126  index_array[n] = acl_start_next_trie(&flows, parms, n, ctx);  in rte_acl_classify_scalar()
    134  0, ctx, parms, &flows, resolve_priority_scalar);  in rte_acl_classify_scalar()
    136  1, ctx, parms, &flows, resolve_priority_scalar);  in rte_acl_classify_scalar()
    139  while (flows.started > 0) {  in rte_acl_classify_scalar()
    146  transition0 = scalar_transition(flows.trans,  in rte_acl_classify_scalar()
    150  transition1 = scalar_transition(flows.trans,  in rte_acl_classify_scalar()
    157  0, ctx, parms, &flows, resolve_priority_scalar);  in rte_acl_classify_scalar()
    159  1, ctx, parms, &flows, resolve_priority_scalar);  in rte_acl_classify_scalar()
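
Note: every acl_run_* variant listed above drives the same per-flow bookkeeping structure (struct acl_flow_data) on behalf of the public classify API. For orientation, a minimal caller-side sketch follows; the function name classify_burst and the NUM_PKTS/CATEGORIES constants are illustrative, not taken from the files above.

    #include <stdio.h>
    #include <rte_acl.h>

    #define NUM_PKTS   8
    #define CATEGORIES 1

    /* Classify a small burst against an already-built ACL context; each
     * data[i] points at the search keys extracted from one packet. */
    static void
    classify_burst(struct rte_acl_ctx *ctx, const uint8_t *data[NUM_PKTS])
    {
        uint32_t results[NUM_PKTS * CATEGORIES];

        /* Walks every input over the tries; results[i] receives the
         * userdata of the highest-priority matching rule, 0 if none. */
        if (rte_acl_classify(ctx, data, results, NUM_PKTS, CATEGORIES) == 0) {
            for (unsigned int i = 0; i < NUM_PKTS; i++)
                printf("pkt %u -> rule userdata %u\n", i, results[i]);
        }
    }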
|
| /dpdk/doc/guides/tools/ |
| H A D | flow-perf.rst |
    164  Add VLAN item to all flows items,
    170  Add IPv4 item to all flows items,
    175  Add IPv6 item to all flows item,
    186  Add VXLAN item to all flows items,
    198  Add GRE item to all flows items,
    204  Add GENEVE item to all flows items,
    210  Add GTP item to all flows items,
    216  Add Meta item to all flows items,
    222  Add Tag item to all flows items,
    249  Add RSS action to all flows actions,
    [all …]
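
Note: each switch above appends one more rte_flow item or action to the rule template that flow-perf instantiates per flow. A hedged sketch of what such a generated rule reduces to at the rte_flow level; a QUEUE action stands in for the RSS action named above purely to keep the example small, and the port/queue numbers are placeholders.

    #include <rte_flow.h>

    /* One ETH / VLAN / IPV4 pattern plus a QUEUE action, roughly what the
     * VLAN and IPv4 item switches above ask flow-perf to generate for
     * every flow it creates. */
    static struct rte_flow *
    create_one_flow(uint16_t port_id, struct rte_flow_error *err)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_VLAN },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err);
    }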
|
| /dpdk/lib/gro/ |
| H A D | gro_udp4.c |
    45   tbl->flows = rte_zmalloc_socket(__func__,  in gro_udp4_tbl_create()
    49   if (tbl->flows == NULL) {  in gro_udp4_tbl_create()
    56   tbl->flows[i].start_index = INVALID_ARRAY_INDEX;  in gro_udp4_tbl_create()
    69   rte_free(udp_tbl->flows);  in gro_udp4_tbl_destroy()
    158  dst = &(tbl->flows[flow_idx].key);  in insert_new_flow()
    166  tbl->flows[flow_idx].start_index = item_idx;  in insert_new_flow()
    286  cur_idx = tbl->flows[i].start_index;  in gro_udp4_reassemble()
    319  if (cur_idx == tbl->flows[i].start_index) {  in gro_udp4_reassemble()
    327  tbl->flows[i].start_index = item_idx;  in gro_udp4_reassemble()
    391  j = tbl->flows[i].start_index;  in gro_udp4_tbl_timeout_flush()
    [all …]
|
| H A D | gro_tcp4.c |
    45   tbl->flows = rte_zmalloc_socket(__func__,  in gro_tcp4_tbl_create()
    49   if (tbl->flows == NULL) {  in gro_tcp4_tbl_create()
    56   tbl->flows[i].start_index = INVALID_ARRAY_INDEX;  in gro_tcp4_tbl_create()
    69   rte_free(tcp_tbl->flows);  in gro_tcp4_tbl_destroy()
    93   if (tbl->flows[i].start_index == INVALID_ARRAY_INDEX)  in find_an_empty_flow()
    160  dst = &(tbl->flows[flow_idx].key);  in insert_new_flow()
    170  tbl->flows[flow_idx].start_index = item_idx;  in insert_new_flow()
    259  if (is_same_tcp4_flow(tbl->flows[i].key, key)) {  in gro_tcp4_reassemble()
    293  cur_idx = tbl->flows[i].start_index;  in gro_tcp4_reassemble()
    340  j = tbl->flows[i].start_index;  in gro_tcp4_tbl_timeout_flush()
    [all …]
|
| H A D | gro_vxlan_udp4.c |
    46   tbl->flows = rte_zmalloc_socket(__func__,  in gro_vxlan_udp4_tbl_create()
    50   if (tbl->flows == NULL) {  in gro_vxlan_udp4_tbl_create()
    57   tbl->flows[i].start_index = INVALID_ARRAY_INDEX;  in gro_vxlan_udp4_tbl_create()
    70   rte_free(vxlan_tbl->flows);  in gro_vxlan_udp4_tbl_destroy()
    158  dst = &(tbl->flows[flow_idx].key);  in insert_new_flow()
    176  tbl->flows[flow_idx].start_index = item_idx;  in insert_new_flow()
    394  cur_idx = tbl->flows[i].start_index;  in gro_vxlan_udp4_reassemble()
    429  if (cur_idx == tbl->flows[i].start_index) {  in gro_vxlan_udp4_reassemble()
    437  tbl->flows[i].start_index = item_idx;  in gro_vxlan_udp4_reassemble()
    502  j = tbl->flows[i].start_index;  in gro_vxlan_udp4_tbl_timeout_flush()
    [all …]
|
| H A D | gro_vxlan_tcp4.c |
    46   tbl->flows = rte_zmalloc_socket(__func__,  in gro_vxlan_tcp4_tbl_create()
    50   if (tbl->flows == NULL) {  in gro_vxlan_tcp4_tbl_create()
    57   tbl->flows[i].start_index = INVALID_ARRAY_INDEX;  in gro_vxlan_tcp4_tbl_create()
    70   rte_free(vxlan_tbl->flows);  in gro_vxlan_tcp4_tbl_destroy()
    92   if (tbl->flows[i].start_index == INVALID_ARRAY_INDEX)  in find_an_empty_flow()
    164  dst = &(tbl->flows[flow_idx].key);  in insert_new_flow()
    185  tbl->flows[flow_idx].start_index = item_idx;  in insert_new_flow()
    382  if (tbl->flows[i].start_index != INVALID_ARRAY_INDEX) {  in gro_vxlan_tcp4_reassemble()
    414  cur_idx = tbl->flows[i].start_index;  in gro_vxlan_tcp4_reassemble()
    465  j = tbl->flows[i].start_index;  in gro_vxlan_tcp4_tbl_timeout_flush()
    [all …]
|
| H A D | rte_gro.c |
    189  vxlan_tcp_tbl.flows = vxlan_tcp_flows;  in rte_gro_reassemble_burst()
    202  vxlan_udp_tbl.flows = vxlan_udp_flows;  in rte_gro_reassemble_burst()
    215  tcp_tbl.flows = tcp_flows;  in rte_gro_reassemble_burst()
    228  udp_tbl.flows = udp_flows;  in rte_gro_reassemble_burst()
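
Note: the matches above are the lightweight GRO path, where rte_gro_reassemble_burst() builds its per-type tables (and their flows arrays) on the stack for a single burst. A minimal caller-side sketch, with table-sizing values chosen purely for illustration:

    #include <rte_mbuf.h>
    #include <rte_gro.h>

    /* Merge one received burst in place; returns how many mbufs remain
     * in pkts[] after TCP/IPv4 and UDP/IPv4 GRO. */
    static uint16_t
    gro_burst(struct rte_mbuf **pkts, uint16_t nb_pkts)
    {
        struct rte_gro_param param = {
            .gro_types = RTE_GRO_TCP_IPV4 | RTE_GRO_UDP_IPV4,
            .max_flow_num = 64,       /* max flows kept in a GRO table */
            .max_item_per_flow = 32,  /* max packets queued per flow */
        };

        return rte_gro_reassemble_burst(pkts, nb_pkts, &param);
    }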
|
| H A D | gro_vxlan_udp4.h |
    55   struct gro_vxlan_udp4_flow *flows;  member
|
| H A D | gro_vxlan_tcp4.h |
    54   struct gro_vxlan_tcp4_flow *flows;  member
|
| /dpdk/drivers/net/failsafe/ |
| H A D | failsafe_flow.c |
    105  flow->flows[i] = rte_flow_create(PORT_ID(sdev),  in fs_flow_create()
    107  if (flow->flows[i] == NULL && fs_err(sdev, -rte_errno)) {  in fs_flow_create()
    118  if (flow->flows[i] != NULL)  in fs_flow_create()
    120  flow->flows[i], error);  in fs_flow_create()
    145  if (flow->flows[i] == NULL)  in fs_flow_destroy()
    148  flow->flows[i], error);  in fs_flow_destroy()
    204  flow->flows[SUB_ID(sdev)],  in fs_flow_query()
|
| /dpdk/drivers/net/bonding/ |
| H A D | rte_eth_bond_flow.c |
    101  flow->flows[i] = rte_flow_create(internals->slaves[i].port_id,  in bond_flow_create()
    103  if (unlikely(flow->flows[i] == NULL)) {  in bond_flow_create()
    114  if (flow->flows[i] != NULL)  in bond_flow_create()
    116  flow->flows[i], err);  in bond_flow_create()
    133  if (unlikely(flow->flows[i] == NULL))  in bond_flow_destroy()
    136  flow->flows[i], err);  in bond_flow_destroy()
    186  flow->flows[i], action,  in bond_flow_query_count()
|
| H A D | rte_eth_bond_api.c |
    261  flow->flows[slave_id] = rte_flow_create(slave_port_id,  in slave_rte_flow_prepare()
    266  if (flow->flows[slave_id] == NULL) {  in slave_rte_flow_prepare()
    273  if (flow->flows[slave_id] != NULL) {  in slave_rte_flow_prepare()
    275  flow->flows[slave_id],  in slave_rte_flow_prepare()
    277  flow->flows[slave_id] = NULL;  in slave_rte_flow_prepare()
    700  if (flow->flows[slave_idx] != NULL) {  in __eth_bond_slave_remove_lock_free()
    701  rte_flow_destroy(slave_port_id, flow->flows[slave_idx],  in __eth_bond_slave_remove_lock_free()
    703  flow->flows[slave_idx] = NULL;  in __eth_bond_slave_remove_lock_free()
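
Note: the failsafe and bonding drivers above share one idea: a single rte_flow rule is replicated onto every sub-device/member port, the per-port handles are kept in flow->flows[], and a failed creation rolls back the ports already programmed. A hedged, driver-agnostic sketch of that pattern (names are illustrative):

    #include <rte_errno.h>
    #include <rte_flow.h>

    /* Create the same rule on every member port, keep one handle per port,
     * and undo the partial work if any port rejects it. */
    static int
    replicate_rule(const uint16_t *ports, unsigned int nb_ports,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item *pattern,
                   const struct rte_flow_action *actions,
                   struct rte_flow **handles, struct rte_flow_error *err)
    {
        unsigned int i;

        for (i = 0; i < nb_ports; i++) {
            handles[i] = rte_flow_create(ports[i], attr, pattern,
                                         actions, err);
            if (handles[i] == NULL)
                goto rollback;
        }
        return 0;

    rollback:
        /* Destroy only the rules that were actually created. */
        while (i-- > 0) {
            rte_flow_destroy(ports[i], handles[i], err);
            handles[i] = NULL;
        }
        return -rte_errno;
    }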
|
| /dpdk/examples/server_node_efd/server/ |
| H A D | args.c |
    101  parse_num_flows(const char *flows)  in parse_num_flows() argument
    106  num_flows = strtoul(flows, &end, 16);  in parse_num_flows()
    107  if ((flows[0] == '\0') || (end == NULL) || (*end != '\0'))  in parse_num_flows()
|
| /dpdk/doc/guides/sample_app_ug/ |
| H A D | server_node_efd.rst |
    30   using the EFD library to create a load-balancing table for flows,
    33   individually load-balance millions of flows (number of targets * maximum number
    34   of flows fit in a flow table per target) while still fitting in CPU cache.
    71   hash table that stores the key default size 1M flows) which is populated with
    76   lookup. If a match occurs then statistics are updated for flows serviced by
    102  * ``-f NUM_FLOWS:`` Number of flows to be added in the EFD table (1 million, by default)
    120  application has to be in sync with the traffic flows configured on the traffic
    123  For examples of application command lines and traffic generator flows, please
    136  which is used to distribute packets to nodes, which the number of flows
    176  Then, the hash table that contains the flows that will be handled
|
| /dpdk/lib/distributor/ |
| H A D | rte_distributor.c |
    449  uint16_t flows[RTE_DIST_BURST_SIZE] __rte_cache_aligned;  in rte_distributor_process() local
    490  flows[i] = mbufs[next_idx + i]->hash.usr | 1;  in rte_distributor_process()
    492  flows[i] = 0;  in rte_distributor_process()
    495  flows[i] = 0;  in rte_distributor_process()
    506  find_match_vec(d, &flows[0],  in rte_distributor_process()
    510  find_match_scalar(d, &flows[0],  in rte_distributor_process()
    590  if (flows[w] == new_tag)  in rte_distributor_process()
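
Note: line 490 above is where the distributor turns mbuf->hash.usr into its per-packet flow tag (OR-ing in 1 so a valid tag is never zero), and packets with equal tags stay on the same worker. A sketch of the producer side; flow_id_of() is a hypothetical application helper, not a DPDK call:

    #include <rte_mbuf.h>
    #include <rte_distributor.h>

    /* Hypothetical helper: derive a 32-bit flow id from the packet
     * (5-tuple, RSS hash, ...); how it is computed is up to the app. */
    static uint32_t flow_id_of(const struct rte_mbuf *m);

    static int
    distribute_burst(struct rte_distributor *d, struct rte_mbuf **mbufs,
                     unsigned int n)
    {
        for (unsigned int i = 0; i < n; i++)
            mbufs[i]->hash.usr = flow_id_of(mbufs[i]);

        /* Packets carrying equal tags are serialized onto one worker. */
        return rte_distributor_process(d, mbufs, n);
    }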
|
| /dpdk/drivers/net/mlx4/ |
| H A D | mlx4_flow.c |
    1155  struct rte_flow *curr = LIST_FIRST(&priv->flows);  in mlx4_flow_create()
    1159  LIST_INSERT_HEAD(&priv->flows, flow, next);  in mlx4_flow_create()
    1235  struct rte_flow *flow = LIST_FIRST(&priv->flows);  in mlx4_flow_flush()
    1406  for (flow = LIST_FIRST(&priv->flows);  in mlx4_flow_internal()
    1462  for (flow = LIST_FIRST(&priv->flows);  in mlx4_flow_internal()
    1504  flow = LIST_FIRST(&priv->flows);  in mlx4_flow_internal()
    1545  for (flow = LIST_FIRST(&priv->flows);  in mlx4_flow_sync()
    1547  flow = LIST_FIRST(&priv->flows))  in mlx4_flow_sync()
    1557  LIST_FOREACH(flow, &priv->flows, next) {  in mlx4_flow_sync()
    1581  while ((flow = LIST_FIRST(&priv->flows)))  in mlx4_flow_clean()
|
| /dpdk/doc/guides/eventdevs/ |
| H A D | dpaa.rst |
    22   - Parallel flows
    23   - Atomic flows
|
| H A D | dpaa2.rst |
    23   - Parallel flows
    24   - Atomic flows
|
| /dpdk/doc/guides/prog_guide/ |
| H A D | traffic_metering_and_policing.rst |
    33   flow or potentially shared by several flows has to be specified at its
    37   of the Ethernet device by linking it to one or several flows through the
    39   for the same flow. An MTR object can only be destroyed if there are no flows
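
Note: the excerpt above says an MTR object is linked to one or several flows through the rte_flow API. A hedged sketch of that linkage, assuming an MTR object with id MTR_ID was created beforehand with rte_mtr_create() and the caller supplies the match pattern; the QUEUE fate action and all ids are placeholders:

    #include <rte_flow.h>

    #define MTR_ID 1  /* assumed: created earlier with rte_mtr_create() */

    /* Attach the meter to a flow rule: every packet matching 'pattern' on
     * this port is metered by MTR object MTR_ID, then sent to queue 0. */
    static struct rte_flow *
    attach_meter(uint16_t port_id, const struct rte_flow_item *pattern,
                 struct rte_flow_error *err)
    {
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_action_meter meter = { .mtr_id = MTR_ID };
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &meter },
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err);
    }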
|
| H A D | efd_lib.rst |
    19   flows to signatures in flow tables to send incoming packets to their
    169  balancer, where flows are received at a front end server before being forwarded
    171  deterministically co-locate flows together in order to minimize cross-server
    173  (For example, flows requesting certain webpage objects are co-located
    185  keys are not stored), it sustains a large number of flows (N*X, where N
    186  is the maximum number of flows served by each back end server of the X
    197  rather distributing the flows to a target back end node based on the
    200  the flows served at each node is used and is
    202  flows.
    211  given maximum number of flows, a function is called to insert a flow key
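
Note: the load-balancer use case above maps a flow key to a back-end node id without storing the keys themselves. A hedged sketch of that mapping with the EFD API; the 5-tuple key layout, table name and sizes are illustrative:

    #include <rte_efd.h>

    struct flow_key {                 /* illustrative 5-tuple */
        uint32_t ip_src, ip_dst;
        uint16_t port_src, port_dst;
        uint8_t  proto;
    };

    /* One EFD table sized for 1M flows, online lookups on this socket. */
    static struct rte_efd_table *
    make_flow_table(unsigned int socket_id)
    {
        return rte_efd_create("flow_table", 1 << 20,
                              sizeof(struct flow_key),
                              1 << socket_id, socket_id);
    }

    /* Pin a flow to a back-end node, then resolve it on the fast path. */
    static efd_value_t
    pin_and_lookup(struct rte_efd_table *t, unsigned int socket_id,
                   const struct flow_key *key, efd_value_t node_id)
    {
        rte_efd_update(t, socket_id, key, node_id);
        return rte_efd_lookup(t, socket_id, key);
    }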
|