| /dpdk/drivers/common/dpaax/ |
| dpaax_iova_table.c |
|   63 | struct reg_node *nodes = NULL;    | read_memory_node() local |
|  128 | if (!nodes) {                     | read_memory_node() |
|  144 | nodes[j].addr, nodes[j].len);     | read_memory_node() |
|  151 | return nodes;                     | read_memory_node() |
|  160 | struct reg_node *nodes;           | dpaax_iova_table_populate() local |
|  177 | if (nodes == NULL) {              | dpaax_iova_table_populate() |
|  185 | tot_memory_size += nodes[i].len;  | dpaax_iova_table_populate() |
|  202 | free(nodes);                      | dpaax_iova_table_populate() |
|  228 | entry[i].start = nodes[i].addr;   | dpaax_iova_table_populate() |
|  229 | entry[i].len = nodes[i].len;      | dpaax_iova_table_populate() |
| [all …] |
|
| /dpdk/doc/guides/prog_guide/ |
| graph_lib.rst |
|   22 | - Support for out of tree nodes. |
|   23 | - Inbuilt nodes for packet processing. |
|  122 | Source nodes are static nodes created using ``RTE_NODE_REGISTER`` by passing |
|  125 | nodes will be called first. So that these nodes can be used as input nodes for a graph. |
|  141 | Topology after linking the nodes |
|  166 | This method enables the use case of Rx and Tx nodes where multiple of those nodes |
|  170 | ethdev nodes. |
|  178 | nodes or graph is not allowed. |
|  259 | Broadly speaking, there are two different types of nodes. |
|  261 | Static nodes |
| [all …] |
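The line-122 hit is the central registration macro of the graph library. Below is a minimal sketch of a static source node built on it; the node name and the no-op process callback are hypothetical, while ``RTE_NODE_REGISTER``, ``struct rte_node_register`` and ``RTE_NODE_SOURCE_F`` come from ``rte_graph.h``:

    #include <rte_common.h>
    #include <rte_graph.h>

    /* Hypothetical process callback: a source node generates its own
     * stream, so the graph walker calls it with no input objects. */
    static uint16_t
    my_src_process(struct rte_graph *graph, struct rte_node *node,
                   void **objs, uint16_t nb_objs)
    {
            RTE_SET_USED(graph);
            RTE_SET_USED(node);
            RTE_SET_USED(objs);
            RTE_SET_USED(nb_objs);
            return 0; /* number of objects processed */
    }

    static struct rte_node_register my_src_node = {
            .name = "my_src",             /* hypothetical node name */
            .process = my_src_process,
            .flags = RTE_NODE_SOURCE_F,   /* source nodes are walked first */
            .nb_edges = 1,
            .next_nodes = { "pkt_drop" }, /* inbuilt drop node as next hop */
    };
    RTE_NODE_REGISTER(my_src_node);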
|
| traffic_management.rst |
|   22 | shared (by multiple nodes) shapers |
|   24 | drop, WRED, private (per node) and shared (by multiple nodes) WRED contexts |
|   41 | parameters such as maximum number of nodes, maximum number of hierarchical |
|   79 | limiters) for the hierarchy nodes, subject to the specific implementation |
|   86 | nodes. |
|  127 | management for a group of leaf nodes. |
|  164 | The TM hierarchical tree consists of leaf nodes and non-leaf nodes. Each leaf |
|  169 | is reserved for leaf nodes. |
|  176 | The children nodes with different priorities are scheduled using the SP |
|  190 | node and all the nodes that are subsequently added have to be added as |
| [all …] |
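Every node in the hierarchy these hits describe is created through ``rte_tm_node_add()``. A hedged sketch of adding the root non-leaf node; the port id and node id 0 are placeholders, and a real caller would first query capabilities and then add subport/pipe/queue levels beneath it:

    #include <rte_tm.h>

    static int
    add_root_node(uint16_t port_id)
    {
            struct rte_tm_node_params np = {
                    .shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
                    .nonleaf = { .n_sp_priorities = 1 },
            };
            struct rte_tm_error err;

            /* The root has no parent, hence RTE_TM_NODE_ID_NULL. */
            return rte_tm_node_add(port_id, 0 /* node id */,
                                   RTE_TM_NODE_ID_NULL, 0 /* priority */,
                                   1 /* weight */, RTE_TM_NODE_LEVEL_ID_ANY,
                                   &np, &err);
    }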
|
| rib_lib.rst |
|   38 | The binary tree consists of two types of nodes: |
|   50 | * The maximum number of nodes. |
|  104 | This returns 3 ``rte_rib_node`` nodes pointing to ``10.0.0.0/29``, ``10.0.0.160/27`` |
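The nodes mentioned in line 104 are produced by walking the tree with ``rte_rib_get_nxt()``. A minimal sketch, with table name and size assumed:

    #include <rte_ip.h>
    #include <rte_memory.h>
    #include <rte_rib.h>

    static void
    rib_walk_example(void)
    {
            struct rte_rib_conf conf = { .ext_sz = 0, .max_nodes = 1024 };
            struct rte_rib *rib = rte_rib_create("example", SOCKET_ID_ANY, &conf);
            struct rte_rib_node *node = NULL;
            uint32_t ip;
            uint8_t depth;

            if (rib == NULL)
                    return;
            rte_rib_insert(rib, RTE_IPV4(10, 0, 0, 0), 29);
            rte_rib_insert(rib, RTE_IPV4(10, 0, 0, 160), 27);

            /* Enumerate every node covered by 10.0.0.0/24. */
            while ((node = rte_rib_get_nxt(rib, RTE_IPV4(10, 0, 0, 0), 24,
                                           node, RTE_RIB_GET_NXT_ALL)) != NULL) {
                    rte_rib_get_ip(node, &ip);
                    rte_rib_get_depth(node, &depth);
            }
            rte_rib_free(rib);
    }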
|
| /dpdk/drivers/net/ipn3ke/ |
| ipn3ke_tm.c |
|  112 | if (!nodes)                | ipn3ke_hw_tm_init() |
|  121 | rte_free(nodes);           | ipn3ke_hw_tm_init() |
|  125 | hw->nodes = nodes;         | ipn3ke_hw_tm_init() |
|  126 | hw->port_nodes = nodes;    | ipn3ke_hw_tm_init() |
|  134 | i++, nodes++) {            | ipn3ke_hw_tm_init() |
|  135 | nodes->node_index = i;     | ipn3ke_hw_tm_init() |
|  141 | nodes->weight = 0;         | ipn3ke_hw_tm_init() |
|  151 | i++, nodes++) {            | ipn3ke_hw_tm_init() |
|  158 | nodes->weight = 0;         | ipn3ke_hw_tm_init() |
|  168 | i++, nodes++) {            | ipn3ke_hw_tm_init() |
| [all …] |
|
| /dpdk/drivers/net/dpaa2/ |
| dpaa2_tm.c |
|   27 | LIST_INIT(&priv->nodes);                        | dpaa2_tm_init() |
|   61 | LIST_FOREACH(node, &priv->nodes, next)          | dpaa2_node_from_id() |
|  461 | LIST_FOREACH(node, &priv->nodes, next) {        | dpaa2_node_add() |
|  657 | nodes[n - 1]->id, nodes[n - 1]->priority,       | dpaa2_tm_sort_and_configure() |
|  664 | if (nodes[i]->priority > nodes[i + 1]->priority) { | dpaa2_tm_sort_and_configure() |
|  665 | temp_node = nodes[i];                           | dpaa2_tm_sort_and_configure() |
|  666 | nodes[i] = nodes[i + 1];                        | dpaa2_tm_sort_and_configure() |
|  667 | nodes[i + 1] = temp_node;                       | dpaa2_tm_sort_and_configure() |
|  673 | nodes[n - 1]->id, nodes[n - 1]->priority,       | dpaa2_tm_sort_and_configure() |
|  708 | nodes[i++] = leaf_node;                         | dpaa2_hierarchy_commit() |
| [all …] |
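Lines 664-667 are a plain exchange (bubble) sort of the queued leaf nodes by ascending priority. The same pattern in isolation, with a hypothetical trimmed-down node type:

    #include <stdint.h>

    struct tm_node {
            uint32_t priority;
    };

    /* Repeated passes until siblings are ordered by priority; fine
     * for the handful of queues per traffic class. */
    static void
    sort_nodes_by_priority(struct tm_node *nodes[], int n)
    {
            for (int pass = 0; pass < n - 1; pass++)
                    for (int i = 0; i < n - 1 - pass; i++)
                            if (nodes[i]->priority > nodes[i + 1]->priority) {
                                    struct tm_node *tmp = nodes[i];
                                    nodes[i] = nodes[i + 1];
                                    nodes[i + 1] = tmp;
                            }
    }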
|
| dpaa2_ethdev.h |
|  212 | LIST_HEAD(nodes, dpaa2_tm_node) nodes; |
|
| /dpdk/examples/server_node_efd/server/ |
| args.c |
|   84 | parse_num_nodes(const char *nodes)      | parse_num_nodes() argument |
|   89 | if (nodes == NULL || *nodes == '\0')    | parse_num_nodes() |
|   92 | temp = strtoul(nodes, &end, 10);        | parse_num_nodes() |
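These hits show the usual ``strtoul`` parsing idiom. A self-contained version of the same pattern; the exact return convention of the original is assumed here:

    #include <stdlib.h>

    /* Parse a decimal node count; -1 on empty input or trailing junk. */
    static long
    parse_num_nodes(const char *nodes)
    {
            char *end = NULL;
            unsigned long temp;

            if (nodes == NULL || *nodes == '\0')
                    return -1;
            temp = strtoul(nodes, &end, 10);
            if (end == NULL || *end != '\0' || temp == 0)
                    return -1;
            return (long)temp;
    }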
|
| init.c |
|   55 | struct node *nodes;                     | variable |
|  178 | nodes = rte_malloc("node details",      | init_shm_rings() |
|  179 | sizeof(*nodes) * num_nodes, 0);         | init_shm_rings() |
|  180 | if (nodes == NULL)                      | init_shm_rings() |
|  188 | nodes[i].rx_q = rte_ring_create(q_name, | init_shm_rings() |
|  191 | if (nodes[i].rx_q == NULL)              | init_shm_rings() |
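``init_shm_rings()`` allocates one descriptor per node plus a receive ring each. A condensed sketch of that loop; the ring size, flags and the trimmed-down ``struct node`` are assumptions:

    #include <stdio.h>
    #include <rte_lcore.h>
    #include <rte_malloc.h>
    #include <rte_ring.h>

    #define RING_SIZE 4096 /* assumed; must be a power of two */

    struct node { struct rte_ring *rx_q; }; /* trimmed-down stand-in */

    static struct node *
    alloc_node_rings(unsigned int num_nodes)
    {
            char q_name[RTE_RING_NAMESIZE];
            struct node *nodes;
            unsigned int i;

            nodes = rte_malloc("node details", sizeof(*nodes) * num_nodes, 0);
            if (nodes == NULL)
                    return NULL;
            for (i = 0; i < num_nodes; i++) {
                    snprintf(q_name, sizeof(q_name), "node_rx_%u", i);
                    /* One single-producer/single-consumer ring per node. */
                    nodes[i].rx_q = rte_ring_create(q_name, RING_SIZE,
                                    rte_socket_id(),
                                    RING_F_SP_ENQ | RING_F_SC_DEQ);
                    if (nodes[i].rx_q == NULL) {
                            rte_free(nodes);
                            return NULL;
                    }
            }
            return nodes;
    }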
|
| main.c |
|  154 | const unsigned long long rx = nodes[i].stats.rx;           | do_stats_display() |
|  155 | const unsigned long long rx_drop = nodes[i].stats.rx_drop; | do_stats_display() |
|  210 | nodes[i].stats.rx = nodes[i].stats.rx_drop = 0;            | clear_stats() |
|  228 | cl = &nodes[node];                                         | flush_rx_queue() |
|
| init.h |
|   32 | extern struct node *nodes; |
|
| /dpdk/doc/guides/sample_app_ug/ |
| l3_forward_graph.rst |
|    9 | Graph framework and nodes written for graph framework. |
|   14 | The application demonstrates the use of the graph framework and graph nodes |
|   23 | TTL update and finally Tx is implemented inside graph nodes. These nodes are |
|  113 | :doc:`l3_forward`, major part of the implementation is in graph nodes via used |
|  124 | lead to the clone of ``ethdev_rx`` and ``ethdev_tx`` nodes as ``ethdev_rx-X-Y`` and |
|  126 | In case of ``ethdev_tx-X`` nodes, tx queue id assigned per instance of the node |
|  129 | These cloned nodes along with existing static nodes such as ``ip4_lookup`` and |
|  142 | Now a graph needs to be created with a specific set of nodes for every lcore. |
|  171 | Since currently ``ip4_lookup`` and ``ip4_rewrite`` nodes don't support |
|  186 | forwarding data is updated with nodes, worker lcores will be launched with graph |
| [all …] |
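The per-lcore graph mentioned in line 142 is assembled from node name patterns via ``rte_graph_create()``. A hedged sketch; the pattern list is illustrative and relies on the clone naming convention quoted above:

    #include <stdio.h>
    #include <rte_graph.h>

    static rte_graph_t
    create_lcore_graph(unsigned int lcore_id, int socket_id)
    {
            /* Cloned Rx/Tx nodes plus the static lookup/rewrite nodes. */
            static const char *patterns[] = {
                    "ethdev_rx-*", "ip4_lookup", "ip4_rewrite",
                    "ethdev_tx-*", "pkt_drop",
            };
            struct rte_graph_param prm = {
                    .socket_id = socket_id,
                    .nb_node_patterns = RTE_DIM(patterns),
                    .node_patterns = patterns,
            };
            char name[RTE_GRAPH_NAMESIZE];

            snprintf(name, sizeof(name), "worker_%u", lcore_id);
            return rte_graph_create(name, &prm);
    }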
|
| server_node_efd.rst |
|   36 | It should be noted that although they are referred to as nodes, the frontend |
|   37 | server and worker nodes are processes running on the same platform. |
|  101 | * ``-n NUM_NODES:`` Number of back-end nodes that will be used |
|  113 | First, the server app must be launched, with the number of nodes that will be run. |
|  136 | which is used to distribute packets to nodes, which the number of flows |
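The flow-to-node distribution from line 136 is what ``librte_efd`` provides: the table maps a flow key to a small stored value, used here as a node id. A heavily hedged sketch of the lookup side only; the key type is an assumption:

    #include <rte_efd.h>
    #include <rte_lcore.h>

    /* Map a flow key to a back-end node id (the stored EFD value). */
    static efd_value_t
    flow_to_node(const struct rte_efd_table *table, const uint32_t *flow_key)
    {
            /* EFD keeps per-socket copies; look up on the caller's socket. */
            return rte_efd_lookup(table, rte_socket_id(), flow_key);
    }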
|
| /dpdk/lib/graph/ |
| graph_populate.c |
|   92 | node->nodes[count] = (struct rte_node *)&graph_node       | graph_nodes_populate() |
|  142 | name = (const char *)node->nodes[val];                    | graph_node_nexts_populate() |
|  143 | node->nodes[val] = graph_node_name_to_ptr(graph, name);   | graph_node_nexts_populate() |
|  144 | if (node->nodes[val] == NULL)                             | graph_node_nexts_populate() |
|
| graph_stats.c |
|   27 | struct rte_node *nodes[];                     | member |
|  158 | cluster->nodes[cluster->nb_nodes++] = node;   | stats_mem_populate() |
|  180 | cluster->nodes[cluster->nb_nodes++] = node;   | stats_mem_populate() |
|  339 | node = cluster->nodes[count];                 | cluster_node_arregate_stats() |
|
| rte_graph_worker.h |
|   85 | struct rte_node *nodes[] __rte_cache_min_aligned; /**< Next nodes. */ | member |
|  248 | node = node->nodes[next];                     | __rte_node_next_node_get() |
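Line 248 is how the fast path maps an edge index onto the adjacent node object; node implementations normally drive that array through the enqueue helpers instead. A sketch of a pass-through process callback, assuming edge 0 is wired to the intended next node:

    #include <rte_graph_worker.h>

    static uint16_t
    forward_all(struct rte_graph *graph, struct rte_node *node,
                void **objs, uint16_t nb_objs)
    {
            /* rte_node_enqueue() resolves node->nodes[next] internally,
             * just as __rte_node_next_node_get() does above. */
            rte_node_enqueue(graph, node, 0 /* next edge */, objs, nb_objs);
            return nb_objs;
    }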
|
| /dpdk/doc/guides/ |
| conf.py |
|    5 | from docutils import nodes |
|   74 | newnode = nodes.reference('', |
|   89 | for node in doctree.traverse(nodes.reference): |
|  110 | newnode = nodes.reference('', |
|
| /dpdk/usertools/ |
| dpdk-hugepages.py |
|  138 | nodes = ['/sys/devices/system/node/node{}/hugepages'.format(node)] |
|  140 | nodes = glob.glob('/sys/devices/system/node/node*/hugepages') |
|  142 | for node_path in nodes: |
|
| /dpdk/drivers/net/mvpp2/ |
| mrvl_tm.c |
|   77 | LIST_INIT(&priv->nodes);                                 | mrvl_tm_init() |
|   95 | struct mrvl_tm_node *node = LIST_FIRST(&priv->nodes);    | mrvl_tm_deinit() |
|  126 | LIST_FOREACH(node, &priv->nodes, next)                   | mrvl_node_from_id() |
|  638 | LIST_FOREACH(node, &priv->nodes, next) {                 | mrvl_node_add() |
|  683 | LIST_INSERT_HEAD(&priv->nodes, node, next);              | mrvl_node_add() |
|  784 | LIST_FOREACH(tmp, &priv->nodes, next) {                  | mrvl_node_suspend() |
|  871 | LIST_FOREACH(node, &priv->nodes, next) {                 | mrvl_hierarchy_commit() |
|  923 | if (LIST_EMPTY(&priv->nodes)) {                          | mrvl_hierarchy_commit() |
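These hits are the standard BSD ``sys/queue.h`` list idiom the TM drivers use for node bookkeeping. The pattern in isolation, with a trimmed-down node type:

    #include <stdint.h>
    #include <stddef.h>
    #include <sys/queue.h>

    struct tm_node {
            uint32_t id;
            LIST_ENTRY(tm_node) next; /* linkage, as in mrvl_tm_node */
    };
    LIST_HEAD(nodes, tm_node);        /* as in mrvl_ethdev.h line 183 */

    static struct tm_node *
    node_from_id(struct nodes *head, uint32_t id)
    {
            struct tm_node *node;

            LIST_FOREACH(node, head, next)
                    if (node->id == id)
                            return node;
            return NULL;
    }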
|
| mrvl_ethdev.h |
|  183 | LIST_HEAD(nodes, mrvl_tm_node) nodes; |
|
| /dpdk/lib/node/ |
| ethdev_tx_priv.h |
|   31 | uint32_t nodes[RTE_MAX_ETHPORTS]; /**< Tx nodes for each ethdev port. */ | member |
|
| ethdev_tx.c |
|   47 | if (ethdev_tx_main.nodes[i] == node->id) { | ethdev_tx_node_init() |
|
| ethdev_ctrl.c |
|   93 | tx_node_data->nodes[port_id] = id; | rte_node_eth_config() |
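``rte_node_eth_config()`` is what fills the per-port ``nodes[]`` table declared in ``ethdev_tx_priv.h``. A hedged call-site sketch; the single port, queue counts and mempool wiring are placeholders:

    #include <rte_mempool.h>
    #include <rte_node_eth_api.h>

    static int
    config_eth_nodes(struct rte_mempool *mp, uint16_t nb_graphs)
    {
            /* One entry per port; this clones ethdev_rx-X-Y/ethdev_tx-X
             * nodes and records the Tx node ids (ethdev_ctrl.c line 93). */
            struct rte_node_ethdev_config cfg = {
                    .port_id = 0, /* assumed single port */
                    .num_rx_queues = 1,
                    .num_tx_queues = 1,
                    .mp = &mp,
                    .mp_count = 1,
            };

            return rte_node_eth_config(&cfg, 1, nb_graphs);
    }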
|
| /dpdk/config/ppc/ |
| meson.build |
|  119 | # POWER systems do not allocate NUMA nodes sequentially. A dual socket system |
|  120 | # will have CPUs associated with NUMA nodes 0 & 8, so ensure that the second |
|  122 | # systems can scale even higher with as many as 32 NUMA nodes. |
|
| /dpdk/drivers/net/softnic/ |
| rte_eth_softnic_tm.c |
|   165 | TAILQ_INIT(&p->soft.tm.h.nodes);                       | tm_hierarchy_init() |
|   175 | tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes);            | tm_hierarchy_free() |
|   269 | struct tm_node_list *nl = &p->soft.tm.h.nodes;         | tm_node_search() |
|   283 | struct tm_node_list *nl = &p->soft.tm.h.nodes;         | tm_root_node_present() |
|   297 | struct tm_node_list *nl = &p->soft.tm.h.nodes;         | tm_node_subport_id() |
|  2110 | struct tm_node_list *nl = &h->nodes;                   | pipe_profile_build() |
|  2236 | struct tm_node_list *nl = &h->nodes;                   | pipe_profiles_generate() |
|  2289 | struct tm_node_list *nl = &h->nodes;                   | tm_tc_wred_profile_get() |
|  2447 | struct tm_node_list *nl = &h->nodes;                   | subport_profiles_generate() |
|  2490 | struct tm_node_list *nl = &h->nodes;                   | hierarchy_commit_check() |
| [all …] |
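The softnic hierarchy uses the ``TAILQ`` variant of the same queue macros; the init/free pair at lines 165/175 follows this shape (node type trimmed down):

    #include <stdlib.h>
    #include <sys/queue.h>

    struct tm_node {
            TAILQ_ENTRY(tm_node) node;
    };
    TAILQ_HEAD(tm_node_list, tm_node);

    static void
    tm_nodes_free(struct tm_node_list *nl)
    {
            struct tm_node *tm_node;

            /* Pop and free until the list is empty. */
            while ((tm_node = TAILQ_FIRST(nl)) != NULL) {
                    TAILQ_REMOVE(nl, tm_node, node);
                    free(tm_node);
            }
    }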
|