xref: /f-stack/dpdk/lib/librte_node/ethdev_ctrl.c (revision 2d9fd380)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_graph.h>

#include "rte_node_eth_api.h"

#include "ethdev_rx_priv.h"
#include "ethdev_tx_priv.h"
#include "ip4_rewrite_priv.h"
#include "node_private.h"

static struct ethdev_ctrl {
	uint16_t nb_graphs;
} ctrl;

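/*
 * Set up the ethdev Rx/Tx graph nodes for the ports described in conf[]:
 * validate each port, check that every supplied mempool provides at least
 * NODE_MBUF_PRIV2_SIZE bytes of private area, clone one ethdev_rx node per
 * (port, rx queue) pair and one ethdev_tx node per port, and register each
 * tx node as a next edge of the ip4_rewrite node.
 */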
int
rte_node_eth_config(struct rte_node_ethdev_config *conf, uint16_t nb_confs,
		    uint16_t nb_graphs)
{
	struct rte_node_register *ip4_rewrite_node;
	struct ethdev_tx_node_main *tx_node_data;
	uint16_t tx_q_used, rx_q_used, port_id;
	struct rte_node_register *tx_node;
	char name[RTE_NODE_NAMESIZE];
	const char *next_nodes = name;
	struct rte_mempool *mp;
	int i, j, rc;
	uint32_t id;

	ip4_rewrite_node = ip4_rewrite_node_get();
	tx_node_data = ethdev_tx_node_data_get();
	tx_node = ethdev_tx_node_get();
	for (i = 0; i < nb_confs; i++) {
		port_id = conf[i].port_id;

		if (!rte_eth_dev_is_valid_port(port_id))
			return -EINVAL;

		/* Check for mbuf minimum private size requirement */
		for (j = 0; j < conf[i].mp_count; j++) {
			mp = conf[i].mp[j];
			if (!mp)
				continue;
			/* Check for minimum private space */
			if (rte_pktmbuf_priv_size(mp) < NODE_MBUF_PRIV2_SIZE) {
				node_err("ethdev",
					 "Minimum mbuf priv size requirement not met by mp %s",
					 mp->name);
				return -EINVAL;
			}
		}

		rx_q_used = conf[i].num_rx_queues;
		tx_q_used = conf[i].num_tx_queues;
		/* Check if we have a txq for each worker */
		if (tx_q_used < nb_graphs)
			return -EINVAL;

		/* Create node for each rx port queue pair */
		for (j = 0; j < rx_q_used; j++) {
			struct ethdev_rx_node_main *rx_node_data;
			struct rte_node_register *rx_node;
			ethdev_rx_node_elem_t *elem;

			rx_node_data = ethdev_rx_get_node_data_get();
			rx_node = ethdev_rx_node_get();
			snprintf(name, sizeof(name), "%u-%u", port_id, j);
			/* Clone a new rx node with same edges as parent */
			id = rte_node_clone(rx_node->id, name);
			if (id == RTE_NODE_ID_INVALID)
				return -EIO;

			/* Add it to list of ethdev rx nodes for lookup */
			elem = malloc(sizeof(ethdev_rx_node_elem_t));
			if (elem == NULL)
				return -ENOMEM;
			memset(elem, 0, sizeof(ethdev_rx_node_elem_t));
			elem->ctx.port_id = port_id;
			elem->ctx.queue_id = j;
			elem->nid = id;
			elem->next = rx_node_data->head;
			rx_node_data->head = elem;

			node_dbg("ethdev", "Rx node %s-%s: is at %u",
				 rx_node->name, name, id);
		}

		/* Create a per port tx node from base node */
		snprintf(name, sizeof(name), "%u", port_id);
		/* Clone a new node with same edges as parent */
		id = rte_node_clone(tx_node->id, name);
		if (id == RTE_NODE_ID_INVALID)
			return -EIO;
		tx_node_data->nodes[port_id] = id;

		node_dbg("ethdev", "Tx node %s-%s: is at %u", tx_node->name,
			 name, id);

		/* Prepare the actual name of the cloned node */
		snprintf(name, sizeof(name), "ethdev_tx-%u", port_id);

		/* Add this tx port node as next to ip4_rewrite_node */
		rte_node_edge_update(ip4_rewrite_node->id, RTE_EDGE_ID_INVALID,
				     &next_nodes, 1);
		/* Assuming edge id is the last one alloc'ed */
		rc = ip4_rewrite_set_next(
			port_id, rte_node_edge_count(ip4_rewrite_node->id) - 1);
		if (rc < 0)
			return rc;
	}

	ctrl.nb_graphs = nb_graphs;
	return 0;
}
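
/*
 * A minimal caller-side sketch, not part of this file: how an application
 * (e.g. the l3fwd-graph example) might call rte_node_eth_config() once the
 * ethdev ports and mbuf pools are configured. The names mbuf_pool and
 * nb_graphs and the single-port, single-rx-queue setup are illustrative
 * assumptions; num_tx_queues must be at least nb_graphs, as checked above.
 *
 *	struct rte_mempool *pools[1] = {mbuf_pool};
 *	struct rte_node_ethdev_config conf = {
 *		.port_id = 0,
 *		.num_rx_queues = 1,
 *		.num_tx_queues = nb_graphs,
 *		.mp = pools,
 *		.mp_count = 1,
 *	};
 *
 *	if (rte_node_eth_config(&conf, 1, nb_graphs) < 0)
 *		rte_exit(EXIT_FAILURE, "rte_node_eth_config() failed\n");
 */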