/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_graph.h>
#include <rte_graph_worker.h>
#include <rte_mbuf.h>

#include "ethdev_tx_priv.h"

/* Per-port Tx node state: nodes[] maps an ethdev port id to the id of the
 * "ethdev_tx" node cloned for that port.
 */
static struct ethdev_tx_node_main ethdev_tx_main;

static uint16_t
ethdev_tx_node_process(struct rte_graph *graph, struct rte_node *node,
		       void **objs, uint16_t nb_objs)
{
	ethdev_tx_node_ctx_t *ctx = (ethdev_tx_node_ctx_t *)node->ctx;
	uint16_t port, queue;
	uint16_t count;

	/* Get Tx port and queue ids from the node context */
	port = ctx->port;
	queue = ctx->queue;

	/* Transmit the burst of mbufs on the configured port/queue */
	count = rte_eth_tx_burst(port, queue, (struct rte_mbuf **)objs,
				 nb_objs);

	/* Redirect unsent pkts to drop node */
	if (count != nb_objs) {
		rte_node_enqueue(graph, node, ETHDEV_TX_NEXT_PKT_DROP,
				 &objs[count], nb_objs - count);
	}

	/* Report how many objects this node consumed */
	return count;
}
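
/*
 * Illustrative only (not part of the original file): an upstream node hands
 * mbufs to this node through the graph API. Assuming the upstream node has
 * an edge wired to "ethdev_tx-<port>" (the edge name below is hypothetical),
 * the handoff looks roughly like:
 *
 *	rte_node_enqueue(graph, node, UPSTREAM_NEXT_ETHDEV_TX, objs, nb_objs);
 *
 * ethdev_tx_node_process() then transmits the burst and diverts whatever
 * rte_eth_tx_burst() could not send to the "pkt_drop" node, which frees
 * those mbufs.
 */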

static int
ethdev_tx_node_init(const struct rte_graph *graph, struct rte_node *node)
{
	ethdev_tx_node_ctx_t *ctx = (ethdev_tx_node_ctx_t *)node->ctx;
	uint64_t port_id = RTE_MAX_ETHPORTS;
	int i;

	/* Find our port id by matching this node's id in the clone table */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (ethdev_tx_main.nodes[i] == node->id) {
			port_id = i;
			break;
		}
	}
	RTE_VERIFY(port_id < RTE_MAX_ETHPORTS);

	/* Update port and queue; each worker graph drives its own Tx queue */
	ctx->port = port_id;
	ctx->queue = graph->id;

	return 0;
}
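
/*
 * Note (added for clarity, not in the original file): ethdev_tx_main.nodes[]
 * is expected to be filled by the library's ethdev control path before the
 * graph is created, by cloning the base "ethdev_tx" node once per port.
 * A rough sketch of that step, with illustrative variable names:
 *
 *	struct ethdev_tx_node_main *tx = ethdev_tx_node_data_get();
 *	struct rte_node_register *base = ethdev_tx_node_get();
 *	char name[RTE_NODE_NAMESIZE];
 *
 *	snprintf(name, sizeof(name), "%u", port_id);
 *	tx->nodes[port_id] = rte_node_clone(base->id, name);
 *
 * The clone is published as "ethdev_tx-<port_id>", which is how this init
 * callback can later recover its port id from node->id.
 */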

struct ethdev_tx_node_main *
ethdev_tx_node_data_get(void)
{
	return &ethdev_tx_main;
}

/* Base Tx node registration; the control path clones it once per port */
static struct rte_node_register ethdev_tx_node_base = {
	.process = ethdev_tx_node_process,
	.name = "ethdev_tx",

	.init = ethdev_tx_node_init,

	.nb_edges = ETHDEV_TX_NEXT_MAX,
	.next_nodes = {
		[ETHDEV_TX_NEXT_PKT_DROP] = "pkt_drop",
	},
};

struct rte_node_register *
ethdev_tx_node_get(void)
{
	return &ethdev_tx_node_base;
}

RTE_NODE_REGISTER(ethdev_tx_node_base);
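
/*
 * Illustrative usage (not part of this file): a worker graph that should
 * contain the per-port Tx nodes can pull them in by name pattern when the
 * graph is created. The pattern list and graph name below are assumptions.
 *
 *	static const char *patterns[] = {
 *		"ethdev_rx-*", "ethdev_tx-*", "pkt_drop",
 *	};
 *	struct rte_graph_param prm = {
 *		.socket_id = SOCKET_ID_ANY,
 *		.nb_node_patterns = RTE_DIM(patterns),
 *		.node_patterns = patterns,
 *	};
 *	rte_graph_t gid = rte_graph_create("worker_0", &prm);
 *
 * Because ethdev_tx_node_init() uses graph->id as the Tx queue index, each
 * worker graph ends up driving its own Tx queue on the port.
 */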