xref: /f-stack/dpdk/lib/librte_node/ethdev_rx.c (revision 2d9fd380)
1*2d9fd380Sjfb8856606 /* SPDX-License-Identifier: BSD-3-Clause
2*2d9fd380Sjfb8856606  * Copyright(C) 2020 Marvell International Ltd.
3*2d9fd380Sjfb8856606  */
4*2d9fd380Sjfb8856606 
5*2d9fd380Sjfb8856606 #include <rte_debug.h>
6*2d9fd380Sjfb8856606 #include <rte_ethdev.h>
7*2d9fd380Sjfb8856606 #include <rte_ether.h>
8*2d9fd380Sjfb8856606 #include <rte_graph.h>
9*2d9fd380Sjfb8856606 #include <rte_graph_worker.h>
10*2d9fd380Sjfb8856606 #include <rte_mbuf.h>
11*2d9fd380Sjfb8856606 
12*2d9fd380Sjfb8856606 #include "ethdev_rx_priv.h"
13*2d9fd380Sjfb8856606 #include "node_private.h"
14*2d9fd380Sjfb8856606 
15*2d9fd380Sjfb8856606 static struct ethdev_rx_node_main ethdev_rx_main;
16*2d9fd380Sjfb8856606 
17*2d9fd380Sjfb8856606 static __rte_always_inline uint16_t
ethdev_rx_node_process_inline(struct rte_graph * graph,struct rte_node * node,ethdev_rx_node_ctx_t * ctx)18*2d9fd380Sjfb8856606 ethdev_rx_node_process_inline(struct rte_graph *graph, struct rte_node *node,
19*2d9fd380Sjfb8856606 			      ethdev_rx_node_ctx_t *ctx)
20*2d9fd380Sjfb8856606 {
21*2d9fd380Sjfb8856606 	uint16_t count, next_index;
22*2d9fd380Sjfb8856606 	uint16_t port, queue;
23*2d9fd380Sjfb8856606 
24*2d9fd380Sjfb8856606 	port = ctx->port_id;
25*2d9fd380Sjfb8856606 	queue = ctx->queue_id;
26*2d9fd380Sjfb8856606 	next_index = ctx->cls_next;
27*2d9fd380Sjfb8856606 
28*2d9fd380Sjfb8856606 	/* Get pkts from port */
29*2d9fd380Sjfb8856606 	count = rte_eth_rx_burst(port, queue, (struct rte_mbuf **)node->objs,
30*2d9fd380Sjfb8856606 				 RTE_GRAPH_BURST_SIZE);
31*2d9fd380Sjfb8856606 
32*2d9fd380Sjfb8856606 	if (!count)
33*2d9fd380Sjfb8856606 		return 0;
34*2d9fd380Sjfb8856606 	node->idx = count;
35*2d9fd380Sjfb8856606 	/* Enqueue to next node */
36*2d9fd380Sjfb8856606 	rte_node_next_stream_move(graph, node, next_index);
37*2d9fd380Sjfb8856606 
38*2d9fd380Sjfb8856606 	return count;
39*2d9fd380Sjfb8856606 }
40*2d9fd380Sjfb8856606 
41*2d9fd380Sjfb8856606 static __rte_always_inline uint16_t
ethdev_rx_node_process(struct rte_graph * graph,struct rte_node * node,void ** objs,uint16_t cnt)42*2d9fd380Sjfb8856606 ethdev_rx_node_process(struct rte_graph *graph, struct rte_node *node,
43*2d9fd380Sjfb8856606 		       void **objs, uint16_t cnt)
44*2d9fd380Sjfb8856606 {
45*2d9fd380Sjfb8856606 	ethdev_rx_node_ctx_t *ctx = (ethdev_rx_node_ctx_t *)node->ctx;
46*2d9fd380Sjfb8856606 	uint16_t n_pkts = 0;
47*2d9fd380Sjfb8856606 
48*2d9fd380Sjfb8856606 	RTE_SET_USED(objs);
49*2d9fd380Sjfb8856606 	RTE_SET_USED(cnt);
50*2d9fd380Sjfb8856606 
51*2d9fd380Sjfb8856606 	n_pkts = ethdev_rx_node_process_inline(graph, node, ctx);
52*2d9fd380Sjfb8856606 	return n_pkts;
53*2d9fd380Sjfb8856606 }
54*2d9fd380Sjfb8856606 
55*2d9fd380Sjfb8856606 static inline uint32_t
l3_ptype(uint16_t etype,uint32_t ptype)56*2d9fd380Sjfb8856606 l3_ptype(uint16_t etype, uint32_t ptype)
57*2d9fd380Sjfb8856606 {
58*2d9fd380Sjfb8856606 	ptype = ptype & ~RTE_PTYPE_L3_MASK;
59*2d9fd380Sjfb8856606 	if (etype == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
60*2d9fd380Sjfb8856606 		ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
61*2d9fd380Sjfb8856606 	else if (etype == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))
62*2d9fd380Sjfb8856606 		ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
63*2d9fd380Sjfb8856606 	return ptype;
64*2d9fd380Sjfb8856606 }
65*2d9fd380Sjfb8856606 
/* Callback for soft ptype parsing */
/*
 * Rx callback that classifies the L3 type of each received mbuf in software,
 * for ports whose hardware cannot report IPv4/IPv6 ptypes (see
 * ethdev_ptype_setup()). Overwrites mbuf->packet_type based solely on the
 * outer Ethernet header's ethertype; all packets are returned unchanged
 * otherwise.
 *
 * Returns nb_pkts (the callback never drops packets).
 */
static uint16_t
eth_pkt_parse_cb(uint16_t port, uint16_t queue, struct rte_mbuf **mbufs,
		 uint16_t nb_pkts, uint16_t max_pkts, void *user_param)
{
	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
	struct rte_ether_hdr *eth_hdr;
	uint16_t etype, n_left;
	struct rte_mbuf **pkts;

	RTE_SET_USED(port);
	RTE_SET_USED(queue);
	RTE_SET_USED(max_pkts);
	RTE_SET_USED(user_param);

	pkts = mbufs;
	n_left = nb_pkts;
	/* Unrolled x4 main loop. The >= 12 bound guarantees pkts[8..11]
	 * are valid to prefetch while pkts[0..3] are being processed.
	 */
	while (n_left >= 12) {

		/* Prefetch next-next mbufs (struct headers) */
		rte_prefetch0(pkts[8]);
		rte_prefetch0(pkts[9]);
		rte_prefetch0(pkts[10]);
		rte_prefetch0(pkts[11]);

		/* Prefetch next mbuf data (packet payload start) */
		rte_prefetch0(
			rte_pktmbuf_mtod(pkts[4], struct rte_ether_hdr *));
		rte_prefetch0(
			rte_pktmbuf_mtod(pkts[5], struct rte_ether_hdr *));
		rte_prefetch0(
			rte_pktmbuf_mtod(pkts[6], struct rte_ether_hdr *));
		rte_prefetch0(
			rte_pktmbuf_mtod(pkts[7], struct rte_ether_hdr *));

		mbuf0 = pkts[0];
		mbuf1 = pkts[1];
		mbuf2 = pkts[2];
		mbuf3 = pkts[3];
		pkts += 4;
		n_left -= 4;

		/* Extract ptype of mbuf0; etype stays big-endian, matching
		 * the byte-swapped constants compared in l3_ptype().
		 */
		eth_hdr = rte_pktmbuf_mtod(mbuf0, struct rte_ether_hdr *);
		etype = eth_hdr->ether_type;
		mbuf0->packet_type = l3_ptype(etype, 0);

		/* Extract ptype of mbuf1 */
		eth_hdr = rte_pktmbuf_mtod(mbuf1, struct rte_ether_hdr *);
		etype = eth_hdr->ether_type;
		mbuf1->packet_type = l3_ptype(etype, 0);

		/* Extract ptype of mbuf2 */
		eth_hdr = rte_pktmbuf_mtod(mbuf2, struct rte_ether_hdr *);
		etype = eth_hdr->ether_type;
		mbuf2->packet_type = l3_ptype(etype, 0);

		/* Extract ptype of mbuf3 */
		eth_hdr = rte_pktmbuf_mtod(mbuf3, struct rte_ether_hdr *);
		etype = eth_hdr->ether_type;
		mbuf3->packet_type = l3_ptype(etype, 0);
	}

	/* Scalar tail loop for the remaining < 12 packets */
	while (n_left > 0) {
		mbuf0 = pkts[0];

		pkts += 1;
		n_left -= 1;

		/* Extract ptype of mbuf0 */
		eth_hdr = rte_pktmbuf_mtod(mbuf0, struct rte_ether_hdr *);
		etype = eth_hdr->ether_type;
		mbuf0->packet_type = l3_ptype(etype, 0);
	}

	return nb_pkts;
}
143*2d9fd380Sjfb8856606 
144*2d9fd380Sjfb8856606 #define MAX_PTYPES 16
145*2d9fd380Sjfb8856606 static int
ethdev_ptype_setup(uint16_t port,uint16_t queue)146*2d9fd380Sjfb8856606 ethdev_ptype_setup(uint16_t port, uint16_t queue)
147*2d9fd380Sjfb8856606 {
148*2d9fd380Sjfb8856606 	uint8_t l3_ipv4 = 0, l3_ipv6 = 0;
149*2d9fd380Sjfb8856606 	uint32_t ptypes[MAX_PTYPES];
150*2d9fd380Sjfb8856606 	int i, rc;
151*2d9fd380Sjfb8856606 
152*2d9fd380Sjfb8856606 	/* Check IPv4 & IPv6 ptype support */
153*2d9fd380Sjfb8856606 	rc = rte_eth_dev_get_supported_ptypes(port, RTE_PTYPE_L3_MASK, ptypes,
154*2d9fd380Sjfb8856606 					      MAX_PTYPES);
155*2d9fd380Sjfb8856606 	for (i = 0; i < rc; i++) {
156*2d9fd380Sjfb8856606 		if (ptypes[i] & RTE_PTYPE_L3_IPV4)
157*2d9fd380Sjfb8856606 			l3_ipv4 = 1;
158*2d9fd380Sjfb8856606 		if (ptypes[i] & RTE_PTYPE_L3_IPV6)
159*2d9fd380Sjfb8856606 			l3_ipv6 = 1;
160*2d9fd380Sjfb8856606 	}
161*2d9fd380Sjfb8856606 
162*2d9fd380Sjfb8856606 	if (!l3_ipv4 || !l3_ipv6) {
163*2d9fd380Sjfb8856606 		node_info("ethdev_rx",
164*2d9fd380Sjfb8856606 			  "Enabling ptype callback for required ptypes on port %u\n",
165*2d9fd380Sjfb8856606 			  port);
166*2d9fd380Sjfb8856606 
167*2d9fd380Sjfb8856606 		if (!rte_eth_add_rx_callback(port, queue, eth_pkt_parse_cb,
168*2d9fd380Sjfb8856606 					     NULL)) {
169*2d9fd380Sjfb8856606 			node_err("ethdev_rx",
170*2d9fd380Sjfb8856606 				 "Failed to add rx ptype cb: port=%d, queue=%d\n",
171*2d9fd380Sjfb8856606 				 port, queue);
172*2d9fd380Sjfb8856606 			return -EINVAL;
173*2d9fd380Sjfb8856606 		}
174*2d9fd380Sjfb8856606 	}
175*2d9fd380Sjfb8856606 
176*2d9fd380Sjfb8856606 	return 0;
177*2d9fd380Sjfb8856606 }
178*2d9fd380Sjfb8856606 
179*2d9fd380Sjfb8856606 static int
ethdev_rx_node_init(const struct rte_graph * graph,struct rte_node * node)180*2d9fd380Sjfb8856606 ethdev_rx_node_init(const struct rte_graph *graph, struct rte_node *node)
181*2d9fd380Sjfb8856606 {
182*2d9fd380Sjfb8856606 	ethdev_rx_node_ctx_t *ctx = (ethdev_rx_node_ctx_t *)node->ctx;
183*2d9fd380Sjfb8856606 	ethdev_rx_node_elem_t *elem = ethdev_rx_main.head;
184*2d9fd380Sjfb8856606 
185*2d9fd380Sjfb8856606 	RTE_SET_USED(graph);
186*2d9fd380Sjfb8856606 
187*2d9fd380Sjfb8856606 	while (elem) {
188*2d9fd380Sjfb8856606 		if (elem->nid == node->id) {
189*2d9fd380Sjfb8856606 			/* Update node specific context */
190*2d9fd380Sjfb8856606 			memcpy(ctx, &elem->ctx, sizeof(ethdev_rx_node_ctx_t));
191*2d9fd380Sjfb8856606 			break;
192*2d9fd380Sjfb8856606 		}
193*2d9fd380Sjfb8856606 		elem = elem->next;
194*2d9fd380Sjfb8856606 	}
195*2d9fd380Sjfb8856606 
196*2d9fd380Sjfb8856606 	RTE_VERIFY(elem != NULL);
197*2d9fd380Sjfb8856606 
198*2d9fd380Sjfb8856606 	ctx->cls_next = ETHDEV_RX_NEXT_PKT_CLS;
199*2d9fd380Sjfb8856606 
200*2d9fd380Sjfb8856606 	/* Check and setup ptype */
201*2d9fd380Sjfb8856606 	return ethdev_ptype_setup(ctx->port_id, ctx->queue_id);
202*2d9fd380Sjfb8856606 }
203*2d9fd380Sjfb8856606 
204*2d9fd380Sjfb8856606 struct ethdev_rx_node_main *
ethdev_rx_get_node_data_get(void)205*2d9fd380Sjfb8856606 ethdev_rx_get_node_data_get(void)
206*2d9fd380Sjfb8856606 {
207*2d9fd380Sjfb8856606 	return &ethdev_rx_main;
208*2d9fd380Sjfb8856606 }
209*2d9fd380Sjfb8856606 
210*2d9fd380Sjfb8856606 static struct rte_node_register ethdev_rx_node_base = {
211*2d9fd380Sjfb8856606 	.process = ethdev_rx_node_process,
212*2d9fd380Sjfb8856606 	.flags = RTE_NODE_SOURCE_F,
213*2d9fd380Sjfb8856606 	.name = "ethdev_rx",
214*2d9fd380Sjfb8856606 
215*2d9fd380Sjfb8856606 	.init = ethdev_rx_node_init,
216*2d9fd380Sjfb8856606 
217*2d9fd380Sjfb8856606 	.nb_edges = ETHDEV_RX_NEXT_MAX,
218*2d9fd380Sjfb8856606 	.next_nodes = {
219*2d9fd380Sjfb8856606 		/* Default pkt classification node */
220*2d9fd380Sjfb8856606 		[ETHDEV_RX_NEXT_PKT_CLS] = "pkt_cls",
221*2d9fd380Sjfb8856606 		[ETHDEV_RX_NEXT_IP4_LOOKUP] = "ip4_lookup",
222*2d9fd380Sjfb8856606 	},
223*2d9fd380Sjfb8856606 };
224*2d9fd380Sjfb8856606 
225*2d9fd380Sjfb8856606 struct rte_node_register *
ethdev_rx_node_get(void)226*2d9fd380Sjfb8856606 ethdev_rx_node_get(void)
227*2d9fd380Sjfb8856606 {
228*2d9fd380Sjfb8856606 	return &ethdev_rx_node_base;
229*2d9fd380Sjfb8856606 }
230*2d9fd380Sjfb8856606 
231*2d9fd380Sjfb8856606 RTE_NODE_REGISTER(ethdev_rx_node_base);
232