xref: /f-stack/dpdk/lib/librte_node/ip4_rewrite.c (revision 2d9fd380)
1*2d9fd380Sjfb8856606 /* SPDX-License-Identifier: BSD-3-Clause
2*2d9fd380Sjfb8856606  * Copyright(C) 2020 Marvell International Ltd.
3*2d9fd380Sjfb8856606  */
4*2d9fd380Sjfb8856606 
5*2d9fd380Sjfb8856606 #include <rte_debug.h>
6*2d9fd380Sjfb8856606 #include <rte_ethdev.h>
7*2d9fd380Sjfb8856606 #include <rte_ether.h>
8*2d9fd380Sjfb8856606 #include <rte_graph.h>
9*2d9fd380Sjfb8856606 #include <rte_graph_worker.h>
10*2d9fd380Sjfb8856606 #include <rte_ip.h>
11*2d9fd380Sjfb8856606 #include <rte_malloc.h>
12*2d9fd380Sjfb8856606 #include <rte_mbuf.h>
13*2d9fd380Sjfb8856606 #include <rte_tcp.h>
14*2d9fd380Sjfb8856606 #include <rte_udp.h>
15*2d9fd380Sjfb8856606 #include <rte_vect.h>
16*2d9fd380Sjfb8856606 
17*2d9fd380Sjfb8856606 #include "rte_node_ip4_api.h"
18*2d9fd380Sjfb8856606 
19*2d9fd380Sjfb8856606 #include "ip4_rewrite_priv.h"
20*2d9fd380Sjfb8856606 #include "node_private.h"
21*2d9fd380Sjfb8856606 
/* Per-node-instance context, stored inline in rte_node->ctx
 * (size-checked against RTE_NODE_CTX_SZ in ip4_rewrite_node_init()).
 */
struct ip4_rewrite_node_ctx {
	/* Dynamic offset to mbuf priv1 */
	int mbuf_priv1_off;
	/* Cached next index */
	uint16_t next_index;
};

/* Next-hop table shared by all ip4_rewrite node instances; lazily
 * allocated by ip4_rewrite_set_next()/rte_node_ip4_rewrite_add().
 */
static struct ip4_rewrite_node_main *ip4_rewrite_nm;

/* Accessors for the fields of ip4_rewrite_node_ctx inside node->ctx. */
#define IP4_REWRITE_NODE_LAST_NEXT(ctx) \
	(((struct ip4_rewrite_node_ctx *)ctx)->next_index)

#define IP4_REWRITE_NODE_PRIV1_OFF(ctx) \
	(((struct ip4_rewrite_node_ctx *)ctx)->mbuf_priv1_off)
36*2d9fd380Sjfb8856606 
37*2d9fd380Sjfb8856606 static uint16_t
ip4_rewrite_node_process(struct rte_graph * graph,struct rte_node * node,void ** objs,uint16_t nb_objs)38*2d9fd380Sjfb8856606 ip4_rewrite_node_process(struct rte_graph *graph, struct rte_node *node,
39*2d9fd380Sjfb8856606 			 void **objs, uint16_t nb_objs)
40*2d9fd380Sjfb8856606 {
41*2d9fd380Sjfb8856606 	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3, **pkts;
42*2d9fd380Sjfb8856606 	struct ip4_rewrite_nh_header *nh = ip4_rewrite_nm->nh;
43*2d9fd380Sjfb8856606 	const int dyn = IP4_REWRITE_NODE_PRIV1_OFF(node->ctx);
44*2d9fd380Sjfb8856606 	uint16_t next0, next1, next2, next3, next_index;
45*2d9fd380Sjfb8856606 	struct rte_ipv4_hdr *ip0, *ip1, *ip2, *ip3;
46*2d9fd380Sjfb8856606 	uint16_t n_left_from, held = 0, last_spec = 0;
47*2d9fd380Sjfb8856606 	void *d0, *d1, *d2, *d3;
48*2d9fd380Sjfb8856606 	void **to_next, **from;
49*2d9fd380Sjfb8856606 	rte_xmm_t priv01;
50*2d9fd380Sjfb8856606 	rte_xmm_t priv23;
51*2d9fd380Sjfb8856606 	int i;
52*2d9fd380Sjfb8856606 
53*2d9fd380Sjfb8856606 	/* Speculative next as last next */
54*2d9fd380Sjfb8856606 	next_index = IP4_REWRITE_NODE_LAST_NEXT(node->ctx);
55*2d9fd380Sjfb8856606 	rte_prefetch0(nh);
56*2d9fd380Sjfb8856606 
57*2d9fd380Sjfb8856606 	pkts = (struct rte_mbuf **)objs;
58*2d9fd380Sjfb8856606 	from = objs;
59*2d9fd380Sjfb8856606 	n_left_from = nb_objs;
60*2d9fd380Sjfb8856606 
61*2d9fd380Sjfb8856606 	for (i = 0; i < 4 && i < n_left_from; i++)
62*2d9fd380Sjfb8856606 		rte_prefetch0(pkts[i]);
63*2d9fd380Sjfb8856606 
64*2d9fd380Sjfb8856606 	/* Get stream for the speculated next node */
65*2d9fd380Sjfb8856606 	to_next = rte_node_next_stream_get(graph, node, next_index, nb_objs);
66*2d9fd380Sjfb8856606 	/* Update Ethernet header of pkts */
67*2d9fd380Sjfb8856606 	while (n_left_from >= 4) {
68*2d9fd380Sjfb8856606 		if (likely(n_left_from > 7)) {
69*2d9fd380Sjfb8856606 			/* Prefetch only next-mbuf struct and priv area.
70*2d9fd380Sjfb8856606 			 * Data need not be prefetched as we only write.
71*2d9fd380Sjfb8856606 			 */
72*2d9fd380Sjfb8856606 			rte_prefetch0(pkts[4]);
73*2d9fd380Sjfb8856606 			rte_prefetch0(pkts[5]);
74*2d9fd380Sjfb8856606 			rte_prefetch0(pkts[6]);
75*2d9fd380Sjfb8856606 			rte_prefetch0(pkts[7]);
76*2d9fd380Sjfb8856606 		}
77*2d9fd380Sjfb8856606 
78*2d9fd380Sjfb8856606 		mbuf0 = pkts[0];
79*2d9fd380Sjfb8856606 		mbuf1 = pkts[1];
80*2d9fd380Sjfb8856606 		mbuf2 = pkts[2];
81*2d9fd380Sjfb8856606 		mbuf3 = pkts[3];
82*2d9fd380Sjfb8856606 
83*2d9fd380Sjfb8856606 		pkts += 4;
84*2d9fd380Sjfb8856606 		n_left_from -= 4;
85*2d9fd380Sjfb8856606 		priv01.u64[0] = node_mbuf_priv1(mbuf0, dyn)->u;
86*2d9fd380Sjfb8856606 		priv01.u64[1] = node_mbuf_priv1(mbuf1, dyn)->u;
87*2d9fd380Sjfb8856606 		priv23.u64[0] = node_mbuf_priv1(mbuf2, dyn)->u;
88*2d9fd380Sjfb8856606 		priv23.u64[1] = node_mbuf_priv1(mbuf3, dyn)->u;
89*2d9fd380Sjfb8856606 
90*2d9fd380Sjfb8856606 		/* Increment checksum by one. */
91*2d9fd380Sjfb8856606 		priv01.u32[1] += rte_cpu_to_be_16(0x0100);
92*2d9fd380Sjfb8856606 		priv01.u32[3] += rte_cpu_to_be_16(0x0100);
93*2d9fd380Sjfb8856606 		priv23.u32[1] += rte_cpu_to_be_16(0x0100);
94*2d9fd380Sjfb8856606 		priv23.u32[3] += rte_cpu_to_be_16(0x0100);
95*2d9fd380Sjfb8856606 
96*2d9fd380Sjfb8856606 		/* Update ttl,cksum rewrite ethernet hdr on mbuf0 */
97*2d9fd380Sjfb8856606 		d0 = rte_pktmbuf_mtod(mbuf0, void *);
98*2d9fd380Sjfb8856606 		rte_memcpy(d0, nh[priv01.u16[0]].rewrite_data,
99*2d9fd380Sjfb8856606 			   nh[priv01.u16[0]].rewrite_len);
100*2d9fd380Sjfb8856606 
101*2d9fd380Sjfb8856606 		next0 = nh[priv01.u16[0]].tx_node;
102*2d9fd380Sjfb8856606 		ip0 = (struct rte_ipv4_hdr *)((uint8_t *)d0 +
103*2d9fd380Sjfb8856606 					      sizeof(struct rte_ether_hdr));
104*2d9fd380Sjfb8856606 		ip0->time_to_live = priv01.u16[1] - 1;
105*2d9fd380Sjfb8856606 		ip0->hdr_checksum = priv01.u16[2] + priv01.u16[3];
106*2d9fd380Sjfb8856606 
107*2d9fd380Sjfb8856606 		/* Update ttl,cksum rewrite ethernet hdr on mbuf1 */
108*2d9fd380Sjfb8856606 		d1 = rte_pktmbuf_mtod(mbuf1, void *);
109*2d9fd380Sjfb8856606 		rte_memcpy(d1, nh[priv01.u16[4]].rewrite_data,
110*2d9fd380Sjfb8856606 			   nh[priv01.u16[4]].rewrite_len);
111*2d9fd380Sjfb8856606 
112*2d9fd380Sjfb8856606 		next1 = nh[priv01.u16[4]].tx_node;
113*2d9fd380Sjfb8856606 		ip1 = (struct rte_ipv4_hdr *)((uint8_t *)d1 +
114*2d9fd380Sjfb8856606 					      sizeof(struct rte_ether_hdr));
115*2d9fd380Sjfb8856606 		ip1->time_to_live = priv01.u16[5] - 1;
116*2d9fd380Sjfb8856606 		ip1->hdr_checksum = priv01.u16[6] + priv01.u16[7];
117*2d9fd380Sjfb8856606 
118*2d9fd380Sjfb8856606 		/* Update ttl,cksum rewrite ethernet hdr on mbuf2 */
119*2d9fd380Sjfb8856606 		d2 = rte_pktmbuf_mtod(mbuf2, void *);
120*2d9fd380Sjfb8856606 		rte_memcpy(d2, nh[priv23.u16[0]].rewrite_data,
121*2d9fd380Sjfb8856606 			   nh[priv23.u16[0]].rewrite_len);
122*2d9fd380Sjfb8856606 		next2 = nh[priv23.u16[0]].tx_node;
123*2d9fd380Sjfb8856606 		ip2 = (struct rte_ipv4_hdr *)((uint8_t *)d2 +
124*2d9fd380Sjfb8856606 					      sizeof(struct rte_ether_hdr));
125*2d9fd380Sjfb8856606 		ip2->time_to_live = priv23.u16[1] - 1;
126*2d9fd380Sjfb8856606 		ip2->hdr_checksum = priv23.u16[2] + priv23.u16[3];
127*2d9fd380Sjfb8856606 
128*2d9fd380Sjfb8856606 		/* Update ttl,cksum rewrite ethernet hdr on mbuf3 */
129*2d9fd380Sjfb8856606 		d3 = rte_pktmbuf_mtod(mbuf3, void *);
130*2d9fd380Sjfb8856606 		rte_memcpy(d3, nh[priv23.u16[4]].rewrite_data,
131*2d9fd380Sjfb8856606 			   nh[priv23.u16[4]].rewrite_len);
132*2d9fd380Sjfb8856606 
133*2d9fd380Sjfb8856606 		next3 = nh[priv23.u16[4]].tx_node;
134*2d9fd380Sjfb8856606 		ip3 = (struct rte_ipv4_hdr *)((uint8_t *)d3 +
135*2d9fd380Sjfb8856606 					      sizeof(struct rte_ether_hdr));
136*2d9fd380Sjfb8856606 		ip3->time_to_live = priv23.u16[5] - 1;
137*2d9fd380Sjfb8856606 		ip3->hdr_checksum = priv23.u16[6] + priv23.u16[7];
138*2d9fd380Sjfb8856606 
139*2d9fd380Sjfb8856606 		/* Enqueue four to next node */
140*2d9fd380Sjfb8856606 		rte_edge_t fix_spec =
141*2d9fd380Sjfb8856606 			((next_index == next0) && (next0 == next1) &&
142*2d9fd380Sjfb8856606 			 (next1 == next2) && (next2 == next3));
143*2d9fd380Sjfb8856606 
144*2d9fd380Sjfb8856606 		if (unlikely(fix_spec == 0)) {
145*2d9fd380Sjfb8856606 			/* Copy things successfully speculated till now */
146*2d9fd380Sjfb8856606 			rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
147*2d9fd380Sjfb8856606 			from += last_spec;
148*2d9fd380Sjfb8856606 			to_next += last_spec;
149*2d9fd380Sjfb8856606 			held += last_spec;
150*2d9fd380Sjfb8856606 			last_spec = 0;
151*2d9fd380Sjfb8856606 
152*2d9fd380Sjfb8856606 			/* next0 */
153*2d9fd380Sjfb8856606 			if (next_index == next0) {
154*2d9fd380Sjfb8856606 				to_next[0] = from[0];
155*2d9fd380Sjfb8856606 				to_next++;
156*2d9fd380Sjfb8856606 				held++;
157*2d9fd380Sjfb8856606 			} else {
158*2d9fd380Sjfb8856606 				rte_node_enqueue_x1(graph, node, next0,
159*2d9fd380Sjfb8856606 						    from[0]);
160*2d9fd380Sjfb8856606 			}
161*2d9fd380Sjfb8856606 
162*2d9fd380Sjfb8856606 			/* next1 */
163*2d9fd380Sjfb8856606 			if (next_index == next1) {
164*2d9fd380Sjfb8856606 				to_next[0] = from[1];
165*2d9fd380Sjfb8856606 				to_next++;
166*2d9fd380Sjfb8856606 				held++;
167*2d9fd380Sjfb8856606 			} else {
168*2d9fd380Sjfb8856606 				rte_node_enqueue_x1(graph, node, next1,
169*2d9fd380Sjfb8856606 						    from[1]);
170*2d9fd380Sjfb8856606 			}
171*2d9fd380Sjfb8856606 
172*2d9fd380Sjfb8856606 			/* next2 */
173*2d9fd380Sjfb8856606 			if (next_index == next2) {
174*2d9fd380Sjfb8856606 				to_next[0] = from[2];
175*2d9fd380Sjfb8856606 				to_next++;
176*2d9fd380Sjfb8856606 				held++;
177*2d9fd380Sjfb8856606 			} else {
178*2d9fd380Sjfb8856606 				rte_node_enqueue_x1(graph, node, next2,
179*2d9fd380Sjfb8856606 						    from[2]);
180*2d9fd380Sjfb8856606 			}
181*2d9fd380Sjfb8856606 
182*2d9fd380Sjfb8856606 			/* next3 */
183*2d9fd380Sjfb8856606 			if (next_index == next3) {
184*2d9fd380Sjfb8856606 				to_next[0] = from[3];
185*2d9fd380Sjfb8856606 				to_next++;
186*2d9fd380Sjfb8856606 				held++;
187*2d9fd380Sjfb8856606 			} else {
188*2d9fd380Sjfb8856606 				rte_node_enqueue_x1(graph, node, next3,
189*2d9fd380Sjfb8856606 						    from[3]);
190*2d9fd380Sjfb8856606 			}
191*2d9fd380Sjfb8856606 
192*2d9fd380Sjfb8856606 			from += 4;
193*2d9fd380Sjfb8856606 
194*2d9fd380Sjfb8856606 			/* Change speculation if last two are same */
195*2d9fd380Sjfb8856606 			if ((next_index != next3) && (next2 == next3)) {
196*2d9fd380Sjfb8856606 				/* Put the current speculated node */
197*2d9fd380Sjfb8856606 				rte_node_next_stream_put(graph, node,
198*2d9fd380Sjfb8856606 							 next_index, held);
199*2d9fd380Sjfb8856606 				held = 0;
200*2d9fd380Sjfb8856606 
201*2d9fd380Sjfb8856606 				/* Get next speculated stream */
202*2d9fd380Sjfb8856606 				next_index = next3;
203*2d9fd380Sjfb8856606 				to_next = rte_node_next_stream_get(
204*2d9fd380Sjfb8856606 					graph, node, next_index, nb_objs);
205*2d9fd380Sjfb8856606 			}
206*2d9fd380Sjfb8856606 		} else {
207*2d9fd380Sjfb8856606 			last_spec += 4;
208*2d9fd380Sjfb8856606 		}
209*2d9fd380Sjfb8856606 	}
210*2d9fd380Sjfb8856606 
211*2d9fd380Sjfb8856606 	while (n_left_from > 0) {
212*2d9fd380Sjfb8856606 		uint16_t chksum;
213*2d9fd380Sjfb8856606 
214*2d9fd380Sjfb8856606 		mbuf0 = pkts[0];
215*2d9fd380Sjfb8856606 
216*2d9fd380Sjfb8856606 		pkts += 1;
217*2d9fd380Sjfb8856606 		n_left_from -= 1;
218*2d9fd380Sjfb8856606 
219*2d9fd380Sjfb8856606 		d0 = rte_pktmbuf_mtod(mbuf0, void *);
220*2d9fd380Sjfb8856606 		rte_memcpy(d0, nh[node_mbuf_priv1(mbuf0, dyn)->nh].rewrite_data,
221*2d9fd380Sjfb8856606 			   nh[node_mbuf_priv1(mbuf0, dyn)->nh].rewrite_len);
222*2d9fd380Sjfb8856606 
223*2d9fd380Sjfb8856606 		next0 = nh[node_mbuf_priv1(mbuf0, dyn)->nh].tx_node;
224*2d9fd380Sjfb8856606 		ip0 = (struct rte_ipv4_hdr *)((uint8_t *)d0 +
225*2d9fd380Sjfb8856606 					      sizeof(struct rte_ether_hdr));
226*2d9fd380Sjfb8856606 		chksum = node_mbuf_priv1(mbuf0, dyn)->cksum +
227*2d9fd380Sjfb8856606 			 rte_cpu_to_be_16(0x0100);
228*2d9fd380Sjfb8856606 		chksum += chksum >= 0xffff;
229*2d9fd380Sjfb8856606 		ip0->hdr_checksum = chksum;
230*2d9fd380Sjfb8856606 		ip0->time_to_live = node_mbuf_priv1(mbuf0, dyn)->ttl - 1;
231*2d9fd380Sjfb8856606 
232*2d9fd380Sjfb8856606 		if (unlikely(next_index ^ next0)) {
233*2d9fd380Sjfb8856606 			/* Copy things successfully speculated till now */
234*2d9fd380Sjfb8856606 			rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
235*2d9fd380Sjfb8856606 			from += last_spec;
236*2d9fd380Sjfb8856606 			to_next += last_spec;
237*2d9fd380Sjfb8856606 			held += last_spec;
238*2d9fd380Sjfb8856606 			last_spec = 0;
239*2d9fd380Sjfb8856606 
240*2d9fd380Sjfb8856606 			rte_node_enqueue_x1(graph, node, next0, from[0]);
241*2d9fd380Sjfb8856606 			from += 1;
242*2d9fd380Sjfb8856606 		} else {
243*2d9fd380Sjfb8856606 			last_spec += 1;
244*2d9fd380Sjfb8856606 		}
245*2d9fd380Sjfb8856606 	}
246*2d9fd380Sjfb8856606 
247*2d9fd380Sjfb8856606 	/* !!! Home run !!! */
248*2d9fd380Sjfb8856606 	if (likely(last_spec == nb_objs)) {
249*2d9fd380Sjfb8856606 		rte_node_next_stream_move(graph, node, next_index);
250*2d9fd380Sjfb8856606 		return nb_objs;
251*2d9fd380Sjfb8856606 	}
252*2d9fd380Sjfb8856606 
253*2d9fd380Sjfb8856606 	held += last_spec;
254*2d9fd380Sjfb8856606 	rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
255*2d9fd380Sjfb8856606 	rte_node_next_stream_put(graph, node, next_index, held);
256*2d9fd380Sjfb8856606 	/* Save the last next used */
257*2d9fd380Sjfb8856606 	IP4_REWRITE_NODE_LAST_NEXT(node->ctx) = next_index;
258*2d9fd380Sjfb8856606 
259*2d9fd380Sjfb8856606 	return nb_objs;
260*2d9fd380Sjfb8856606 }
261*2d9fd380Sjfb8856606 
262*2d9fd380Sjfb8856606 static int
ip4_rewrite_node_init(const struct rte_graph * graph,struct rte_node * node)263*2d9fd380Sjfb8856606 ip4_rewrite_node_init(const struct rte_graph *graph, struct rte_node *node)
264*2d9fd380Sjfb8856606 {
265*2d9fd380Sjfb8856606 	static bool init_once;
266*2d9fd380Sjfb8856606 
267*2d9fd380Sjfb8856606 	RTE_SET_USED(graph);
268*2d9fd380Sjfb8856606 	RTE_BUILD_BUG_ON(sizeof(struct ip4_rewrite_node_ctx) > RTE_NODE_CTX_SZ);
269*2d9fd380Sjfb8856606 
270*2d9fd380Sjfb8856606 	if (!init_once) {
271*2d9fd380Sjfb8856606 		node_mbuf_priv1_dynfield_offset = rte_mbuf_dynfield_register(
272*2d9fd380Sjfb8856606 				&node_mbuf_priv1_dynfield_desc);
273*2d9fd380Sjfb8856606 		if (node_mbuf_priv1_dynfield_offset < 0)
274*2d9fd380Sjfb8856606 			return -rte_errno;
275*2d9fd380Sjfb8856606 		init_once = true;
276*2d9fd380Sjfb8856606 	}
277*2d9fd380Sjfb8856606 	IP4_REWRITE_NODE_PRIV1_OFF(node->ctx) = node_mbuf_priv1_dynfield_offset;
278*2d9fd380Sjfb8856606 
279*2d9fd380Sjfb8856606 	node_dbg("ip4_rewrite", "Initialized ip4_rewrite node initialized");
280*2d9fd380Sjfb8856606 
281*2d9fd380Sjfb8856606 	return 0;
282*2d9fd380Sjfb8856606 }
283*2d9fd380Sjfb8856606 
284*2d9fd380Sjfb8856606 int
ip4_rewrite_set_next(uint16_t port_id,uint16_t next_index)285*2d9fd380Sjfb8856606 ip4_rewrite_set_next(uint16_t port_id, uint16_t next_index)
286*2d9fd380Sjfb8856606 {
287*2d9fd380Sjfb8856606 	if (ip4_rewrite_nm == NULL) {
288*2d9fd380Sjfb8856606 		ip4_rewrite_nm = rte_zmalloc(
289*2d9fd380Sjfb8856606 			"ip4_rewrite", sizeof(struct ip4_rewrite_node_main),
290*2d9fd380Sjfb8856606 			RTE_CACHE_LINE_SIZE);
291*2d9fd380Sjfb8856606 		if (ip4_rewrite_nm == NULL)
292*2d9fd380Sjfb8856606 			return -ENOMEM;
293*2d9fd380Sjfb8856606 	}
294*2d9fd380Sjfb8856606 	ip4_rewrite_nm->next_index[port_id] = next_index;
295*2d9fd380Sjfb8856606 
296*2d9fd380Sjfb8856606 	return 0;
297*2d9fd380Sjfb8856606 }
298*2d9fd380Sjfb8856606 
299*2d9fd380Sjfb8856606 int
rte_node_ip4_rewrite_add(uint16_t next_hop,uint8_t * rewrite_data,uint8_t rewrite_len,uint16_t dst_port)300*2d9fd380Sjfb8856606 rte_node_ip4_rewrite_add(uint16_t next_hop, uint8_t *rewrite_data,
301*2d9fd380Sjfb8856606 			 uint8_t rewrite_len, uint16_t dst_port)
302*2d9fd380Sjfb8856606 {
303*2d9fd380Sjfb8856606 	struct ip4_rewrite_nh_header *nh;
304*2d9fd380Sjfb8856606 
305*2d9fd380Sjfb8856606 	if (next_hop >= RTE_GRAPH_IP4_REWRITE_MAX_NH)
306*2d9fd380Sjfb8856606 		return -EINVAL;
307*2d9fd380Sjfb8856606 
308*2d9fd380Sjfb8856606 	if (rewrite_len > RTE_GRAPH_IP4_REWRITE_MAX_LEN)
309*2d9fd380Sjfb8856606 		return -EINVAL;
310*2d9fd380Sjfb8856606 
311*2d9fd380Sjfb8856606 	if (ip4_rewrite_nm == NULL) {
312*2d9fd380Sjfb8856606 		ip4_rewrite_nm = rte_zmalloc(
313*2d9fd380Sjfb8856606 			"ip4_rewrite", sizeof(struct ip4_rewrite_node_main),
314*2d9fd380Sjfb8856606 			RTE_CACHE_LINE_SIZE);
315*2d9fd380Sjfb8856606 		if (ip4_rewrite_nm == NULL)
316*2d9fd380Sjfb8856606 			return -ENOMEM;
317*2d9fd380Sjfb8856606 	}
318*2d9fd380Sjfb8856606 
319*2d9fd380Sjfb8856606 	/* Check if dst port doesn't exist as edge */
320*2d9fd380Sjfb8856606 	if (!ip4_rewrite_nm->next_index[dst_port])
321*2d9fd380Sjfb8856606 		return -EINVAL;
322*2d9fd380Sjfb8856606 
323*2d9fd380Sjfb8856606 	/* Update next hop */
324*2d9fd380Sjfb8856606 	nh = &ip4_rewrite_nm->nh[next_hop];
325*2d9fd380Sjfb8856606 
326*2d9fd380Sjfb8856606 	memcpy(nh->rewrite_data, rewrite_data, rewrite_len);
327*2d9fd380Sjfb8856606 	nh->tx_node = ip4_rewrite_nm->next_index[dst_port];
328*2d9fd380Sjfb8856606 	nh->rewrite_len = rewrite_len;
329*2d9fd380Sjfb8856606 	nh->enabled = true;
330*2d9fd380Sjfb8856606 
331*2d9fd380Sjfb8856606 	return 0;
332*2d9fd380Sjfb8856606 }
333*2d9fd380Sjfb8856606 
/* Node registration: process/init callbacks and the default edge set. */
static struct rte_node_register ip4_rewrite_node = {
	.process = ip4_rewrite_node_process,
	.name = "ip4_rewrite",
	/* Default edge i.e '0' is pkt drop */
	.nb_edges = 1,
	.next_nodes = {
		[0] = "pkt_drop",
	},
	.init = ip4_rewrite_node_init,
};

/* Accessor so code outside this file can clone/extend this node's
 * registration without exposing the static object directly.
 */
struct rte_node_register *
ip4_rewrite_node_get(void)
{
	return &ip4_rewrite_node;
}

RTE_NODE_REGISTER(ip4_rewrite_node);
352