/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell.
 */

#include <rte_graph.h>
#include <rte_graph_worker.h>

#include "pkt_cls_priv.h"
#include "node_private.h"

/* Next node for each l2/l3 ptype; the default '0' maps to "pkt_drop" */
static const uint8_t p_nxt[256] __rte_cache_aligned = {
	[RTE_PTYPE_L3_IPV4] = PKT_CLS_NEXT_IP4_LOOKUP,

	[RTE_PTYPE_L3_IPV4_EXT] = PKT_CLS_NEXT_IP4_LOOKUP,

	[RTE_PTYPE_L3_IPV4_EXT_UNKNOWN] = PKT_CLS_NEXT_IP4_LOOKUP,

	[RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER] =
		PKT_CLS_NEXT_IP4_LOOKUP,

	[RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L2_ETHER] =
		PKT_CLS_NEXT_IP4_LOOKUP,

	[RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L2_ETHER] =
		PKT_CLS_NEXT_IP4_LOOKUP,
};

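/*
 * Node process callback: classify each mbuf by its l2/l3 ptype and hand
 * it to the next node given by p_nxt[], speculating that consecutive
 * packets in a burst share the same type so they can be moved in bulk.
 */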
static uint16_t
pkt_cls_node_process(struct rte_graph *graph, struct rte_node *node,
		     void **objs, uint16_t nb_objs)
{
	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3, **pkts;
	uint8_t l0, l1, l2, l3, last_type;
	uint16_t next_index, n_left_from;
	uint16_t held = 0, last_spec = 0;
	struct pkt_cls_node_ctx *ctx;
	void **to_next, **from;
	uint32_t i;

	pkts = (struct rte_mbuf **)objs;
	from = objs;
	n_left_from = nb_objs;

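	/* Prefetch the rest of the objs[] pointer array one cache line
	 * (OBJS_PER_CLINE pointers) at a time; the first cache line is
	 * read immediately below anyway.
	 */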
	for (i = OBJS_PER_CLINE; i < RTE_GRAPH_BURST_SIZE; i += OBJS_PER_CLINE)
		rte_prefetch0(&objs[i]);

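	/* For burst sizes large enough for prefetching to pay off, warm
	 * up the first four mbuf headers before the main loop reads them.
	 */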
#if RTE_GRAPH_BURST_SIZE > 64
	for (i = 0; i < 4 && i < n_left_from; i++)
		rte_prefetch0(pkts[i]);
#endif

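	/* Speculate that this burst resolves to the same next node as the
	 * last packet of the previous burst, whose l2/l3 type is cached in
	 * the node context.
	 */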
	ctx = (struct pkt_cls_node_ctx *)node->ctx;
	last_type = ctx->l2l3_type;
	next_index = p_nxt[last_type];

	/* Get stream for the speculated next node */
	to_next = rte_node_next_stream_get(graph, node,
					   next_index, nb_objs);
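	/* Fast path: classify four packets per iteration. While the
	 * speculation holds, packets are only counted (last_spec) and
	 * copied to the stream in bulk once a mismatch or the end is hit.
	 */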
	while (n_left_from >= 4) {
#if RTE_GRAPH_BURST_SIZE > 64
		if (likely(n_left_from > 7)) {
			rte_prefetch0(pkts[4]);
			rte_prefetch0(pkts[5]);
			rte_prefetch0(pkts[6]);
			rte_prefetch0(pkts[7]);
		}
#endif

		mbuf0 = pkts[0];
		mbuf1 = pkts[1];
		mbuf2 = pkts[2];
		mbuf3 = pkts[3];
		pkts += 4;
		n_left_from -= 4;

		l0 = mbuf0->packet_type &
			(RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
		l1 = mbuf1->packet_type &
			(RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
		l2 = mbuf2->packet_type &
			(RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
		l3 = mbuf3->packet_type &
			(RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);

		/* Check whether all four packets are destined for the same
		 * next node, based on their l2/l3 packet type.
		 */
		uint8_t fix_spec = (last_type ^ l0) | (last_type ^ l1) |
			(last_type ^ l2) | (last_type ^ l3);

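		/* Slow path: at least one packet type differs from the
		 * speculated one, so each packet must be sorted separately.
		 */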
		if (unlikely(fix_spec)) {
			/* Copy things successfully speculated till now */
			rte_memcpy(to_next, from,
				   last_spec * sizeof(from[0]));
			from += last_spec;
			to_next += last_spec;
			held += last_spec;
			last_spec = 0;

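			/* Route each packet: matches stay on the speculated
			 * stream, mismatches are enqueued to their own node.
			 */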
			/* l0 */
			if (p_nxt[l0] == next_index) {
				to_next[0] = from[0];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node,
						    p_nxt[l0], from[0]);
			}

			/* l1 */
			if (p_nxt[l1] == next_index) {
				to_next[0] = from[1];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node,
						    p_nxt[l1], from[1]);
			}

			/* l2 */
			if (p_nxt[l2] == next_index) {
				to_next[0] = from[2];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node,
						    p_nxt[l2], from[2]);
			}

			/* l3 */
			if (p_nxt[l3] == next_index) {
				to_next[0] = from[3];
				to_next++;
				held++;
			} else {
				rte_node_enqueue_x1(graph, node,
						    p_nxt[l3], from[3]);
			}

			/* Update the speculated ptype: switch streams only
			 * when the last two packets agree on a new type
			 * whose next node differs from the current one.
			 */
			if ((last_type != l3) && (l2 == l3) &&
			    (next_index != p_nxt[l3])) {
				/* Put the current stream for
				 * speculated ltype.
				 */
				rte_node_next_stream_put(graph, node,
							 next_index, held);

				held = 0;

				/* Get next stream for new ltype */
				next_index = p_nxt[l3];
				last_type = l3;
				to_next = rte_node_next_stream_get(graph, node,
								   next_index,
								   nb_objs);
			} else if (next_index == p_nxt[l3]) {
				last_type = l3;
			}

			from += 4;
		} else {
			last_spec += 4;
		}
	}

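	/* Tail: handle the remaining (fewer than four) packets one at a
	 * time, using the same speculation scheme.
	 */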
	while (n_left_from > 0) {
		mbuf0 = pkts[0];

		pkts += 1;
		n_left_from -= 1;

		l0 = mbuf0->packet_type &
			(RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK);
		if (unlikely((l0 != last_type) &&
			     (p_nxt[l0] != next_index))) {
			/* Copy things successfully speculated till now */
			rte_memcpy(to_next, from,
				   last_spec * sizeof(from[0]));
			from += last_spec;
			to_next += last_spec;
			held += last_spec;
			last_spec = 0;

			rte_node_enqueue_x1(graph, node,
					    p_nxt[l0], from[0]);
			from += 1;
		} else {
			last_spec += 1;
		}
	}

	/* !!! Home run !!! The whole burst matched the speculation, so
	 * move the entire stream to the next node in one shot.
	 */
	if (likely(last_spec == nb_objs)) {
		rte_node_next_stream_move(graph, node, next_index);
		return nb_objs;
	}

	held += last_spec;
	/* Copy things successfully speculated till now */
	rte_memcpy(to_next, from, last_spec * sizeof(from[0]));
	rte_node_next_stream_put(graph, node, next_index, held);

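	/* Remember the last seen l2/l3 type to seed the speculation for
	 * the next burst.
	 */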
	ctx->l2l3_type = last_type;
	return nb_objs;
}

/* Packet Classification Node */
struct rte_node_register pkt_cls_node = {
	.process = pkt_cls_node_process,
	.name = "pkt_cls",

	.nb_edges = PKT_CLS_NEXT_MAX,
	.next_nodes = {
		/* Pkt drop node starts at '0' */
		[PKT_CLS_NEXT_PKT_DROP] = "pkt_drop",
		[PKT_CLS_NEXT_IP4_LOOKUP] = "ip4_lookup",
	},
};
RTE_NODE_REGISTER(pkt_cls_node);
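
/*
 * Illustrative usage sketch (an assumption for clarity, not part of this
 * file): an application pulls this node into a graph by listing its name
 * in the node patterns passed to rte_graph_create(), e.g.:
 *
 *	static const char *patterns[] = {
 *		"ethdev_rx-*", "pkt_cls", "ip4_lookup", "pkt_drop",
 *	};
 *	struct rte_graph_param prm = {
 *		.socket_id = SOCKET_ID_ANY,
 *		.nb_node_patterns = RTE_DIM(patterns),
 *		.node_patterns = patterns,
 *	};
 *	rte_graph_t id = rte_graph_create("worker0", &prm);
 */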