/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <rte_flow_classify.h>
#include "rte_flow_classify_parse.h"

struct classify_valid_pattern {
	enum rte_flow_item_type *items;
	parse_filter_t parse_filter;
};

static struct classify_action action;

/* Pattern for IPv4 5-tuple UDP filter */
static enum rte_flow_item_type pattern_ntuple_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern for IPv4 5-tuple TCP filter */
static enum rte_flow_item_type pattern_ntuple_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern for IPv4 5-tuple SCTP filter */
static enum rte_flow_item_type pattern_ntuple_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};
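
/*
 * Note: user-supplied patterns may interleave VOID items with the types
 * above; classify_pattern_skip_void_item() below is expected to strip
 * them before a pattern is matched against these arrays.
 */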

static int
classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error);

static struct classify_valid_pattern classify_supported_patterns[] = {
	/* ntuple */
	{ pattern_ntuple_1, classify_parse_ntuple_filter },
	{ pattern_ntuple_2, classify_parse_ntuple_filter },
	{ pattern_ntuple_3, classify_parse_ntuple_filter },
};

struct classify_action *
classify_get_flow_action(void)
{
	return &action;
}

/* Find the first VOID or non-VOID item pointer */
const struct rte_flow_item *
classify_find_first_item(const struct rte_flow_item *item, bool is_void)
{
	bool is_find;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (is_void)
			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
		else
			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
		if (is_find)
			break;
		item++;
	}
	return item;
}

/* Skip all VOID items of the pattern */
void
classify_pattern_skip_void_item(struct rte_flow_item *items,
				const struct rte_flow_item *pattern)
{
	uint32_t cpy_count = 0;
	const struct rte_flow_item *pb = pattern, *pe = pattern;

	for (;;) {
		/* Find a non-void item first */
		pb = classify_find_first_item(pb, false);
		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
			pe = pb;
			break;
		}

		/* Find a void item */
		pe = classify_find_first_item(pb + 1, true);

		cpy_count = pe - pb;
		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

		items += cpy_count;

		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
			pb = pe;
			break;
		}

		/* Resume the search after the VOID item just found;
		 * without this step the loop would never advance.
		 */
		pb = pe + 1;
	}
	/* Copy the END item. */
	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
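
/**
 * For illustration: given the input pattern
 *	ETH, VOID, IPV4, VOID, VOID, UDP, END
 * classify_pattern_skip_void_item() fills the items array with
 *	ETH, IPV4, UDP, END
 */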

/* Check if the pattern matches a supported item type array */
static bool
classify_match_pattern(enum rte_flow_item_type *item_array,
		struct rte_flow_item *pattern)
{
	struct rte_flow_item *item = pattern;

	while ((*item_array == item->type) &&
			(*item_array != RTE_FLOW_ITEM_TYPE_END)) {
		item_array++;
		item++;
	}

	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
			item->type == RTE_FLOW_ITEM_TYPE_END);
}

/* Find the parse filter function that matches the pattern, if any */
parse_filter_t
classify_find_parse_filter_func(struct rte_flow_item *pattern)
{
	parse_filter_t parse_filter = NULL;
	uint8_t i = 0;

	for (; i < RTE_DIM(classify_supported_patterns); i++) {
		if (classify_match_pattern(classify_supported_patterns[i].items,
				pattern)) {
			parse_filter =
				classify_supported_patterns[i].parse_filter;
			break;
		}
	}

	return parse_filter;
}
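
/**
 * Illustrative usage sketch (not part of this file): once VOID items have
 * been stripped, the lookup and parse steps chain as follows. The attr,
 * items, actions and error variables are assumed to be supplied by the
 * caller.
 *
 *	parse_filter_t parse_filter;
 *	struct rte_eth_ntuple_filter filter;
 *	int ret = -EINVAL;
 *
 *	parse_filter = classify_find_parse_filter_func(items);
 *	if (parse_filter != NULL)
 *		ret = parse_filter(attr, items, actions, &filter, error);
 */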

#define FLOW_RULE_MIN_PRIORITY 8
#define FLOW_RULE_MAX_PRIORITY 0

/* Advance "item" from the given index to the next non-VOID pattern item;
 * "index" is left pointing at the item found.
 */
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
	do {\
		item = pattern + index;\
		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
			index++;\
			item = pattern + index;\
		} \
	} while (0)

/* Same as above, for the action list */
#define NEXT_ITEM_OF_ACTION(act, actions, index)\
	do {\
		act = actions + index;\
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
			index++;\
			act = actions + index;\
		} \
	} while (0)
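
/*
 * Because the macros leave "index" at the item they return, callers must
 * increment it before fetching the next non-VOID entry, e.g.:
 *
 *	index = 0;
 *	NEXT_ITEM_OF_PATTERN(item, pattern, index);
 *	...
 *	index++;
 *	NEXT_ITEM_OF_PATTERN(item, pattern, index);
 */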

/**
 * Please be aware that all the parsers share an assumption:
 * rte_flow_item uses big-endian (network) byte order, while
 * rte_flow_attr and rte_flow_action use CPU byte order.
 * This is because the pattern describes packets, which normally
 * use network byte order.
 */
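
/*
 * For example (illustrative values only), a caller matching UDP port 80
 * would build the item spec in network byte order:
 *
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr = {
 *			.src_port = rte_cpu_to_be_16(80),
 *			.dst_port = rte_cpu_to_be_16(80),
 *		},
 *	};
 */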

/**
 * Parse the rule to see if it is an n-tuple rule,
 * filling in the n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP, TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action must be COUNT or MARK.
 * The next not void action can be COUNT, MARK or END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * All other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_action_count *count;
	const struct rte_flow_action_mark *mark_spec;
	uint32_t index;

	/* parse pattern */
	index = 0;

	/* the first not void item can be ETH or IPv4 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
			item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
		return -EINVAL;
	}
	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Range (item->last) is not supported */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					item,
					"Not supported last point for range");
			return -EINVAL;
		}
		/* if the first item is ETH, spec and mask should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Not supported by ntuple filter");
			return -EINVAL;
		}
		/* check if the next not void item is IPv4 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Not supported by ntuple filter");
			return -EINVAL;
		}
	}

	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ntuple mask");
		return -EINVAL;
	}
	/* Range (item->last) is not supported */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
		return -EINVAL;
	}

	ipv4_mask = item->mask;
	/**
	 * Only support src & dst addresses and protocol;
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
			ipv4_mask->hdr.type_of_service ||
			ipv4_mask->hdr.total_length ||
			ipv4_mask->hdr.packet_id ||
			ipv4_mask->hdr.fragment_offset ||
			ipv4_mask->hdr.time_to_live ||
			ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
		return -EINVAL;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;

	/* check if the next not void item is TCP, UDP or SCTP */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
			item->type != RTE_FLOW_ITEM_TYPE_UDP &&
			item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
		return -EINVAL;
	}

	/* get the TCP/UDP/SCTP info */
	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ntuple mask");
		return -EINVAL;
	}

	/* Range (item->last) is not supported */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
		return -EINVAL;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = item->mask;

		/**
		 * Only support src & dst ports and tcp flags;
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
				tcp_mask->hdr.recv_ack ||
				tcp_mask->hdr.data_off ||
				tcp_mask->hdr.rx_win ||
				tcp_mask->hdr.cksum ||
				tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
			return -EINVAL;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
			return -EINVAL;
		}

		tcp_spec = item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = item->mask;

		/**
		 * Only support src & dst ports;
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
				udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
			return -EINVAL;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else {
		sctp_mask = item->mask;

		/**
		 * Only support src & dst ports;
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
				sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
			return -EINVAL;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
		return -EINVAL;
	}

	table_type = RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE;

	/* parse attr */
	/* must be input (ingress) direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				attr, "Only support ingress.");
		return -EINVAL;
	}

	/* egress is not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				attr, "Not support egress.");
		return -EINVAL;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				attr, "Error priority.");
		return -EINVAL;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority > FLOW_RULE_MIN_PRIORITY)
		filter->priority = FLOW_RULE_MAX_PRIORITY;

	/* parse action */
	index = 0;

	/**
	 * n-tuple only supports COUNT and MARK;
	 * check if the first not void action is COUNT or MARK.
	 */
	memset(&action, 0, sizeof(action));
	NEXT_ITEM_OF_ACTION(act, actions, index);
	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
		count = act->conf;
		memcpy(&action.act.counter, count, sizeof(action.act.counter));
		break;
	case RTE_FLOW_ACTION_TYPE_MARK:
		action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
		mark_spec = act->conf;
		memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
		break;
	default:
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, act,
			"Invalid action.");
		return -EINVAL;
	}

	/* check if the next not void action is MARK, COUNT or END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
		count = act->conf;
		memcpy(&action.act.counter, count, sizeof(action.act.counter));
		break;
	case RTE_FLOW_ACTION_TYPE_MARK:
		action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
		mark_spec = act->conf;
		memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
		break;
	case RTE_FLOW_ACTION_TYPE_END:
		return 0;
	default:
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, act,
			"Invalid action.");
		return -EINVAL;
	}

	/* check if the next not void action is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION, act,
			"Invalid action.");
		return -EINVAL;
	}

	return 0;
}