xref: /dpdk/drivers/net/i40e/i40e_flow.c (revision 87063aaf)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 
13 #include <rte_debug.h>
14 #include <rte_ether.h>
15 #include <ethdev_driver.h>
16 #include <rte_log.h>
17 #include <rte_malloc.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_bitmap.h>
21 
22 #include "i40e_logs.h"
23 #include "base/i40e_type.h"
24 #include "base/i40e_prototype.h"
25 #include "i40e_ethdev.h"
26 #include "i40e_hash.h"
27 
/* IPv6 Traffic Class field, shifted into its position inside vtc_flow. */
#define I40E_IPV6_TC_MASK	(0xFF << I40E_FDIR_IPv6_TC_OFFSET)
/* IPv6 next-header protocol number of the Fragment extension header. */
#define I40E_IPV6_FRAG_HEADER	44
/* Number of entries in tunnel tenant-id (VNI/TNI) arrays.
 * NOTE(review): exact layout of the consumer arrays is outside this view —
 * confirm against the tunnel-filter parsing code.
 */
#define I40E_TENANT_ARRAY_NUM	3
/* 802.1Q TCI sub-field masks: full 16-bit TCI, then PCP (3 bits),
 * CFI/DEI (1 bit) and VID (12 bits).
 */
#define I40E_VLAN_TCI_MASK	0xFFFF
#define I40E_VLAN_PRI_MASK	0xE000
#define I40E_VLAN_CFI_MASK	0x1000
#define I40E_VLAN_VID_MASK	0x0FFF
35 
36 static int i40e_flow_validate(struct rte_eth_dev *dev,
37 			      const struct rte_flow_attr *attr,
38 			      const struct rte_flow_item pattern[],
39 			      const struct rte_flow_action actions[],
40 			      struct rte_flow_error *error);
41 static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
42 					 const struct rte_flow_attr *attr,
43 					 const struct rte_flow_item pattern[],
44 					 const struct rte_flow_action actions[],
45 					 struct rte_flow_error *error);
46 static int i40e_flow_destroy(struct rte_eth_dev *dev,
47 			     struct rte_flow *flow,
48 			     struct rte_flow_error *error);
49 static int i40e_flow_flush(struct rte_eth_dev *dev,
50 			   struct rte_flow_error *error);
51 static int i40e_flow_query(struct rte_eth_dev *dev,
52 			   struct rte_flow *flow,
53 			   const struct rte_flow_action *actions,
54 			   void *data, struct rte_flow_error *error);
55 static int
56 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
57 				  const struct rte_flow_item *pattern,
58 				  struct rte_flow_error *error,
59 				  struct rte_eth_ethertype_filter *filter);
60 static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
61 				    const struct rte_flow_action *actions,
62 				    struct rte_flow_error *error,
63 				    struct rte_eth_ethertype_filter *filter);
64 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
65 					const struct rte_flow_attr *attr,
66 					const struct rte_flow_item *pattern,
67 					struct rte_flow_error *error,
68 					struct i40e_fdir_filter_conf *filter);
69 static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
70 				       const struct rte_flow_action *actions,
71 				       struct rte_flow_error *error,
72 				       struct i40e_fdir_filter_conf *filter);
73 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
74 				 const struct rte_flow_action *actions,
75 				 struct rte_flow_error *error,
76 				 struct i40e_tunnel_filter_conf *filter);
77 static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
78 				struct rte_flow_error *error);
79 static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
80 				    const struct rte_flow_attr *attr,
81 				    const struct rte_flow_item pattern[],
82 				    const struct rte_flow_action actions[],
83 				    struct rte_flow_error *error,
84 				    union i40e_filter_t *filter);
85 static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
86 				       const struct rte_flow_attr *attr,
87 				       const struct rte_flow_item pattern[],
88 				       const struct rte_flow_action actions[],
89 				       struct rte_flow_error *error,
90 				       union i40e_filter_t *filter);
91 static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
92 					const struct rte_flow_attr *attr,
93 					const struct rte_flow_item pattern[],
94 					const struct rte_flow_action actions[],
95 					struct rte_flow_error *error,
96 					union i40e_filter_t *filter);
97 static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
98 					const struct rte_flow_attr *attr,
99 					const struct rte_flow_item pattern[],
100 					const struct rte_flow_action actions[],
101 					struct rte_flow_error *error,
102 					union i40e_filter_t *filter);
103 static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
104 				       const struct rte_flow_attr *attr,
105 				       const struct rte_flow_item pattern[],
106 				       const struct rte_flow_action actions[],
107 				       struct rte_flow_error *error,
108 				       union i40e_filter_t *filter);
109 static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
110 				      const struct rte_flow_attr *attr,
111 				      const struct rte_flow_item pattern[],
112 				      const struct rte_flow_action actions[],
113 				      struct rte_flow_error *error,
114 				      union i40e_filter_t *filter);
115 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
116 				      struct i40e_ethertype_filter *filter);
117 static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
118 					   struct i40e_tunnel_filter *filter);
119 static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
120 static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
121 static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
122 static int
123 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
124 			      const struct rte_flow_attr *attr,
125 			      const struct rte_flow_item pattern[],
126 			      const struct rte_flow_action actions[],
127 			      struct rte_flow_error *error,
128 			      union i40e_filter_t *filter);
129 static int
130 i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
131 			      const struct rte_flow_item *pattern,
132 			      struct rte_flow_error *error,
133 			      struct i40e_tunnel_filter_conf *filter);
134 
135 static int i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
136 					   const struct rte_flow_attr *attr,
137 					   const struct rte_flow_item pattern[],
138 					   const struct rte_flow_action actions[],
139 					   struct rte_flow_error *error,
140 					   union i40e_filter_t *filter);
/*
 * rte_flow driver operations exported by the i40e PMD: one callback per
 * generic flow API entry point implemented in this file.
 */
const struct rte_flow_ops i40e_flow_ops = {
	.validate = i40e_flow_validate,
	.create = i40e_flow_create,
	.destroy = i40e_flow_destroy,
	.flush = i40e_flow_flush,
	.query = i40e_flow_query,
};
148 
/* Filter configuration produced by the most recent parse; the filter type
 * tells which union member is valid.
 * NOTE(review): file-scope shared state between validate and create — not
 * safe for concurrent flow operations; confirm callers serialize access.
 */
static union i40e_filter_t cons_filter;
static enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
/* internal pattern w/o VOID items */
/* NOTE(review): non-static on purpose or oversight? It leaks into the global
 * namespace — confirm whether any other translation unit references it.
 */
struct rte_flow_item g_items[32];
153 
154 /* Pattern matched ethertype filter */
/* Plain Ethernet frame: matched by the ethertype filter parser. */
static enum rte_flow_item_type pattern_ethertype[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};
159 
160 /* Pattern matched flow director filter */
/*
 * Flow director patterns without VLAN or VF items: plain L3, L3+L4,
 * GTP-C/GTP-U tunnels, and variants followed by 1-3 RAW items
 * (flexible-payload words).
 */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* GTP control/user-plane tunnels over IPv4, optionally with inner L3. */
static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPC,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* GTP control/user-plane tunnels over IPv6, optionally with inner L3. */
static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPC,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

/* ETH (+ L3/L4) followed by 1, 2 or 3 RAW flexible-payload items. */
static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};
513 
/*
 * VLAN-tagged flow director patterns: same L3/L4/RAW combinations as the
 * untagged set above, with a single 802.1Q tag after the Ethernet header.
 */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* VLAN-tagged variants followed by 1-3 RAW flexible-payload items. */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
};
839 
/*
 * Flow director patterns terminated by a VF item: the trailing
 * RTE_FLOW_ITEM_TYPE_VF selects which virtual function the matched
 * traffic is directed to.
 */
static enum rte_flow_item_type pattern_fdir_ipv4_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

/* VF-directed variants with 1-3 RAW flexible-payload items. */
static enum rte_flow_item_type pattern_fdir_ethertype_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};
1108 
1109 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2_vf[] = {
1110 	RTE_FLOW_ITEM_TYPE_ETH,
1111 	RTE_FLOW_ITEM_TYPE_IPV6,
1112 	RTE_FLOW_ITEM_TYPE_TCP,
1113 	RTE_FLOW_ITEM_TYPE_RAW,
1114 	RTE_FLOW_ITEM_TYPE_RAW,
1115 	RTE_FLOW_ITEM_TYPE_VF,
1116 	RTE_FLOW_ITEM_TYPE_END,
1117 };
1118 
1119 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3_vf[] = {
1120 	RTE_FLOW_ITEM_TYPE_ETH,
1121 	RTE_FLOW_ITEM_TYPE_IPV6,
1122 	RTE_FLOW_ITEM_TYPE_TCP,
1123 	RTE_FLOW_ITEM_TYPE_RAW,
1124 	RTE_FLOW_ITEM_TYPE_RAW,
1125 	RTE_FLOW_ITEM_TYPE_RAW,
1126 	RTE_FLOW_ITEM_TYPE_VF,
1127 	RTE_FLOW_ITEM_TYPE_END,
1128 };
1129 
1130 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1_vf[] = {
1131 	RTE_FLOW_ITEM_TYPE_ETH,
1132 	RTE_FLOW_ITEM_TYPE_IPV6,
1133 	RTE_FLOW_ITEM_TYPE_SCTP,
1134 	RTE_FLOW_ITEM_TYPE_RAW,
1135 	RTE_FLOW_ITEM_TYPE_VF,
1136 	RTE_FLOW_ITEM_TYPE_END,
1137 };
1138 
1139 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2_vf[] = {
1140 	RTE_FLOW_ITEM_TYPE_ETH,
1141 	RTE_FLOW_ITEM_TYPE_IPV6,
1142 	RTE_FLOW_ITEM_TYPE_SCTP,
1143 	RTE_FLOW_ITEM_TYPE_RAW,
1144 	RTE_FLOW_ITEM_TYPE_RAW,
1145 	RTE_FLOW_ITEM_TYPE_VF,
1146 	RTE_FLOW_ITEM_TYPE_END,
1147 };
1148 
1149 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3_vf[] = {
1150 	RTE_FLOW_ITEM_TYPE_ETH,
1151 	RTE_FLOW_ITEM_TYPE_IPV6,
1152 	RTE_FLOW_ITEM_TYPE_SCTP,
1153 	RTE_FLOW_ITEM_TYPE_RAW,
1154 	RTE_FLOW_ITEM_TYPE_RAW,
1155 	RTE_FLOW_ITEM_TYPE_RAW,
1156 	RTE_FLOW_ITEM_TYPE_VF,
1157 	RTE_FLOW_ITEM_TYPE_END,
1158 };
1159 
1160 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_vf[] = {
1161 	RTE_FLOW_ITEM_TYPE_ETH,
1162 	RTE_FLOW_ITEM_TYPE_VLAN,
1163 	RTE_FLOW_ITEM_TYPE_VF,
1164 	RTE_FLOW_ITEM_TYPE_END,
1165 };
1166 
1167 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_vf[] = {
1168 	RTE_FLOW_ITEM_TYPE_ETH,
1169 	RTE_FLOW_ITEM_TYPE_VLAN,
1170 	RTE_FLOW_ITEM_TYPE_IPV4,
1171 	RTE_FLOW_ITEM_TYPE_VF,
1172 	RTE_FLOW_ITEM_TYPE_END,
1173 };
1174 
1175 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_vf[] = {
1176 	RTE_FLOW_ITEM_TYPE_ETH,
1177 	RTE_FLOW_ITEM_TYPE_VLAN,
1178 	RTE_FLOW_ITEM_TYPE_IPV4,
1179 	RTE_FLOW_ITEM_TYPE_UDP,
1180 	RTE_FLOW_ITEM_TYPE_VF,
1181 	RTE_FLOW_ITEM_TYPE_END,
1182 };
1183 
1184 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_vf[] = {
1185 	RTE_FLOW_ITEM_TYPE_ETH,
1186 	RTE_FLOW_ITEM_TYPE_VLAN,
1187 	RTE_FLOW_ITEM_TYPE_IPV4,
1188 	RTE_FLOW_ITEM_TYPE_TCP,
1189 	RTE_FLOW_ITEM_TYPE_VF,
1190 	RTE_FLOW_ITEM_TYPE_END,
1191 };
1192 
1193 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_vf[] = {
1194 	RTE_FLOW_ITEM_TYPE_ETH,
1195 	RTE_FLOW_ITEM_TYPE_VLAN,
1196 	RTE_FLOW_ITEM_TYPE_IPV4,
1197 	RTE_FLOW_ITEM_TYPE_SCTP,
1198 	RTE_FLOW_ITEM_TYPE_VF,
1199 	RTE_FLOW_ITEM_TYPE_END,
1200 };
1201 
1202 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_vf[] = {
1203 	RTE_FLOW_ITEM_TYPE_ETH,
1204 	RTE_FLOW_ITEM_TYPE_VLAN,
1205 	RTE_FLOW_ITEM_TYPE_IPV6,
1206 	RTE_FLOW_ITEM_TYPE_VF,
1207 	RTE_FLOW_ITEM_TYPE_END,
1208 };
1209 
1210 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_vf[] = {
1211 	RTE_FLOW_ITEM_TYPE_ETH,
1212 	RTE_FLOW_ITEM_TYPE_VLAN,
1213 	RTE_FLOW_ITEM_TYPE_IPV6,
1214 	RTE_FLOW_ITEM_TYPE_UDP,
1215 	RTE_FLOW_ITEM_TYPE_VF,
1216 	RTE_FLOW_ITEM_TYPE_END,
1217 };
1218 
1219 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_vf[] = {
1220 	RTE_FLOW_ITEM_TYPE_ETH,
1221 	RTE_FLOW_ITEM_TYPE_VLAN,
1222 	RTE_FLOW_ITEM_TYPE_IPV6,
1223 	RTE_FLOW_ITEM_TYPE_TCP,
1224 	RTE_FLOW_ITEM_TYPE_VF,
1225 	RTE_FLOW_ITEM_TYPE_END,
1226 };
1227 
1228 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_vf[] = {
1229 	RTE_FLOW_ITEM_TYPE_ETH,
1230 	RTE_FLOW_ITEM_TYPE_VLAN,
1231 	RTE_FLOW_ITEM_TYPE_IPV6,
1232 	RTE_FLOW_ITEM_TYPE_SCTP,
1233 	RTE_FLOW_ITEM_TYPE_VF,
1234 	RTE_FLOW_ITEM_TYPE_END,
1235 };
1236 
1237 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1_vf[] = {
1238 	RTE_FLOW_ITEM_TYPE_ETH,
1239 	RTE_FLOW_ITEM_TYPE_VLAN,
1240 	RTE_FLOW_ITEM_TYPE_RAW,
1241 	RTE_FLOW_ITEM_TYPE_VF,
1242 	RTE_FLOW_ITEM_TYPE_END,
1243 };
1244 
1245 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2_vf[] = {
1246 	RTE_FLOW_ITEM_TYPE_ETH,
1247 	RTE_FLOW_ITEM_TYPE_VLAN,
1248 	RTE_FLOW_ITEM_TYPE_RAW,
1249 	RTE_FLOW_ITEM_TYPE_RAW,
1250 	RTE_FLOW_ITEM_TYPE_VF,
1251 	RTE_FLOW_ITEM_TYPE_END,
1252 };
1253 
1254 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3_vf[] = {
1255 	RTE_FLOW_ITEM_TYPE_ETH,
1256 	RTE_FLOW_ITEM_TYPE_VLAN,
1257 	RTE_FLOW_ITEM_TYPE_RAW,
1258 	RTE_FLOW_ITEM_TYPE_RAW,
1259 	RTE_FLOW_ITEM_TYPE_RAW,
1260 	RTE_FLOW_ITEM_TYPE_VF,
1261 	RTE_FLOW_ITEM_TYPE_END,
1262 };
1263 
1264 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1_vf[] = {
1265 	RTE_FLOW_ITEM_TYPE_ETH,
1266 	RTE_FLOW_ITEM_TYPE_VLAN,
1267 	RTE_FLOW_ITEM_TYPE_IPV4,
1268 	RTE_FLOW_ITEM_TYPE_RAW,
1269 	RTE_FLOW_ITEM_TYPE_VF,
1270 	RTE_FLOW_ITEM_TYPE_END,
1271 };
1272 
1273 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2_vf[] = {
1274 	RTE_FLOW_ITEM_TYPE_ETH,
1275 	RTE_FLOW_ITEM_TYPE_VLAN,
1276 	RTE_FLOW_ITEM_TYPE_IPV4,
1277 	RTE_FLOW_ITEM_TYPE_RAW,
1278 	RTE_FLOW_ITEM_TYPE_RAW,
1279 	RTE_FLOW_ITEM_TYPE_VF,
1280 	RTE_FLOW_ITEM_TYPE_END,
1281 };
1282 
1283 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3_vf[] = {
1284 	RTE_FLOW_ITEM_TYPE_ETH,
1285 	RTE_FLOW_ITEM_TYPE_VLAN,
1286 	RTE_FLOW_ITEM_TYPE_IPV4,
1287 	RTE_FLOW_ITEM_TYPE_RAW,
1288 	RTE_FLOW_ITEM_TYPE_RAW,
1289 	RTE_FLOW_ITEM_TYPE_RAW,
1290 	RTE_FLOW_ITEM_TYPE_VF,
1291 	RTE_FLOW_ITEM_TYPE_END,
1292 };
1293 
1294 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1_vf[] = {
1295 	RTE_FLOW_ITEM_TYPE_ETH,
1296 	RTE_FLOW_ITEM_TYPE_VLAN,
1297 	RTE_FLOW_ITEM_TYPE_IPV4,
1298 	RTE_FLOW_ITEM_TYPE_UDP,
1299 	RTE_FLOW_ITEM_TYPE_RAW,
1300 	RTE_FLOW_ITEM_TYPE_VF,
1301 	RTE_FLOW_ITEM_TYPE_END,
1302 };
1303 
1304 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2_vf[] = {
1305 	RTE_FLOW_ITEM_TYPE_ETH,
1306 	RTE_FLOW_ITEM_TYPE_VLAN,
1307 	RTE_FLOW_ITEM_TYPE_IPV4,
1308 	RTE_FLOW_ITEM_TYPE_UDP,
1309 	RTE_FLOW_ITEM_TYPE_RAW,
1310 	RTE_FLOW_ITEM_TYPE_RAW,
1311 	RTE_FLOW_ITEM_TYPE_VF,
1312 	RTE_FLOW_ITEM_TYPE_END,
1313 };
1314 
1315 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3_vf[] = {
1316 	RTE_FLOW_ITEM_TYPE_ETH,
1317 	RTE_FLOW_ITEM_TYPE_VLAN,
1318 	RTE_FLOW_ITEM_TYPE_IPV4,
1319 	RTE_FLOW_ITEM_TYPE_UDP,
1320 	RTE_FLOW_ITEM_TYPE_RAW,
1321 	RTE_FLOW_ITEM_TYPE_RAW,
1322 	RTE_FLOW_ITEM_TYPE_RAW,
1323 	RTE_FLOW_ITEM_TYPE_VF,
1324 	RTE_FLOW_ITEM_TYPE_END,
1325 };
1326 
1327 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1_vf[] = {
1328 	RTE_FLOW_ITEM_TYPE_ETH,
1329 	RTE_FLOW_ITEM_TYPE_VLAN,
1330 	RTE_FLOW_ITEM_TYPE_IPV4,
1331 	RTE_FLOW_ITEM_TYPE_TCP,
1332 	RTE_FLOW_ITEM_TYPE_RAW,
1333 	RTE_FLOW_ITEM_TYPE_VF,
1334 	RTE_FLOW_ITEM_TYPE_END,
1335 };
1336 
1337 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2_vf[] = {
1338 	RTE_FLOW_ITEM_TYPE_ETH,
1339 	RTE_FLOW_ITEM_TYPE_VLAN,
1340 	RTE_FLOW_ITEM_TYPE_IPV4,
1341 	RTE_FLOW_ITEM_TYPE_TCP,
1342 	RTE_FLOW_ITEM_TYPE_RAW,
1343 	RTE_FLOW_ITEM_TYPE_RAW,
1344 	RTE_FLOW_ITEM_TYPE_VF,
1345 	RTE_FLOW_ITEM_TYPE_END,
1346 };
1347 
1348 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3_vf[] = {
1349 	RTE_FLOW_ITEM_TYPE_ETH,
1350 	RTE_FLOW_ITEM_TYPE_VLAN,
1351 	RTE_FLOW_ITEM_TYPE_IPV4,
1352 	RTE_FLOW_ITEM_TYPE_TCP,
1353 	RTE_FLOW_ITEM_TYPE_RAW,
1354 	RTE_FLOW_ITEM_TYPE_RAW,
1355 	RTE_FLOW_ITEM_TYPE_RAW,
1356 	RTE_FLOW_ITEM_TYPE_VF,
1357 	RTE_FLOW_ITEM_TYPE_END,
1358 };
1359 
1360 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1_vf[] = {
1361 	RTE_FLOW_ITEM_TYPE_ETH,
1362 	RTE_FLOW_ITEM_TYPE_VLAN,
1363 	RTE_FLOW_ITEM_TYPE_IPV4,
1364 	RTE_FLOW_ITEM_TYPE_SCTP,
1365 	RTE_FLOW_ITEM_TYPE_RAW,
1366 	RTE_FLOW_ITEM_TYPE_VF,
1367 	RTE_FLOW_ITEM_TYPE_END,
1368 };
1369 
1370 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2_vf[] = {
1371 	RTE_FLOW_ITEM_TYPE_ETH,
1372 	RTE_FLOW_ITEM_TYPE_VLAN,
1373 	RTE_FLOW_ITEM_TYPE_IPV4,
1374 	RTE_FLOW_ITEM_TYPE_SCTP,
1375 	RTE_FLOW_ITEM_TYPE_RAW,
1376 	RTE_FLOW_ITEM_TYPE_RAW,
1377 	RTE_FLOW_ITEM_TYPE_VF,
1378 	RTE_FLOW_ITEM_TYPE_END,
1379 };
1380 
1381 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3_vf[] = {
1382 	RTE_FLOW_ITEM_TYPE_ETH,
1383 	RTE_FLOW_ITEM_TYPE_VLAN,
1384 	RTE_FLOW_ITEM_TYPE_IPV4,
1385 	RTE_FLOW_ITEM_TYPE_SCTP,
1386 	RTE_FLOW_ITEM_TYPE_RAW,
1387 	RTE_FLOW_ITEM_TYPE_RAW,
1388 	RTE_FLOW_ITEM_TYPE_RAW,
1389 	RTE_FLOW_ITEM_TYPE_VF,
1390 	RTE_FLOW_ITEM_TYPE_END,
1391 };
1392 
1393 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1_vf[] = {
1394 	RTE_FLOW_ITEM_TYPE_ETH,
1395 	RTE_FLOW_ITEM_TYPE_VLAN,
1396 	RTE_FLOW_ITEM_TYPE_IPV6,
1397 	RTE_FLOW_ITEM_TYPE_RAW,
1398 	RTE_FLOW_ITEM_TYPE_VF,
1399 	RTE_FLOW_ITEM_TYPE_END,
1400 };
1401 
1402 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2_vf[] = {
1403 	RTE_FLOW_ITEM_TYPE_ETH,
1404 	RTE_FLOW_ITEM_TYPE_VLAN,
1405 	RTE_FLOW_ITEM_TYPE_IPV6,
1406 	RTE_FLOW_ITEM_TYPE_RAW,
1407 	RTE_FLOW_ITEM_TYPE_RAW,
1408 	RTE_FLOW_ITEM_TYPE_VF,
1409 	RTE_FLOW_ITEM_TYPE_END,
1410 };
1411 
1412 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3_vf[] = {
1413 	RTE_FLOW_ITEM_TYPE_ETH,
1414 	RTE_FLOW_ITEM_TYPE_VLAN,
1415 	RTE_FLOW_ITEM_TYPE_IPV6,
1416 	RTE_FLOW_ITEM_TYPE_RAW,
1417 	RTE_FLOW_ITEM_TYPE_RAW,
1418 	RTE_FLOW_ITEM_TYPE_RAW,
1419 	RTE_FLOW_ITEM_TYPE_VF,
1420 	RTE_FLOW_ITEM_TYPE_END,
1421 };
1422 
1423 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1_vf[] = {
1424 	RTE_FLOW_ITEM_TYPE_ETH,
1425 	RTE_FLOW_ITEM_TYPE_VLAN,
1426 	RTE_FLOW_ITEM_TYPE_IPV6,
1427 	RTE_FLOW_ITEM_TYPE_UDP,
1428 	RTE_FLOW_ITEM_TYPE_RAW,
1429 	RTE_FLOW_ITEM_TYPE_VF,
1430 	RTE_FLOW_ITEM_TYPE_END,
1431 };
1432 
1433 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2_vf[] = {
1434 	RTE_FLOW_ITEM_TYPE_ETH,
1435 	RTE_FLOW_ITEM_TYPE_VLAN,
1436 	RTE_FLOW_ITEM_TYPE_IPV6,
1437 	RTE_FLOW_ITEM_TYPE_UDP,
1438 	RTE_FLOW_ITEM_TYPE_RAW,
1439 	RTE_FLOW_ITEM_TYPE_RAW,
1440 	RTE_FLOW_ITEM_TYPE_VF,
1441 	RTE_FLOW_ITEM_TYPE_END,
1442 };
1443 
1444 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3_vf[] = {
1445 	RTE_FLOW_ITEM_TYPE_ETH,
1446 	RTE_FLOW_ITEM_TYPE_VLAN,
1447 	RTE_FLOW_ITEM_TYPE_IPV6,
1448 	RTE_FLOW_ITEM_TYPE_UDP,
1449 	RTE_FLOW_ITEM_TYPE_RAW,
1450 	RTE_FLOW_ITEM_TYPE_RAW,
1451 	RTE_FLOW_ITEM_TYPE_RAW,
1452 	RTE_FLOW_ITEM_TYPE_VF,
1453 	RTE_FLOW_ITEM_TYPE_END,
1454 };
1455 
1456 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1_vf[] = {
1457 	RTE_FLOW_ITEM_TYPE_ETH,
1458 	RTE_FLOW_ITEM_TYPE_VLAN,
1459 	RTE_FLOW_ITEM_TYPE_IPV6,
1460 	RTE_FLOW_ITEM_TYPE_TCP,
1461 	RTE_FLOW_ITEM_TYPE_RAW,
1462 	RTE_FLOW_ITEM_TYPE_VF,
1463 	RTE_FLOW_ITEM_TYPE_END,
1464 };
1465 
1466 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2_vf[] = {
1467 	RTE_FLOW_ITEM_TYPE_ETH,
1468 	RTE_FLOW_ITEM_TYPE_VLAN,
1469 	RTE_FLOW_ITEM_TYPE_IPV6,
1470 	RTE_FLOW_ITEM_TYPE_TCP,
1471 	RTE_FLOW_ITEM_TYPE_RAW,
1472 	RTE_FLOW_ITEM_TYPE_RAW,
1473 	RTE_FLOW_ITEM_TYPE_VF,
1474 	RTE_FLOW_ITEM_TYPE_END,
1475 };
1476 
1477 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3_vf[] = {
1478 	RTE_FLOW_ITEM_TYPE_ETH,
1479 	RTE_FLOW_ITEM_TYPE_VLAN,
1480 	RTE_FLOW_ITEM_TYPE_IPV6,
1481 	RTE_FLOW_ITEM_TYPE_TCP,
1482 	RTE_FLOW_ITEM_TYPE_RAW,
1483 	RTE_FLOW_ITEM_TYPE_RAW,
1484 	RTE_FLOW_ITEM_TYPE_RAW,
1485 	RTE_FLOW_ITEM_TYPE_VF,
1486 	RTE_FLOW_ITEM_TYPE_END,
1487 };
1488 
1489 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1_vf[] = {
1490 	RTE_FLOW_ITEM_TYPE_ETH,
1491 	RTE_FLOW_ITEM_TYPE_VLAN,
1492 	RTE_FLOW_ITEM_TYPE_IPV6,
1493 	RTE_FLOW_ITEM_TYPE_SCTP,
1494 	RTE_FLOW_ITEM_TYPE_RAW,
1495 	RTE_FLOW_ITEM_TYPE_VF,
1496 	RTE_FLOW_ITEM_TYPE_END,
1497 };
1498 
1499 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2_vf[] = {
1500 	RTE_FLOW_ITEM_TYPE_ETH,
1501 	RTE_FLOW_ITEM_TYPE_VLAN,
1502 	RTE_FLOW_ITEM_TYPE_IPV6,
1503 	RTE_FLOW_ITEM_TYPE_SCTP,
1504 	RTE_FLOW_ITEM_TYPE_RAW,
1505 	RTE_FLOW_ITEM_TYPE_RAW,
1506 	RTE_FLOW_ITEM_TYPE_VF,
1507 	RTE_FLOW_ITEM_TYPE_END,
1508 };
1509 
1510 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3_vf[] = {
1511 	RTE_FLOW_ITEM_TYPE_ETH,
1512 	RTE_FLOW_ITEM_TYPE_VLAN,
1513 	RTE_FLOW_ITEM_TYPE_IPV6,
1514 	RTE_FLOW_ITEM_TYPE_SCTP,
1515 	RTE_FLOW_ITEM_TYPE_RAW,
1516 	RTE_FLOW_ITEM_TYPE_RAW,
1517 	RTE_FLOW_ITEM_TYPE_RAW,
1518 	RTE_FLOW_ITEM_TYPE_VF,
1519 	RTE_FLOW_ITEM_TYPE_END,
1520 };
1521 
/* Pattern matched tunnel filter */

/* VXLAN: outer IPv4, inner Ethernet. */
static enum rte_flow_item_type pattern_vxlan_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

/* VXLAN: outer IPv6, inner Ethernet. */
static enum rte_flow_item_type pattern_vxlan_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

/* VXLAN: outer IPv4, inner Ethernet + VLAN. */
static enum rte_flow_item_type pattern_vxlan_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

/* VXLAN: outer IPv6, inner Ethernet + VLAN. */
static enum rte_flow_item_type pattern_vxlan_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

/* NVGRE: outer IPv4, inner Ethernet. */
static enum rte_flow_item_type pattern_nvgre_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

/* NVGRE: outer IPv6, inner Ethernet. */
static enum rte_flow_item_type pattern_nvgre_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

/* NVGRE: outer IPv4, inner Ethernet + VLAN. */
static enum rte_flow_item_type pattern_nvgre_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

/* NVGRE: outer IPv6, inner Ethernet + VLAN. */
static enum rte_flow_item_type pattern_nvgre_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

/* MPLS over UDP, outer IPv4. */
static enum rte_flow_item_type pattern_mpls_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

/* MPLS over UDP, outer IPv6. */
static enum rte_flow_item_type pattern_mpls_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

/* MPLS over GRE, outer IPv4. */
static enum rte_flow_item_type pattern_mpls_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_GRE,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

/* MPLS over GRE, outer IPv6. */
static enum rte_flow_item_type pattern_mpls_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_GRE,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

/* QinQ: double VLAN tag. */
static enum rte_flow_item_type pattern_qinq_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

/* L2TPv3 directly over IPv4/IPv6. */
static enum rte_flow_item_type pattern_fdir_ipv4_l2tpv3oip[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_l2tpv3oip[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* ESP directly over IPv4/IPv6, and UDP-encapsulated ESP. */
static enum rte_flow_item_type pattern_fdir_ipv4_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};
1677 
/* Table mapping each supported item-type pattern to its parse function.
 * Some patterns appear more than once with different parsers (e.g. the
 * GTP patterns under both FDIR and the GTP tunnel parser, and the plain
 * UDP/TCP/SCTP patterns under both FDIR and the L4 cloud parser).
 * NOTE(review): the duplicated entries are presumably tried in table
 * order by the pattern-match loop — verify against the lookup code.
 */
static struct i40e_valid_pattern i40e_supported_patterns[] = {
	/* Ethertype */
	{ pattern_ethertype, i40e_flow_parse_ethertype_filter },
	/* FDIR - support default flow type without flexible payload*/
	{ pattern_ethertype, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_esp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_esp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_esp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_esp, i40e_flow_parse_fdir_filter },
	/* FDIR - support default flow type with flexible payload */
	{ pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
	/* FDIR - support single vlan input set */
	{ pattern_fdir_ethertype_vlan, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_vlan_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_vlan_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_vlan_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
	/* FDIR - support VF item */
	{ pattern_fdir_ipv4_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_vlan_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_vlan_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_vlan_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_vlan_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
	/* VXLAN */
	{ pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
	{ pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
	{ pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
	{ pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
	/* NVGRE */
	{ pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
	{ pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
	{ pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
	{ pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
	/* MPLSoUDP & MPLSoGRE */
	{ pattern_mpls_1, i40e_flow_parse_mpls_filter },
	{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
	{ pattern_mpls_3, i40e_flow_parse_mpls_filter },
	{ pattern_mpls_4, i40e_flow_parse_mpls_filter },
	/* GTP-C & GTP-U */
	{ pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
	{ pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
	{ pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
	{ pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
	/* QINQ */
	{ pattern_qinq_1, i40e_flow_parse_qinq_filter },
	/* L2TPv3 over IP */
	{ pattern_fdir_ipv4_l2tpv3oip, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_l2tpv3oip, i40e_flow_parse_fdir_filter },
	/* L4 over port */
	{ pattern_fdir_ipv4_udp, i40e_flow_parse_l4_cloud_filter },
	{ pattern_fdir_ipv4_tcp, i40e_flow_parse_l4_cloud_filter },
	{ pattern_fdir_ipv4_sctp, i40e_flow_parse_l4_cloud_filter },
	{ pattern_fdir_ipv6_udp, i40e_flow_parse_l4_cloud_filter },
	{ pattern_fdir_ipv6_tcp, i40e_flow_parse_l4_cloud_filter },
	{ pattern_fdir_ipv6_sctp, i40e_flow_parse_l4_cloud_filter },
};
1873 
/* Point @act at the first non-VOID action at or after @actions[index],
 * advancing @index past any VOID entries.  Relies on the action list
 * being terminated with RTE_FLOW_ACTION_TYPE_END, which stops the loop
 * (END != VOID); @index is evaluated multiple times, so pass an lvalue
 * with no side effects.
 */
#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
	do {                                                            \
		act = actions + index;                                  \
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
			index++;                                        \
			act = actions + index;                          \
		}                                                       \
	} while (0)
1882 
1883 /* Find the first VOID or non-VOID item pointer */
1884 static const struct rte_flow_item *
i40e_find_first_item(const struct rte_flow_item * item,bool is_void)1885 i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
1886 {
1887 	bool is_find;
1888 
1889 	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1890 		if (is_void)
1891 			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
1892 		else
1893 			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
1894 		if (is_find)
1895 			break;
1896 		item++;
1897 	}
1898 	return item;
1899 }
1900 
1901 /* Skip all VOID items of the pattern */
1902 static void
i40e_pattern_skip_void_item(struct rte_flow_item * items,const struct rte_flow_item * pattern)1903 i40e_pattern_skip_void_item(struct rte_flow_item *items,
1904 			    const struct rte_flow_item *pattern)
1905 {
1906 	uint32_t cpy_count = 0;
1907 	const struct rte_flow_item *pb = pattern, *pe = pattern;
1908 
1909 	for (;;) {
1910 		/* Find a non-void item first */
1911 		pb = i40e_find_first_item(pb, false);
1912 		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
1913 			pe = pb;
1914 			break;
1915 		}
1916 
1917 		/* Find a void item */
1918 		pe = i40e_find_first_item(pb + 1, true);
1919 
1920 		cpy_count = pe - pb;
1921 		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
1922 
1923 		items += cpy_count;
1924 
1925 		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
1926 			pb = pe;
1927 			break;
1928 		}
1929 
1930 		pb = pe + 1;
1931 	}
1932 	/* Copy the END item. */
1933 	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
1934 }
1935 
1936 /* Check if the pattern matches a supported item type array */
1937 static bool
i40e_match_pattern(enum rte_flow_item_type * item_array,struct rte_flow_item * pattern)1938 i40e_match_pattern(enum rte_flow_item_type *item_array,
1939 		   struct rte_flow_item *pattern)
1940 {
1941 	struct rte_flow_item *item = pattern;
1942 
1943 	while ((*item_array == item->type) &&
1944 	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
1945 		item_array++;
1946 		item++;
1947 	}
1948 
1949 	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
1950 		item->type == RTE_FLOW_ITEM_TYPE_END);
1951 }
1952 
1953 /* Find if there's parse filter function matched */
1954 static parse_filter_t
i40e_find_parse_filter_func(struct rte_flow_item * pattern,uint32_t * idx)1955 i40e_find_parse_filter_func(struct rte_flow_item *pattern, uint32_t *idx)
1956 {
1957 	parse_filter_t parse_filter = NULL;
1958 	uint8_t i = *idx;
1959 
1960 	for (; i < RTE_DIM(i40e_supported_patterns); i++) {
1961 		if (i40e_match_pattern(i40e_supported_patterns[i].items,
1962 					pattern)) {
1963 			parse_filter = i40e_supported_patterns[i].parse_filter;
1964 			break;
1965 		}
1966 	}
1967 
1968 	*idx = ++i;
1969 
1970 	return parse_filter;
1971 }
1972 
1973 /* Parse attributes */
1974 static int
i40e_flow_parse_attr(const struct rte_flow_attr * attr,struct rte_flow_error * error)1975 i40e_flow_parse_attr(const struct rte_flow_attr *attr,
1976 		     struct rte_flow_error *error)
1977 {
1978 	/* Must be input direction */
1979 	if (!attr->ingress) {
1980 		rte_flow_error_set(error, EINVAL,
1981 				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1982 				   attr, "Only support ingress.");
1983 		return -rte_errno;
1984 	}
1985 
1986 	/* Not supported */
1987 	if (attr->egress) {
1988 		rte_flow_error_set(error, EINVAL,
1989 				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1990 				   attr, "Not support egress.");
1991 		return -rte_errno;
1992 	}
1993 
1994 	/* Not supported */
1995 	if (attr->priority) {
1996 		rte_flow_error_set(error, EINVAL,
1997 				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1998 				   attr, "Not support priority.");
1999 		return -rte_errno;
2000 	}
2001 
2002 	/* Not supported */
2003 	if (attr->group) {
2004 		rte_flow_error_set(error, EINVAL,
2005 				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
2006 				   attr, "Not support group.");
2007 		return -rte_errno;
2008 	}
2009 
2010 	return 0;
2011 }
2012 
2013 static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev * dev)2014 i40e_get_outer_vlan(struct rte_eth_dev *dev)
2015 {
2016 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2017 	int qinq = dev->data->dev_conf.rxmode.offloads &
2018 		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
2019 	uint64_t reg_r = 0;
2020 	uint16_t reg_id;
2021 	uint16_t tpid;
2022 
2023 	if (qinq)
2024 		reg_id = 2;
2025 	else
2026 		reg_id = 3;
2027 
2028 	i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
2029 				    &reg_r, NULL);
2030 
2031 	tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
2032 
2033 	return tpid;
2034 }
2035 
2036 /* 1. Last in item should be NULL as range is not supported.
2037  * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
2038  * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
2039  * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
2040  *    FF:FF:FF:FF:FF:FF
2041  * 5. Ether_type mask should be 0xFFFF.
2042  */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
				  const struct rte_flow_item *pattern,
				  struct rte_flow_error *error,
				  struct rte_eth_ethertype_filter *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	enum rte_flow_item_type item_type;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* A non-NULL "last" would define a range match. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;
			/* Get the MAC info. */
			if (!eth_spec || !eth_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "NULL ETH spec/mask");
				return -rte_errno;
			}

			/* Mask bits of source MAC address must be full of 0.
			 * Mask bits of destination MAC address must be full
			 * of 1 or full of 0.
			 */
			if (!rte_is_zero_ether_addr(&eth_mask->src) ||
			    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
			     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid MAC_addr mask");
				return -rte_errno;
			}

			/* The ether_type mask must be all-ones.  An all-ones
			 * 16-bit value is identical in either byte order, so
			 * no byte swap is needed for this comparison.
			 */
			if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ethertype mask");
				return -rte_errno;
			}

			/* If mask bits of destination MAC address
			 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
			 */
			if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
				filter->mac_addr = eth_spec->dst;
				filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
			} else {
				filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
			}
			/* Filter stores the ether_type in CPU byte order. */
			filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

			/* Reject ether_types this filter does not accept:
			 * IPv4, IPv6, LLDP and the hardware-configured
			 * outer VLAN TPID.
			 */
			if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
			    filter->ether_type == RTE_ETHER_TYPE_IPV6 ||
			    filter->ether_type == RTE_ETHER_TYPE_LLDP ||
			    filter->ether_type == i40e_get_outer_vlan(dev)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Unsupported ether_type in"
						   " control packet filter.");
				return -rte_errno;
			}
			break;
		default:
			/* Other item types (e.g. VOID) carry no data here. */
			break;
		}
	}

	return 0;
}
2128 
2129 /* Ethertype action only supports QUEUE or DROP. */
2130 static int
i40e_flow_parse_ethertype_action(struct rte_eth_dev * dev,const struct rte_flow_action * actions,struct rte_flow_error * error,struct rte_eth_ethertype_filter * filter)2131 i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
2132 				 const struct rte_flow_action *actions,
2133 				 struct rte_flow_error *error,
2134 				 struct rte_eth_ethertype_filter *filter)
2135 {
2136 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2137 	const struct rte_flow_action *act;
2138 	const struct rte_flow_action_queue *act_q;
2139 	uint32_t index = 0;
2140 
2141 	/* Check if the first non-void action is QUEUE or DROP. */
2142 	NEXT_ITEM_OF_ACTION(act, actions, index);
2143 	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
2144 	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
2145 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2146 				   act, "Not supported action.");
2147 		return -rte_errno;
2148 	}
2149 
2150 	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2151 		act_q = act->conf;
2152 		filter->queue = act_q->index;
2153 		if (filter->queue >= pf->dev_data->nb_rx_queues) {
2154 			rte_flow_error_set(error, EINVAL,
2155 					   RTE_FLOW_ERROR_TYPE_ACTION,
2156 					   act, "Invalid queue ID for"
2157 					   " ethertype_filter.");
2158 			return -rte_errno;
2159 		}
2160 	} else {
2161 		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
2162 	}
2163 
2164 	/* Check if the next non-void item is END */
2165 	index++;
2166 	NEXT_ITEM_OF_ACTION(act, actions, index);
2167 	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2168 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2169 				   act, "Not supported action.");
2170 		return -rte_errno;
2171 	}
2172 
2173 	return 0;
2174 }
2175 
2176 static int
i40e_flow_parse_ethertype_filter(struct rte_eth_dev * dev,const struct rte_flow_attr * attr,const struct rte_flow_item pattern[],const struct rte_flow_action actions[],struct rte_flow_error * error,union i40e_filter_t * filter)2177 i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
2178 				 const struct rte_flow_attr *attr,
2179 				 const struct rte_flow_item pattern[],
2180 				 const struct rte_flow_action actions[],
2181 				 struct rte_flow_error *error,
2182 				 union i40e_filter_t *filter)
2183 {
2184 	struct rte_eth_ethertype_filter *ethertype_filter =
2185 		&filter->ethertype_filter;
2186 	int ret;
2187 
2188 	ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
2189 						ethertype_filter);
2190 	if (ret)
2191 		return ret;
2192 
2193 	ret = i40e_flow_parse_ethertype_action(dev, actions, error,
2194 					       ethertype_filter);
2195 	if (ret)
2196 		return ret;
2197 
2198 	ret = i40e_flow_parse_attr(attr, error);
2199 	if (ret)
2200 		return ret;
2201 
2202 	cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
2203 
2204 	return ret;
2205 }
2206 
2207 static int
i40e_flow_check_raw_item(const struct rte_flow_item * item,const struct rte_flow_item_raw * raw_spec,struct rte_flow_error * error)2208 i40e_flow_check_raw_item(const struct rte_flow_item *item,
2209 			 const struct rte_flow_item_raw *raw_spec,
2210 			 struct rte_flow_error *error)
2211 {
2212 	if (!raw_spec->relative) {
2213 		rte_flow_error_set(error, EINVAL,
2214 				   RTE_FLOW_ERROR_TYPE_ITEM,
2215 				   item,
2216 				   "Relative should be 1.");
2217 		return -rte_errno;
2218 	}
2219 
2220 	if (raw_spec->offset % sizeof(uint16_t)) {
2221 		rte_flow_error_set(error, EINVAL,
2222 				   RTE_FLOW_ERROR_TYPE_ITEM,
2223 				   item,
2224 				   "Offset should be even.");
2225 		return -rte_errno;
2226 	}
2227 
2228 	if (raw_spec->search || raw_spec->limit) {
2229 		rte_flow_error_set(error, EINVAL,
2230 				   RTE_FLOW_ERROR_TYPE_ITEM,
2231 				   item,
2232 				   "search or limit is not supported.");
2233 		return -rte_errno;
2234 	}
2235 
2236 	if (raw_spec->offset < 0) {
2237 		rte_flow_error_set(error, EINVAL,
2238 				   RTE_FLOW_ERROR_TYPE_ITEM,
2239 				   item,
2240 				   "Offset should be non-negative.");
2241 		return -rte_errno;
2242 	}
2243 	return 0;
2244 }
2245 
2246 
/* Map a flow item type to its customized FDIR pctype.
 *
 * For GTPU, L2TPv3-over-IP and ESP the choice additionally depends on
 * flags already collected in filter->input.flow_ext (inner/outer IP
 * type, UDP encapsulation).  Returns the pctype value, or
 * I40E_FILTER_PCTYPE_INVALID when no matching customized pctype is
 * found or the one found is not marked valid.
 */
static uint8_t
i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
				enum rte_flow_item_type item_type,
				struct i40e_fdir_filter_conf *filter)
{
	struct i40e_customized_pctype *cus_pctype = NULL;

	switch (item_type) {
	case RTE_FLOW_ITEM_TYPE_GTPC:
		cus_pctype = i40e_find_customized_pctype(pf,
							 I40E_CUSTOMIZED_GTPC);
		break;
	case RTE_FLOW_ITEM_TYPE_GTPU:
		/* Plain GTPU when no inner IP header was matched,
		 * otherwise select by the inner IP version.
		 */
		if (!filter->input.flow_ext.inner_ip)
			cus_pctype = i40e_find_customized_pctype(pf,
							 I40E_CUSTOMIZED_GTPU);
		else if (filter->input.flow_ext.iip_type ==
			 I40E_FDIR_IPTYPE_IPV4)
			cus_pctype = i40e_find_customized_pctype(pf,
						 I40E_CUSTOMIZED_GTPU_IPV4);
		else if (filter->input.flow_ext.iip_type ==
			 I40E_FDIR_IPTYPE_IPV6)
			cus_pctype = i40e_find_customized_pctype(pf,
						 I40E_CUSTOMIZED_GTPU_IPV6);
		break;
	case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
		/* Select by the outer IP version. */
		if (filter->input.flow_ext.oip_type == I40E_FDIR_IPTYPE_IPV4)
			cus_pctype = i40e_find_customized_pctype(pf,
						I40E_CUSTOMIZED_IPV4_L2TPV3);
		else if (filter->input.flow_ext.oip_type ==
			 I40E_FDIR_IPTYPE_IPV6)
			cus_pctype = i40e_find_customized_pctype(pf,
						I40E_CUSTOMIZED_IPV6_L2TPV3);
		break;
	case RTE_FLOW_ITEM_TYPE_ESP:
		/* Each combination of outer IP version and UDP
		 * encapsulation has its own pctype.
		 */
		if (!filter->input.flow_ext.is_udp) {
			if (filter->input.flow_ext.oip_type ==
				I40E_FDIR_IPTYPE_IPV4)
				cus_pctype = i40e_find_customized_pctype(pf,
						I40E_CUSTOMIZED_ESP_IPV4);
			else if (filter->input.flow_ext.oip_type ==
				I40E_FDIR_IPTYPE_IPV6)
				cus_pctype = i40e_find_customized_pctype(pf,
						I40E_CUSTOMIZED_ESP_IPV6);
		} else {
			if (filter->input.flow_ext.oip_type ==
				I40E_FDIR_IPTYPE_IPV4)
				cus_pctype = i40e_find_customized_pctype(pf,
						I40E_CUSTOMIZED_ESP_IPV4_UDP);
			else if (filter->input.flow_ext.oip_type ==
					I40E_FDIR_IPTYPE_IPV6)
				cus_pctype = i40e_find_customized_pctype(pf,
						I40E_CUSTOMIZED_ESP_IPV6_UDP);
			/* is_udp is consumed here and cleared.
			 * NOTE(review): presumably a one-shot flag so it
			 * does not leak into later parsing - confirm
			 * against callers.
			 */
			filter->input.flow_ext.is_udp = false;
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported item type");
		break;
	}

	/* Only report pctypes flagged as valid. */
	if (cus_pctype && cus_pctype->valid)
		return cus_pctype->pctype;

	return I40E_FILTER_PCTYPE_INVALID;
}
2313 
2314 static void
i40e_flow_set_filter_spi(struct i40e_fdir_filter_conf * filter,const struct rte_flow_item_esp * esp_spec)2315 i40e_flow_set_filter_spi(struct i40e_fdir_filter_conf *filter,
2316 	const struct rte_flow_item_esp *esp_spec)
2317 {
2318 	if (filter->input.flow_ext.oip_type ==
2319 		I40E_FDIR_IPTYPE_IPV4) {
2320 		if (filter->input.flow_ext.is_udp)
2321 			filter->input.flow.esp_ipv4_udp_flow.spi =
2322 				esp_spec->hdr.spi;
2323 		else
2324 			filter->input.flow.esp_ipv4_flow.spi =
2325 				esp_spec->hdr.spi;
2326 	}
2327 	if (filter->input.flow_ext.oip_type ==
2328 		I40E_FDIR_IPTYPE_IPV6) {
2329 		if (filter->input.flow_ext.is_udp)
2330 			filter->input.flow.esp_ipv6_udp_flow.spi =
2331 				esp_spec->hdr.spi;
2332 		else
2333 			filter->input.flow.esp_ipv6_flow.spi =
2334 				esp_spec->hdr.spi;
2335 	}
2336 }
2337 
2338 /* 1. Last in item should be NULL as range is not supported.
2339  * 2. Supported patterns: refer to array i40e_supported_patterns.
2340  * 3. Default supported flow type and input set: refer to array
2341  *    valid_fdir_inset_table in i40e_ethdev.c.
2342  * 4. Mask of fields which need to be matched should be
2343  *    filled with 1.
 * 5. Mask of fields which need not be matched should be
 *    filled with 0.
2346  * 6. GTP profile supports GTPv1 only.
2347  * 7. GTP-C response message ('source_port' = 2123) is not supported.
2348  */
2349 static int
i40e_flow_parse_fdir_pattern(struct rte_eth_dev * dev,const struct rte_flow_attr * attr,const struct rte_flow_item * pattern,struct rte_flow_error * error,struct i40e_fdir_filter_conf * filter)2350 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
2351 			     const struct rte_flow_attr *attr,
2352 			     const struct rte_flow_item *pattern,
2353 			     struct rte_flow_error *error,
2354 			     struct i40e_fdir_filter_conf *filter)
2355 {
2356 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2357 	const struct rte_flow_item *item = pattern;
2358 	const struct rte_flow_item_eth *eth_spec, *eth_mask;
2359 	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
2360 	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
2361 	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
2362 	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
2363 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
2364 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
2365 	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
2366 	const struct rte_flow_item_esp *esp_spec, *esp_mask;
2367 	const struct rte_flow_item_raw *raw_spec, *raw_mask;
2368 	const struct rte_flow_item_vf *vf_spec;
2369 	const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
2370 
2371 	uint8_t pctype = 0;
2372 	uint64_t input_set = I40E_INSET_NONE;
2373 	enum rte_flow_item_type item_type;
2374 	enum rte_flow_item_type next_type;
2375 	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
2376 	enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
2377 	uint32_t i, j;
2378 	uint8_t  ipv6_addr_mask[16] = {
2379 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
2380 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2381 	enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
2382 	uint8_t raw_id = 0;
2383 	int32_t off_arr[I40E_MAX_FLXPLD_FIED];
2384 	uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
2385 	struct i40e_fdir_flex_pit flex_pit;
2386 	uint8_t next_dst_off = 0;
2387 	uint16_t flex_size;
2388 	uint16_t ether_type;
2389 	uint32_t vtc_flow_cpu;
2390 	bool outer_ip = true;
2391 	uint8_t field_idx;
2392 	int ret;
2393 
2394 	memset(off_arr, 0, sizeof(off_arr));
2395 	memset(len_arr, 0, sizeof(len_arr));
2396 	filter->input.flow_ext.customized_pctype = false;
2397 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2398 		if (item->last && item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
2399 			rte_flow_error_set(error, EINVAL,
2400 					   RTE_FLOW_ERROR_TYPE_ITEM,
2401 					   item,
2402 					   "Not support range");
2403 			return -rte_errno;
2404 		}
2405 		item_type = item->type;
2406 		switch (item_type) {
2407 		case RTE_FLOW_ITEM_TYPE_ETH:
2408 			eth_spec = item->spec;
2409 			eth_mask = item->mask;
2410 			next_type = (item + 1)->type;
2411 
2412 			if (next_type == RTE_FLOW_ITEM_TYPE_END &&
2413 						(!eth_spec || !eth_mask)) {
2414 				rte_flow_error_set(error, EINVAL,
2415 						   RTE_FLOW_ERROR_TYPE_ITEM,
2416 						   item,
2417 						   "NULL eth spec/mask.");
2418 				return -rte_errno;
2419 			}
2420 
2421 			if (eth_spec && eth_mask) {
2422 				if (rte_is_broadcast_ether_addr(&eth_mask->dst) &&
2423 					rte_is_zero_ether_addr(&eth_mask->src)) {
2424 					filter->input.flow.l2_flow.dst =
2425 						eth_spec->dst;
2426 					input_set |= I40E_INSET_DMAC;
2427 				} else if (rte_is_zero_ether_addr(&eth_mask->dst) &&
2428 					rte_is_broadcast_ether_addr(&eth_mask->src)) {
2429 					filter->input.flow.l2_flow.src =
2430 						eth_spec->src;
2431 					input_set |= I40E_INSET_SMAC;
2432 				} else if (rte_is_broadcast_ether_addr(&eth_mask->dst) &&
2433 					rte_is_broadcast_ether_addr(&eth_mask->src)) {
2434 					filter->input.flow.l2_flow.dst =
2435 						eth_spec->dst;
2436 					filter->input.flow.l2_flow.src =
2437 						eth_spec->src;
2438 					input_set |= (I40E_INSET_DMAC | I40E_INSET_SMAC);
2439 				} else if (!rte_is_zero_ether_addr(&eth_mask->src) ||
2440 					   !rte_is_zero_ether_addr(&eth_mask->dst)) {
2441 					rte_flow_error_set(error, EINVAL,
2442 						      RTE_FLOW_ERROR_TYPE_ITEM,
2443 						      item,
2444 						      "Invalid MAC_addr mask.");
2445 					return -rte_errno;
2446 				}
2447 			}
2448 			if (eth_spec && eth_mask &&
2449 			next_type == RTE_FLOW_ITEM_TYPE_END) {
2450 				if (eth_mask->type != RTE_BE16(0xffff)) {
2451 					rte_flow_error_set(error, EINVAL,
2452 						      RTE_FLOW_ERROR_TYPE_ITEM,
2453 						      item,
2454 						      "Invalid type mask.");
2455 					return -rte_errno;
2456 				}
2457 
2458 				ether_type = rte_be_to_cpu_16(eth_spec->type);
2459 
2460 				if (next_type == RTE_FLOW_ITEM_TYPE_VLAN ||
2461 				    ether_type == RTE_ETHER_TYPE_IPV4 ||
2462 				    ether_type == RTE_ETHER_TYPE_IPV6 ||
2463 				    ether_type == i40e_get_outer_vlan(dev)) {
2464 					rte_flow_error_set(error, EINVAL,
2465 						     RTE_FLOW_ERROR_TYPE_ITEM,
2466 						     item,
2467 						     "Unsupported ether_type.");
2468 					return -rte_errno;
2469 				}
2470 				input_set |= I40E_INSET_LAST_ETHER_TYPE;
2471 				filter->input.flow.l2_flow.ether_type =
2472 					eth_spec->type;
2473 			}
2474 
2475 			pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2476 			layer_idx = I40E_FLXPLD_L2_IDX;
2477 
2478 			break;
2479 		case RTE_FLOW_ITEM_TYPE_VLAN:
2480 			vlan_spec = item->spec;
2481 			vlan_mask = item->mask;
2482 
2483 			RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE));
2484 			if (vlan_spec && vlan_mask) {
2485 				if (vlan_mask->tci !=
2486 				    rte_cpu_to_be_16(I40E_VLAN_TCI_MASK) &&
2487 				    vlan_mask->tci !=
2488 				    rte_cpu_to_be_16(I40E_VLAN_PRI_MASK) &&
2489 				    vlan_mask->tci !=
2490 				    rte_cpu_to_be_16(I40E_VLAN_CFI_MASK) &&
2491 				    vlan_mask->tci !=
2492 				    rte_cpu_to_be_16(I40E_VLAN_VID_MASK)) {
2493 					rte_flow_error_set(error, EINVAL,
2494 						   RTE_FLOW_ERROR_TYPE_ITEM,
2495 						   item,
2496 						   "Unsupported TCI mask.");
2497 				}
2498 				input_set |= I40E_INSET_VLAN_INNER;
2499 				filter->input.flow_ext.vlan_tci =
2500 					vlan_spec->tci;
2501 			}
2502 			if (vlan_spec && vlan_mask && vlan_mask->inner_type) {
2503 				if (vlan_mask->inner_type != RTE_BE16(0xffff)) {
2504 					rte_flow_error_set(error, EINVAL,
2505 						      RTE_FLOW_ERROR_TYPE_ITEM,
2506 						      item,
2507 						      "Invalid inner_type"
2508 						      " mask.");
2509 					return -rte_errno;
2510 				}
2511 
2512 				ether_type =
2513 					rte_be_to_cpu_16(vlan_spec->inner_type);
2514 
2515 				if (ether_type == RTE_ETHER_TYPE_IPV4 ||
2516 				    ether_type == RTE_ETHER_TYPE_IPV6 ||
2517 				    ether_type == i40e_get_outer_vlan(dev)) {
2518 					rte_flow_error_set(error, EINVAL,
2519 						     RTE_FLOW_ERROR_TYPE_ITEM,
2520 						     item,
2521 						     "Unsupported inner_type.");
2522 					return -rte_errno;
2523 				}
2524 				input_set |= I40E_INSET_LAST_ETHER_TYPE;
2525 				filter->input.flow.l2_flow.ether_type =
2526 					vlan_spec->inner_type;
2527 			}
2528 
2529 			pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2530 			layer_idx = I40E_FLXPLD_L2_IDX;
2531 
2532 			break;
2533 		case RTE_FLOW_ITEM_TYPE_IPV4:
2534 			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
2535 			ipv4_spec = item->spec;
2536 			ipv4_mask = item->mask;
2537 			ipv4_last = item->last;
2538 			pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
2539 			layer_idx = I40E_FLXPLD_L3_IDX;
2540 
2541 			if (ipv4_last) {
2542 				if (!ipv4_spec || !ipv4_mask || !outer_ip) {
2543 					rte_flow_error_set(error, EINVAL,
2544 						RTE_FLOW_ERROR_TYPE_ITEM,
2545 						item,
2546 						"Not support range");
2547 					return -rte_errno;
2548 				}
2549 				/* Only fragment_offset supports range */
2550 				if (ipv4_last->hdr.version_ihl ||
2551 				    ipv4_last->hdr.type_of_service ||
2552 				    ipv4_last->hdr.total_length ||
2553 				    ipv4_last->hdr.packet_id ||
2554 				    ipv4_last->hdr.time_to_live ||
2555 				    ipv4_last->hdr.next_proto_id ||
2556 				    ipv4_last->hdr.hdr_checksum ||
2557 				    ipv4_last->hdr.src_addr ||
2558 				    ipv4_last->hdr.dst_addr) {
2559 					rte_flow_error_set(error, EINVAL,
2560 						   RTE_FLOW_ERROR_TYPE_ITEM,
2561 						   item,
2562 						   "Not support range");
2563 					return -rte_errno;
2564 				}
2565 			}
2566 			if (ipv4_spec && ipv4_mask && outer_ip) {
2567 				/* Check IPv4 mask and update input set */
2568 				if (ipv4_mask->hdr.version_ihl ||
2569 				    ipv4_mask->hdr.total_length ||
2570 				    ipv4_mask->hdr.packet_id ||
2571 				    ipv4_mask->hdr.hdr_checksum) {
2572 					rte_flow_error_set(error, EINVAL,
2573 						   RTE_FLOW_ERROR_TYPE_ITEM,
2574 						   item,
2575 						   "Invalid IPv4 mask.");
2576 					return -rte_errno;
2577 				}
2578 
2579 				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
2580 					input_set |= I40E_INSET_IPV4_SRC;
2581 				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
2582 					input_set |= I40E_INSET_IPV4_DST;
2583 				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
2584 					input_set |= I40E_INSET_IPV4_TOS;
2585 				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
2586 					input_set |= I40E_INSET_IPV4_TTL;
2587 				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
2588 					input_set |= I40E_INSET_IPV4_PROTO;
2589 
2590 				/* Check if it is fragment. */
2591 				uint16_t frag_mask =
2592 					ipv4_mask->hdr.fragment_offset;
2593 				uint16_t frag_spec =
2594 					ipv4_spec->hdr.fragment_offset;
2595 				uint16_t frag_last = 0;
2596 				if (ipv4_last)
2597 					frag_last =
2598 					ipv4_last->hdr.fragment_offset;
2599 				if (frag_mask) {
2600 					frag_mask = rte_be_to_cpu_16(frag_mask);
2601 					frag_spec = rte_be_to_cpu_16(frag_spec);
2602 					frag_last = rte_be_to_cpu_16(frag_last);
2603 					/* frag_off mask has to be 0x3fff */
2604 					if (frag_mask !=
2605 					    (RTE_IPV4_HDR_OFFSET_MASK |
2606 					    RTE_IPV4_HDR_MF_FLAG)) {
2607 						rte_flow_error_set(error,
2608 						   EINVAL,
2609 						   RTE_FLOW_ERROR_TYPE_ITEM,
2610 						   item,
2611 						   "Invalid IPv4 fragment_offset mask");
2612 						return -rte_errno;
2613 					}
2614 					/*
2615 					 * non-frag rule:
2616 					 * mask=0x3fff,spec=0
2617 					 * frag rule:
2618 					 * mask=0x3fff,spec=0x8,last=0x2000
2619 					 */
2620 					if (frag_spec ==
2621 					    (1 << RTE_IPV4_HDR_FO_SHIFT) &&
2622 					    frag_last == RTE_IPV4_HDR_MF_FLAG) {
2623 						pctype =
2624 						  I40E_FILTER_PCTYPE_FRAG_IPV4;
2625 					} else if (frag_spec || frag_last) {
2626 						rte_flow_error_set(error,
2627 						   EINVAL,
2628 						   RTE_FLOW_ERROR_TYPE_ITEM,
2629 						   item,
2630 						   "Invalid IPv4 fragment_offset rule");
2631 						return -rte_errno;
2632 					}
2633 				} else if (frag_spec || frag_last) {
2634 					rte_flow_error_set(error,
2635 						EINVAL,
2636 						RTE_FLOW_ERROR_TYPE_ITEM,
2637 						item,
2638 						"Invalid fragment_offset");
2639 					return -rte_errno;
2640 				}
2641 
2642 				if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2643 					if (input_set & (I40E_INSET_IPV4_SRC |
2644 						I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
2645 						I40E_INSET_IPV4_TTL | I40E_INSET_IPV4_PROTO)) {
2646 						rte_flow_error_set(error, EINVAL,
2647 							RTE_FLOW_ERROR_TYPE_ITEM,
2648 							item,
2649 							"L2 and L3 input set are exclusive.");
2650 						return -rte_errno;
2651 					}
2652 				} else {
2653 					/* Get the filter info */
2654 					filter->input.flow.ip4_flow.proto =
2655 						ipv4_spec->hdr.next_proto_id;
2656 					filter->input.flow.ip4_flow.tos =
2657 						ipv4_spec->hdr.type_of_service;
2658 					filter->input.flow.ip4_flow.ttl =
2659 						ipv4_spec->hdr.time_to_live;
2660 					filter->input.flow.ip4_flow.src_ip =
2661 						ipv4_spec->hdr.src_addr;
2662 					filter->input.flow.ip4_flow.dst_ip =
2663 						ipv4_spec->hdr.dst_addr;
2664 
2665 					filter->input.flow_ext.inner_ip = false;
2666 					filter->input.flow_ext.oip_type =
2667 						I40E_FDIR_IPTYPE_IPV4;
2668 				}
2669 			} else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
2670 				filter->input.flow_ext.inner_ip = true;
2671 				filter->input.flow_ext.iip_type =
2672 					I40E_FDIR_IPTYPE_IPV4;
2673 			} else if (!ipv4_spec && !ipv4_mask && outer_ip) {
2674 				filter->input.flow_ext.inner_ip = false;
2675 				filter->input.flow_ext.oip_type =
2676 					I40E_FDIR_IPTYPE_IPV4;
2677 			} else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
2678 				rte_flow_error_set(error, EINVAL,
2679 						   RTE_FLOW_ERROR_TYPE_ITEM,
2680 						   item,
2681 						   "Invalid inner IPv4 mask.");
2682 				return -rte_errno;
2683 			}
2684 
2685 			if (outer_ip)
2686 				outer_ip = false;
2687 
2688 			break;
2689 		case RTE_FLOW_ITEM_TYPE_IPV6:
2690 			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
2691 			ipv6_spec = item->spec;
2692 			ipv6_mask = item->mask;
2693 			pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
2694 			layer_idx = I40E_FLXPLD_L3_IDX;
2695 
2696 			if (ipv6_spec && ipv6_mask && outer_ip) {
2697 				/* Check IPv6 mask and update input set */
2698 				if (ipv6_mask->hdr.payload_len) {
2699 					rte_flow_error_set(error, EINVAL,
2700 						   RTE_FLOW_ERROR_TYPE_ITEM,
2701 						   item,
2702 						   "Invalid IPv6 mask");
2703 					return -rte_errno;
2704 				}
2705 
2706 				if (!memcmp(ipv6_mask->hdr.src_addr,
2707 					    ipv6_addr_mask,
2708 					    RTE_DIM(ipv6_mask->hdr.src_addr)))
2709 					input_set |= I40E_INSET_IPV6_SRC;
2710 				if (!memcmp(ipv6_mask->hdr.dst_addr,
2711 					    ipv6_addr_mask,
2712 					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
2713 					input_set |= I40E_INSET_IPV6_DST;
2714 
2715 				if ((ipv6_mask->hdr.vtc_flow &
2716 				     rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2717 				    == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2718 					input_set |= I40E_INSET_IPV6_TC;
2719 				if (ipv6_mask->hdr.proto == UINT8_MAX)
2720 					input_set |= I40E_INSET_IPV6_NEXT_HDR;
2721 				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
2722 					input_set |= I40E_INSET_IPV6_HOP_LIMIT;
2723 
2724 				/* Get filter info */
2725 				vtc_flow_cpu =
2726 				      rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
2727 				filter->input.flow.ipv6_flow.tc =
2728 					(uint8_t)(vtc_flow_cpu >>
2729 						  I40E_FDIR_IPv6_TC_OFFSET);
2730 				filter->input.flow.ipv6_flow.proto =
2731 					ipv6_spec->hdr.proto;
2732 				filter->input.flow.ipv6_flow.hop_limits =
2733 					ipv6_spec->hdr.hop_limits;
2734 
2735 				filter->input.flow_ext.inner_ip = false;
2736 				filter->input.flow_ext.oip_type =
2737 					I40E_FDIR_IPTYPE_IPV6;
2738 
2739 				rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
2740 					   ipv6_spec->hdr.src_addr, 16);
2741 				rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
2742 					   ipv6_spec->hdr.dst_addr, 16);
2743 
2744 				/* Check if it is fragment. */
2745 				if (ipv6_spec->hdr.proto ==
2746 				    I40E_IPV6_FRAG_HEADER)
2747 					pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
2748 			} else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
2749 				filter->input.flow_ext.inner_ip = true;
2750 				filter->input.flow_ext.iip_type =
2751 					I40E_FDIR_IPTYPE_IPV6;
2752 			} else if (!ipv6_spec && !ipv6_mask && outer_ip) {
2753 				filter->input.flow_ext.inner_ip = false;
2754 				filter->input.flow_ext.oip_type =
2755 					I40E_FDIR_IPTYPE_IPV6;
2756 			} else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
2757 				rte_flow_error_set(error, EINVAL,
2758 						   RTE_FLOW_ERROR_TYPE_ITEM,
2759 						   item,
2760 						   "Invalid inner IPv6 mask");
2761 				return -rte_errno;
2762 			}
2763 
2764 			if (outer_ip)
2765 				outer_ip = false;
2766 			break;
2767 		case RTE_FLOW_ITEM_TYPE_TCP:
2768 			tcp_spec = item->spec;
2769 			tcp_mask = item->mask;
2770 
2771 			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2772 				pctype =
2773 					I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
2774 			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2775 				pctype =
2776 					I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
2777 			if (tcp_spec && tcp_mask) {
2778 				/* Check TCP mask and update input set */
2779 				if (tcp_mask->hdr.sent_seq ||
2780 				    tcp_mask->hdr.recv_ack ||
2781 				    tcp_mask->hdr.data_off ||
2782 				    tcp_mask->hdr.tcp_flags ||
2783 				    tcp_mask->hdr.rx_win ||
2784 				    tcp_mask->hdr.cksum ||
2785 				    tcp_mask->hdr.tcp_urp) {
2786 					rte_flow_error_set(error, EINVAL,
2787 						   RTE_FLOW_ERROR_TYPE_ITEM,
2788 						   item,
2789 						   "Invalid TCP mask");
2790 					return -rte_errno;
2791 				}
2792 
2793 				if (tcp_mask->hdr.src_port == UINT16_MAX)
2794 					input_set |= I40E_INSET_SRC_PORT;
2795 				if (tcp_mask->hdr.dst_port == UINT16_MAX)
2796 					input_set |= I40E_INSET_DST_PORT;
2797 
2798 				if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2799 					if (input_set &
2800 						(I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) {
2801 						rte_flow_error_set(error, EINVAL,
2802 							RTE_FLOW_ERROR_TYPE_ITEM,
2803 							item,
2804 							"L2 and L4 input set are exclusive.");
2805 						return -rte_errno;
2806 					}
2807 				} else {
2808 					/* Get filter info */
2809 					if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2810 						filter->input.flow.tcp4_flow.src_port =
2811 							tcp_spec->hdr.src_port;
2812 						filter->input.flow.tcp4_flow.dst_port =
2813 							tcp_spec->hdr.dst_port;
2814 					} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2815 						filter->input.flow.tcp6_flow.src_port =
2816 							tcp_spec->hdr.src_port;
2817 						filter->input.flow.tcp6_flow.dst_port =
2818 							tcp_spec->hdr.dst_port;
2819 					}
2820 				}
2821 			}
2822 
2823 			layer_idx = I40E_FLXPLD_L4_IDX;
2824 
2825 			break;
2826 		case RTE_FLOW_ITEM_TYPE_UDP:
2827 			udp_spec = item->spec;
2828 			udp_mask = item->mask;
2829 
2830 			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2831 				pctype =
2832 					I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
2833 			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2834 				pctype =
2835 					I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
2836 
2837 			if (udp_spec && udp_mask) {
2838 				/* Check UDP mask and update input set*/
2839 				if (udp_mask->hdr.dgram_len ||
2840 				    udp_mask->hdr.dgram_cksum) {
2841 					rte_flow_error_set(error, EINVAL,
2842 						   RTE_FLOW_ERROR_TYPE_ITEM,
2843 						   item,
2844 						   "Invalid UDP mask");
2845 					return -rte_errno;
2846 				}
2847 
2848 				if (udp_mask->hdr.src_port == UINT16_MAX)
2849 					input_set |= I40E_INSET_SRC_PORT;
2850 				if (udp_mask->hdr.dst_port == UINT16_MAX)
2851 					input_set |= I40E_INSET_DST_PORT;
2852 
2853 				if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2854 					if (input_set &
2855 						(I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) {
2856 						rte_flow_error_set(error, EINVAL,
2857 							RTE_FLOW_ERROR_TYPE_ITEM,
2858 							item,
2859 							"L2 and L4 input set are exclusive.");
2860 						return -rte_errno;
2861 					}
2862 				} else {
2863 					/* Get filter info */
2864 					if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2865 						filter->input.flow.udp4_flow.src_port =
2866 							udp_spec->hdr.src_port;
2867 						filter->input.flow.udp4_flow.dst_port =
2868 							udp_spec->hdr.dst_port;
2869 					} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2870 						filter->input.flow.udp6_flow.src_port =
2871 							udp_spec->hdr.src_port;
2872 						filter->input.flow.udp6_flow.dst_port =
2873 							udp_spec->hdr.dst_port;
2874 					}
2875 				}
2876 			}
2877 			filter->input.flow_ext.is_udp = true;
2878 			layer_idx = I40E_FLXPLD_L4_IDX;
2879 
2880 			break;
2881 		case RTE_FLOW_ITEM_TYPE_GTPC:
2882 		case RTE_FLOW_ITEM_TYPE_GTPU:
2883 			if (!pf->gtp_support) {
2884 				rte_flow_error_set(error, EINVAL,
2885 						   RTE_FLOW_ERROR_TYPE_ITEM,
2886 						   item,
2887 						   "Unsupported protocol");
2888 				return -rte_errno;
2889 			}
2890 
2891 			gtp_spec = item->spec;
2892 			gtp_mask = item->mask;
2893 
2894 			if (gtp_spec && gtp_mask) {
2895 				if (gtp_mask->v_pt_rsv_flags ||
2896 				    gtp_mask->msg_type ||
2897 				    gtp_mask->msg_len ||
2898 				    gtp_mask->teid != UINT32_MAX) {
2899 					rte_flow_error_set(error, EINVAL,
2900 						   RTE_FLOW_ERROR_TYPE_ITEM,
2901 						   item,
2902 						   "Invalid GTP mask");
2903 					return -rte_errno;
2904 				}
2905 
2906 				filter->input.flow.gtp_flow.teid =
2907 					gtp_spec->teid;
2908 				filter->input.flow_ext.customized_pctype = true;
2909 				cus_proto = item_type;
2910 			}
2911 			break;
2912 		case RTE_FLOW_ITEM_TYPE_ESP:
2913 			if (!pf->esp_support) {
2914 				rte_flow_error_set(error, EINVAL,
2915 						   RTE_FLOW_ERROR_TYPE_ITEM,
2916 						   item,
2917 						   "Unsupported ESP protocol");
2918 				return -rte_errno;
2919 			}
2920 
2921 			esp_spec = item->spec;
2922 			esp_mask = item->mask;
2923 
2924 			if (!esp_spec || !esp_mask) {
2925 				rte_flow_error_set(error, EINVAL,
2926 						   RTE_FLOW_ERROR_TYPE_ITEM,
2927 						   item,
2928 						   "Invalid ESP item");
2929 				return -rte_errno;
2930 			}
2931 
2932 			if (esp_spec && esp_mask) {
2933 				if (esp_mask->hdr.spi != UINT32_MAX) {
2934 					rte_flow_error_set(error, EINVAL,
2935 						   RTE_FLOW_ERROR_TYPE_ITEM,
2936 						   item,
2937 						   "Invalid ESP mask");
2938 					return -rte_errno;
2939 				}
2940 				i40e_flow_set_filter_spi(filter, esp_spec);
2941 				filter->input.flow_ext.customized_pctype = true;
2942 				cus_proto = item_type;
2943 			}
2944 			break;
2945 		case RTE_FLOW_ITEM_TYPE_SCTP:
2946 			sctp_spec = item->spec;
2947 			sctp_mask = item->mask;
2948 
2949 			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2950 				pctype =
2951 					I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
2952 			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2953 				pctype =
2954 					I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
2955 
2956 			if (sctp_spec && sctp_mask) {
2957 				/* Check SCTP mask and update input set */
2958 				if (sctp_mask->hdr.cksum) {
2959 					rte_flow_error_set(error, EINVAL,
2960 						   RTE_FLOW_ERROR_TYPE_ITEM,
2961 						   item,
2962 						   "Invalid UDP mask");
2963 					return -rte_errno;
2964 				}
2965 
2966 				if (sctp_mask->hdr.src_port == UINT16_MAX)
2967 					input_set |= I40E_INSET_SRC_PORT;
2968 				if (sctp_mask->hdr.dst_port == UINT16_MAX)
2969 					input_set |= I40E_INSET_DST_PORT;
2970 				if (sctp_mask->hdr.tag == UINT32_MAX)
2971 					input_set |= I40E_INSET_SCTP_VT;
2972 
2973 				/* Get filter info */
2974 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2975 					filter->input.flow.sctp4_flow.src_port =
2976 						sctp_spec->hdr.src_port;
2977 					filter->input.flow.sctp4_flow.dst_port =
2978 						sctp_spec->hdr.dst_port;
2979 					filter->input.flow.sctp4_flow.verify_tag
2980 						= sctp_spec->hdr.tag;
2981 				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2982 					filter->input.flow.sctp6_flow.src_port =
2983 						sctp_spec->hdr.src_port;
2984 					filter->input.flow.sctp6_flow.dst_port =
2985 						sctp_spec->hdr.dst_port;
2986 					filter->input.flow.sctp6_flow.verify_tag
2987 						= sctp_spec->hdr.tag;
2988 				}
2989 			}
2990 
2991 			layer_idx = I40E_FLXPLD_L4_IDX;
2992 
2993 			break;
2994 		case RTE_FLOW_ITEM_TYPE_RAW:
2995 			raw_spec = item->spec;
2996 			raw_mask = item->mask;
2997 
2998 			if (!raw_spec || !raw_mask) {
2999 				rte_flow_error_set(error, EINVAL,
3000 						   RTE_FLOW_ERROR_TYPE_ITEM,
3001 						   item,
3002 						   "NULL RAW spec/mask");
3003 				return -rte_errno;
3004 			}
3005 
3006 			if (pf->support_multi_driver) {
3007 				rte_flow_error_set(error, ENOTSUP,
3008 						   RTE_FLOW_ERROR_TYPE_ITEM,
3009 						   item,
3010 						   "Unsupported flexible payload.");
3011 				return -rte_errno;
3012 			}
3013 
3014 			ret = i40e_flow_check_raw_item(item, raw_spec, error);
3015 			if (ret < 0)
3016 				return ret;
3017 
3018 			off_arr[raw_id] = raw_spec->offset;
3019 			len_arr[raw_id] = raw_spec->length;
3020 
3021 			flex_size = 0;
3022 			memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
3023 			field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
3024 			flex_pit.size =
3025 				raw_spec->length / sizeof(uint16_t);
3026 			flex_pit.dst_offset =
3027 				next_dst_off / sizeof(uint16_t);
3028 
3029 			for (i = 0; i <= raw_id; i++) {
3030 				if (i == raw_id)
3031 					flex_pit.src_offset +=
3032 						raw_spec->offset /
3033 						sizeof(uint16_t);
3034 				else
3035 					flex_pit.src_offset +=
3036 						(off_arr[i] + len_arr[i]) /
3037 						sizeof(uint16_t);
3038 				flex_size += len_arr[i];
3039 			}
3040 			if (((flex_pit.src_offset + flex_pit.size) >=
3041 			     I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
3042 				flex_size > I40E_FDIR_MAX_FLEXLEN) {
3043 				rte_flow_error_set(error, EINVAL,
3044 					   RTE_FLOW_ERROR_TYPE_ITEM,
3045 					   item,
3046 					   "Exceeds maximal payload limit.");
3047 				return -rte_errno;
3048 			}
3049 
3050 			for (i = 0; i < raw_spec->length; i++) {
3051 				j = i + next_dst_off;
3052 				if (j >= RTE_ETH_FDIR_MAX_FLEXLEN ||
3053 						j >= I40E_FDIR_MAX_FLEX_LEN)
3054 					break;
3055 				filter->input.flow_ext.flexbytes[j] =
3056 					raw_spec->pattern[i];
3057 				filter->input.flow_ext.flex_mask[j] =
3058 					raw_mask->pattern[i];
3059 			}
3060 
3061 			next_dst_off += raw_spec->length;
3062 			raw_id++;
3063 
3064 			memcpy(&filter->input.flow_ext.flex_pit[field_idx],
3065 			       &flex_pit, sizeof(struct i40e_fdir_flex_pit));
3066 			filter->input.flow_ext.layer_idx = layer_idx;
3067 			filter->input.flow_ext.raw_id = raw_id;
3068 			filter->input.flow_ext.is_flex_flow = true;
3069 			break;
3070 		case RTE_FLOW_ITEM_TYPE_VF:
3071 			vf_spec = item->spec;
3072 			if (!attr->transfer) {
3073 				rte_flow_error_set(error, ENOTSUP,
3074 						   RTE_FLOW_ERROR_TYPE_ITEM,
3075 						   item,
3076 						   "Matching VF traffic"
3077 						   " without affecting it"
3078 						   " (transfer attribute)"
3079 						   " is unsupported");
3080 				return -rte_errno;
3081 			}
3082 			filter->input.flow_ext.is_vf = 1;
3083 			filter->input.flow_ext.dst_id = vf_spec->id;
3084 			if (filter->input.flow_ext.is_vf &&
3085 			    filter->input.flow_ext.dst_id >= pf->vf_num) {
3086 				rte_flow_error_set(error, EINVAL,
3087 						   RTE_FLOW_ERROR_TYPE_ITEM,
3088 						   item,
3089 						   "Invalid VF ID for FDIR.");
3090 				return -rte_errno;
3091 			}
3092 			break;
3093 		case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
3094 			l2tpv3oip_spec = item->spec;
3095 			l2tpv3oip_mask = item->mask;
3096 
3097 			if (!l2tpv3oip_spec || !l2tpv3oip_mask)
3098 				break;
3099 
3100 			if (l2tpv3oip_mask->session_id != UINT32_MAX) {
3101 				rte_flow_error_set(error, EINVAL,
3102 					RTE_FLOW_ERROR_TYPE_ITEM,
3103 					item,
3104 					"Invalid L2TPv3 mask");
3105 				return -rte_errno;
3106 			}
3107 
3108 			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3109 				filter->input.flow.ip4_l2tpv3oip_flow.session_id =
3110 					l2tpv3oip_spec->session_id;
3111 				filter->input.flow_ext.oip_type =
3112 					I40E_FDIR_IPTYPE_IPV4;
3113 			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3114 				filter->input.flow.ip6_l2tpv3oip_flow.session_id =
3115 					l2tpv3oip_spec->session_id;
3116 				filter->input.flow_ext.oip_type =
3117 					I40E_FDIR_IPTYPE_IPV6;
3118 			}
3119 
3120 			filter->input.flow_ext.customized_pctype = true;
3121 			cus_proto = item_type;
3122 			break;
3123 		default:
3124 			break;
3125 		}
3126 	}
3127 
3128 	/* Get customized pctype value */
3129 	if (filter->input.flow_ext.customized_pctype) {
3130 		pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter);
3131 		if (pctype == I40E_FILTER_PCTYPE_INVALID) {
3132 			rte_flow_error_set(error, EINVAL,
3133 					   RTE_FLOW_ERROR_TYPE_ITEM,
3134 					   item,
3135 					   "Unsupported pctype");
3136 			return -rte_errno;
3137 		}
3138 	}
3139 
3140 	/* If customized pctype is not used, set fdir configuration.*/
3141 	if (!filter->input.flow_ext.customized_pctype) {
3142 		/* Check if the input set is valid */
3143 		if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
3144 						input_set) != 0) {
3145 			rte_flow_error_set(error, EINVAL,
3146 					   RTE_FLOW_ERROR_TYPE_ITEM,
3147 					   item,
3148 					   "Invalid input set");
3149 			return -rte_errno;
3150 		}
3151 
3152 		filter->input.flow_ext.input_set = input_set;
3153 	}
3154 
3155 	filter->input.pctype = pctype;
3156 
3157 	return 0;
3158 }
3159 
3160 /* Parse to get the action info of a FDIR filter.
3161  * FDIR action supports QUEUE or (QUEUE + MARK).
3162  */
3163 static int
i40e_flow_parse_fdir_action(struct rte_eth_dev * dev,const struct rte_flow_action * actions,struct rte_flow_error * error,struct i40e_fdir_filter_conf * filter)3164 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
3165 			    const struct rte_flow_action *actions,
3166 			    struct rte_flow_error *error,
3167 			    struct i40e_fdir_filter_conf *filter)
3168 {
3169 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3170 	const struct rte_flow_action *act;
3171 	const struct rte_flow_action_queue *act_q;
3172 	const struct rte_flow_action_mark *mark_spec = NULL;
3173 	uint32_t index = 0;
3174 
3175 	/* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
3176 	NEXT_ITEM_OF_ACTION(act, actions, index);
3177 	switch (act->type) {
3178 	case RTE_FLOW_ACTION_TYPE_QUEUE:
3179 		act_q = act->conf;
3180 		filter->action.rx_queue = act_q->index;
3181 		if ((!filter->input.flow_ext.is_vf &&
3182 		     filter->action.rx_queue >= pf->dev_data->nb_rx_queues) ||
3183 		    (filter->input.flow_ext.is_vf &&
3184 		     filter->action.rx_queue >= pf->vf_nb_qps)) {
3185 			rte_flow_error_set(error, EINVAL,
3186 					   RTE_FLOW_ERROR_TYPE_ACTION, act,
3187 					   "Invalid queue ID for FDIR.");
3188 			return -rte_errno;
3189 		}
3190 		filter->action.behavior = I40E_FDIR_ACCEPT;
3191 		break;
3192 	case RTE_FLOW_ACTION_TYPE_DROP:
3193 		filter->action.behavior = I40E_FDIR_REJECT;
3194 		break;
3195 	case RTE_FLOW_ACTION_TYPE_PASSTHRU:
3196 		filter->action.behavior = I40E_FDIR_PASSTHRU;
3197 		break;
3198 	case RTE_FLOW_ACTION_TYPE_MARK:
3199 		filter->action.behavior = I40E_FDIR_PASSTHRU;
3200 		mark_spec = act->conf;
3201 		filter->action.report_status = I40E_FDIR_REPORT_ID;
3202 		filter->soft_id = mark_spec->id;
3203 	break;
3204 	default:
3205 		rte_flow_error_set(error, EINVAL,
3206 				   RTE_FLOW_ERROR_TYPE_ACTION, act,
3207 				   "Invalid action.");
3208 		return -rte_errno;
3209 	}
3210 
3211 	/* Check if the next non-void item is MARK or FLAG or END. */
3212 	index++;
3213 	NEXT_ITEM_OF_ACTION(act, actions, index);
3214 	switch (act->type) {
3215 	case RTE_FLOW_ACTION_TYPE_MARK:
3216 		if (mark_spec) {
3217 			/* Double MARK actions requested */
3218 			rte_flow_error_set(error, EINVAL,
3219 			   RTE_FLOW_ERROR_TYPE_ACTION, act,
3220 			   "Invalid action.");
3221 			return -rte_errno;
3222 		}
3223 		mark_spec = act->conf;
3224 		filter->action.report_status = I40E_FDIR_REPORT_ID;
3225 		filter->soft_id = mark_spec->id;
3226 		break;
3227 	case RTE_FLOW_ACTION_TYPE_FLAG:
3228 		if (mark_spec) {
3229 			/* MARK + FLAG not supported */
3230 			rte_flow_error_set(error, EINVAL,
3231 					   RTE_FLOW_ERROR_TYPE_ACTION, act,
3232 					   "Invalid action.");
3233 			return -rte_errno;
3234 		}
3235 		filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
3236 		break;
3237 	case RTE_FLOW_ACTION_TYPE_RSS:
3238 		if (filter->action.behavior != I40E_FDIR_PASSTHRU) {
3239 			/* RSS filter won't be next if FDIR did not pass thru */
3240 			rte_flow_error_set(error, EINVAL,
3241 					   RTE_FLOW_ERROR_TYPE_ACTION, act,
3242 					   "Invalid action.");
3243 			return -rte_errno;
3244 		}
3245 		break;
3246 	case RTE_FLOW_ACTION_TYPE_END:
3247 		return 0;
3248 	default:
3249 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3250 				   act, "Invalid action.");
3251 		return -rte_errno;
3252 	}
3253 
3254 	/* Check if the next non-void item is END */
3255 	index++;
3256 	NEXT_ITEM_OF_ACTION(act, actions, index);
3257 	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3258 		rte_flow_error_set(error, EINVAL,
3259 				   RTE_FLOW_ERROR_TYPE_ACTION,
3260 				   act, "Invalid action.");
3261 		return -rte_errno;
3262 	}
3263 
3264 	return 0;
3265 }
3266 
3267 static int
i40e_flow_parse_fdir_filter(struct rte_eth_dev * dev,const struct rte_flow_attr * attr,const struct rte_flow_item pattern[],const struct rte_flow_action actions[],struct rte_flow_error * error,union i40e_filter_t * filter)3268 i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
3269 			    const struct rte_flow_attr *attr,
3270 			    const struct rte_flow_item pattern[],
3271 			    const struct rte_flow_action actions[],
3272 			    struct rte_flow_error *error,
3273 			    union i40e_filter_t *filter)
3274 {
3275 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3276 	struct i40e_fdir_filter_conf *fdir_filter =
3277 		&filter->fdir_filter;
3278 	int ret;
3279 
3280 	ret = i40e_flow_parse_fdir_pattern(dev, attr, pattern, error,
3281 					   fdir_filter);
3282 	if (ret)
3283 		return ret;
3284 
3285 	ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
3286 	if (ret)
3287 		return ret;
3288 
3289 	ret = i40e_flow_parse_attr(attr, error);
3290 	if (ret)
3291 		return ret;
3292 
3293 	cons_filter_type = RTE_ETH_FILTER_FDIR;
3294 
3295 	if (pf->fdir.fdir_vsi == NULL) {
3296 		/* Enable fdir when fdir flow is added at first time. */
3297 		ret = i40e_fdir_setup(pf);
3298 		if (ret != I40E_SUCCESS) {
3299 			rte_flow_error_set(error, ENOTSUP,
3300 					   RTE_FLOW_ERROR_TYPE_HANDLE,
3301 					   NULL, "Failed to setup fdir.");
3302 			return -rte_errno;
3303 		}
3304 		ret = i40e_fdir_configure(dev);
3305 		if (ret < 0) {
3306 			rte_flow_error_set(error, ENOTSUP,
3307 					   RTE_FLOW_ERROR_TYPE_HANDLE,
3308 					   NULL, "Failed to configure fdir.");
3309 			goto err;
3310 		}
3311 	}
3312 
3313 	/* If create the first fdir rule, enable fdir check for rx queues */
3314 	if (TAILQ_EMPTY(&pf->fdir.fdir_list))
3315 		i40e_fdir_rx_proc_enable(dev, 1);
3316 
3317 	return 0;
3318 err:
3319 	i40e_fdir_teardown(pf);
3320 	return -rte_errno;
3321 }
3322 
3323 /* Parse to get the action info of a tunnel filter
3324  * Tunnel action only supports PF, VF and QUEUE.
3325  */
3326 static int
i40e_flow_parse_tunnel_action(struct rte_eth_dev * dev,const struct rte_flow_action * actions,struct rte_flow_error * error,struct i40e_tunnel_filter_conf * filter)3327 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
3328 			      const struct rte_flow_action *actions,
3329 			      struct rte_flow_error *error,
3330 			      struct i40e_tunnel_filter_conf *filter)
3331 {
3332 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3333 	const struct rte_flow_action *act;
3334 	const struct rte_flow_action_queue *act_q;
3335 	const struct rte_flow_action_vf *act_vf;
3336 	uint32_t index = 0;
3337 
3338 	/* Check if the first non-void action is PF or VF. */
3339 	NEXT_ITEM_OF_ACTION(act, actions, index);
3340 	if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
3341 	    act->type != RTE_FLOW_ACTION_TYPE_VF) {
3342 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3343 				   act, "Not supported action.");
3344 		return -rte_errno;
3345 	}
3346 
3347 	if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
3348 		act_vf = act->conf;
3349 		filter->vf_id = act_vf->id;
3350 		filter->is_to_vf = 1;
3351 		if (filter->vf_id >= pf->vf_num) {
3352 			rte_flow_error_set(error, EINVAL,
3353 				   RTE_FLOW_ERROR_TYPE_ACTION,
3354 				   act, "Invalid VF ID for tunnel filter");
3355 			return -rte_errno;
3356 		}
3357 	}
3358 
3359 	/* Check if the next non-void item is QUEUE */
3360 	index++;
3361 	NEXT_ITEM_OF_ACTION(act, actions, index);
3362 	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
3363 		act_q = act->conf;
3364 		filter->queue_id = act_q->index;
3365 		if ((!filter->is_to_vf) &&
3366 		    (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
3367 			rte_flow_error_set(error, EINVAL,
3368 				   RTE_FLOW_ERROR_TYPE_ACTION,
3369 				   act, "Invalid queue ID for tunnel filter");
3370 			return -rte_errno;
3371 		} else if (filter->is_to_vf &&
3372 			   (filter->queue_id >= pf->vf_nb_qps)) {
3373 			rte_flow_error_set(error, EINVAL,
3374 				   RTE_FLOW_ERROR_TYPE_ACTION,
3375 				   act, "Invalid queue ID for tunnel filter");
3376 			return -rte_errno;
3377 		}
3378 	}
3379 
3380 	/* Check if the next non-void item is END */
3381 	index++;
3382 	NEXT_ITEM_OF_ACTION(act, actions, index);
3383 	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3384 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3385 				   act, "Not supported action.");
3386 		return -rte_errno;
3387 	}
3388 
3389 	return 0;
3390 }
3391 
3392 /* 1. Last in item should be NULL as range is not supported.
3393  * 2. Supported filter types: Source port only and Destination port only.
3394  * 3. Mask of fields which need to be matched should be
3395  *    filled with 1.
3396  * 4. Mask of fields which needn't to be matched should be
3397  *    filled with 0.
3398  */
3399 static int
i40e_flow_parse_l4_pattern(const struct rte_flow_item * pattern,struct rte_flow_error * error,struct i40e_tunnel_filter_conf * filter)3400 i40e_flow_parse_l4_pattern(const struct rte_flow_item *pattern,
3401 			   struct rte_flow_error *error,
3402 			   struct i40e_tunnel_filter_conf *filter)
3403 {
3404 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
3405 	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
3406 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
3407 	const struct rte_flow_item *item = pattern;
3408 	enum rte_flow_item_type item_type;
3409 
3410 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3411 		if (item->last) {
3412 			rte_flow_error_set(error, EINVAL,
3413 					   RTE_FLOW_ERROR_TYPE_ITEM,
3414 					   item,
3415 					   "Not support range");
3416 			return -rte_errno;
3417 		}
3418 		item_type = item->type;
3419 		switch (item_type) {
3420 		case RTE_FLOW_ITEM_TYPE_ETH:
3421 			if (item->spec || item->mask) {
3422 				rte_flow_error_set(error, EINVAL,
3423 						   RTE_FLOW_ERROR_TYPE_ITEM,
3424 						   item,
3425 						   "Invalid ETH item");
3426 				return -rte_errno;
3427 			}
3428 
3429 			break;
3430 		case RTE_FLOW_ITEM_TYPE_IPV4:
3431 			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3432 			/* IPv4 is used to describe protocol,
3433 			 * spec and mask should be NULL.
3434 			 */
3435 			if (item->spec || item->mask) {
3436 				rte_flow_error_set(error, EINVAL,
3437 						   RTE_FLOW_ERROR_TYPE_ITEM,
3438 						   item,
3439 						   "Invalid IPv4 item");
3440 				return -rte_errno;
3441 			}
3442 
3443 			break;
3444 		case RTE_FLOW_ITEM_TYPE_IPV6:
3445 			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3446 			/* IPv6 is used to describe protocol,
3447 			 * spec and mask should be NULL.
3448 			 */
3449 			if (item->spec || item->mask) {
3450 				rte_flow_error_set(error, EINVAL,
3451 						   RTE_FLOW_ERROR_TYPE_ITEM,
3452 						   item,
3453 						   "Invalid IPv6 item");
3454 				return -rte_errno;
3455 			}
3456 
3457 			break;
3458 		case RTE_FLOW_ITEM_TYPE_UDP:
3459 			udp_spec = item->spec;
3460 			udp_mask = item->mask;
3461 
3462 			if (!udp_spec || !udp_mask) {
3463 				rte_flow_error_set(error, EINVAL,
3464 						   RTE_FLOW_ERROR_TYPE_ITEM,
3465 						   item,
3466 						   "Invalid udp item");
3467 				return -rte_errno;
3468 			}
3469 
3470 			if (udp_spec->hdr.src_port != 0 &&
3471 			    udp_spec->hdr.dst_port != 0) {
3472 				rte_flow_error_set(error, EINVAL,
3473 						   RTE_FLOW_ERROR_TYPE_ITEM,
3474 						   item,
3475 						   "Invalid udp spec");
3476 				return -rte_errno;
3477 			}
3478 
3479 			if (udp_spec->hdr.src_port != 0) {
3480 				filter->l4_port_type =
3481 					I40E_L4_PORT_TYPE_SRC;
3482 				filter->tenant_id =
3483 				rte_be_to_cpu_32(udp_spec->hdr.src_port);
3484 			}
3485 
3486 			if (udp_spec->hdr.dst_port != 0) {
3487 				filter->l4_port_type =
3488 					I40E_L4_PORT_TYPE_DST;
3489 				filter->tenant_id =
3490 				rte_be_to_cpu_32(udp_spec->hdr.dst_port);
3491 			}
3492 
3493 			filter->tunnel_type = I40E_CLOUD_TYPE_UDP;
3494 
3495 			break;
3496 		case RTE_FLOW_ITEM_TYPE_TCP:
3497 			tcp_spec = item->spec;
3498 			tcp_mask = item->mask;
3499 
3500 			if (!tcp_spec || !tcp_mask) {
3501 				rte_flow_error_set(error, EINVAL,
3502 						   RTE_FLOW_ERROR_TYPE_ITEM,
3503 						   item,
3504 						   "Invalid tcp item");
3505 				return -rte_errno;
3506 			}
3507 
3508 			if (tcp_spec->hdr.src_port != 0 &&
3509 			    tcp_spec->hdr.dst_port != 0) {
3510 				rte_flow_error_set(error, EINVAL,
3511 						   RTE_FLOW_ERROR_TYPE_ITEM,
3512 						   item,
3513 						   "Invalid tcp spec");
3514 				return -rte_errno;
3515 			}
3516 
3517 			if (tcp_spec->hdr.src_port != 0) {
3518 				filter->l4_port_type =
3519 					I40E_L4_PORT_TYPE_SRC;
3520 				filter->tenant_id =
3521 				rte_be_to_cpu_32(tcp_spec->hdr.src_port);
3522 			}
3523 
3524 			if (tcp_spec->hdr.dst_port != 0) {
3525 				filter->l4_port_type =
3526 					I40E_L4_PORT_TYPE_DST;
3527 				filter->tenant_id =
3528 				rte_be_to_cpu_32(tcp_spec->hdr.dst_port);
3529 			}
3530 
3531 			filter->tunnel_type = I40E_CLOUD_TYPE_TCP;
3532 
3533 			break;
3534 		case RTE_FLOW_ITEM_TYPE_SCTP:
3535 			sctp_spec = item->spec;
3536 			sctp_mask = item->mask;
3537 
3538 			if (!sctp_spec || !sctp_mask) {
3539 				rte_flow_error_set(error, EINVAL,
3540 						   RTE_FLOW_ERROR_TYPE_ITEM,
3541 						   item,
3542 						   "Invalid sctp item");
3543 				return -rte_errno;
3544 			}
3545 
3546 			if (sctp_spec->hdr.src_port != 0 &&
3547 			    sctp_spec->hdr.dst_port != 0) {
3548 				rte_flow_error_set(error, EINVAL,
3549 						   RTE_FLOW_ERROR_TYPE_ITEM,
3550 						   item,
3551 						   "Invalid sctp spec");
3552 				return -rte_errno;
3553 			}
3554 
3555 			if (sctp_spec->hdr.src_port != 0) {
3556 				filter->l4_port_type =
3557 					I40E_L4_PORT_TYPE_SRC;
3558 				filter->tenant_id =
3559 					rte_be_to_cpu_32(sctp_spec->hdr.src_port);
3560 			}
3561 
3562 			if (sctp_spec->hdr.dst_port != 0) {
3563 				filter->l4_port_type =
3564 					I40E_L4_PORT_TYPE_DST;
3565 				filter->tenant_id =
3566 					rte_be_to_cpu_32(sctp_spec->hdr.dst_port);
3567 			}
3568 
3569 			filter->tunnel_type = I40E_CLOUD_TYPE_SCTP;
3570 
3571 			break;
3572 		default:
3573 			break;
3574 		}
3575 	}
3576 
3577 	return 0;
3578 }
3579 
3580 static int
i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev * dev,const struct rte_flow_attr * attr,const struct rte_flow_item pattern[],const struct rte_flow_action actions[],struct rte_flow_error * error,union i40e_filter_t * filter)3581 i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
3582 				const struct rte_flow_attr *attr,
3583 				const struct rte_flow_item pattern[],
3584 				const struct rte_flow_action actions[],
3585 				struct rte_flow_error *error,
3586 				union i40e_filter_t *filter)
3587 {
3588 	struct i40e_tunnel_filter_conf *tunnel_filter =
3589 		&filter->consistent_tunnel_filter;
3590 	int ret;
3591 
3592 	ret = i40e_flow_parse_l4_pattern(pattern, error, tunnel_filter);
3593 	if (ret)
3594 		return ret;
3595 
3596 	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3597 	if (ret)
3598 		return ret;
3599 
3600 	ret = i40e_flow_parse_attr(attr, error);
3601 	if (ret)
3602 		return ret;
3603 
3604 	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3605 
3606 	return ret;
3607 }
3608 
3609 static uint16_t i40e_supported_tunnel_filter_types[] = {
3610 	RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_TENID |
3611 	RTE_ETH_TUNNEL_FILTER_IVLAN,
3612 	RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_IVLAN,
3613 	RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_TENID,
3614 	RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_TENID |
3615 	RTE_ETH_TUNNEL_FILTER_IMAC,
3616 	RTE_ETH_TUNNEL_FILTER_IMAC,
3617 };
3618 
3619 static int
i40e_check_tunnel_filter_type(uint8_t filter_type)3620 i40e_check_tunnel_filter_type(uint8_t filter_type)
3621 {
3622 	uint8_t i;
3623 
3624 	for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
3625 		if (filter_type == i40e_supported_tunnel_filter_types[i])
3626 			return 0;
3627 	}
3628 
3629 	return -1;
3630 }
3631 
3632 /* 1. Last in item should be NULL as range is not supported.
3633  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
3634  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
3635  * 3. Mask of fields which need to be matched should be
3636  *    filled with 1.
3637  * 4. Mask of fields which needn't to be matched should be
3638  *    filled with 0.
3639  */
3640 static int
i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev * dev,const struct rte_flow_item * pattern,struct rte_flow_error * error,struct i40e_tunnel_filter_conf * filter)3641 i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
3642 			      const struct rte_flow_item *pattern,
3643 			      struct rte_flow_error *error,
3644 			      struct i40e_tunnel_filter_conf *filter)
3645 {
3646 	const struct rte_flow_item *item = pattern;
3647 	const struct rte_flow_item_eth *eth_spec;
3648 	const struct rte_flow_item_eth *eth_mask;
3649 	const struct rte_flow_item_vxlan *vxlan_spec;
3650 	const struct rte_flow_item_vxlan *vxlan_mask;
3651 	const struct rte_flow_item_vlan *vlan_spec;
3652 	const struct rte_flow_item_vlan *vlan_mask;
3653 	uint8_t filter_type = 0;
3654 	bool is_vni_masked = 0;
3655 	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
3656 	enum rte_flow_item_type item_type;
3657 	bool vxlan_flag = 0;
3658 	uint32_t tenant_id_be = 0;
3659 	int ret;
3660 
3661 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3662 		if (item->last) {
3663 			rte_flow_error_set(error, EINVAL,
3664 					   RTE_FLOW_ERROR_TYPE_ITEM,
3665 					   item,
3666 					   "Not support range");
3667 			return -rte_errno;
3668 		}
3669 		item_type = item->type;
3670 		switch (item_type) {
3671 		case RTE_FLOW_ITEM_TYPE_ETH:
3672 			eth_spec = item->spec;
3673 			eth_mask = item->mask;
3674 
3675 			/* Check if ETH item is used for place holder.
3676 			 * If yes, both spec and mask should be NULL.
3677 			 * If no, both spec and mask shouldn't be NULL.
3678 			 */
3679 			if ((!eth_spec && eth_mask) ||
3680 			    (eth_spec && !eth_mask)) {
3681 				rte_flow_error_set(error, EINVAL,
3682 						   RTE_FLOW_ERROR_TYPE_ITEM,
3683 						   item,
3684 						   "Invalid ether spec/mask");
3685 				return -rte_errno;
3686 			}
3687 
3688 			if (eth_spec && eth_mask) {
3689 				/* DST address of inner MAC shouldn't be masked.
3690 				 * SRC address of Inner MAC should be masked.
3691 				 */
3692 				if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
3693 				    !rte_is_zero_ether_addr(&eth_mask->src) ||
3694 				    eth_mask->type) {
3695 					rte_flow_error_set(error, EINVAL,
3696 						   RTE_FLOW_ERROR_TYPE_ITEM,
3697 						   item,
3698 						   "Invalid ether spec/mask");
3699 					return -rte_errno;
3700 				}
3701 
3702 				if (!vxlan_flag) {
3703 					rte_memcpy(&filter->outer_mac,
3704 						   &eth_spec->dst,
3705 						   RTE_ETHER_ADDR_LEN);
3706 					filter_type |= RTE_ETH_TUNNEL_FILTER_OMAC;
3707 				} else {
3708 					rte_memcpy(&filter->inner_mac,
3709 						   &eth_spec->dst,
3710 						   RTE_ETHER_ADDR_LEN);
3711 					filter_type |= RTE_ETH_TUNNEL_FILTER_IMAC;
3712 				}
3713 			}
3714 			break;
3715 		case RTE_FLOW_ITEM_TYPE_VLAN:
3716 			vlan_spec = item->spec;
3717 			vlan_mask = item->mask;
3718 			if (!(vlan_spec && vlan_mask) ||
3719 			    vlan_mask->inner_type) {
3720 				rte_flow_error_set(error, EINVAL,
3721 						   RTE_FLOW_ERROR_TYPE_ITEM,
3722 						   item,
3723 						   "Invalid vlan item");
3724 				return -rte_errno;
3725 			}
3726 
3727 			if (vlan_spec && vlan_mask) {
3728 				if (vlan_mask->tci ==
3729 				    rte_cpu_to_be_16(I40E_VLAN_TCI_MASK))
3730 					filter->inner_vlan =
3731 					      rte_be_to_cpu_16(vlan_spec->tci) &
3732 					      I40E_VLAN_TCI_MASK;
3733 				filter_type |= RTE_ETH_TUNNEL_FILTER_IVLAN;
3734 			}
3735 			break;
3736 		case RTE_FLOW_ITEM_TYPE_IPV4:
3737 			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3738 			/* IPv4 is used to describe protocol,
3739 			 * spec and mask should be NULL.
3740 			 */
3741 			if (item->spec || item->mask) {
3742 				rte_flow_error_set(error, EINVAL,
3743 						   RTE_FLOW_ERROR_TYPE_ITEM,
3744 						   item,
3745 						   "Invalid IPv4 item");
3746 				return -rte_errno;
3747 			}
3748 			break;
3749 		case RTE_FLOW_ITEM_TYPE_IPV6:
3750 			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3751 			/* IPv6 is used to describe protocol,
3752 			 * spec and mask should be NULL.
3753 			 */
3754 			if (item->spec || item->mask) {
3755 				rte_flow_error_set(error, EINVAL,
3756 						   RTE_FLOW_ERROR_TYPE_ITEM,
3757 						   item,
3758 						   "Invalid IPv6 item");
3759 				return -rte_errno;
3760 			}
3761 			break;
3762 		case RTE_FLOW_ITEM_TYPE_UDP:
3763 			/* UDP is used to describe protocol,
3764 			 * spec and mask should be NULL.
3765 			 */
3766 			if (item->spec || item->mask) {
3767 				rte_flow_error_set(error, EINVAL,
3768 					   RTE_FLOW_ERROR_TYPE_ITEM,
3769 					   item,
3770 					   "Invalid UDP item");
3771 				return -rte_errno;
3772 			}
3773 			break;
3774 		case RTE_FLOW_ITEM_TYPE_VXLAN:
3775 			vxlan_spec = item->spec;
3776 			vxlan_mask = item->mask;
3777 			/* Check if VXLAN item is used to describe protocol.
3778 			 * If yes, both spec and mask should be NULL.
3779 			 * If no, both spec and mask shouldn't be NULL.
3780 			 */
3781 			if ((!vxlan_spec && vxlan_mask) ||
3782 			    (vxlan_spec && !vxlan_mask)) {
3783 				rte_flow_error_set(error, EINVAL,
3784 					   RTE_FLOW_ERROR_TYPE_ITEM,
3785 					   item,
3786 					   "Invalid VXLAN item");
3787 				return -rte_errno;
3788 			}
3789 
3790 			/* Check if VNI is masked. */
3791 			if (vxlan_spec && vxlan_mask) {
3792 				is_vni_masked =
3793 					!!memcmp(vxlan_mask->vni, vni_mask,
3794 						 RTE_DIM(vni_mask));
3795 				if (is_vni_masked) {
3796 					rte_flow_error_set(error, EINVAL,
3797 						   RTE_FLOW_ERROR_TYPE_ITEM,
3798 						   item,
3799 						   "Invalid VNI mask");
3800 					return -rte_errno;
3801 				}
3802 
3803 				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
3804 					   vxlan_spec->vni, 3);
3805 				filter->tenant_id =
3806 					rte_be_to_cpu_32(tenant_id_be);
3807 				filter_type |= RTE_ETH_TUNNEL_FILTER_TENID;
3808 			}
3809 
3810 			vxlan_flag = 1;
3811 			break;
3812 		default:
3813 			break;
3814 		}
3815 	}
3816 
3817 	ret = i40e_check_tunnel_filter_type(filter_type);
3818 	if (ret < 0) {
3819 		rte_flow_error_set(error, EINVAL,
3820 				   RTE_FLOW_ERROR_TYPE_ITEM,
3821 				   NULL,
3822 				   "Invalid filter type");
3823 		return -rte_errno;
3824 	}
3825 	filter->filter_type = filter_type;
3826 
3827 	filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
3828 
3829 	return 0;
3830 }
3831 
3832 static int
i40e_flow_parse_vxlan_filter(struct rte_eth_dev * dev,const struct rte_flow_attr * attr,const struct rte_flow_item pattern[],const struct rte_flow_action actions[],struct rte_flow_error * error,union i40e_filter_t * filter)3833 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
3834 			     const struct rte_flow_attr *attr,
3835 			     const struct rte_flow_item pattern[],
3836 			     const struct rte_flow_action actions[],
3837 			     struct rte_flow_error *error,
3838 			     union i40e_filter_t *filter)
3839 {
3840 	struct i40e_tunnel_filter_conf *tunnel_filter =
3841 		&filter->consistent_tunnel_filter;
3842 	int ret;
3843 
3844 	ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
3845 					    error, tunnel_filter);
3846 	if (ret)
3847 		return ret;
3848 
3849 	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3850 	if (ret)
3851 		return ret;
3852 
3853 	ret = i40e_flow_parse_attr(attr, error);
3854 	if (ret)
3855 		return ret;
3856 
3857 	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3858 
3859 	return ret;
3860 }
3861 
3862 /* 1. Last in item should be NULL as range is not supported.
3863  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
3864  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
3865  * 3. Mask of fields which need to be matched should be
3866  *    filled with 1.
 * 4. Mask of fields which need not be matched should be
 *    filled with 0.
3869  */
/* Parse the pattern items of an NVGRE flow rule into an i40e tunnel
 * filter configuration. Returns 0 on success, -rte_errno with 'error'
 * filled in on failure.
 */
static int
i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
			      const struct rte_flow_item *pattern,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	enum rte_flow_item_type item_type;
	/* Accumulates RTE_ETH_TUNNEL_FILTER_* bits as items are parsed. */
	uint8_t filter_type = 0;
	bool is_tni_masked = 0;
	/* The 24-bit TNI must be matched exactly: all mask bytes set. */
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	/* Set once the NVGRE item is seen: an ETH item before it is the
	 * outer MAC, after it the inner MAC.
	 */
	bool nvgre_flag = 0;
	uint32_t tenant_id_be = 0;
	int ret;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Ranges (item->last) are not supported for any item. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			/* Check if ETH item is used for place holder.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!eth_spec && eth_mask) ||
			    (eth_spec && !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
				return -rte_errno;
			}

			if (eth_spec && eth_mask) {
				/* DST address of inner MAC shouldn't be masked.
				 * SRC address of Inner MAC should be masked.
				 */
				if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
				    !rte_is_zero_ether_addr(&eth_mask->src) ||
				    eth_mask->type) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
					return -rte_errno;
				}

				/* Before the NVGRE item this is the outer
				 * MAC, afterwards the inner MAC.
				 */
				if (!nvgre_flag) {
					rte_memcpy(&filter->outer_mac,
						   &eth_spec->dst,
						   RTE_ETHER_ADDR_LEN);
					filter_type |= RTE_ETH_TUNNEL_FILTER_OMAC;
				} else {
					rte_memcpy(&filter->inner_mac,
						   &eth_spec->dst,
						   RTE_ETHER_ADDR_LEN);
					filter_type |= RTE_ETH_TUNNEL_FILTER_IMAC;
				}
			}

			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			/* VLAN spec/mask are mandatory; matching on the
			 * encapsulated ethertype is not supported.
			 */
			if (!(vlan_spec && vlan_mask) ||
			    vlan_mask->inner_type) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vlan item");
				return -rte_errno;
			}

			if (vlan_spec && vlan_mask) {
				/* Only a fully-masked TCI is honoured. */
				if (vlan_mask->tci ==
				    rte_cpu_to_be_16(I40E_VLAN_TCI_MASK))
					filter->inner_vlan =
					      rte_be_to_cpu_16(vlan_spec->tci) &
					      I40E_VLAN_TCI_MASK;
				filter_type |= RTE_ETH_TUNNEL_FILTER_IVLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
			/* IPv6 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (nvgre_spec && nvgre_mask) {
				/* TNI must be matched exactly (no partial
				 * mask).
				 */
				is_tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_DIM(tni_mask));
				if (is_tni_masked) {
					rte_flow_error_set(error, EINVAL,
						       RTE_FLOW_ERROR_TYPE_ITEM,
						       item,
						       "Invalid TNI mask");
					return -rte_errno;
				}
				/* Protocol, if matched at all, must be
				 * matched in full.
				 */
				if (nvgre_mask->protocol &&
					nvgre_mask->protocol != 0xFFFF) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid NVGRE item");
					return -rte_errno;
				}
				/* Same for the C/K/S/version word. */
				if (nvgre_mask->c_k_s_rsvd0_ver &&
					nvgre_mask->c_k_s_rsvd0_ver !=
					rte_cpu_to_be_16(0xFFFF)) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
					return -rte_errno;
				}
				/* If matched, only the key-present (K,
				 * 0x2000) flag may be set.
				 */
				if (nvgre_spec->c_k_s_rsvd0_ver !=
					rte_cpu_to_be_16(0x2000) &&
					nvgre_mask->c_k_s_rsvd0_ver) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
					return -rte_errno;
				}
				/* If matched, the payload must be
				 * Transparent Ethernet Bridging (0x6558).
				 */
				if (nvgre_mask->protocol &&
					nvgre_spec->protocol !=
					rte_cpu_to_be_16(0x6558)) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
					return -rte_errno;
				}
				/* TNI is 24 bits: copy into the low three
				 * bytes of a big-endian u32, then convert.
				 */
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   nvgre_spec->tni, 3);
				filter->tenant_id =
					rte_be_to_cpu_32(tenant_id_be);
				filter_type |= RTE_ETH_TUNNEL_FILTER_TENID;
			}

			nvgre_flag = 1;
			break;
		default:
			break;
		}
	}

	/* Reject unsupported combinations of OMAC/IMAC/IVLAN/TENID bits. */
	ret = i40e_check_tunnel_filter_type(filter_type);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL,
				   "Invalid filter type");
		return -rte_errno;
	}
	filter->filter_type = filter_type;

	filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;

	return 0;
}
4083 
4084 static int
i40e_flow_parse_nvgre_filter(struct rte_eth_dev * dev,const struct rte_flow_attr * attr,const struct rte_flow_item pattern[],const struct rte_flow_action actions[],struct rte_flow_error * error,union i40e_filter_t * filter)4085 i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
4086 			     const struct rte_flow_attr *attr,
4087 			     const struct rte_flow_item pattern[],
4088 			     const struct rte_flow_action actions[],
4089 			     struct rte_flow_error *error,
4090 			     union i40e_filter_t *filter)
4091 {
4092 	struct i40e_tunnel_filter_conf *tunnel_filter =
4093 		&filter->consistent_tunnel_filter;
4094 	int ret;
4095 
4096 	ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
4097 					    error, tunnel_filter);
4098 	if (ret)
4099 		return ret;
4100 
4101 	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4102 	if (ret)
4103 		return ret;
4104 
4105 	ret = i40e_flow_parse_attr(attr, error);
4106 	if (ret)
4107 		return ret;
4108 
4109 	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4110 
4111 	return ret;
4112 }
4113 
4114 /* 1. Last in item should be NULL as range is not supported.
4115  * 2. Supported filter types: MPLS label.
4116  * 3. Mask of fields which need to be matched should be
4117  *    filled with 1.
 * 4. Mask of fields which need not be matched should be
 *    filled with 0.
4120  */
/* Parse the pattern items of an MPLSoUDP/MPLSoGRE flow rule into an
 * i40e tunnel filter configuration. The MPLS label becomes the filter's
 * tenant_id. Returns 0 on success, -rte_errno with 'error' filled in on
 * failure.
 */
static int
i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
			     const struct rte_flow_item *pattern,
			     struct rte_flow_error *error,
			     struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_mpls *mpls_spec;
	const struct rte_flow_item_mpls *mpls_mask;
	enum rte_flow_item_type item_type;
	bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
	/* label_tc_s holds label(20 bits)/TC(3)/S(1); only the label
	 * bits may be matched, hence the 0xF0 in the last byte.
	 */
	const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
	uint32_t label_be = 0;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Ranges (item->last) are not supported for any item. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			/* ETH is a placeholder only; no matching allowed. */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ETH item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
			/* IPv6 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* UDP is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP item");
				return -rte_errno;
			}
			/* UDP encapsulation means MPLSoUDP. */
			is_mplsoudp = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			/* GRE is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GRE item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			mpls_spec = item->spec;
			mpls_mask = item->mask;

			/* The MPLS label is the match key; spec and mask
			 * are both mandatory.
			 */
			if (!mpls_spec || !mpls_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid MPLS item");
				return -rte_errno;
			}

			/* The 20-bit label must be matched exactly. */
			if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid MPLS label mask");
				return -rte_errno;
			}
			/* Copy the 3 label/TC/S bytes into the low bytes of
			 * a big-endian u32, convert, then shift out the
			 * 4 TC/S bits to leave just the label.
			 */
			rte_memcpy(((uint8_t *)&label_be + 1),
				   mpls_spec->label_tc_s, 3);
			filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
			break;
		default:
			break;
		}
	}

	if (is_mplsoudp)
		filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
	else
		filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;

	return 0;
}
4240 
4241 static int
i40e_flow_parse_mpls_filter(struct rte_eth_dev * dev,const struct rte_flow_attr * attr,const struct rte_flow_item pattern[],const struct rte_flow_action actions[],struct rte_flow_error * error,union i40e_filter_t * filter)4242 i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
4243 			    const struct rte_flow_attr *attr,
4244 			    const struct rte_flow_item pattern[],
4245 			    const struct rte_flow_action actions[],
4246 			    struct rte_flow_error *error,
4247 			    union i40e_filter_t *filter)
4248 {
4249 	struct i40e_tunnel_filter_conf *tunnel_filter =
4250 		&filter->consistent_tunnel_filter;
4251 	int ret;
4252 
4253 	ret = i40e_flow_parse_mpls_pattern(dev, pattern,
4254 					   error, tunnel_filter);
4255 	if (ret)
4256 		return ret;
4257 
4258 	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4259 	if (ret)
4260 		return ret;
4261 
4262 	ret = i40e_flow_parse_attr(attr, error);
4263 	if (ret)
4264 		return ret;
4265 
4266 	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4267 
4268 	return ret;
4269 }
4270 
4271 /* 1. Last in item should be NULL as range is not supported.
4272  * 2. Supported filter types: GTP TEID.
4273  * 3. Mask of fields which need to be matched should be
4274  *    filled with 1.
 * 4. Mask of fields which need not be matched should be
 *    filled with 0.
4277  * 5. GTP profile supports GTPv1 only.
4278  * 6. GTP-C response message ('source_port' = 2123) is not supported.
4279  */
/* Parse the pattern items of a GTP-C/GTP-U flow rule into an i40e
 * tunnel filter configuration. The GTP TEID becomes the filter's
 * tenant_id. Requires the GTP profile to be loaded (pf->gtp_support).
 * Returns 0 on success, -rte_errno with 'error' filled in on failure.
 */
static int
i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
			    const struct rte_flow_item *pattern,
			    struct rte_flow_error *error,
			    struct i40e_tunnel_filter_conf *filter)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_gtp *gtp_spec;
	const struct rte_flow_item_gtp *gtp_mask;
	enum rte_flow_item_type item_type;

	/* GTP matching needs the customized DDP profile to be loaded. */
	if (!pf->gtp_support) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "GTP is not supported by default.");
		return -rte_errno;
	}

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Ranges (item->last) are not supported for any item. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			/* ETH is a placeholder only; no matching allowed. */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ETH item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* UDP is a protocol placeholder as well. */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_GTPC:
		case RTE_FLOW_ITEM_TYPE_GTPU:
			gtp_spec = item->spec;
			gtp_mask = item->mask;

			/* The TEID is the match key; spec and mask are
			 * both mandatory.
			 */
			if (!gtp_spec || !gtp_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GTP item");
				return -rte_errno;
			}

			/* Only the TEID may be matched, and it must be
			 * matched exactly (mask all ones).
			 */
			if (gtp_mask->v_pt_rsv_flags ||
			    gtp_mask->msg_type ||
			    gtp_mask->msg_len ||
			    gtp_mask->teid != UINT32_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GTP mask");
				return -rte_errno;
			}

			if (item_type == RTE_FLOW_ITEM_TYPE_GTPC)
				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
			else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU)
				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;

			filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);

			break;
		default:
			break;
		}
	}

	return 0;
}
4380 
4381 static int
i40e_flow_parse_gtp_filter(struct rte_eth_dev * dev,const struct rte_flow_attr * attr,const struct rte_flow_item pattern[],const struct rte_flow_action actions[],struct rte_flow_error * error,union i40e_filter_t * filter)4382 i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
4383 			   const struct rte_flow_attr *attr,
4384 			   const struct rte_flow_item pattern[],
4385 			   const struct rte_flow_action actions[],
4386 			   struct rte_flow_error *error,
4387 			   union i40e_filter_t *filter)
4388 {
4389 	struct i40e_tunnel_filter_conf *tunnel_filter =
4390 		&filter->consistent_tunnel_filter;
4391 	int ret;
4392 
4393 	ret = i40e_flow_parse_gtp_pattern(dev, pattern,
4394 					  error, tunnel_filter);
4395 	if (ret)
4396 		return ret;
4397 
4398 	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4399 	if (ret)
4400 		return ret;
4401 
4402 	ret = i40e_flow_parse_attr(attr, error);
4403 	if (ret)
4404 		return ret;
4405 
4406 	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4407 
4408 	return ret;
4409 }
4410 
4411 /* 1. Last in item should be NULL as range is not supported.
4412  * 2. Supported filter types: QINQ.
4413  * 3. Mask of fields which need to be matched should be
4414  *    filled with 1.
 * 4. Mask of fields which need not be matched should be
 *    filled with 0.
4417  */
4418 static int
i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev * dev,const struct rte_flow_item * pattern,struct rte_flow_error * error,struct i40e_tunnel_filter_conf * filter)4419 i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
4420 			      const struct rte_flow_item *pattern,
4421 			      struct rte_flow_error *error,
4422 			      struct i40e_tunnel_filter_conf *filter)
4423 {
4424 	const struct rte_flow_item *item = pattern;
4425 	const struct rte_flow_item_vlan *vlan_spec = NULL;
4426 	const struct rte_flow_item_vlan *vlan_mask = NULL;
4427 	const struct rte_flow_item_vlan *i_vlan_spec = NULL;
4428 	const struct rte_flow_item_vlan *i_vlan_mask = NULL;
4429 	const struct rte_flow_item_vlan *o_vlan_spec = NULL;
4430 	const struct rte_flow_item_vlan *o_vlan_mask = NULL;
4431 
4432 	enum rte_flow_item_type item_type;
4433 	bool vlan_flag = 0;
4434 
4435 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4436 		if (item->last) {
4437 			rte_flow_error_set(error, EINVAL,
4438 					   RTE_FLOW_ERROR_TYPE_ITEM,
4439 					   item,
4440 					   "Not support range");
4441 			return -rte_errno;
4442 		}
4443 		item_type = item->type;
4444 		switch (item_type) {
4445 		case RTE_FLOW_ITEM_TYPE_ETH:
4446 			if (item->spec || item->mask) {
4447 				rte_flow_error_set(error, EINVAL,
4448 						   RTE_FLOW_ERROR_TYPE_ITEM,
4449 						   item,
4450 						   "Invalid ETH item");
4451 				return -rte_errno;
4452 			}
4453 			break;
4454 		case RTE_FLOW_ITEM_TYPE_VLAN:
4455 			vlan_spec = item->spec;
4456 			vlan_mask = item->mask;
4457 
4458 			if (!(vlan_spec && vlan_mask) ||
4459 			    vlan_mask->inner_type) {
4460 				rte_flow_error_set(error, EINVAL,
4461 					   RTE_FLOW_ERROR_TYPE_ITEM,
4462 					   item,
4463 					   "Invalid vlan item");
4464 				return -rte_errno;
4465 			}
4466 
4467 			if (!vlan_flag) {
4468 				o_vlan_spec = vlan_spec;
4469 				o_vlan_mask = vlan_mask;
4470 				vlan_flag = 1;
4471 			} else {
4472 				i_vlan_spec = vlan_spec;
4473 				i_vlan_mask = vlan_mask;
4474 				vlan_flag = 0;
4475 			}
4476 			break;
4477 
4478 		default:
4479 			break;
4480 		}
4481 	}
4482 
4483 	/* Get filter specification */
4484 	if (o_vlan_mask != NULL &&  i_vlan_mask != NULL) {
4485 		filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci);
4486 		filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci);
4487 	} else {
4488 			rte_flow_error_set(error, EINVAL,
4489 					   RTE_FLOW_ERROR_TYPE_ITEM,
4490 					   NULL,
4491 					   "Invalid filter type");
4492 			return -rte_errno;
4493 	}
4494 
4495 	filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
4496 	return 0;
4497 }
4498 
4499 static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev * dev,const struct rte_flow_attr * attr,const struct rte_flow_item pattern[],const struct rte_flow_action actions[],struct rte_flow_error * error,union i40e_filter_t * filter)4500 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
4501 			      const struct rte_flow_attr *attr,
4502 			      const struct rte_flow_item pattern[],
4503 			      const struct rte_flow_action actions[],
4504 			      struct rte_flow_error *error,
4505 			      union i40e_filter_t *filter)
4506 {
4507 	struct i40e_tunnel_filter_conf *tunnel_filter =
4508 		&filter->consistent_tunnel_filter;
4509 	int ret;
4510 
4511 	ret = i40e_flow_parse_qinq_pattern(dev, pattern,
4512 					     error, tunnel_filter);
4513 	if (ret)
4514 		return ret;
4515 
4516 	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4517 	if (ret)
4518 		return ret;
4519 
4520 	ret = i40e_flow_parse_attr(attr, error);
4521 	if (ret)
4522 		return ret;
4523 
4524 	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4525 
4526 	return ret;
4527 }
4528 
/* Validate a flow rule against the patterns/actions this PMD supports.
 * On success the parsed filter is left in the global cons_filter and its
 * kind in cons_filter_type, ready for i40e_flow_create() to program.
 * Returns 0 on success, a negative value with 'error' set on failure.
 */
static int
i40e_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct rte_flow_item *items; /* internal pattern w/o VOID items */
	parse_filter_t parse_filter;
	uint32_t item_num = 0; /* non-void item number of pattern*/
	uint32_t i = 0;
	bool flag = false;
	int ret = I40E_NOT_SUPPORTED;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}
	memset(&cons_filter, 0, sizeof(cons_filter));

	/* Get the non-void item of action */
	while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
		i++;

	/* RSS rules take a dedicated parsing path (i40e_hash). */
	if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
		ret = i40e_flow_parse_attr(attr, error);
		if (ret)
			return ret;

		cons_filter_type = RTE_ETH_FILTER_HASH;
		return i40e_hash_parse(dev, pattern, actions + i,
				       &cons_filter.rss_conf, error);
	}

	i = 0;
	/* Get the non-void item number of pattern */
	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
			item_num++;
		i++;
	}
	/* Account for the trailing END item. */
	item_num++;

	/* Use the static scratch array when it is large enough, else
	 * fall back to a heap allocation.
	 */
	if (item_num <= ARRAY_SIZE(g_items)) {
		items = g_items;
	} else {
		items = rte_zmalloc("i40e_pattern",
				    item_num * sizeof(struct rte_flow_item), 0);
		if (!items) {
			rte_flow_error_set(error, ENOMEM,
					RTE_FLOW_ERROR_TYPE_ITEM_NUM,
					NULL,
					"No memory for PMD internal items.");
			return -ENOMEM;
		}
	}

	i40e_pattern_skip_void_item(items, pattern);

	i = 0;
	/* Try each candidate parser matching the pattern until one
	 * accepts the rule or the supported-pattern table is exhausted.
	 */
	do {
		parse_filter = i40e_find_parse_filter_func(items, &i);
		/* No parser found on the very first lookup: the pattern
		 * itself is unsupported.
		 */
		if (!parse_filter && !flag) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   pattern, "Unsupported pattern");

			if (items != g_items)
				rte_free(items);
			return -rte_errno;
		}

		if (parse_filter)
			ret = parse_filter(dev, attr, items, actions,
					   error, &cons_filter);

		flag = true;
	} while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));

	/* Only free the heap copy; g_items is static. */
	if (items != g_items)
		rte_free(items);

	return ret;
}
4628 
/* Create a flow rule: validate it, allocate a flow handle (from the
 * pre-allocated fdir entry pool for FDIR rules, from the heap
 * otherwise), program the hardware filter indicated by the global
 * cons_filter_type, and link the flow into pf->flow_list.
 * Returns the new flow on success, NULL with 'error' set on failure.
 */
static struct rte_flow *
i40e_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_flow *flow = NULL;
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	int ret;

	/* Validation also fills cons_filter/cons_filter_type. */
	ret = i40e_flow_validate(dev, attr, pattern, actions, error);
	if (ret < 0)
		return NULL;

	if (cons_filter_type == RTE_ETH_FILTER_FDIR) {
		/* FDIR flows come from a fixed-size entry pool. */
		flow = i40e_fdir_entry_pool_get(fdir_info);
		if (flow == NULL) {
			rte_flow_error_set(error, ENOBUFS,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Fdir space full");

			return flow;
		}
	} else {
		flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
		if (!flow) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Failed to allocate memory");
			return flow;
		}
	}

	/* Program the hardware filter; the newly inserted rule is the
	 * tail of the corresponding per-type list.
	 */
	switch (cons_filter_type) {
	case RTE_ETH_FILTER_ETHERTYPE:
		ret = i40e_ethertype_filter_set(pf,
					&cons_filter.ethertype_filter, 1);
		if (ret)
			goto free_flow;
		flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
					i40e_ethertype_filter_list);
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = i40e_flow_add_del_fdir_filter(dev,
			       &cons_filter.fdir_filter, 1);
		if (ret)
			goto free_flow;
		flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
					i40e_fdir_filter_list);
		break;
	case RTE_ETH_FILTER_TUNNEL:
		ret = i40e_dev_consistent_tunnel_filter_set(pf,
			    &cons_filter.consistent_tunnel_filter, 1);
		if (ret)
			goto free_flow;
		flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
					i40e_tunnel_filter_list);
		break;
	case RTE_ETH_FILTER_HASH:
		ret = i40e_hash_filter_create(pf, &cons_filter.rss_conf);
		if (ret)
			goto free_flow;
		flow->rule = TAILQ_LAST(&pf->rss_config_list,
					i40e_rss_conf_list);
		break;
	default:
		goto free_flow;
	}

	flow->filter_type = cons_filter_type;
	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
	return flow;

free_flow:
	/* 'ret' is a negative errno here; report its magnitude. */
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");

	/* Return the handle to wherever it came from. */
	if (cons_filter_type != RTE_ETH_FILTER_FDIR)
		rte_free(flow);
	else
		i40e_fdir_entry_pool_put(fdir_info, flow);

	return NULL;
}
4716 
4717 static int
i40e_flow_destroy(struct rte_eth_dev * dev,struct rte_flow * flow,struct rte_flow_error * error)4718 i40e_flow_destroy(struct rte_eth_dev *dev,
4719 		  struct rte_flow *flow,
4720 		  struct rte_flow_error *error)
4721 {
4722 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4723 	enum rte_filter_type filter_type = flow->filter_type;
4724 	struct i40e_fdir_info *fdir_info = &pf->fdir;
4725 	int ret = 0;
4726 
4727 	switch (filter_type) {
4728 	case RTE_ETH_FILTER_ETHERTYPE:
4729 		ret = i40e_flow_destroy_ethertype_filter(pf,
4730 			 (struct i40e_ethertype_filter *)flow->rule);
4731 		break;
4732 	case RTE_ETH_FILTER_TUNNEL:
4733 		ret = i40e_flow_destroy_tunnel_filter(pf,
4734 			      (struct i40e_tunnel_filter *)flow->rule);
4735 		break;
4736 	case RTE_ETH_FILTER_FDIR:
4737 		ret = i40e_flow_add_del_fdir_filter(dev,
4738 				&((struct i40e_fdir_filter *)flow->rule)->fdir,
4739 				0);
4740 
4741 		/* If the last flow is destroyed, disable fdir. */
4742 		if (!ret && TAILQ_EMPTY(&pf->fdir.fdir_list)) {
4743 			i40e_fdir_rx_proc_enable(dev, 0);
4744 		}
4745 		break;
4746 	case RTE_ETH_FILTER_HASH:
4747 		ret = i40e_hash_filter_destroy(pf, flow->rule);
4748 		break;
4749 	default:
4750 		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
4751 			    filter_type);
4752 		ret = -EINVAL;
4753 		break;
4754 	}
4755 
4756 	if (!ret) {
4757 		TAILQ_REMOVE(&pf->flow_list, flow, node);
4758 		if (filter_type == RTE_ETH_FILTER_FDIR)
4759 			i40e_fdir_entry_pool_put(fdir_info, flow);
4760 		else
4761 			rte_free(flow);
4762 
4763 	} else
4764 		rte_flow_error_set(error, -ret,
4765 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4766 				   "Failed to destroy flow.");
4767 
4768 	return ret;
4769 }
4770 
4771 static int
i40e_flow_destroy_ethertype_filter(struct i40e_pf * pf,struct i40e_ethertype_filter * filter)4772 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
4773 				   struct i40e_ethertype_filter *filter)
4774 {
4775 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4776 	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
4777 	struct i40e_ethertype_filter *node;
4778 	struct i40e_control_filter_stats stats;
4779 	uint16_t flags = 0;
4780 	int ret = 0;
4781 
4782 	if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
4783 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
4784 	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
4785 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
4786 	flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
4787 
4788 	memset(&stats, 0, sizeof(stats));
4789 	ret = i40e_aq_add_rem_control_packet_filter(hw,
4790 				    filter->input.mac_addr.addr_bytes,
4791 				    filter->input.ether_type,
4792 				    flags, pf->main_vsi->seid,
4793 				    filter->queue, 0, &stats, NULL);
4794 	if (ret < 0)
4795 		return ret;
4796 
4797 	node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
4798 	if (!node)
4799 		return -EINVAL;
4800 
4801 	ret = i40e_sw_ethertype_filter_del(pf, &node->input);
4802 
4803 	return ret;
4804 }
4805 
4806 static int
i40e_flow_destroy_tunnel_filter(struct i40e_pf * pf,struct i40e_tunnel_filter * filter)4807 i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
4808 				struct i40e_tunnel_filter *filter)
4809 {
4810 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4811 	struct i40e_vsi *vsi;
4812 	struct i40e_pf_vf *vf;
4813 	struct i40e_aqc_cloud_filters_element_bb cld_filter;
4814 	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
4815 	struct i40e_tunnel_filter *node;
4816 	bool big_buffer = 0;
4817 	int ret = 0;
4818 
4819 	memset(&cld_filter, 0, sizeof(cld_filter));
4820 	rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.outer_mac,
4821 			(struct rte_ether_addr *)&cld_filter.element.outer_mac);
4822 	rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.inner_mac,
4823 			(struct rte_ether_addr *)&cld_filter.element.inner_mac);
4824 	cld_filter.element.inner_vlan = filter->input.inner_vlan;
4825 	cld_filter.element.flags = filter->input.flags;
4826 	cld_filter.element.tenant_id = filter->input.tenant_id;
4827 	cld_filter.element.queue_number = filter->queue;
4828 	rte_memcpy(cld_filter.general_fields,
4829 		   filter->input.general_fields,
4830 		   sizeof(cld_filter.general_fields));
4831 
4832 	if (!filter->is_to_vf)
4833 		vsi = pf->main_vsi;
4834 	else {
4835 		vf = &pf->vfs[filter->vf_id];
4836 		vsi = vf->vsi;
4837 	}
4838 
4839 	if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
4840 	    I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
4841 	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
4842 	    I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
4843 	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
4844 	    I40E_AQC_ADD_CLOUD_FILTER_0X10))
4845 		big_buffer = 1;
4846 
4847 	if (big_buffer)
4848 		ret = i40e_aq_rem_cloud_filters_bb(hw, vsi->seid,
4849 						&cld_filter, 1);
4850 	else
4851 		ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
4852 						&cld_filter.element, 1);
4853 	if (ret < 0)
4854 		return -ENOTSUP;
4855 
4856 	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
4857 	if (!node)
4858 		return -EINVAL;
4859 
4860 	ret = i40e_sw_tunnel_filter_del(pf, &node->input);
4861 
4862 	return ret;
4863 }
4864 
4865 static int
i40e_flow_flush(struct rte_eth_dev * dev,struct rte_flow_error * error)4866 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
4867 {
4868 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4869 	int ret;
4870 
4871 	ret = i40e_flow_flush_fdir_filter(pf);
4872 	if (ret) {
4873 		rte_flow_error_set(error, -ret,
4874 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4875 				   "Failed to flush FDIR flows.");
4876 		return -rte_errno;
4877 	}
4878 
4879 	ret = i40e_flow_flush_ethertype_filter(pf);
4880 	if (ret) {
4881 		rte_flow_error_set(error, -ret,
4882 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4883 				   "Failed to ethertype flush flows.");
4884 		return -rte_errno;
4885 	}
4886 
4887 	ret = i40e_flow_flush_tunnel_filter(pf);
4888 	if (ret) {
4889 		rte_flow_error_set(error, -ret,
4890 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4891 				   "Failed to flush tunnel flows.");
4892 		return -rte_errno;
4893 	}
4894 
4895 	ret = i40e_hash_filter_flush(pf);
4896 	if (ret)
4897 		rte_flow_error_set(error, -ret,
4898 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4899 				   "Failed to flush RSS flows.");
4900 	return ret;
4901 }
4902 
/* Flush all flow director filters: clear the hardware FDIR table, then
 * reset every piece of FDIR software state (filter hash list, flow-list
 * entries, flow-entry pool, counters and flex-payload bookkeeping).
 * Returns 0 on success, a negative error code otherwise.
 */
static int
i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
{
	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	struct i40e_fdir_filter *fdir_filter;
	enum i40e_filter_pctype pctype;
	struct rte_flow *flow;
	void *temp;
	int ret;
	uint32_t i = 0;

	/* Hardware flush first; software state is only reset on success. */
	ret = i40e_fdir_flush(dev);
	if (!ret) {
		/* Delete FDIR filters in FDIR list. */
		while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
			ret = i40e_sw_fdir_filter_del(pf,
						      &fdir_filter->fdir.input);
			if (ret < 0)
				return ret;
		}

		/* Delete FDIR flows in flow list.  The entries are not
		 * freed one by one here: they come from the flow pool,
		 * which is reinitialized wholesale just below.
		 */
		RTE_TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
			if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
				TAILQ_REMOVE(&pf->flow_list, flow, node);
			}
		}

		/* reset bitmap */
		rte_bitmap_reset(fdir_info->fdir_flow_pool.bitmap);
		for (i = 0; i < fdir_info->fdir_space_size; i++) {
			fdir_info->fdir_flow_pool.pool[i].idx = i;
			rte_bitmap_set(fdir_info->fdir_flow_pool.bitmap, i);
		}

		/* Restore the occupancy counters to their empty-table state. */
		fdir_info->fdir_actual_cnt = 0;
		fdir_info->fdir_guarantee_free_space =
			fdir_info->fdir_guarantee_total_space;
		memset(fdir_info->fdir_filter_array,
			0,
			sizeof(struct i40e_fdir_filter) *
			I40E_MAX_FDIR_FILTER_NUM);

		/* Clear per-pctype rule counts and flex mask bookkeeping. */
		for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
		     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
			pf->fdir.flow_count[pctype] = 0;
			pf->fdir.flex_mask_flag[pctype] = 0;
		}

		for (i = 0; i < I40E_MAX_FLXPLD_LAYER; i++)
			pf->fdir.flex_pit_flag[i] = 0;

		/* Disable FDIR processing as all FDIR rules are now flushed */
		i40e_fdir_rx_proc_enable(dev, 0);
	}

	return ret;
}
4962 
4963 /* Flush all ethertype filters */
4964 static int
i40e_flow_flush_ethertype_filter(struct i40e_pf * pf)4965 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
4966 {
4967 	struct i40e_ethertype_filter_list
4968 		*ethertype_list = &pf->ethertype.ethertype_list;
4969 	struct i40e_ethertype_filter *filter;
4970 	struct rte_flow *flow;
4971 	void *temp;
4972 	int ret = 0;
4973 
4974 	while ((filter = TAILQ_FIRST(ethertype_list))) {
4975 		ret = i40e_flow_destroy_ethertype_filter(pf, filter);
4976 		if (ret)
4977 			return ret;
4978 	}
4979 
4980 	/* Delete ethertype flows in flow list. */
4981 	RTE_TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
4982 		if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
4983 			TAILQ_REMOVE(&pf->flow_list, flow, node);
4984 			rte_free(flow);
4985 		}
4986 	}
4987 
4988 	return ret;
4989 }
4990 
4991 /* Flush all tunnel filters */
4992 static int
i40e_flow_flush_tunnel_filter(struct i40e_pf * pf)4993 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
4994 {
4995 	struct i40e_tunnel_filter_list
4996 		*tunnel_list = &pf->tunnel.tunnel_list;
4997 	struct i40e_tunnel_filter *filter;
4998 	struct rte_flow *flow;
4999 	void *temp;
5000 	int ret = 0;
5001 
5002 	while ((filter = TAILQ_FIRST(tunnel_list))) {
5003 		ret = i40e_flow_destroy_tunnel_filter(pf, filter);
5004 		if (ret)
5005 			return ret;
5006 	}
5007 
5008 	/* Delete tunnel flows in flow list. */
5009 	RTE_TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5010 		if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
5011 			TAILQ_REMOVE(&pf->flow_list, flow, node);
5012 			rte_free(flow);
5013 		}
5014 	}
5015 
5016 	return ret;
5017 }
5018 
5019 static int
i40e_flow_query(struct rte_eth_dev * dev __rte_unused,struct rte_flow * flow,const struct rte_flow_action * actions,void * data,struct rte_flow_error * error)5020 i40e_flow_query(struct rte_eth_dev *dev __rte_unused,
5021 		struct rte_flow *flow,
5022 		const struct rte_flow_action *actions,
5023 		void *data, struct rte_flow_error *error)
5024 {
5025 	struct i40e_rss_filter *rss_rule = (struct i40e_rss_filter *)flow->rule;
5026 	enum rte_filter_type filter_type = flow->filter_type;
5027 	struct rte_flow_action_rss *rss_conf = data;
5028 
5029 	if (!rss_rule) {
5030 		rte_flow_error_set(error, EINVAL,
5031 				   RTE_FLOW_ERROR_TYPE_HANDLE,
5032 				   NULL, "Invalid rule");
5033 		return -rte_errno;
5034 	}
5035 
5036 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5037 		switch (actions->type) {
5038 		case RTE_FLOW_ACTION_TYPE_VOID:
5039 			break;
5040 		case RTE_FLOW_ACTION_TYPE_RSS:
5041 			if (filter_type != RTE_ETH_FILTER_HASH) {
5042 				rte_flow_error_set(error, ENOTSUP,
5043 						   RTE_FLOW_ERROR_TYPE_ACTION,
5044 						   actions,
5045 						   "action not supported");
5046 				return -rte_errno;
5047 			}
5048 			rte_memcpy(rss_conf,
5049 				   &rss_rule->rss_filter_info.conf,
5050 				   sizeof(struct rte_flow_action_rss));
5051 			break;
5052 		default:
5053 			return rte_flow_error_set(error, ENOTSUP,
5054 						  RTE_FLOW_ERROR_TYPE_ACTION,
5055 						  actions,
5056 						  "action not supported");
5057 		}
5058 	}
5059 
5060 	return 0;
5061 }
5062