xref: /dpdk/drivers/net/i40e/i40e_flow.c (revision 29fd052d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2017 Intel Corporation
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 
13 #include <rte_debug.h>
14 #include <rte_ether.h>
15 #include <ethdev_driver.h>
16 #include <rte_log.h>
17 #include <rte_malloc.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_bitmap.h>
21 
22 #include "i40e_logs.h"
23 #include "base/i40e_type.h"
24 #include "base/i40e_prototype.h"
25 #include "i40e_ethdev.h"
26 #include "i40e_hash.h"
27 
/* 8-bit IPv6 Traffic Class mask shifted to the FDIR field offset
 * (I40E_FDIR_IPv6_TC_OFFSET is defined elsewhere in the driver).
 */
#define I40E_IPV6_TC_MASK	(0xFF << I40E_FDIR_IPv6_TC_OFFSET)
/* Protocol number of the IPv6 Fragment extension header (RFC 8200). */
#define I40E_IPV6_FRAG_HEADER	44
#define I40E_TENANT_ARRAY_NUM	3
/* 802.1Q TCI and its sub-fields: PCP (3 bits) | CFI/DEI (1 bit) | VID (12 bits). */
#define I40E_VLAN_TCI_MASK	0xFFFF
#define I40E_VLAN_PRI_MASK	0xE000
#define I40E_VLAN_CFI_MASK	0x1000
#define I40E_VLAN_VID_MASK	0x0FFF
35 
36 static int i40e_flow_validate(struct rte_eth_dev *dev,
37 			      const struct rte_flow_attr *attr,
38 			      const struct rte_flow_item pattern[],
39 			      const struct rte_flow_action actions[],
40 			      struct rte_flow_error *error);
41 static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
42 					 const struct rte_flow_attr *attr,
43 					 const struct rte_flow_item pattern[],
44 					 const struct rte_flow_action actions[],
45 					 struct rte_flow_error *error);
46 static int i40e_flow_destroy(struct rte_eth_dev *dev,
47 			     struct rte_flow *flow,
48 			     struct rte_flow_error *error);
49 static int i40e_flow_flush(struct rte_eth_dev *dev,
50 			   struct rte_flow_error *error);
51 static int i40e_flow_query(struct rte_eth_dev *dev,
52 			   struct rte_flow *flow,
53 			   const struct rte_flow_action *actions,
54 			   void *data, struct rte_flow_error *error);
55 static int
56 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
57 				  const struct rte_flow_item *pattern,
58 				  struct rte_flow_error *error,
59 				  struct rte_eth_ethertype_filter *filter);
60 static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
61 				    const struct rte_flow_action *actions,
62 				    struct rte_flow_error *error,
63 				    struct rte_eth_ethertype_filter *filter);
64 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
65 					const struct rte_flow_attr *attr,
66 					const struct rte_flow_item *pattern,
67 					struct rte_flow_error *error,
68 					struct i40e_fdir_filter_conf *filter);
69 static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
70 				       const struct rte_flow_action *actions,
71 				       struct rte_flow_error *error,
72 				       struct i40e_fdir_filter_conf *filter);
73 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
74 				 const struct rte_flow_action *actions,
75 				 struct rte_flow_error *error,
76 				 struct i40e_tunnel_filter_conf *filter);
77 static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
78 				struct rte_flow_error *error);
79 static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
80 				    const struct rte_flow_attr *attr,
81 				    const struct rte_flow_item pattern[],
82 				    const struct rte_flow_action actions[],
83 				    struct rte_flow_error *error,
84 				    union i40e_filter_t *filter);
85 static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
86 				       const struct rte_flow_attr *attr,
87 				       const struct rte_flow_item pattern[],
88 				       const struct rte_flow_action actions[],
89 				       struct rte_flow_error *error,
90 				       union i40e_filter_t *filter);
91 static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
92 					const struct rte_flow_attr *attr,
93 					const struct rte_flow_item pattern[],
94 					const struct rte_flow_action actions[],
95 					struct rte_flow_error *error,
96 					union i40e_filter_t *filter);
97 static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
98 					const struct rte_flow_attr *attr,
99 					const struct rte_flow_item pattern[],
100 					const struct rte_flow_action actions[],
101 					struct rte_flow_error *error,
102 					union i40e_filter_t *filter);
103 static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
104 				       const struct rte_flow_attr *attr,
105 				       const struct rte_flow_item pattern[],
106 				       const struct rte_flow_action actions[],
107 				       struct rte_flow_error *error,
108 				       union i40e_filter_t *filter);
109 static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
110 				      const struct rte_flow_attr *attr,
111 				      const struct rte_flow_item pattern[],
112 				      const struct rte_flow_action actions[],
113 				      struct rte_flow_error *error,
114 				      union i40e_filter_t *filter);
115 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
116 				      struct i40e_ethertype_filter *filter);
117 static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
118 					   struct i40e_tunnel_filter *filter);
119 static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
120 static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
121 static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
122 static int
123 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
124 			      const struct rte_flow_attr *attr,
125 			      const struct rte_flow_item pattern[],
126 			      const struct rte_flow_action actions[],
127 			      struct rte_flow_error *error,
128 			      union i40e_filter_t *filter);
129 static int
130 i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
131 			      const struct rte_flow_item *pattern,
132 			      struct rte_flow_error *error,
133 			      struct i40e_tunnel_filter_conf *filter);
134 
135 static int i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
136 					   const struct rte_flow_attr *attr,
137 					   const struct rte_flow_item pattern[],
138 					   const struct rte_flow_action actions[],
139 					   struct rte_flow_error *error,
140 					   union i40e_filter_t *filter);
141 const struct rte_flow_ops i40e_flow_ops = {
142 	.validate = i40e_flow_validate,
143 	.create = i40e_flow_create,
144 	.destroy = i40e_flow_destroy,
145 	.flush = i40e_flow_flush,
146 	.query = i40e_flow_query,
147 };
148 
149 static union i40e_filter_t cons_filter;
150 static enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
151 /* internal pattern w/o VOID items */
152 struct rte_flow_item g_items[32];
153 
154 /* Pattern matched ethertype filter */
155 static enum rte_flow_item_type pattern_ethertype[] = {
156 	RTE_FLOW_ITEM_TYPE_ETH,
157 	RTE_FLOW_ITEM_TYPE_END,
158 };
159 
160 /* Pattern matched flow director filter */
161 static enum rte_flow_item_type pattern_fdir_ipv4[] = {
162 	RTE_FLOW_ITEM_TYPE_ETH,
163 	RTE_FLOW_ITEM_TYPE_IPV4,
164 	RTE_FLOW_ITEM_TYPE_END,
165 };
166 
167 static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
168 	RTE_FLOW_ITEM_TYPE_ETH,
169 	RTE_FLOW_ITEM_TYPE_IPV4,
170 	RTE_FLOW_ITEM_TYPE_UDP,
171 	RTE_FLOW_ITEM_TYPE_END,
172 };
173 
174 static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
175 	RTE_FLOW_ITEM_TYPE_ETH,
176 	RTE_FLOW_ITEM_TYPE_IPV4,
177 	RTE_FLOW_ITEM_TYPE_TCP,
178 	RTE_FLOW_ITEM_TYPE_END,
179 };
180 
181 static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
182 	RTE_FLOW_ITEM_TYPE_ETH,
183 	RTE_FLOW_ITEM_TYPE_IPV4,
184 	RTE_FLOW_ITEM_TYPE_SCTP,
185 	RTE_FLOW_ITEM_TYPE_END,
186 };
187 
188 static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
189 	RTE_FLOW_ITEM_TYPE_ETH,
190 	RTE_FLOW_ITEM_TYPE_IPV4,
191 	RTE_FLOW_ITEM_TYPE_UDP,
192 	RTE_FLOW_ITEM_TYPE_GTPC,
193 	RTE_FLOW_ITEM_TYPE_END,
194 };
195 
196 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
197 	RTE_FLOW_ITEM_TYPE_ETH,
198 	RTE_FLOW_ITEM_TYPE_IPV4,
199 	RTE_FLOW_ITEM_TYPE_UDP,
200 	RTE_FLOW_ITEM_TYPE_GTPU,
201 	RTE_FLOW_ITEM_TYPE_END,
202 };
203 
204 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = {
205 	RTE_FLOW_ITEM_TYPE_ETH,
206 	RTE_FLOW_ITEM_TYPE_IPV4,
207 	RTE_FLOW_ITEM_TYPE_UDP,
208 	RTE_FLOW_ITEM_TYPE_GTPU,
209 	RTE_FLOW_ITEM_TYPE_IPV4,
210 	RTE_FLOW_ITEM_TYPE_END,
211 };
212 
213 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = {
214 	RTE_FLOW_ITEM_TYPE_ETH,
215 	RTE_FLOW_ITEM_TYPE_IPV4,
216 	RTE_FLOW_ITEM_TYPE_UDP,
217 	RTE_FLOW_ITEM_TYPE_GTPU,
218 	RTE_FLOW_ITEM_TYPE_IPV6,
219 	RTE_FLOW_ITEM_TYPE_END,
220 };
221 
222 static enum rte_flow_item_type pattern_fdir_ipv6[] = {
223 	RTE_FLOW_ITEM_TYPE_ETH,
224 	RTE_FLOW_ITEM_TYPE_IPV6,
225 	RTE_FLOW_ITEM_TYPE_END,
226 };
227 
228 static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
229 	RTE_FLOW_ITEM_TYPE_ETH,
230 	RTE_FLOW_ITEM_TYPE_IPV6,
231 	RTE_FLOW_ITEM_TYPE_UDP,
232 	RTE_FLOW_ITEM_TYPE_END,
233 };
234 
235 static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
236 	RTE_FLOW_ITEM_TYPE_ETH,
237 	RTE_FLOW_ITEM_TYPE_IPV6,
238 	RTE_FLOW_ITEM_TYPE_TCP,
239 	RTE_FLOW_ITEM_TYPE_END,
240 };
241 
242 static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
243 	RTE_FLOW_ITEM_TYPE_ETH,
244 	RTE_FLOW_ITEM_TYPE_IPV6,
245 	RTE_FLOW_ITEM_TYPE_SCTP,
246 	RTE_FLOW_ITEM_TYPE_END,
247 };
248 
249 static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
250 	RTE_FLOW_ITEM_TYPE_ETH,
251 	RTE_FLOW_ITEM_TYPE_IPV6,
252 	RTE_FLOW_ITEM_TYPE_UDP,
253 	RTE_FLOW_ITEM_TYPE_GTPC,
254 	RTE_FLOW_ITEM_TYPE_END,
255 };
256 
257 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
258 	RTE_FLOW_ITEM_TYPE_ETH,
259 	RTE_FLOW_ITEM_TYPE_IPV6,
260 	RTE_FLOW_ITEM_TYPE_UDP,
261 	RTE_FLOW_ITEM_TYPE_GTPU,
262 	RTE_FLOW_ITEM_TYPE_END,
263 };
264 
265 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = {
266 	RTE_FLOW_ITEM_TYPE_ETH,
267 	RTE_FLOW_ITEM_TYPE_IPV6,
268 	RTE_FLOW_ITEM_TYPE_UDP,
269 	RTE_FLOW_ITEM_TYPE_GTPU,
270 	RTE_FLOW_ITEM_TYPE_IPV4,
271 	RTE_FLOW_ITEM_TYPE_END,
272 };
273 
274 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = {
275 	RTE_FLOW_ITEM_TYPE_ETH,
276 	RTE_FLOW_ITEM_TYPE_IPV6,
277 	RTE_FLOW_ITEM_TYPE_UDP,
278 	RTE_FLOW_ITEM_TYPE_GTPU,
279 	RTE_FLOW_ITEM_TYPE_IPV6,
280 	RTE_FLOW_ITEM_TYPE_END,
281 };
282 
283 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
284 	RTE_FLOW_ITEM_TYPE_ETH,
285 	RTE_FLOW_ITEM_TYPE_RAW,
286 	RTE_FLOW_ITEM_TYPE_END,
287 };
288 
289 static enum rte_flow_item_type pattern_fdir_ethertype_raw_2[] = {
290 	RTE_FLOW_ITEM_TYPE_ETH,
291 	RTE_FLOW_ITEM_TYPE_RAW,
292 	RTE_FLOW_ITEM_TYPE_RAW,
293 	RTE_FLOW_ITEM_TYPE_END,
294 };
295 
296 static enum rte_flow_item_type pattern_fdir_ethertype_raw_3[] = {
297 	RTE_FLOW_ITEM_TYPE_ETH,
298 	RTE_FLOW_ITEM_TYPE_RAW,
299 	RTE_FLOW_ITEM_TYPE_RAW,
300 	RTE_FLOW_ITEM_TYPE_RAW,
301 	RTE_FLOW_ITEM_TYPE_END,
302 };
303 
304 static enum rte_flow_item_type pattern_fdir_ipv4_raw_1[] = {
305 	RTE_FLOW_ITEM_TYPE_ETH,
306 	RTE_FLOW_ITEM_TYPE_IPV4,
307 	RTE_FLOW_ITEM_TYPE_RAW,
308 	RTE_FLOW_ITEM_TYPE_END,
309 };
310 
311 static enum rte_flow_item_type pattern_fdir_ipv4_raw_2[] = {
312 	RTE_FLOW_ITEM_TYPE_ETH,
313 	RTE_FLOW_ITEM_TYPE_IPV4,
314 	RTE_FLOW_ITEM_TYPE_RAW,
315 	RTE_FLOW_ITEM_TYPE_RAW,
316 	RTE_FLOW_ITEM_TYPE_END,
317 };
318 
319 static enum rte_flow_item_type pattern_fdir_ipv4_raw_3[] = {
320 	RTE_FLOW_ITEM_TYPE_ETH,
321 	RTE_FLOW_ITEM_TYPE_IPV4,
322 	RTE_FLOW_ITEM_TYPE_RAW,
323 	RTE_FLOW_ITEM_TYPE_RAW,
324 	RTE_FLOW_ITEM_TYPE_RAW,
325 	RTE_FLOW_ITEM_TYPE_END,
326 };
327 
328 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1[] = {
329 	RTE_FLOW_ITEM_TYPE_ETH,
330 	RTE_FLOW_ITEM_TYPE_IPV4,
331 	RTE_FLOW_ITEM_TYPE_UDP,
332 	RTE_FLOW_ITEM_TYPE_RAW,
333 	RTE_FLOW_ITEM_TYPE_END,
334 };
335 
336 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2[] = {
337 	RTE_FLOW_ITEM_TYPE_ETH,
338 	RTE_FLOW_ITEM_TYPE_IPV4,
339 	RTE_FLOW_ITEM_TYPE_UDP,
340 	RTE_FLOW_ITEM_TYPE_RAW,
341 	RTE_FLOW_ITEM_TYPE_RAW,
342 	RTE_FLOW_ITEM_TYPE_END,
343 };
344 
345 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3[] = {
346 	RTE_FLOW_ITEM_TYPE_ETH,
347 	RTE_FLOW_ITEM_TYPE_IPV4,
348 	RTE_FLOW_ITEM_TYPE_UDP,
349 	RTE_FLOW_ITEM_TYPE_RAW,
350 	RTE_FLOW_ITEM_TYPE_RAW,
351 	RTE_FLOW_ITEM_TYPE_RAW,
352 	RTE_FLOW_ITEM_TYPE_END,
353 };
354 
355 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1[] = {
356 	RTE_FLOW_ITEM_TYPE_ETH,
357 	RTE_FLOW_ITEM_TYPE_IPV4,
358 	RTE_FLOW_ITEM_TYPE_TCP,
359 	RTE_FLOW_ITEM_TYPE_RAW,
360 	RTE_FLOW_ITEM_TYPE_END,
361 };
362 
363 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2[] = {
364 	RTE_FLOW_ITEM_TYPE_ETH,
365 	RTE_FLOW_ITEM_TYPE_IPV4,
366 	RTE_FLOW_ITEM_TYPE_TCP,
367 	RTE_FLOW_ITEM_TYPE_RAW,
368 	RTE_FLOW_ITEM_TYPE_RAW,
369 	RTE_FLOW_ITEM_TYPE_END,
370 };
371 
372 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3[] = {
373 	RTE_FLOW_ITEM_TYPE_ETH,
374 	RTE_FLOW_ITEM_TYPE_IPV4,
375 	RTE_FLOW_ITEM_TYPE_TCP,
376 	RTE_FLOW_ITEM_TYPE_RAW,
377 	RTE_FLOW_ITEM_TYPE_RAW,
378 	RTE_FLOW_ITEM_TYPE_RAW,
379 	RTE_FLOW_ITEM_TYPE_END,
380 };
381 
382 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1[] = {
383 	RTE_FLOW_ITEM_TYPE_ETH,
384 	RTE_FLOW_ITEM_TYPE_IPV4,
385 	RTE_FLOW_ITEM_TYPE_SCTP,
386 	RTE_FLOW_ITEM_TYPE_RAW,
387 	RTE_FLOW_ITEM_TYPE_END,
388 };
389 
390 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2[] = {
391 	RTE_FLOW_ITEM_TYPE_ETH,
392 	RTE_FLOW_ITEM_TYPE_IPV4,
393 	RTE_FLOW_ITEM_TYPE_SCTP,
394 	RTE_FLOW_ITEM_TYPE_RAW,
395 	RTE_FLOW_ITEM_TYPE_RAW,
396 	RTE_FLOW_ITEM_TYPE_END,
397 };
398 
399 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3[] = {
400 	RTE_FLOW_ITEM_TYPE_ETH,
401 	RTE_FLOW_ITEM_TYPE_IPV4,
402 	RTE_FLOW_ITEM_TYPE_SCTP,
403 	RTE_FLOW_ITEM_TYPE_RAW,
404 	RTE_FLOW_ITEM_TYPE_RAW,
405 	RTE_FLOW_ITEM_TYPE_RAW,
406 	RTE_FLOW_ITEM_TYPE_END,
407 };
408 
409 static enum rte_flow_item_type pattern_fdir_ipv6_raw_1[] = {
410 	RTE_FLOW_ITEM_TYPE_ETH,
411 	RTE_FLOW_ITEM_TYPE_IPV6,
412 	RTE_FLOW_ITEM_TYPE_RAW,
413 	RTE_FLOW_ITEM_TYPE_END,
414 };
415 
416 static enum rte_flow_item_type pattern_fdir_ipv6_raw_2[] = {
417 	RTE_FLOW_ITEM_TYPE_ETH,
418 	RTE_FLOW_ITEM_TYPE_IPV6,
419 	RTE_FLOW_ITEM_TYPE_RAW,
420 	RTE_FLOW_ITEM_TYPE_RAW,
421 	RTE_FLOW_ITEM_TYPE_END,
422 };
423 
424 static enum rte_flow_item_type pattern_fdir_ipv6_raw_3[] = {
425 	RTE_FLOW_ITEM_TYPE_ETH,
426 	RTE_FLOW_ITEM_TYPE_IPV6,
427 	RTE_FLOW_ITEM_TYPE_RAW,
428 	RTE_FLOW_ITEM_TYPE_RAW,
429 	RTE_FLOW_ITEM_TYPE_RAW,
430 	RTE_FLOW_ITEM_TYPE_END,
431 };
432 
433 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1[] = {
434 	RTE_FLOW_ITEM_TYPE_ETH,
435 	RTE_FLOW_ITEM_TYPE_IPV6,
436 	RTE_FLOW_ITEM_TYPE_UDP,
437 	RTE_FLOW_ITEM_TYPE_RAW,
438 	RTE_FLOW_ITEM_TYPE_END,
439 };
440 
441 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2[] = {
442 	RTE_FLOW_ITEM_TYPE_ETH,
443 	RTE_FLOW_ITEM_TYPE_IPV6,
444 	RTE_FLOW_ITEM_TYPE_UDP,
445 	RTE_FLOW_ITEM_TYPE_RAW,
446 	RTE_FLOW_ITEM_TYPE_RAW,
447 	RTE_FLOW_ITEM_TYPE_END,
448 };
449 
450 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3[] = {
451 	RTE_FLOW_ITEM_TYPE_ETH,
452 	RTE_FLOW_ITEM_TYPE_IPV6,
453 	RTE_FLOW_ITEM_TYPE_UDP,
454 	RTE_FLOW_ITEM_TYPE_RAW,
455 	RTE_FLOW_ITEM_TYPE_RAW,
456 	RTE_FLOW_ITEM_TYPE_RAW,
457 	RTE_FLOW_ITEM_TYPE_END,
458 };
459 
460 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1[] = {
461 	RTE_FLOW_ITEM_TYPE_ETH,
462 	RTE_FLOW_ITEM_TYPE_IPV6,
463 	RTE_FLOW_ITEM_TYPE_TCP,
464 	RTE_FLOW_ITEM_TYPE_RAW,
465 	RTE_FLOW_ITEM_TYPE_END,
466 };
467 
468 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2[] = {
469 	RTE_FLOW_ITEM_TYPE_ETH,
470 	RTE_FLOW_ITEM_TYPE_IPV6,
471 	RTE_FLOW_ITEM_TYPE_TCP,
472 	RTE_FLOW_ITEM_TYPE_RAW,
473 	RTE_FLOW_ITEM_TYPE_RAW,
474 	RTE_FLOW_ITEM_TYPE_END,
475 };
476 
477 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3[] = {
478 	RTE_FLOW_ITEM_TYPE_ETH,
479 	RTE_FLOW_ITEM_TYPE_IPV6,
480 	RTE_FLOW_ITEM_TYPE_TCP,
481 	RTE_FLOW_ITEM_TYPE_RAW,
482 	RTE_FLOW_ITEM_TYPE_RAW,
483 	RTE_FLOW_ITEM_TYPE_RAW,
484 	RTE_FLOW_ITEM_TYPE_END,
485 };
486 
487 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1[] = {
488 	RTE_FLOW_ITEM_TYPE_ETH,
489 	RTE_FLOW_ITEM_TYPE_IPV6,
490 	RTE_FLOW_ITEM_TYPE_SCTP,
491 	RTE_FLOW_ITEM_TYPE_RAW,
492 	RTE_FLOW_ITEM_TYPE_END,
493 };
494 
495 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2[] = {
496 	RTE_FLOW_ITEM_TYPE_ETH,
497 	RTE_FLOW_ITEM_TYPE_IPV6,
498 	RTE_FLOW_ITEM_TYPE_SCTP,
499 	RTE_FLOW_ITEM_TYPE_RAW,
500 	RTE_FLOW_ITEM_TYPE_RAW,
501 	RTE_FLOW_ITEM_TYPE_END,
502 };
503 
504 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3[] = {
505 	RTE_FLOW_ITEM_TYPE_ETH,
506 	RTE_FLOW_ITEM_TYPE_IPV6,
507 	RTE_FLOW_ITEM_TYPE_SCTP,
508 	RTE_FLOW_ITEM_TYPE_RAW,
509 	RTE_FLOW_ITEM_TYPE_RAW,
510 	RTE_FLOW_ITEM_TYPE_RAW,
511 	RTE_FLOW_ITEM_TYPE_END,
512 };
513 
514 static enum rte_flow_item_type pattern_fdir_ethertype_vlan[] = {
515 	RTE_FLOW_ITEM_TYPE_ETH,
516 	RTE_FLOW_ITEM_TYPE_VLAN,
517 	RTE_FLOW_ITEM_TYPE_END,
518 };
519 
520 static enum rte_flow_item_type pattern_fdir_vlan_ipv4[] = {
521 	RTE_FLOW_ITEM_TYPE_ETH,
522 	RTE_FLOW_ITEM_TYPE_VLAN,
523 	RTE_FLOW_ITEM_TYPE_IPV4,
524 	RTE_FLOW_ITEM_TYPE_END,
525 };
526 
527 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp[] = {
528 	RTE_FLOW_ITEM_TYPE_ETH,
529 	RTE_FLOW_ITEM_TYPE_VLAN,
530 	RTE_FLOW_ITEM_TYPE_IPV4,
531 	RTE_FLOW_ITEM_TYPE_UDP,
532 	RTE_FLOW_ITEM_TYPE_END,
533 };
534 
535 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp[] = {
536 	RTE_FLOW_ITEM_TYPE_ETH,
537 	RTE_FLOW_ITEM_TYPE_VLAN,
538 	RTE_FLOW_ITEM_TYPE_IPV4,
539 	RTE_FLOW_ITEM_TYPE_TCP,
540 	RTE_FLOW_ITEM_TYPE_END,
541 };
542 
543 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp[] = {
544 	RTE_FLOW_ITEM_TYPE_ETH,
545 	RTE_FLOW_ITEM_TYPE_VLAN,
546 	RTE_FLOW_ITEM_TYPE_IPV4,
547 	RTE_FLOW_ITEM_TYPE_SCTP,
548 	RTE_FLOW_ITEM_TYPE_END,
549 };
550 
551 static enum rte_flow_item_type pattern_fdir_vlan_ipv6[] = {
552 	RTE_FLOW_ITEM_TYPE_ETH,
553 	RTE_FLOW_ITEM_TYPE_VLAN,
554 	RTE_FLOW_ITEM_TYPE_IPV6,
555 	RTE_FLOW_ITEM_TYPE_END,
556 };
557 
558 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp[] = {
559 	RTE_FLOW_ITEM_TYPE_ETH,
560 	RTE_FLOW_ITEM_TYPE_VLAN,
561 	RTE_FLOW_ITEM_TYPE_IPV6,
562 	RTE_FLOW_ITEM_TYPE_UDP,
563 	RTE_FLOW_ITEM_TYPE_END,
564 };
565 
566 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp[] = {
567 	RTE_FLOW_ITEM_TYPE_ETH,
568 	RTE_FLOW_ITEM_TYPE_VLAN,
569 	RTE_FLOW_ITEM_TYPE_IPV6,
570 	RTE_FLOW_ITEM_TYPE_TCP,
571 	RTE_FLOW_ITEM_TYPE_END,
572 };
573 
574 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp[] = {
575 	RTE_FLOW_ITEM_TYPE_ETH,
576 	RTE_FLOW_ITEM_TYPE_VLAN,
577 	RTE_FLOW_ITEM_TYPE_IPV6,
578 	RTE_FLOW_ITEM_TYPE_SCTP,
579 	RTE_FLOW_ITEM_TYPE_END,
580 };
581 
582 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1[] = {
583 	RTE_FLOW_ITEM_TYPE_ETH,
584 	RTE_FLOW_ITEM_TYPE_VLAN,
585 	RTE_FLOW_ITEM_TYPE_RAW,
586 	RTE_FLOW_ITEM_TYPE_END,
587 };
588 
589 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2[] = {
590 	RTE_FLOW_ITEM_TYPE_ETH,
591 	RTE_FLOW_ITEM_TYPE_VLAN,
592 	RTE_FLOW_ITEM_TYPE_RAW,
593 	RTE_FLOW_ITEM_TYPE_RAW,
594 	RTE_FLOW_ITEM_TYPE_END,
595 };
596 
597 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3[] = {
598 	RTE_FLOW_ITEM_TYPE_ETH,
599 	RTE_FLOW_ITEM_TYPE_VLAN,
600 	RTE_FLOW_ITEM_TYPE_RAW,
601 	RTE_FLOW_ITEM_TYPE_RAW,
602 	RTE_FLOW_ITEM_TYPE_RAW,
603 	RTE_FLOW_ITEM_TYPE_END,
604 };
605 
606 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1[] = {
607 	RTE_FLOW_ITEM_TYPE_ETH,
608 	RTE_FLOW_ITEM_TYPE_VLAN,
609 	RTE_FLOW_ITEM_TYPE_IPV4,
610 	RTE_FLOW_ITEM_TYPE_RAW,
611 	RTE_FLOW_ITEM_TYPE_END,
612 };
613 
614 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2[] = {
615 	RTE_FLOW_ITEM_TYPE_ETH,
616 	RTE_FLOW_ITEM_TYPE_VLAN,
617 	RTE_FLOW_ITEM_TYPE_IPV4,
618 	RTE_FLOW_ITEM_TYPE_RAW,
619 	RTE_FLOW_ITEM_TYPE_RAW,
620 	RTE_FLOW_ITEM_TYPE_END,
621 };
622 
623 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3[] = {
624 	RTE_FLOW_ITEM_TYPE_ETH,
625 	RTE_FLOW_ITEM_TYPE_VLAN,
626 	RTE_FLOW_ITEM_TYPE_IPV4,
627 	RTE_FLOW_ITEM_TYPE_RAW,
628 	RTE_FLOW_ITEM_TYPE_RAW,
629 	RTE_FLOW_ITEM_TYPE_RAW,
630 	RTE_FLOW_ITEM_TYPE_END,
631 };
632 
633 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1[] = {
634 	RTE_FLOW_ITEM_TYPE_ETH,
635 	RTE_FLOW_ITEM_TYPE_VLAN,
636 	RTE_FLOW_ITEM_TYPE_IPV4,
637 	RTE_FLOW_ITEM_TYPE_UDP,
638 	RTE_FLOW_ITEM_TYPE_RAW,
639 	RTE_FLOW_ITEM_TYPE_END,
640 };
641 
642 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2[] = {
643 	RTE_FLOW_ITEM_TYPE_ETH,
644 	RTE_FLOW_ITEM_TYPE_VLAN,
645 	RTE_FLOW_ITEM_TYPE_IPV4,
646 	RTE_FLOW_ITEM_TYPE_UDP,
647 	RTE_FLOW_ITEM_TYPE_RAW,
648 	RTE_FLOW_ITEM_TYPE_RAW,
649 	RTE_FLOW_ITEM_TYPE_END,
650 };
651 
652 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3[] = {
653 	RTE_FLOW_ITEM_TYPE_ETH,
654 	RTE_FLOW_ITEM_TYPE_VLAN,
655 	RTE_FLOW_ITEM_TYPE_IPV4,
656 	RTE_FLOW_ITEM_TYPE_UDP,
657 	RTE_FLOW_ITEM_TYPE_RAW,
658 	RTE_FLOW_ITEM_TYPE_RAW,
659 	RTE_FLOW_ITEM_TYPE_RAW,
660 	RTE_FLOW_ITEM_TYPE_END,
661 };
662 
663 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1[] = {
664 	RTE_FLOW_ITEM_TYPE_ETH,
665 	RTE_FLOW_ITEM_TYPE_VLAN,
666 	RTE_FLOW_ITEM_TYPE_IPV4,
667 	RTE_FLOW_ITEM_TYPE_TCP,
668 	RTE_FLOW_ITEM_TYPE_RAW,
669 	RTE_FLOW_ITEM_TYPE_END,
670 };
671 
672 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2[] = {
673 	RTE_FLOW_ITEM_TYPE_ETH,
674 	RTE_FLOW_ITEM_TYPE_VLAN,
675 	RTE_FLOW_ITEM_TYPE_IPV4,
676 	RTE_FLOW_ITEM_TYPE_TCP,
677 	RTE_FLOW_ITEM_TYPE_RAW,
678 	RTE_FLOW_ITEM_TYPE_RAW,
679 	RTE_FLOW_ITEM_TYPE_END,
680 };
681 
682 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3[] = {
683 	RTE_FLOW_ITEM_TYPE_ETH,
684 	RTE_FLOW_ITEM_TYPE_VLAN,
685 	RTE_FLOW_ITEM_TYPE_IPV4,
686 	RTE_FLOW_ITEM_TYPE_TCP,
687 	RTE_FLOW_ITEM_TYPE_RAW,
688 	RTE_FLOW_ITEM_TYPE_RAW,
689 	RTE_FLOW_ITEM_TYPE_RAW,
690 	RTE_FLOW_ITEM_TYPE_END,
691 };
692 
693 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1[] = {
694 	RTE_FLOW_ITEM_TYPE_ETH,
695 	RTE_FLOW_ITEM_TYPE_VLAN,
696 	RTE_FLOW_ITEM_TYPE_IPV4,
697 	RTE_FLOW_ITEM_TYPE_SCTP,
698 	RTE_FLOW_ITEM_TYPE_RAW,
699 	RTE_FLOW_ITEM_TYPE_END,
700 };
701 
702 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2[] = {
703 	RTE_FLOW_ITEM_TYPE_ETH,
704 	RTE_FLOW_ITEM_TYPE_VLAN,
705 	RTE_FLOW_ITEM_TYPE_IPV4,
706 	RTE_FLOW_ITEM_TYPE_SCTP,
707 	RTE_FLOW_ITEM_TYPE_RAW,
708 	RTE_FLOW_ITEM_TYPE_RAW,
709 	RTE_FLOW_ITEM_TYPE_END,
710 };
711 
712 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3[] = {
713 	RTE_FLOW_ITEM_TYPE_ETH,
714 	RTE_FLOW_ITEM_TYPE_VLAN,
715 	RTE_FLOW_ITEM_TYPE_IPV4,
716 	RTE_FLOW_ITEM_TYPE_SCTP,
717 	RTE_FLOW_ITEM_TYPE_RAW,
718 	RTE_FLOW_ITEM_TYPE_RAW,
719 	RTE_FLOW_ITEM_TYPE_RAW,
720 	RTE_FLOW_ITEM_TYPE_END,
721 };
722 
723 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1[] = {
724 	RTE_FLOW_ITEM_TYPE_ETH,
725 	RTE_FLOW_ITEM_TYPE_VLAN,
726 	RTE_FLOW_ITEM_TYPE_IPV6,
727 	RTE_FLOW_ITEM_TYPE_RAW,
728 	RTE_FLOW_ITEM_TYPE_END,
729 };
730 
731 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2[] = {
732 	RTE_FLOW_ITEM_TYPE_ETH,
733 	RTE_FLOW_ITEM_TYPE_VLAN,
734 	RTE_FLOW_ITEM_TYPE_IPV6,
735 	RTE_FLOW_ITEM_TYPE_RAW,
736 	RTE_FLOW_ITEM_TYPE_RAW,
737 	RTE_FLOW_ITEM_TYPE_END,
738 };
739 
740 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3[] = {
741 	RTE_FLOW_ITEM_TYPE_ETH,
742 	RTE_FLOW_ITEM_TYPE_VLAN,
743 	RTE_FLOW_ITEM_TYPE_IPV6,
744 	RTE_FLOW_ITEM_TYPE_RAW,
745 	RTE_FLOW_ITEM_TYPE_RAW,
746 	RTE_FLOW_ITEM_TYPE_RAW,
747 	RTE_FLOW_ITEM_TYPE_END,
748 };
749 
750 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1[] = {
751 	RTE_FLOW_ITEM_TYPE_ETH,
752 	RTE_FLOW_ITEM_TYPE_VLAN,
753 	RTE_FLOW_ITEM_TYPE_IPV6,
754 	RTE_FLOW_ITEM_TYPE_UDP,
755 	RTE_FLOW_ITEM_TYPE_RAW,
756 	RTE_FLOW_ITEM_TYPE_END,
757 };
758 
759 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2[] = {
760 	RTE_FLOW_ITEM_TYPE_ETH,
761 	RTE_FLOW_ITEM_TYPE_VLAN,
762 	RTE_FLOW_ITEM_TYPE_IPV6,
763 	RTE_FLOW_ITEM_TYPE_UDP,
764 	RTE_FLOW_ITEM_TYPE_RAW,
765 	RTE_FLOW_ITEM_TYPE_RAW,
766 	RTE_FLOW_ITEM_TYPE_END,
767 };
768 
769 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3[] = {
770 	RTE_FLOW_ITEM_TYPE_ETH,
771 	RTE_FLOW_ITEM_TYPE_VLAN,
772 	RTE_FLOW_ITEM_TYPE_IPV6,
773 	RTE_FLOW_ITEM_TYPE_UDP,
774 	RTE_FLOW_ITEM_TYPE_RAW,
775 	RTE_FLOW_ITEM_TYPE_RAW,
776 	RTE_FLOW_ITEM_TYPE_RAW,
777 	RTE_FLOW_ITEM_TYPE_END,
778 };
779 
780 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1[] = {
781 	RTE_FLOW_ITEM_TYPE_ETH,
782 	RTE_FLOW_ITEM_TYPE_VLAN,
783 	RTE_FLOW_ITEM_TYPE_IPV6,
784 	RTE_FLOW_ITEM_TYPE_TCP,
785 	RTE_FLOW_ITEM_TYPE_RAW,
786 	RTE_FLOW_ITEM_TYPE_END,
787 };
788 
789 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2[] = {
790 	RTE_FLOW_ITEM_TYPE_ETH,
791 	RTE_FLOW_ITEM_TYPE_VLAN,
792 	RTE_FLOW_ITEM_TYPE_IPV6,
793 	RTE_FLOW_ITEM_TYPE_TCP,
794 	RTE_FLOW_ITEM_TYPE_RAW,
795 	RTE_FLOW_ITEM_TYPE_RAW,
796 	RTE_FLOW_ITEM_TYPE_END,
797 };
798 
799 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3[] = {
800 	RTE_FLOW_ITEM_TYPE_ETH,
801 	RTE_FLOW_ITEM_TYPE_VLAN,
802 	RTE_FLOW_ITEM_TYPE_IPV6,
803 	RTE_FLOW_ITEM_TYPE_TCP,
804 	RTE_FLOW_ITEM_TYPE_RAW,
805 	RTE_FLOW_ITEM_TYPE_RAW,
806 	RTE_FLOW_ITEM_TYPE_RAW,
807 	RTE_FLOW_ITEM_TYPE_END,
808 };
809 
810 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1[] = {
811 	RTE_FLOW_ITEM_TYPE_ETH,
812 	RTE_FLOW_ITEM_TYPE_VLAN,
813 	RTE_FLOW_ITEM_TYPE_IPV6,
814 	RTE_FLOW_ITEM_TYPE_SCTP,
815 	RTE_FLOW_ITEM_TYPE_RAW,
816 	RTE_FLOW_ITEM_TYPE_END,
817 };
818 
819 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2[] = {
820 	RTE_FLOW_ITEM_TYPE_ETH,
821 	RTE_FLOW_ITEM_TYPE_VLAN,
822 	RTE_FLOW_ITEM_TYPE_IPV6,
823 	RTE_FLOW_ITEM_TYPE_SCTP,
824 	RTE_FLOW_ITEM_TYPE_RAW,
825 	RTE_FLOW_ITEM_TYPE_RAW,
826 	RTE_FLOW_ITEM_TYPE_END,
827 };
828 
829 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3[] = {
830 	RTE_FLOW_ITEM_TYPE_ETH,
831 	RTE_FLOW_ITEM_TYPE_VLAN,
832 	RTE_FLOW_ITEM_TYPE_IPV6,
833 	RTE_FLOW_ITEM_TYPE_SCTP,
834 	RTE_FLOW_ITEM_TYPE_RAW,
835 	RTE_FLOW_ITEM_TYPE_RAW,
836 	RTE_FLOW_ITEM_TYPE_RAW,
837 	RTE_FLOW_ITEM_TYPE_END,
838 };
839 
840 static enum rte_flow_item_type pattern_fdir_ipv4_vf[] = {
841 	RTE_FLOW_ITEM_TYPE_ETH,
842 	RTE_FLOW_ITEM_TYPE_IPV4,
843 	RTE_FLOW_ITEM_TYPE_VF,
844 	RTE_FLOW_ITEM_TYPE_END,
845 };
846 
847 static enum rte_flow_item_type pattern_fdir_ipv4_udp_vf[] = {
848 	RTE_FLOW_ITEM_TYPE_ETH,
849 	RTE_FLOW_ITEM_TYPE_IPV4,
850 	RTE_FLOW_ITEM_TYPE_UDP,
851 	RTE_FLOW_ITEM_TYPE_VF,
852 	RTE_FLOW_ITEM_TYPE_END,
853 };
854 
855 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_vf[] = {
856 	RTE_FLOW_ITEM_TYPE_ETH,
857 	RTE_FLOW_ITEM_TYPE_IPV4,
858 	RTE_FLOW_ITEM_TYPE_TCP,
859 	RTE_FLOW_ITEM_TYPE_VF,
860 	RTE_FLOW_ITEM_TYPE_END,
861 };
862 
863 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_vf[] = {
864 	RTE_FLOW_ITEM_TYPE_ETH,
865 	RTE_FLOW_ITEM_TYPE_IPV4,
866 	RTE_FLOW_ITEM_TYPE_SCTP,
867 	RTE_FLOW_ITEM_TYPE_VF,
868 	RTE_FLOW_ITEM_TYPE_END,
869 };
870 
871 static enum rte_flow_item_type pattern_fdir_ipv6_vf[] = {
872 	RTE_FLOW_ITEM_TYPE_ETH,
873 	RTE_FLOW_ITEM_TYPE_IPV6,
874 	RTE_FLOW_ITEM_TYPE_VF,
875 	RTE_FLOW_ITEM_TYPE_END,
876 };
877 
878 static enum rte_flow_item_type pattern_fdir_ipv6_udp_vf[] = {
879 	RTE_FLOW_ITEM_TYPE_ETH,
880 	RTE_FLOW_ITEM_TYPE_IPV6,
881 	RTE_FLOW_ITEM_TYPE_UDP,
882 	RTE_FLOW_ITEM_TYPE_VF,
883 	RTE_FLOW_ITEM_TYPE_END,
884 };
885 
886 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_vf[] = {
887 	RTE_FLOW_ITEM_TYPE_ETH,
888 	RTE_FLOW_ITEM_TYPE_IPV6,
889 	RTE_FLOW_ITEM_TYPE_TCP,
890 	RTE_FLOW_ITEM_TYPE_VF,
891 	RTE_FLOW_ITEM_TYPE_END,
892 };
893 
894 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_vf[] = {
895 	RTE_FLOW_ITEM_TYPE_ETH,
896 	RTE_FLOW_ITEM_TYPE_IPV6,
897 	RTE_FLOW_ITEM_TYPE_SCTP,
898 	RTE_FLOW_ITEM_TYPE_VF,
899 	RTE_FLOW_ITEM_TYPE_END,
900 };
901 
902 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1_vf[] = {
903 	RTE_FLOW_ITEM_TYPE_ETH,
904 	RTE_FLOW_ITEM_TYPE_RAW,
905 	RTE_FLOW_ITEM_TYPE_VF,
906 	RTE_FLOW_ITEM_TYPE_END,
907 };
908 
909 static enum rte_flow_item_type pattern_fdir_ethertype_raw_2_vf[] = {
910 	RTE_FLOW_ITEM_TYPE_ETH,
911 	RTE_FLOW_ITEM_TYPE_RAW,
912 	RTE_FLOW_ITEM_TYPE_RAW,
913 	RTE_FLOW_ITEM_TYPE_VF,
914 	RTE_FLOW_ITEM_TYPE_END,
915 };
916 
917 static enum rte_flow_item_type pattern_fdir_ethertype_raw_3_vf[] = {
918 	RTE_FLOW_ITEM_TYPE_ETH,
919 	RTE_FLOW_ITEM_TYPE_RAW,
920 	RTE_FLOW_ITEM_TYPE_RAW,
921 	RTE_FLOW_ITEM_TYPE_RAW,
922 	RTE_FLOW_ITEM_TYPE_VF,
923 	RTE_FLOW_ITEM_TYPE_END,
924 };
925 
926 static enum rte_flow_item_type pattern_fdir_ipv4_raw_1_vf[] = {
927 	RTE_FLOW_ITEM_TYPE_ETH,
928 	RTE_FLOW_ITEM_TYPE_IPV4,
929 	RTE_FLOW_ITEM_TYPE_RAW,
930 	RTE_FLOW_ITEM_TYPE_VF,
931 	RTE_FLOW_ITEM_TYPE_END,
932 };
933 
934 static enum rte_flow_item_type pattern_fdir_ipv4_raw_2_vf[] = {
935 	RTE_FLOW_ITEM_TYPE_ETH,
936 	RTE_FLOW_ITEM_TYPE_IPV4,
937 	RTE_FLOW_ITEM_TYPE_RAW,
938 	RTE_FLOW_ITEM_TYPE_RAW,
939 	RTE_FLOW_ITEM_TYPE_VF,
940 	RTE_FLOW_ITEM_TYPE_END,
941 };
942 
943 static enum rte_flow_item_type pattern_fdir_ipv4_raw_3_vf[] = {
944 	RTE_FLOW_ITEM_TYPE_ETH,
945 	RTE_FLOW_ITEM_TYPE_IPV4,
946 	RTE_FLOW_ITEM_TYPE_RAW,
947 	RTE_FLOW_ITEM_TYPE_RAW,
948 	RTE_FLOW_ITEM_TYPE_RAW,
949 	RTE_FLOW_ITEM_TYPE_VF,
950 	RTE_FLOW_ITEM_TYPE_END,
951 };
952 
953 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1_vf[] = {
954 	RTE_FLOW_ITEM_TYPE_ETH,
955 	RTE_FLOW_ITEM_TYPE_IPV4,
956 	RTE_FLOW_ITEM_TYPE_UDP,
957 	RTE_FLOW_ITEM_TYPE_RAW,
958 	RTE_FLOW_ITEM_TYPE_VF,
959 	RTE_FLOW_ITEM_TYPE_END,
960 };
961 
962 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2_vf[] = {
963 	RTE_FLOW_ITEM_TYPE_ETH,
964 	RTE_FLOW_ITEM_TYPE_IPV4,
965 	RTE_FLOW_ITEM_TYPE_UDP,
966 	RTE_FLOW_ITEM_TYPE_RAW,
967 	RTE_FLOW_ITEM_TYPE_RAW,
968 	RTE_FLOW_ITEM_TYPE_VF,
969 	RTE_FLOW_ITEM_TYPE_END,
970 };
971 
972 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3_vf[] = {
973 	RTE_FLOW_ITEM_TYPE_ETH,
974 	RTE_FLOW_ITEM_TYPE_IPV4,
975 	RTE_FLOW_ITEM_TYPE_UDP,
976 	RTE_FLOW_ITEM_TYPE_RAW,
977 	RTE_FLOW_ITEM_TYPE_RAW,
978 	RTE_FLOW_ITEM_TYPE_RAW,
979 	RTE_FLOW_ITEM_TYPE_VF,
980 	RTE_FLOW_ITEM_TYPE_END,
981 };
982 
/*
 * FDIR patterns with flexible payload and a VF item: 1-3 RAW items
 * appended to the protocol headers, followed by a VF item
 * (IPv4 TCP/SCTP and IPv6 none/UDP/TCP/SCTP variants).
 */
static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};
1159 
/* FDIR single-VLAN patterns with a VF item (no flexible payload). */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};
1236 
/*
 * FDIR single-VLAN patterns with flexible payload (1-3 RAW items)
 * and a VF item.
 */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
};
1521 
/* Pattern matched tunnel filter */

/* VXLAN tunnel patterns: outer IPv4/IPv6, optional inner VLAN. */
static enum rte_flow_item_type pattern_vxlan_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

/* NVGRE tunnel patterns: outer IPv4/IPv6, optional inner VLAN. */
static enum rte_flow_item_type pattern_nvgre_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

/* MPLS patterns: MPLS over UDP (1/2) and over GRE (3/4). */
static enum rte_flow_item_type pattern_mpls_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_GRE,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_GRE,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

/* QinQ pattern: double-tagged Ethernet frame. */
static enum rte_flow_item_type pattern_qinq_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};
1633 
/* FDIR patterns for L2TPv3 over IP and for ESP (raw and UDP-encapsulated). */
static enum rte_flow_item_type pattern_fdir_ipv4_l2tpv3oip[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_l2tpv3oip[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};
1677 
/*
 * Registry of every supported item-type sequence, each paired with the
 * parse function that turns a matching rte_flow pattern into the driver's
 * filter representation.
 * NOTE(review): several pattern arrays appear more than once with
 * different parse callbacks (e.g. pattern_ethertype, pattern_fdir_ipv4_udp),
 * so entry order presumably decides which parser is tried first — confirm
 * against the lookup code in i40e_flow_validate.
 */
static struct i40e_valid_pattern i40e_supported_patterns[] = {
	/* Ethertype */
	{ pattern_ethertype, i40e_flow_parse_ethertype_filter },
	/* FDIR - support default flow type without flexible payload*/
	{ pattern_ethertype, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_esp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_esp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_esp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_esp, i40e_flow_parse_fdir_filter },
	/* FDIR - support default flow type with flexible payload */
	{ pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
	/* FDIR - support single vlan input set */
	{ pattern_fdir_ethertype_vlan, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_vlan_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_vlan_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_vlan_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
	/* FDIR - support VF item */
	{ pattern_fdir_ipv4_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_vlan_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_vlan_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_vlan_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ethertype_vlan_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_vlan_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
	/* VXLAN */
	{ pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
	{ pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
	{ pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
	{ pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
	/* NVGRE */
	{ pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
	{ pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
	{ pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
	{ pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
	/* MPLSoUDP & MPLSoGRE */
	{ pattern_mpls_1, i40e_flow_parse_mpls_filter },
	{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
	{ pattern_mpls_3, i40e_flow_parse_mpls_filter },
	{ pattern_mpls_4, i40e_flow_parse_mpls_filter },
	/* GTP-C & GTP-U */
	{ pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
	{ pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
	{ pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
	{ pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
	/* QINQ */
	{ pattern_qinq_1, i40e_flow_parse_qinq_filter },
	/* L2TPv3 over IP */
	{ pattern_fdir_ipv4_l2tpv3oip, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_l2tpv3oip, i40e_flow_parse_fdir_filter },
	/* L4 over port */
	{ pattern_fdir_ipv4_udp, i40e_flow_parse_l4_cloud_filter },
	{ pattern_fdir_ipv4_tcp, i40e_flow_parse_l4_cloud_filter },
	{ pattern_fdir_ipv4_sctp, i40e_flow_parse_l4_cloud_filter },
	{ pattern_fdir_ipv6_udp, i40e_flow_parse_l4_cloud_filter },
	{ pattern_fdir_ipv6_tcp, i40e_flow_parse_l4_cloud_filter },
	{ pattern_fdir_ipv6_sctp, i40e_flow_parse_l4_cloud_filter },
};
1873 
/*
 * Advance @act to the first non-VOID action in @actions starting at
 * @index, updating @index to that action's position.  Relies on the
 * action list being terminated (END is non-VOID), so the loop always
 * stops.
 */
#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
	do {                                                            \
		act = actions + index;                                  \
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
			index++;                                        \
			act = actions + index;                          \
		}                                                       \
	} while (0)
1882 
1883 /* Find the first VOID or non-VOID item pointer */
1884 static const struct rte_flow_item *
1885 i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
1886 {
1887 	bool is_find;
1888 
1889 	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1890 		if (is_void)
1891 			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
1892 		else
1893 			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
1894 		if (is_find)
1895 			break;
1896 		item++;
1897 	}
1898 	return item;
1899 }
1900 
1901 /* Skip all VOID items of the pattern */
1902 static void
1903 i40e_pattern_skip_void_item(struct rte_flow_item *items,
1904 			    const struct rte_flow_item *pattern)
1905 {
1906 	uint32_t cpy_count = 0;
1907 	const struct rte_flow_item *pb = pattern, *pe = pattern;
1908 
1909 	for (;;) {
1910 		/* Find a non-void item first */
1911 		pb = i40e_find_first_item(pb, false);
1912 		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
1913 			pe = pb;
1914 			break;
1915 		}
1916 
1917 		/* Find a void item */
1918 		pe = i40e_find_first_item(pb + 1, true);
1919 
1920 		cpy_count = pe - pb;
1921 		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
1922 
1923 		items += cpy_count;
1924 
1925 		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
1926 			pb = pe;
1927 			break;
1928 		}
1929 
1930 		pb = pe + 1;
1931 	}
1932 	/* Copy the END item. */
1933 	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
1934 }
1935 
1936 /* Check if the pattern matches a supported item type array */
1937 static bool
1938 i40e_match_pattern(enum rte_flow_item_type *item_array,
1939 		   struct rte_flow_item *pattern)
1940 {
1941 	struct rte_flow_item *item = pattern;
1942 
1943 	while ((*item_array == item->type) &&
1944 	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
1945 		item_array++;
1946 		item++;
1947 	}
1948 
1949 	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
1950 		item->type == RTE_FLOW_ITEM_TYPE_END);
1951 }
1952 
1953 /* Find if there's parse filter function matched */
1954 static parse_filter_t
1955 i40e_find_parse_filter_func(struct rte_flow_item *pattern, uint32_t *idx)
1956 {
1957 	parse_filter_t parse_filter = NULL;
1958 	uint8_t i = *idx;
1959 
1960 	for (; i < RTE_DIM(i40e_supported_patterns); i++) {
1961 		if (i40e_match_pattern(i40e_supported_patterns[i].items,
1962 					pattern)) {
1963 			parse_filter = i40e_supported_patterns[i].parse_filter;
1964 			break;
1965 		}
1966 	}
1967 
1968 	*idx = ++i;
1969 
1970 	return parse_filter;
1971 }
1972 
1973 /* Parse attributes */
1974 static int
1975 i40e_flow_parse_attr(const struct rte_flow_attr *attr,
1976 		     struct rte_flow_error *error)
1977 {
1978 	/* Must be input direction */
1979 	if (!attr->ingress) {
1980 		rte_flow_error_set(error, EINVAL,
1981 				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1982 				   attr, "Only support ingress.");
1983 		return -rte_errno;
1984 	}
1985 
1986 	/* Not supported */
1987 	if (attr->egress) {
1988 		rte_flow_error_set(error, EINVAL,
1989 				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1990 				   attr, "Not support egress.");
1991 		return -rte_errno;
1992 	}
1993 
1994 	/* Not supported */
1995 	if (attr->priority) {
1996 		rte_flow_error_set(error, EINVAL,
1997 				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1998 				   attr, "Not support priority.");
1999 		return -rte_errno;
2000 	}
2001 
2002 	/* Not supported */
2003 	if (attr->group) {
2004 		rte_flow_error_set(error, EINVAL,
2005 				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
2006 				   attr, "Not support group.");
2007 		return -rte_errno;
2008 	}
2009 
2010 	return 0;
2011 }
2012 
2013 static uint16_t
2014 i40e_get_outer_vlan(struct rte_eth_dev *dev)
2015 {
2016 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2017 	int qinq = dev->data->dev_conf.rxmode.offloads &
2018 		RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
2019 	uint64_t reg_r = 0;
2020 	uint16_t reg_id;
2021 	uint16_t tpid;
2022 
2023 	if (qinq)
2024 		reg_id = 2;
2025 	else
2026 		reg_id = 3;
2027 
2028 	i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
2029 				    &reg_r, NULL);
2030 
2031 	tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
2032 
2033 	return tpid;
2034 }
2035 
2036 /* 1. Last in item should be NULL as range is not supported.
2037  * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
2038  * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
2039  * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
2040  *    FF:FF:FF:FF:FF:FF
2041  * 5. Ether_type mask should be 0xFFFF.
2042  */
2043 static int
2044 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
2045 				  const struct rte_flow_item *pattern,
2046 				  struct rte_flow_error *error,
2047 				  struct rte_eth_ethertype_filter *filter)
2048 {
2049 	const struct rte_flow_item *item = pattern;
2050 	const struct rte_flow_item_eth *eth_spec;
2051 	const struct rte_flow_item_eth *eth_mask;
2052 	enum rte_flow_item_type item_type;
2053 
2054 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2055 		if (item->last) {
2056 			rte_flow_error_set(error, EINVAL,
2057 					   RTE_FLOW_ERROR_TYPE_ITEM,
2058 					   item,
2059 					   "Not support range");
2060 			return -rte_errno;
2061 		}
2062 		item_type = item->type;
2063 		switch (item_type) {
2064 		case RTE_FLOW_ITEM_TYPE_ETH:
2065 			eth_spec = item->spec;
2066 			eth_mask = item->mask;
2067 			/* Get the MAC info. */
2068 			if (!eth_spec || !eth_mask) {
2069 				rte_flow_error_set(error, EINVAL,
2070 						   RTE_FLOW_ERROR_TYPE_ITEM,
2071 						   item,
2072 						   "NULL ETH spec/mask");
2073 				return -rte_errno;
2074 			}
2075 
2076 			/* Mask bits of source MAC address must be full of 0.
2077 			 * Mask bits of destination MAC address must be full
2078 			 * of 1 or full of 0.
2079 			 */
2080 			if (!rte_is_zero_ether_addr(&eth_mask->src) ||
2081 			    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
2082 			     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
2083 				rte_flow_error_set(error, EINVAL,
2084 						   RTE_FLOW_ERROR_TYPE_ITEM,
2085 						   item,
2086 						   "Invalid MAC_addr mask");
2087 				return -rte_errno;
2088 			}
2089 
2090 			if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
2091 				rte_flow_error_set(error, EINVAL,
2092 						   RTE_FLOW_ERROR_TYPE_ITEM,
2093 						   item,
2094 						   "Invalid ethertype mask");
2095 				return -rte_errno;
2096 			}
2097 
2098 			/* If mask bits of destination MAC address
2099 			 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
2100 			 */
2101 			if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
2102 				filter->mac_addr = eth_spec->dst;
2103 				filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
2104 			} else {
2105 				filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
2106 			}
2107 			filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
2108 
2109 			if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2110 			    filter->ether_type == RTE_ETHER_TYPE_IPV6 ||
2111 			    filter->ether_type == RTE_ETHER_TYPE_LLDP ||
2112 			    filter->ether_type == i40e_get_outer_vlan(dev)) {
2113 				rte_flow_error_set(error, EINVAL,
2114 						   RTE_FLOW_ERROR_TYPE_ITEM,
2115 						   item,
2116 						   "Unsupported ether_type in"
2117 						   " control packet filter.");
2118 				return -rte_errno;
2119 			}
2120 			break;
2121 		default:
2122 			break;
2123 		}
2124 	}
2125 
2126 	return 0;
2127 }
2128 
2129 /* Ethertype action only supports QUEUE or DROP. */
2130 static int
2131 i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
2132 				 const struct rte_flow_action *actions,
2133 				 struct rte_flow_error *error,
2134 				 struct rte_eth_ethertype_filter *filter)
2135 {
2136 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2137 	const struct rte_flow_action *act;
2138 	const struct rte_flow_action_queue *act_q;
2139 	uint32_t index = 0;
2140 
2141 	/* Check if the first non-void action is QUEUE or DROP. */
2142 	NEXT_ITEM_OF_ACTION(act, actions, index);
2143 	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
2144 	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
2145 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2146 				   act, "Not supported action.");
2147 		return -rte_errno;
2148 	}
2149 
2150 	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2151 		act_q = act->conf;
2152 		filter->queue = act_q->index;
2153 		if (filter->queue >= pf->dev_data->nb_rx_queues) {
2154 			rte_flow_error_set(error, EINVAL,
2155 					   RTE_FLOW_ERROR_TYPE_ACTION,
2156 					   act, "Invalid queue ID for"
2157 					   " ethertype_filter.");
2158 			return -rte_errno;
2159 		}
2160 	} else {
2161 		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
2162 	}
2163 
2164 	/* Check if the next non-void item is END */
2165 	index++;
2166 	NEXT_ITEM_OF_ACTION(act, actions, index);
2167 	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2168 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2169 				   act, "Not supported action.");
2170 		return -rte_errno;
2171 	}
2172 
2173 	return 0;
2174 }
2175 
2176 static int
2177 i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
2178 				 const struct rte_flow_attr *attr,
2179 				 const struct rte_flow_item pattern[],
2180 				 const struct rte_flow_action actions[],
2181 				 struct rte_flow_error *error,
2182 				 union i40e_filter_t *filter)
2183 {
2184 	struct rte_eth_ethertype_filter *ethertype_filter =
2185 		&filter->ethertype_filter;
2186 	int ret;
2187 
2188 	ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
2189 						ethertype_filter);
2190 	if (ret)
2191 		return ret;
2192 
2193 	ret = i40e_flow_parse_ethertype_action(dev, actions, error,
2194 					       ethertype_filter);
2195 	if (ret)
2196 		return ret;
2197 
2198 	ret = i40e_flow_parse_attr(attr, error);
2199 	if (ret)
2200 		return ret;
2201 
2202 	cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
2203 
2204 	return ret;
2205 }
2206 
2207 static int
2208 i40e_flow_check_raw_item(const struct rte_flow_item *item,
2209 			 const struct rte_flow_item_raw *raw_spec,
2210 			 struct rte_flow_error *error)
2211 {
2212 	if (!raw_spec->relative) {
2213 		rte_flow_error_set(error, EINVAL,
2214 				   RTE_FLOW_ERROR_TYPE_ITEM,
2215 				   item,
2216 				   "Relative should be 1.");
2217 		return -rte_errno;
2218 	}
2219 
2220 	if (raw_spec->offset % sizeof(uint16_t)) {
2221 		rte_flow_error_set(error, EINVAL,
2222 				   RTE_FLOW_ERROR_TYPE_ITEM,
2223 				   item,
2224 				   "Offset should be even.");
2225 		return -rte_errno;
2226 	}
2227 
2228 	if (raw_spec->search || raw_spec->limit) {
2229 		rte_flow_error_set(error, EINVAL,
2230 				   RTE_FLOW_ERROR_TYPE_ITEM,
2231 				   item,
2232 				   "search or limit is not supported.");
2233 		return -rte_errno;
2234 	}
2235 
2236 	if (raw_spec->offset < 0) {
2237 		rte_flow_error_set(error, EINVAL,
2238 				   RTE_FLOW_ERROR_TYPE_ITEM,
2239 				   item,
2240 				   "Offset should be non-negative.");
2241 		return -rte_errno;
2242 	}
2243 	return 0;
2244 }
2245 
2246 
/* Map a customized flow item type (GTP-C/GTP-U/L2TPv3oIP/ESP) to the
 * packet classifier type (pctype) registered for it in the PF's
 * customized-pctype table.
 *
 * @param pf         board private structure holding the pctype table
 * @param item_type  flow item selecting the tunnel/protocol family
 * @param filter     partially parsed FDIR filter; its flow_ext fields
 *                   (inner_ip, iip_type, oip_type, is_udp) refine the
 *                   lookup
 * @return the pctype value when a matching, valid customized pctype
 *         exists; I40E_FILTER_PCTYPE_INVALID otherwise
 */
static uint8_t
i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
				enum rte_flow_item_type item_type,
				struct i40e_fdir_filter_conf *filter)
{
	struct i40e_customized_pctype *cus_pctype = NULL;

	switch (item_type) {
	case RTE_FLOW_ITEM_TYPE_GTPC:
		cus_pctype = i40e_find_customized_pctype(pf,
							 I40E_CUSTOMIZED_GTPC);
		break;
	case RTE_FLOW_ITEM_TYPE_GTPU:
		/* Plain GTP-U when no inner IP header was parsed;
		 * otherwise select by the inner IP version.
		 */
		if (!filter->input.flow_ext.inner_ip)
			cus_pctype = i40e_find_customized_pctype(pf,
							 I40E_CUSTOMIZED_GTPU);
		else if (filter->input.flow_ext.iip_type ==
			 I40E_FDIR_IPTYPE_IPV4)
			cus_pctype = i40e_find_customized_pctype(pf,
						 I40E_CUSTOMIZED_GTPU_IPV4);
		else if (filter->input.flow_ext.iip_type ==
			 I40E_FDIR_IPTYPE_IPV6)
			cus_pctype = i40e_find_customized_pctype(pf,
						 I40E_CUSTOMIZED_GTPU_IPV6);
		break;
	case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
		/* L2TPv3 selects by the outer IP version. */
		if (filter->input.flow_ext.oip_type == I40E_FDIR_IPTYPE_IPV4)
			cus_pctype = i40e_find_customized_pctype(pf,
						I40E_CUSTOMIZED_IPV4_L2TPV3);
		else if (filter->input.flow_ext.oip_type ==
			 I40E_FDIR_IPTYPE_IPV6)
			cus_pctype = i40e_find_customized_pctype(pf,
						I40E_CUSTOMIZED_IPV6_L2TPV3);
		break;
	case RTE_FLOW_ITEM_TYPE_ESP:
		/* ESP selects by outer IP version and by whether it is
		 * UDP-encapsulated (NAT-T style, is_udp set earlier).
		 */
		if (!filter->input.flow_ext.is_udp) {
			if (filter->input.flow_ext.oip_type ==
				I40E_FDIR_IPTYPE_IPV4)
				cus_pctype = i40e_find_customized_pctype(pf,
						I40E_CUSTOMIZED_ESP_IPV4);
			else if (filter->input.flow_ext.oip_type ==
				I40E_FDIR_IPTYPE_IPV6)
				cus_pctype = i40e_find_customized_pctype(pf,
						I40E_CUSTOMIZED_ESP_IPV6);
		} else {
			if (filter->input.flow_ext.oip_type ==
				I40E_FDIR_IPTYPE_IPV4)
				cus_pctype = i40e_find_customized_pctype(pf,
						I40E_CUSTOMIZED_ESP_IPV4_UDP);
			else if (filter->input.flow_ext.oip_type ==
					I40E_FDIR_IPTYPE_IPV6)
				cus_pctype = i40e_find_customized_pctype(pf,
						I40E_CUSTOMIZED_ESP_IPV6_UDP);
			/* is_udp is consumed here and cleared; NOTE(review):
			 * presumably so the flag does not leak into later
			 * filter parsing — confirm against callers.
			 */
			filter->input.flow_ext.is_udp = false;
		}
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported item type");
		break;
	}

	/* Only return a pctype that firmware actually registered. */
	if (cus_pctype && cus_pctype->valid)
		return cus_pctype->pctype;

	return I40E_FILTER_PCTYPE_INVALID;
}
2313 
2314 static void
2315 i40e_flow_set_filter_spi(struct i40e_fdir_filter_conf *filter,
2316 	const struct rte_flow_item_esp *esp_spec)
2317 {
2318 	if (filter->input.flow_ext.oip_type ==
2319 		I40E_FDIR_IPTYPE_IPV4) {
2320 		if (filter->input.flow_ext.is_udp)
2321 			filter->input.flow.esp_ipv4_udp_flow.spi =
2322 				esp_spec->hdr.spi;
2323 		else
2324 			filter->input.flow.esp_ipv4_flow.spi =
2325 				esp_spec->hdr.spi;
2326 	}
2327 	if (filter->input.flow_ext.oip_type ==
2328 		I40E_FDIR_IPTYPE_IPV6) {
2329 		if (filter->input.flow_ext.is_udp)
2330 			filter->input.flow.esp_ipv6_udp_flow.spi =
2331 				esp_spec->hdr.spi;
2332 		else
2333 			filter->input.flow.esp_ipv6_flow.spi =
2334 				esp_spec->hdr.spi;
2335 	}
2336 }
2337 
2338 /* 1. Last in item should be NULL as range is not supported.
2339  * 2. Supported patterns: refer to array i40e_supported_patterns.
2340  * 3. Default supported flow type and input set: refer to array
2341  *    valid_fdir_inset_table in i40e_ethdev.c.
2342  * 4. Mask of fields which need to be matched should be
2343  *    filled with 1.
2344  * 5. Mask of fields which needn't to be matched should be
2345  *    filled with 0.
2346  * 6. GTP profile supports GTPv1 only.
2347  * 7. GTP-C response message ('source_port' = 2123) is not supported.
2348  */
2349 static int
2350 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
2351 			     const struct rte_flow_attr *attr,
2352 			     const struct rte_flow_item *pattern,
2353 			     struct rte_flow_error *error,
2354 			     struct i40e_fdir_filter_conf *filter)
2355 {
2356 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2357 	const struct rte_flow_item *item = pattern;
2358 	const struct rte_flow_item_eth *eth_spec, *eth_mask;
2359 	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
2360 	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
2361 	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
2362 	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
2363 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
2364 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
2365 	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
2366 	const struct rte_flow_item_esp *esp_spec, *esp_mask;
2367 	const struct rte_flow_item_raw *raw_spec, *raw_mask;
2368 	const struct rte_flow_item_vf *vf_spec;
2369 	const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
2370 
2371 	uint8_t pctype = 0;
2372 	uint64_t input_set = I40E_INSET_NONE;
2373 	enum rte_flow_item_type item_type;
2374 	enum rte_flow_item_type next_type;
2375 	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
2376 	enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
2377 	uint32_t i, j;
2378 	uint8_t  ipv6_addr_mask[16] = {
2379 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
2380 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2381 	enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
2382 	uint8_t raw_id = 0;
2383 	int32_t off_arr[I40E_MAX_FLXPLD_FIED];
2384 	uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
2385 	struct i40e_fdir_flex_pit flex_pit;
2386 	uint8_t next_dst_off = 0;
2387 	uint16_t flex_size;
2388 	uint16_t ether_type;
2389 	uint32_t vtc_flow_cpu;
2390 	bool outer_ip = true;
2391 	uint8_t field_idx;
2392 	int ret;
2393 
2394 	memset(off_arr, 0, sizeof(off_arr));
2395 	memset(len_arr, 0, sizeof(len_arr));
2396 	filter->input.flow_ext.customized_pctype = false;
2397 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2398 		if (item->last && item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
2399 			rte_flow_error_set(error, EINVAL,
2400 					   RTE_FLOW_ERROR_TYPE_ITEM,
2401 					   item,
2402 					   "Not support range");
2403 			return -rte_errno;
2404 		}
2405 		item_type = item->type;
2406 		switch (item_type) {
2407 		case RTE_FLOW_ITEM_TYPE_ETH:
2408 			eth_spec = item->spec;
2409 			eth_mask = item->mask;
2410 			next_type = (item + 1)->type;
2411 
2412 			if (next_type == RTE_FLOW_ITEM_TYPE_END &&
2413 						(!eth_spec || !eth_mask)) {
2414 				rte_flow_error_set(error, EINVAL,
2415 						   RTE_FLOW_ERROR_TYPE_ITEM,
2416 						   item,
2417 						   "NULL eth spec/mask.");
2418 				return -rte_errno;
2419 			}
2420 
2421 			if (eth_spec && eth_mask) {
2422 				if (rte_is_broadcast_ether_addr(&eth_mask->dst) &&
2423 					rte_is_zero_ether_addr(&eth_mask->src)) {
2424 					filter->input.flow.l2_flow.dst =
2425 						eth_spec->dst;
2426 					input_set |= I40E_INSET_DMAC;
2427 				} else if (rte_is_zero_ether_addr(&eth_mask->dst) &&
2428 					rte_is_broadcast_ether_addr(&eth_mask->src)) {
2429 					filter->input.flow.l2_flow.src =
2430 						eth_spec->src;
2431 					input_set |= I40E_INSET_SMAC;
2432 				} else if (rte_is_broadcast_ether_addr(&eth_mask->dst) &&
2433 					rte_is_broadcast_ether_addr(&eth_mask->src)) {
2434 					filter->input.flow.l2_flow.dst =
2435 						eth_spec->dst;
2436 					filter->input.flow.l2_flow.src =
2437 						eth_spec->src;
2438 					input_set |= (I40E_INSET_DMAC | I40E_INSET_SMAC);
2439 				} else if (!rte_is_zero_ether_addr(&eth_mask->src) ||
2440 					   !rte_is_zero_ether_addr(&eth_mask->dst)) {
2441 					rte_flow_error_set(error, EINVAL,
2442 						      RTE_FLOW_ERROR_TYPE_ITEM,
2443 						      item,
2444 						      "Invalid MAC_addr mask.");
2445 					return -rte_errno;
2446 				}
2447 			}
2448 			if (eth_spec && eth_mask &&
2449 			next_type == RTE_FLOW_ITEM_TYPE_END) {
2450 				if (eth_mask->type != RTE_BE16(0xffff)) {
2451 					rte_flow_error_set(error, EINVAL,
2452 						      RTE_FLOW_ERROR_TYPE_ITEM,
2453 						      item,
2454 						      "Invalid type mask.");
2455 					return -rte_errno;
2456 				}
2457 
2458 				ether_type = rte_be_to_cpu_16(eth_spec->type);
2459 
2460 				if (next_type == RTE_FLOW_ITEM_TYPE_VLAN ||
2461 				    ether_type == RTE_ETHER_TYPE_IPV4 ||
2462 				    ether_type == RTE_ETHER_TYPE_IPV6 ||
2463 				    ether_type == i40e_get_outer_vlan(dev)) {
2464 					rte_flow_error_set(error, EINVAL,
2465 						     RTE_FLOW_ERROR_TYPE_ITEM,
2466 						     item,
2467 						     "Unsupported ether_type.");
2468 					return -rte_errno;
2469 				}
2470 				input_set |= I40E_INSET_LAST_ETHER_TYPE;
2471 				filter->input.flow.l2_flow.ether_type =
2472 					eth_spec->type;
2473 			}
2474 
2475 			pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2476 			layer_idx = I40E_FLXPLD_L2_IDX;
2477 
2478 			break;
2479 		case RTE_FLOW_ITEM_TYPE_VLAN:
2480 			vlan_spec = item->spec;
2481 			vlan_mask = item->mask;
2482 
2483 			RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE));
2484 			if (vlan_spec && vlan_mask) {
2485 				if (vlan_mask->tci !=
2486 				    rte_cpu_to_be_16(I40E_VLAN_TCI_MASK) &&
2487 				    vlan_mask->tci !=
2488 				    rte_cpu_to_be_16(I40E_VLAN_PRI_MASK) &&
2489 				    vlan_mask->tci !=
2490 				    rte_cpu_to_be_16(I40E_VLAN_CFI_MASK) &&
2491 				    vlan_mask->tci !=
2492 				    rte_cpu_to_be_16(I40E_VLAN_VID_MASK)) {
2493 					rte_flow_error_set(error, EINVAL,
2494 						   RTE_FLOW_ERROR_TYPE_ITEM,
2495 						   item,
2496 						   "Unsupported TCI mask.");
2497 				}
2498 				input_set |= I40E_INSET_VLAN_INNER;
2499 				filter->input.flow_ext.vlan_tci =
2500 					vlan_spec->tci;
2501 			}
2502 			if (vlan_spec && vlan_mask && vlan_mask->inner_type) {
2503 				if (vlan_mask->inner_type != RTE_BE16(0xffff)) {
2504 					rte_flow_error_set(error, EINVAL,
2505 						      RTE_FLOW_ERROR_TYPE_ITEM,
2506 						      item,
2507 						      "Invalid inner_type"
2508 						      " mask.");
2509 					return -rte_errno;
2510 				}
2511 
2512 				ether_type =
2513 					rte_be_to_cpu_16(vlan_spec->inner_type);
2514 
2515 				if (ether_type == RTE_ETHER_TYPE_IPV4 ||
2516 				    ether_type == RTE_ETHER_TYPE_IPV6 ||
2517 				    ether_type == i40e_get_outer_vlan(dev)) {
2518 					rte_flow_error_set(error, EINVAL,
2519 						     RTE_FLOW_ERROR_TYPE_ITEM,
2520 						     item,
2521 						     "Unsupported inner_type.");
2522 					return -rte_errno;
2523 				}
2524 				input_set |= I40E_INSET_LAST_ETHER_TYPE;
2525 				filter->input.flow.l2_flow.ether_type =
2526 					vlan_spec->inner_type;
2527 			}
2528 
2529 			pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2530 			layer_idx = I40E_FLXPLD_L2_IDX;
2531 
2532 			break;
2533 		case RTE_FLOW_ITEM_TYPE_IPV4:
2534 			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
2535 			ipv4_spec = item->spec;
2536 			ipv4_mask = item->mask;
2537 			ipv4_last = item->last;
2538 			pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
2539 			layer_idx = I40E_FLXPLD_L3_IDX;
2540 
2541 			if (ipv4_last) {
2542 				if (!ipv4_spec || !ipv4_mask || !outer_ip) {
2543 					rte_flow_error_set(error, EINVAL,
2544 						RTE_FLOW_ERROR_TYPE_ITEM,
2545 						item,
2546 						"Not support range");
2547 					return -rte_errno;
2548 				}
2549 				/* Only fragment_offset supports range */
2550 				if (ipv4_last->hdr.version_ihl ||
2551 				    ipv4_last->hdr.type_of_service ||
2552 				    ipv4_last->hdr.total_length ||
2553 				    ipv4_last->hdr.packet_id ||
2554 				    ipv4_last->hdr.time_to_live ||
2555 				    ipv4_last->hdr.next_proto_id ||
2556 				    ipv4_last->hdr.hdr_checksum ||
2557 				    ipv4_last->hdr.src_addr ||
2558 				    ipv4_last->hdr.dst_addr) {
2559 					rte_flow_error_set(error, EINVAL,
2560 						   RTE_FLOW_ERROR_TYPE_ITEM,
2561 						   item,
2562 						   "Not support range");
2563 					return -rte_errno;
2564 				}
2565 			}
2566 			if (ipv4_spec && ipv4_mask && outer_ip) {
2567 				/* Check IPv4 mask and update input set */
2568 				if (ipv4_mask->hdr.version_ihl ||
2569 				    ipv4_mask->hdr.total_length ||
2570 				    ipv4_mask->hdr.packet_id ||
2571 				    ipv4_mask->hdr.hdr_checksum) {
2572 					rte_flow_error_set(error, EINVAL,
2573 						   RTE_FLOW_ERROR_TYPE_ITEM,
2574 						   item,
2575 						   "Invalid IPv4 mask.");
2576 					return -rte_errno;
2577 				}
2578 
2579 				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
2580 					input_set |= I40E_INSET_IPV4_SRC;
2581 				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
2582 					input_set |= I40E_INSET_IPV4_DST;
2583 				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
2584 					input_set |= I40E_INSET_IPV4_TOS;
2585 				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
2586 					input_set |= I40E_INSET_IPV4_TTL;
2587 				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
2588 					input_set |= I40E_INSET_IPV4_PROTO;
2589 
2590 				/* Check if it is fragment. */
2591 				uint16_t frag_mask =
2592 					ipv4_mask->hdr.fragment_offset;
2593 				uint16_t frag_spec =
2594 					ipv4_spec->hdr.fragment_offset;
2595 				uint16_t frag_last = 0;
2596 				if (ipv4_last)
2597 					frag_last =
2598 					ipv4_last->hdr.fragment_offset;
2599 				if (frag_mask) {
2600 					frag_mask = rte_be_to_cpu_16(frag_mask);
2601 					frag_spec = rte_be_to_cpu_16(frag_spec);
2602 					frag_last = rte_be_to_cpu_16(frag_last);
2603 					/* frag_off mask has to be 0x3fff */
2604 					if (frag_mask !=
2605 					    (RTE_IPV4_HDR_OFFSET_MASK |
2606 					    RTE_IPV4_HDR_MF_FLAG)) {
2607 						rte_flow_error_set(error,
2608 						   EINVAL,
2609 						   RTE_FLOW_ERROR_TYPE_ITEM,
2610 						   item,
2611 						   "Invalid IPv4 fragment_offset mask");
2612 						return -rte_errno;
2613 					}
2614 					/*
2615 					 * non-frag rule:
2616 					 * mask=0x3fff,spec=0
2617 					 * frag rule:
2618 					 * mask=0x3fff,spec=0x8,last=0x2000
2619 					 */
2620 					if (frag_spec ==
2621 					    (1 << RTE_IPV4_HDR_FO_SHIFT) &&
2622 					    frag_last == RTE_IPV4_HDR_MF_FLAG) {
2623 						pctype =
2624 						  I40E_FILTER_PCTYPE_FRAG_IPV4;
2625 					} else if (frag_spec || frag_last) {
2626 						rte_flow_error_set(error,
2627 						   EINVAL,
2628 						   RTE_FLOW_ERROR_TYPE_ITEM,
2629 						   item,
2630 						   "Invalid IPv4 fragment_offset rule");
2631 						return -rte_errno;
2632 					}
2633 				} else if (frag_spec || frag_last) {
2634 					rte_flow_error_set(error,
2635 						EINVAL,
2636 						RTE_FLOW_ERROR_TYPE_ITEM,
2637 						item,
2638 						"Invalid fragment_offset");
2639 					return -rte_errno;
2640 				}
2641 
2642 				if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2643 					if (input_set & (I40E_INSET_IPV4_SRC |
2644 						I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
2645 						I40E_INSET_IPV4_TTL | I40E_INSET_IPV4_PROTO)) {
2646 						rte_flow_error_set(error, EINVAL,
2647 							RTE_FLOW_ERROR_TYPE_ITEM,
2648 							item,
2649 							"L2 and L3 input set are exclusive.");
2650 						return -rte_errno;
2651 					}
2652 				} else {
2653 					/* Get the filter info */
2654 					filter->input.flow.ip4_flow.proto =
2655 						ipv4_spec->hdr.next_proto_id;
2656 					filter->input.flow.ip4_flow.tos =
2657 						ipv4_spec->hdr.type_of_service;
2658 					filter->input.flow.ip4_flow.ttl =
2659 						ipv4_spec->hdr.time_to_live;
2660 					filter->input.flow.ip4_flow.src_ip =
2661 						ipv4_spec->hdr.src_addr;
2662 					filter->input.flow.ip4_flow.dst_ip =
2663 						ipv4_spec->hdr.dst_addr;
2664 
2665 					filter->input.flow_ext.inner_ip = false;
2666 					filter->input.flow_ext.oip_type =
2667 						I40E_FDIR_IPTYPE_IPV4;
2668 				}
2669 			} else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
2670 				filter->input.flow_ext.inner_ip = true;
2671 				filter->input.flow_ext.iip_type =
2672 					I40E_FDIR_IPTYPE_IPV4;
2673 			} else if (!ipv4_spec && !ipv4_mask && outer_ip) {
2674 				filter->input.flow_ext.inner_ip = false;
2675 				filter->input.flow_ext.oip_type =
2676 					I40E_FDIR_IPTYPE_IPV4;
2677 			} else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
2678 				rte_flow_error_set(error, EINVAL,
2679 						   RTE_FLOW_ERROR_TYPE_ITEM,
2680 						   item,
2681 						   "Invalid inner IPv4 mask.");
2682 				return -rte_errno;
2683 			}
2684 
2685 			if (outer_ip)
2686 				outer_ip = false;
2687 
2688 			break;
2689 		case RTE_FLOW_ITEM_TYPE_IPV6:
2690 			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
2691 			ipv6_spec = item->spec;
2692 			ipv6_mask = item->mask;
2693 			pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
2694 			layer_idx = I40E_FLXPLD_L3_IDX;
2695 
2696 			if (ipv6_spec && ipv6_mask && outer_ip) {
2697 				/* Check IPv6 mask and update input set */
2698 				if (ipv6_mask->hdr.payload_len) {
2699 					rte_flow_error_set(error, EINVAL,
2700 						   RTE_FLOW_ERROR_TYPE_ITEM,
2701 						   item,
2702 						   "Invalid IPv6 mask");
2703 					return -rte_errno;
2704 				}
2705 
2706 				if (!memcmp(ipv6_mask->hdr.src_addr,
2707 					    ipv6_addr_mask,
2708 					    RTE_DIM(ipv6_mask->hdr.src_addr)))
2709 					input_set |= I40E_INSET_IPV6_SRC;
2710 				if (!memcmp(ipv6_mask->hdr.dst_addr,
2711 					    ipv6_addr_mask,
2712 					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
2713 					input_set |= I40E_INSET_IPV6_DST;
2714 
2715 				if ((ipv6_mask->hdr.vtc_flow &
2716 				     rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2717 				    == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2718 					input_set |= I40E_INSET_IPV6_TC;
2719 				if (ipv6_mask->hdr.proto == UINT8_MAX)
2720 					input_set |= I40E_INSET_IPV6_NEXT_HDR;
2721 				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
2722 					input_set |= I40E_INSET_IPV6_HOP_LIMIT;
2723 
2724 				/* Get filter info */
2725 				vtc_flow_cpu =
2726 				      rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
2727 				filter->input.flow.ipv6_flow.tc =
2728 					(uint8_t)(vtc_flow_cpu >>
2729 						  I40E_FDIR_IPv6_TC_OFFSET);
2730 				filter->input.flow.ipv6_flow.proto =
2731 					ipv6_spec->hdr.proto;
2732 				filter->input.flow.ipv6_flow.hop_limits =
2733 					ipv6_spec->hdr.hop_limits;
2734 
2735 				filter->input.flow_ext.inner_ip = false;
2736 				filter->input.flow_ext.oip_type =
2737 					I40E_FDIR_IPTYPE_IPV6;
2738 
2739 				rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
2740 					   ipv6_spec->hdr.src_addr, 16);
2741 				rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
2742 					   ipv6_spec->hdr.dst_addr, 16);
2743 
2744 				/* Check if it is fragment. */
2745 				if (ipv6_spec->hdr.proto ==
2746 				    I40E_IPV6_FRAG_HEADER)
2747 					pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
2748 			} else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
2749 				filter->input.flow_ext.inner_ip = true;
2750 				filter->input.flow_ext.iip_type =
2751 					I40E_FDIR_IPTYPE_IPV6;
2752 			} else if (!ipv6_spec && !ipv6_mask && outer_ip) {
2753 				filter->input.flow_ext.inner_ip = false;
2754 				filter->input.flow_ext.oip_type =
2755 					I40E_FDIR_IPTYPE_IPV6;
2756 			} else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
2757 				rte_flow_error_set(error, EINVAL,
2758 						   RTE_FLOW_ERROR_TYPE_ITEM,
2759 						   item,
2760 						   "Invalid inner IPv6 mask");
2761 				return -rte_errno;
2762 			}
2763 
2764 			if (outer_ip)
2765 				outer_ip = false;
2766 			break;
2767 		case RTE_FLOW_ITEM_TYPE_TCP:
2768 			tcp_spec = item->spec;
2769 			tcp_mask = item->mask;
2770 
2771 			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2772 				pctype =
2773 					I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
2774 			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2775 				pctype =
2776 					I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
2777 			if (tcp_spec && tcp_mask) {
2778 				/* Check TCP mask and update input set */
2779 				if (tcp_mask->hdr.sent_seq ||
2780 				    tcp_mask->hdr.recv_ack ||
2781 				    tcp_mask->hdr.data_off ||
2782 				    tcp_mask->hdr.tcp_flags ||
2783 				    tcp_mask->hdr.rx_win ||
2784 				    tcp_mask->hdr.cksum ||
2785 				    tcp_mask->hdr.tcp_urp) {
2786 					rte_flow_error_set(error, EINVAL,
2787 						   RTE_FLOW_ERROR_TYPE_ITEM,
2788 						   item,
2789 						   "Invalid TCP mask");
2790 					return -rte_errno;
2791 				}
2792 
2793 				if (tcp_mask->hdr.src_port == UINT16_MAX)
2794 					input_set |= I40E_INSET_SRC_PORT;
2795 				if (tcp_mask->hdr.dst_port == UINT16_MAX)
2796 					input_set |= I40E_INSET_DST_PORT;
2797 
2798 				if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2799 					if (input_set &
2800 						(I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) {
2801 						rte_flow_error_set(error, EINVAL,
2802 							RTE_FLOW_ERROR_TYPE_ITEM,
2803 							item,
2804 							"L2 and L4 input set are exclusive.");
2805 						return -rte_errno;
2806 					}
2807 				} else {
2808 					/* Get filter info */
2809 					if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2810 						filter->input.flow.tcp4_flow.src_port =
2811 							tcp_spec->hdr.src_port;
2812 						filter->input.flow.tcp4_flow.dst_port =
2813 							tcp_spec->hdr.dst_port;
2814 					} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2815 						filter->input.flow.tcp6_flow.src_port =
2816 							tcp_spec->hdr.src_port;
2817 						filter->input.flow.tcp6_flow.dst_port =
2818 							tcp_spec->hdr.dst_port;
2819 					}
2820 				}
2821 			}
2822 
2823 			layer_idx = I40E_FLXPLD_L4_IDX;
2824 
2825 			break;
2826 		case RTE_FLOW_ITEM_TYPE_UDP:
2827 			udp_spec = item->spec;
2828 			udp_mask = item->mask;
2829 
2830 			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2831 				pctype =
2832 					I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
2833 			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2834 				pctype =
2835 					I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
2836 
2837 			if (udp_spec && udp_mask) {
2838 				/* Check UDP mask and update input set*/
2839 				if (udp_mask->hdr.dgram_len ||
2840 				    udp_mask->hdr.dgram_cksum) {
2841 					rte_flow_error_set(error, EINVAL,
2842 						   RTE_FLOW_ERROR_TYPE_ITEM,
2843 						   item,
2844 						   "Invalid UDP mask");
2845 					return -rte_errno;
2846 				}
2847 
2848 				if (udp_mask->hdr.src_port == UINT16_MAX)
2849 					input_set |= I40E_INSET_SRC_PORT;
2850 				if (udp_mask->hdr.dst_port == UINT16_MAX)
2851 					input_set |= I40E_INSET_DST_PORT;
2852 
2853 				if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2854 					if (input_set &
2855 						(I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) {
2856 						rte_flow_error_set(error, EINVAL,
2857 							RTE_FLOW_ERROR_TYPE_ITEM,
2858 							item,
2859 							"L2 and L4 input set are exclusive.");
2860 						return -rte_errno;
2861 					}
2862 				} else {
2863 					/* Get filter info */
2864 					if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2865 						filter->input.flow.udp4_flow.src_port =
2866 							udp_spec->hdr.src_port;
2867 						filter->input.flow.udp4_flow.dst_port =
2868 							udp_spec->hdr.dst_port;
2869 					} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2870 						filter->input.flow.udp6_flow.src_port =
2871 							udp_spec->hdr.src_port;
2872 						filter->input.flow.udp6_flow.dst_port =
2873 							udp_spec->hdr.dst_port;
2874 					}
2875 				}
2876 			}
2877 			filter->input.flow_ext.is_udp = true;
2878 			layer_idx = I40E_FLXPLD_L4_IDX;
2879 
2880 			break;
2881 		case RTE_FLOW_ITEM_TYPE_GTPC:
2882 		case RTE_FLOW_ITEM_TYPE_GTPU:
2883 			if (!pf->gtp_support) {
2884 				rte_flow_error_set(error, EINVAL,
2885 						   RTE_FLOW_ERROR_TYPE_ITEM,
2886 						   item,
2887 						   "Unsupported protocol");
2888 				return -rte_errno;
2889 			}
2890 
2891 			gtp_spec = item->spec;
2892 			gtp_mask = item->mask;
2893 
2894 			if (gtp_spec && gtp_mask) {
2895 				if (gtp_mask->v_pt_rsv_flags ||
2896 				    gtp_mask->msg_type ||
2897 				    gtp_mask->msg_len ||
2898 				    gtp_mask->teid != UINT32_MAX) {
2899 					rte_flow_error_set(error, EINVAL,
2900 						   RTE_FLOW_ERROR_TYPE_ITEM,
2901 						   item,
2902 						   "Invalid GTP mask");
2903 					return -rte_errno;
2904 				}
2905 
2906 				filter->input.flow.gtp_flow.teid =
2907 					gtp_spec->teid;
2908 				filter->input.flow_ext.customized_pctype = true;
2909 				cus_proto = item_type;
2910 			}
2911 			break;
2912 		case RTE_FLOW_ITEM_TYPE_ESP:
2913 			if (!pf->esp_support) {
2914 				rte_flow_error_set(error, EINVAL,
2915 						   RTE_FLOW_ERROR_TYPE_ITEM,
2916 						   item,
2917 						   "Unsupported ESP protocol");
2918 				return -rte_errno;
2919 			}
2920 
2921 			esp_spec = item->spec;
2922 			esp_mask = item->mask;
2923 
2924 			if (!esp_spec || !esp_mask) {
2925 				rte_flow_error_set(error, EINVAL,
2926 						   RTE_FLOW_ERROR_TYPE_ITEM,
2927 						   item,
2928 						   "Invalid ESP item");
2929 				return -rte_errno;
2930 			}
2931 
2932 			if (esp_spec && esp_mask) {
2933 				if (esp_mask->hdr.spi != UINT32_MAX) {
2934 					rte_flow_error_set(error, EINVAL,
2935 						   RTE_FLOW_ERROR_TYPE_ITEM,
2936 						   item,
2937 						   "Invalid ESP mask");
2938 					return -rte_errno;
2939 				}
2940 				i40e_flow_set_filter_spi(filter, esp_spec);
2941 				filter->input.flow_ext.customized_pctype = true;
2942 				cus_proto = item_type;
2943 			}
2944 			break;
2945 		case RTE_FLOW_ITEM_TYPE_SCTP:
2946 			sctp_spec = item->spec;
2947 			sctp_mask = item->mask;
2948 
2949 			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2950 				pctype =
2951 					I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
2952 			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2953 				pctype =
2954 					I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
2955 
2956 			if (sctp_spec && sctp_mask) {
2957 				/* Check SCTP mask and update input set */
2958 				if (sctp_mask->hdr.cksum) {
2959 					rte_flow_error_set(error, EINVAL,
2960 						   RTE_FLOW_ERROR_TYPE_ITEM,
2961 						   item,
2962 						   "Invalid UDP mask");
2963 					return -rte_errno;
2964 				}
2965 
2966 				if (sctp_mask->hdr.src_port == UINT16_MAX)
2967 					input_set |= I40E_INSET_SRC_PORT;
2968 				if (sctp_mask->hdr.dst_port == UINT16_MAX)
2969 					input_set |= I40E_INSET_DST_PORT;
2970 				if (sctp_mask->hdr.tag == UINT32_MAX)
2971 					input_set |= I40E_INSET_SCTP_VT;
2972 
2973 				/* Get filter info */
2974 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2975 					filter->input.flow.sctp4_flow.src_port =
2976 						sctp_spec->hdr.src_port;
2977 					filter->input.flow.sctp4_flow.dst_port =
2978 						sctp_spec->hdr.dst_port;
2979 					filter->input.flow.sctp4_flow.verify_tag
2980 						= sctp_spec->hdr.tag;
2981 				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2982 					filter->input.flow.sctp6_flow.src_port =
2983 						sctp_spec->hdr.src_port;
2984 					filter->input.flow.sctp6_flow.dst_port =
2985 						sctp_spec->hdr.dst_port;
2986 					filter->input.flow.sctp6_flow.verify_tag
2987 						= sctp_spec->hdr.tag;
2988 				}
2989 			}
2990 
2991 			layer_idx = I40E_FLXPLD_L4_IDX;
2992 
2993 			break;
2994 		case RTE_FLOW_ITEM_TYPE_RAW:
2995 			raw_spec = item->spec;
2996 			raw_mask = item->mask;
2997 
2998 			if (!raw_spec || !raw_mask) {
2999 				rte_flow_error_set(error, EINVAL,
3000 						   RTE_FLOW_ERROR_TYPE_ITEM,
3001 						   item,
3002 						   "NULL RAW spec/mask");
3003 				return -rte_errno;
3004 			}
3005 
3006 			if (pf->support_multi_driver) {
3007 				rte_flow_error_set(error, ENOTSUP,
3008 						   RTE_FLOW_ERROR_TYPE_ITEM,
3009 						   item,
3010 						   "Unsupported flexible payload.");
3011 				return -rte_errno;
3012 			}
3013 
3014 			ret = i40e_flow_check_raw_item(item, raw_spec, error);
3015 			if (ret < 0)
3016 				return ret;
3017 
3018 			off_arr[raw_id] = raw_spec->offset;
3019 			len_arr[raw_id] = raw_spec->length;
3020 
3021 			flex_size = 0;
3022 			memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
3023 			field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
3024 			flex_pit.size =
3025 				raw_spec->length / sizeof(uint16_t);
3026 			flex_pit.dst_offset =
3027 				next_dst_off / sizeof(uint16_t);
3028 
3029 			for (i = 0; i <= raw_id; i++) {
3030 				if (i == raw_id)
3031 					flex_pit.src_offset +=
3032 						raw_spec->offset /
3033 						sizeof(uint16_t);
3034 				else
3035 					flex_pit.src_offset +=
3036 						(off_arr[i] + len_arr[i]) /
3037 						sizeof(uint16_t);
3038 				flex_size += len_arr[i];
3039 			}
3040 			if (((flex_pit.src_offset + flex_pit.size) >=
3041 			     I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
3042 				flex_size > I40E_FDIR_MAX_FLEXLEN) {
3043 				rte_flow_error_set(error, EINVAL,
3044 					   RTE_FLOW_ERROR_TYPE_ITEM,
3045 					   item,
3046 					   "Exceeds maximal payload limit.");
3047 				return -rte_errno;
3048 			}
3049 
3050 			for (i = 0; i < raw_spec->length; i++) {
3051 				j = i + next_dst_off;
3052 				if (j >= RTE_ETH_FDIR_MAX_FLEXLEN ||
3053 						j >= I40E_FDIR_MAX_FLEX_LEN)
3054 					break;
3055 				filter->input.flow_ext.flexbytes[j] =
3056 					raw_spec->pattern[i];
3057 				filter->input.flow_ext.flex_mask[j] =
3058 					raw_mask->pattern[i];
3059 			}
3060 
3061 			next_dst_off += raw_spec->length;
3062 			raw_id++;
3063 
3064 			memcpy(&filter->input.flow_ext.flex_pit[field_idx],
3065 			       &flex_pit, sizeof(struct i40e_fdir_flex_pit));
3066 			filter->input.flow_ext.layer_idx = layer_idx;
3067 			filter->input.flow_ext.raw_id = raw_id;
3068 			filter->input.flow_ext.is_flex_flow = true;
3069 			break;
3070 		case RTE_FLOW_ITEM_TYPE_VF:
3071 			vf_spec = item->spec;
3072 			if (!attr->transfer) {
3073 				rte_flow_error_set(error, ENOTSUP,
3074 						   RTE_FLOW_ERROR_TYPE_ITEM,
3075 						   item,
3076 						   "Matching VF traffic"
3077 						   " without affecting it"
3078 						   " (transfer attribute)"
3079 						   " is unsupported");
3080 				return -rte_errno;
3081 			}
3082 			filter->input.flow_ext.is_vf = 1;
3083 			filter->input.flow_ext.dst_id = vf_spec->id;
3084 			if (filter->input.flow_ext.is_vf &&
3085 			    filter->input.flow_ext.dst_id >= pf->vf_num) {
3086 				rte_flow_error_set(error, EINVAL,
3087 						   RTE_FLOW_ERROR_TYPE_ITEM,
3088 						   item,
3089 						   "Invalid VF ID for FDIR.");
3090 				return -rte_errno;
3091 			}
3092 			break;
3093 		case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
3094 			l2tpv3oip_spec = item->spec;
3095 			l2tpv3oip_mask = item->mask;
3096 
3097 			if (!l2tpv3oip_spec || !l2tpv3oip_mask)
3098 				break;
3099 
3100 			if (l2tpv3oip_mask->session_id != UINT32_MAX) {
3101 				rte_flow_error_set(error, EINVAL,
3102 					RTE_FLOW_ERROR_TYPE_ITEM,
3103 					item,
3104 					"Invalid L2TPv3 mask");
3105 				return -rte_errno;
3106 			}
3107 
3108 			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3109 				filter->input.flow.ip4_l2tpv3oip_flow.session_id =
3110 					l2tpv3oip_spec->session_id;
3111 				filter->input.flow_ext.oip_type =
3112 					I40E_FDIR_IPTYPE_IPV4;
3113 			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3114 				filter->input.flow.ip6_l2tpv3oip_flow.session_id =
3115 					l2tpv3oip_spec->session_id;
3116 				filter->input.flow_ext.oip_type =
3117 					I40E_FDIR_IPTYPE_IPV6;
3118 			}
3119 
3120 			filter->input.flow_ext.customized_pctype = true;
3121 			cus_proto = item_type;
3122 			break;
3123 		default:
3124 			break;
3125 		}
3126 	}
3127 
3128 	/* Get customized pctype value */
3129 	if (filter->input.flow_ext.customized_pctype) {
3130 		pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter);
3131 		if (pctype == I40E_FILTER_PCTYPE_INVALID) {
3132 			rte_flow_error_set(error, EINVAL,
3133 					   RTE_FLOW_ERROR_TYPE_ITEM,
3134 					   item,
3135 					   "Unsupported pctype");
3136 			return -rte_errno;
3137 		}
3138 	}
3139 
3140 	/* If customized pctype is not used, set fdir configuration.*/
3141 	if (!filter->input.flow_ext.customized_pctype) {
3142 		/* Check if the input set is valid */
3143 		if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
3144 						input_set) != 0) {
3145 			PMD_DRV_LOG(ERR, "Invalid input set");
3146 			return -EINVAL;
3147 		}
3148 
3149 		filter->input.flow_ext.input_set = input_set;
3150 	}
3151 
3152 	filter->input.pctype = pctype;
3153 
3154 	return 0;
3155 }
3156 
/* Parse to get the action info of a FDIR filter.
 * The first action must be QUEUE, DROP, PASSTHRU or MARK,
 * optionally followed by a MARK, FLAG or RSS action.
 */
3160 static int
3161 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
3162 			    const struct rte_flow_action *actions,
3163 			    struct rte_flow_error *error,
3164 			    struct i40e_fdir_filter_conf *filter)
3165 {
3166 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3167 	const struct rte_flow_action *act;
3168 	const struct rte_flow_action_queue *act_q;
3169 	const struct rte_flow_action_mark *mark_spec = NULL;
3170 	uint32_t index = 0;
3171 
3172 	/* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
3173 	NEXT_ITEM_OF_ACTION(act, actions, index);
3174 	switch (act->type) {
3175 	case RTE_FLOW_ACTION_TYPE_QUEUE:
3176 		act_q = act->conf;
3177 		filter->action.rx_queue = act_q->index;
3178 		if ((!filter->input.flow_ext.is_vf &&
3179 		     filter->action.rx_queue >= pf->dev_data->nb_rx_queues) ||
3180 		    (filter->input.flow_ext.is_vf &&
3181 		     filter->action.rx_queue >= pf->vf_nb_qps)) {
3182 			rte_flow_error_set(error, EINVAL,
3183 					   RTE_FLOW_ERROR_TYPE_ACTION, act,
3184 					   "Invalid queue ID for FDIR.");
3185 			return -rte_errno;
3186 		}
3187 		filter->action.behavior = I40E_FDIR_ACCEPT;
3188 		break;
3189 	case RTE_FLOW_ACTION_TYPE_DROP:
3190 		filter->action.behavior = I40E_FDIR_REJECT;
3191 		break;
3192 	case RTE_FLOW_ACTION_TYPE_PASSTHRU:
3193 		filter->action.behavior = I40E_FDIR_PASSTHRU;
3194 		break;
3195 	case RTE_FLOW_ACTION_TYPE_MARK:
3196 		filter->action.behavior = I40E_FDIR_PASSTHRU;
3197 		mark_spec = act->conf;
3198 		filter->action.report_status = I40E_FDIR_REPORT_ID;
3199 		filter->soft_id = mark_spec->id;
3200 	break;
3201 	default:
3202 		rte_flow_error_set(error, EINVAL,
3203 				   RTE_FLOW_ERROR_TYPE_ACTION, act,
3204 				   "Invalid action.");
3205 		return -rte_errno;
3206 	}
3207 
3208 	/* Check if the next non-void item is MARK or FLAG or END. */
3209 	index++;
3210 	NEXT_ITEM_OF_ACTION(act, actions, index);
3211 	switch (act->type) {
3212 	case RTE_FLOW_ACTION_TYPE_MARK:
3213 		if (mark_spec) {
3214 			/* Double MARK actions requested */
3215 			rte_flow_error_set(error, EINVAL,
3216 			   RTE_FLOW_ERROR_TYPE_ACTION, act,
3217 			   "Invalid action.");
3218 			return -rte_errno;
3219 		}
3220 		mark_spec = act->conf;
3221 		filter->action.report_status = I40E_FDIR_REPORT_ID;
3222 		filter->soft_id = mark_spec->id;
3223 		break;
3224 	case RTE_FLOW_ACTION_TYPE_FLAG:
3225 		if (mark_spec) {
3226 			/* MARK + FLAG not supported */
3227 			rte_flow_error_set(error, EINVAL,
3228 					   RTE_FLOW_ERROR_TYPE_ACTION, act,
3229 					   "Invalid action.");
3230 			return -rte_errno;
3231 		}
3232 		filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
3233 		break;
3234 	case RTE_FLOW_ACTION_TYPE_RSS:
3235 		if (filter->action.behavior != I40E_FDIR_PASSTHRU) {
3236 			/* RSS filter won't be next if FDIR did not pass thru */
3237 			rte_flow_error_set(error, EINVAL,
3238 					   RTE_FLOW_ERROR_TYPE_ACTION, act,
3239 					   "Invalid action.");
3240 			return -rte_errno;
3241 		}
3242 		break;
3243 	case RTE_FLOW_ACTION_TYPE_END:
3244 		return 0;
3245 	default:
3246 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3247 				   act, "Invalid action.");
3248 		return -rte_errno;
3249 	}
3250 
3251 	/* Check if the next non-void item is END */
3252 	index++;
3253 	NEXT_ITEM_OF_ACTION(act, actions, index);
3254 	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3255 		rte_flow_error_set(error, EINVAL,
3256 				   RTE_FLOW_ERROR_TYPE_ACTION,
3257 				   act, "Invalid action.");
3258 		return -rte_errno;
3259 	}
3260 
3261 	return 0;
3262 }
3263 
3264 static int
3265 i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
3266 			    const struct rte_flow_attr *attr,
3267 			    const struct rte_flow_item pattern[],
3268 			    const struct rte_flow_action actions[],
3269 			    struct rte_flow_error *error,
3270 			    union i40e_filter_t *filter)
3271 {
3272 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3273 	struct i40e_fdir_filter_conf *fdir_filter =
3274 		&filter->fdir_filter;
3275 	int ret;
3276 
3277 	ret = i40e_flow_parse_fdir_pattern(dev, attr, pattern, error,
3278 					   fdir_filter);
3279 	if (ret)
3280 		return ret;
3281 
3282 	ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
3283 	if (ret)
3284 		return ret;
3285 
3286 	ret = i40e_flow_parse_attr(attr, error);
3287 	if (ret)
3288 		return ret;
3289 
3290 	cons_filter_type = RTE_ETH_FILTER_FDIR;
3291 
3292 	if (pf->fdir.fdir_vsi == NULL) {
3293 		/* Enable fdir when fdir flow is added at first time. */
3294 		ret = i40e_fdir_setup(pf);
3295 		if (ret != I40E_SUCCESS) {
3296 			rte_flow_error_set(error, ENOTSUP,
3297 					   RTE_FLOW_ERROR_TYPE_HANDLE,
3298 					   NULL, "Failed to setup fdir.");
3299 			return -rte_errno;
3300 		}
3301 		ret = i40e_fdir_configure(dev);
3302 		if (ret < 0) {
3303 			rte_flow_error_set(error, ENOTSUP,
3304 					   RTE_FLOW_ERROR_TYPE_HANDLE,
3305 					   NULL, "Failed to configure fdir.");
3306 			goto err;
3307 		}
3308 	}
3309 
3310 	/* If create the first fdir rule, enable fdir check for rx queues */
3311 	if (TAILQ_EMPTY(&pf->fdir.fdir_list))
3312 		i40e_fdir_rx_proc_enable(dev, 1);
3313 
3314 	return 0;
3315 err:
3316 	i40e_fdir_teardown(pf);
3317 	return -rte_errno;
3318 }
3319 
3320 /* Parse to get the action info of a tunnel filter
3321  * Tunnel action only supports PF, VF and QUEUE.
3322  */
3323 static int
3324 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
3325 			      const struct rte_flow_action *actions,
3326 			      struct rte_flow_error *error,
3327 			      struct i40e_tunnel_filter_conf *filter)
3328 {
3329 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3330 	const struct rte_flow_action *act;
3331 	const struct rte_flow_action_queue *act_q;
3332 	const struct rte_flow_action_vf *act_vf;
3333 	uint32_t index = 0;
3334 
3335 	/* Check if the first non-void action is PF or VF. */
3336 	NEXT_ITEM_OF_ACTION(act, actions, index);
3337 	if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
3338 	    act->type != RTE_FLOW_ACTION_TYPE_VF) {
3339 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3340 				   act, "Not supported action.");
3341 		return -rte_errno;
3342 	}
3343 
3344 	if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
3345 		act_vf = act->conf;
3346 		filter->vf_id = act_vf->id;
3347 		filter->is_to_vf = 1;
3348 		if (filter->vf_id >= pf->vf_num) {
3349 			rte_flow_error_set(error, EINVAL,
3350 				   RTE_FLOW_ERROR_TYPE_ACTION,
3351 				   act, "Invalid VF ID for tunnel filter");
3352 			return -rte_errno;
3353 		}
3354 	}
3355 
3356 	/* Check if the next non-void item is QUEUE */
3357 	index++;
3358 	NEXT_ITEM_OF_ACTION(act, actions, index);
3359 	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
3360 		act_q = act->conf;
3361 		filter->queue_id = act_q->index;
3362 		if ((!filter->is_to_vf) &&
3363 		    (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
3364 			rte_flow_error_set(error, EINVAL,
3365 				   RTE_FLOW_ERROR_TYPE_ACTION,
3366 				   act, "Invalid queue ID for tunnel filter");
3367 			return -rte_errno;
3368 		} else if (filter->is_to_vf &&
3369 			   (filter->queue_id >= pf->vf_nb_qps)) {
3370 			rte_flow_error_set(error, EINVAL,
3371 				   RTE_FLOW_ERROR_TYPE_ACTION,
3372 				   act, "Invalid queue ID for tunnel filter");
3373 			return -rte_errno;
3374 		}
3375 	}
3376 
3377 	/* Check if the next non-void item is END */
3378 	index++;
3379 	NEXT_ITEM_OF_ACTION(act, actions, index);
3380 	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3381 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3382 				   act, "Not supported action.");
3383 		return -rte_errno;
3384 	}
3385 
3386 	return 0;
3387 }
3388 
/* 1. The "last" field of an item should be NULL as range is not supported.
 * 2. Supported filter types: Source port only and Destination port only.
 * 3. Mask of fields which need to be matched should be
 *    filled with 1.
 * 4. Mask of fields which need not be matched should be
 *    filled with 0.
 */
3396 static int
3397 i40e_flow_parse_l4_pattern(const struct rte_flow_item *pattern,
3398 			   struct rte_flow_error *error,
3399 			   struct i40e_tunnel_filter_conf *filter)
3400 {
3401 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
3402 	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
3403 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
3404 	const struct rte_flow_item *item = pattern;
3405 	enum rte_flow_item_type item_type;
3406 
3407 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3408 		if (item->last) {
3409 			rte_flow_error_set(error, EINVAL,
3410 					   RTE_FLOW_ERROR_TYPE_ITEM,
3411 					   item,
3412 					   "Not support range");
3413 			return -rte_errno;
3414 		}
3415 		item_type = item->type;
3416 		switch (item_type) {
3417 		case RTE_FLOW_ITEM_TYPE_ETH:
3418 			if (item->spec || item->mask) {
3419 				rte_flow_error_set(error, EINVAL,
3420 						   RTE_FLOW_ERROR_TYPE_ITEM,
3421 						   item,
3422 						   "Invalid ETH item");
3423 				return -rte_errno;
3424 			}
3425 
3426 			break;
3427 		case RTE_FLOW_ITEM_TYPE_IPV4:
3428 			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3429 			/* IPv4 is used to describe protocol,
3430 			 * spec and mask should be NULL.
3431 			 */
3432 			if (item->spec || item->mask) {
3433 				rte_flow_error_set(error, EINVAL,
3434 						   RTE_FLOW_ERROR_TYPE_ITEM,
3435 						   item,
3436 						   "Invalid IPv4 item");
3437 				return -rte_errno;
3438 			}
3439 
3440 			break;
3441 		case RTE_FLOW_ITEM_TYPE_IPV6:
3442 			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3443 			/* IPv6 is used to describe protocol,
3444 			 * spec and mask should be NULL.
3445 			 */
3446 			if (item->spec || item->mask) {
3447 				rte_flow_error_set(error, EINVAL,
3448 						   RTE_FLOW_ERROR_TYPE_ITEM,
3449 						   item,
3450 						   "Invalid IPv6 item");
3451 				return -rte_errno;
3452 			}
3453 
3454 			break;
3455 		case RTE_FLOW_ITEM_TYPE_UDP:
3456 			udp_spec = item->spec;
3457 			udp_mask = item->mask;
3458 
3459 			if (!udp_spec || !udp_mask) {
3460 				rte_flow_error_set(error, EINVAL,
3461 						   RTE_FLOW_ERROR_TYPE_ITEM,
3462 						   item,
3463 						   "Invalid udp item");
3464 				return -rte_errno;
3465 			}
3466 
3467 			if (udp_spec->hdr.src_port != 0 &&
3468 			    udp_spec->hdr.dst_port != 0) {
3469 				rte_flow_error_set(error, EINVAL,
3470 						   RTE_FLOW_ERROR_TYPE_ITEM,
3471 						   item,
3472 						   "Invalid udp spec");
3473 				return -rte_errno;
3474 			}
3475 
3476 			if (udp_spec->hdr.src_port != 0) {
3477 				filter->l4_port_type =
3478 					I40E_L4_PORT_TYPE_SRC;
3479 				filter->tenant_id =
3480 				rte_be_to_cpu_32(udp_spec->hdr.src_port);
3481 			}
3482 
3483 			if (udp_spec->hdr.dst_port != 0) {
3484 				filter->l4_port_type =
3485 					I40E_L4_PORT_TYPE_DST;
3486 				filter->tenant_id =
3487 				rte_be_to_cpu_32(udp_spec->hdr.dst_port);
3488 			}
3489 
3490 			filter->tunnel_type = I40E_CLOUD_TYPE_UDP;
3491 
3492 			break;
3493 		case RTE_FLOW_ITEM_TYPE_TCP:
3494 			tcp_spec = item->spec;
3495 			tcp_mask = item->mask;
3496 
3497 			if (!tcp_spec || !tcp_mask) {
3498 				rte_flow_error_set(error, EINVAL,
3499 						   RTE_FLOW_ERROR_TYPE_ITEM,
3500 						   item,
3501 						   "Invalid tcp item");
3502 				return -rte_errno;
3503 			}
3504 
3505 			if (tcp_spec->hdr.src_port != 0 &&
3506 			    tcp_spec->hdr.dst_port != 0) {
3507 				rte_flow_error_set(error, EINVAL,
3508 						   RTE_FLOW_ERROR_TYPE_ITEM,
3509 						   item,
3510 						   "Invalid tcp spec");
3511 				return -rte_errno;
3512 			}
3513 
3514 			if (tcp_spec->hdr.src_port != 0) {
3515 				filter->l4_port_type =
3516 					I40E_L4_PORT_TYPE_SRC;
3517 				filter->tenant_id =
3518 				rte_be_to_cpu_32(tcp_spec->hdr.src_port);
3519 			}
3520 
3521 			if (tcp_spec->hdr.dst_port != 0) {
3522 				filter->l4_port_type =
3523 					I40E_L4_PORT_TYPE_DST;
3524 				filter->tenant_id =
3525 				rte_be_to_cpu_32(tcp_spec->hdr.dst_port);
3526 			}
3527 
3528 			filter->tunnel_type = I40E_CLOUD_TYPE_TCP;
3529 
3530 			break;
3531 		case RTE_FLOW_ITEM_TYPE_SCTP:
3532 			sctp_spec = item->spec;
3533 			sctp_mask = item->mask;
3534 
3535 			if (!sctp_spec || !sctp_mask) {
3536 				rte_flow_error_set(error, EINVAL,
3537 						   RTE_FLOW_ERROR_TYPE_ITEM,
3538 						   item,
3539 						   "Invalid sctp item");
3540 				return -rte_errno;
3541 			}
3542 
3543 			if (sctp_spec->hdr.src_port != 0 &&
3544 			    sctp_spec->hdr.dst_port != 0) {
3545 				rte_flow_error_set(error, EINVAL,
3546 						   RTE_FLOW_ERROR_TYPE_ITEM,
3547 						   item,
3548 						   "Invalid sctp spec");
3549 				return -rte_errno;
3550 			}
3551 
3552 			if (sctp_spec->hdr.src_port != 0) {
3553 				filter->l4_port_type =
3554 					I40E_L4_PORT_TYPE_SRC;
3555 				filter->tenant_id =
3556 					rte_be_to_cpu_32(sctp_spec->hdr.src_port);
3557 			}
3558 
3559 			if (sctp_spec->hdr.dst_port != 0) {
3560 				filter->l4_port_type =
3561 					I40E_L4_PORT_TYPE_DST;
3562 				filter->tenant_id =
3563 					rte_be_to_cpu_32(sctp_spec->hdr.dst_port);
3564 			}
3565 
3566 			filter->tunnel_type = I40E_CLOUD_TYPE_SCTP;
3567 
3568 			break;
3569 		default:
3570 			break;
3571 		}
3572 	}
3573 
3574 	return 0;
3575 }
3576 
3577 static int
3578 i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
3579 				const struct rte_flow_attr *attr,
3580 				const struct rte_flow_item pattern[],
3581 				const struct rte_flow_action actions[],
3582 				struct rte_flow_error *error,
3583 				union i40e_filter_t *filter)
3584 {
3585 	struct i40e_tunnel_filter_conf *tunnel_filter =
3586 		&filter->consistent_tunnel_filter;
3587 	int ret;
3588 
3589 	ret = i40e_flow_parse_l4_pattern(pattern, error, tunnel_filter);
3590 	if (ret)
3591 		return ret;
3592 
3593 	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3594 	if (ret)
3595 		return ret;
3596 
3597 	ret = i40e_flow_parse_attr(attr, error);
3598 	if (ret)
3599 		return ret;
3600 
3601 	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3602 
3603 	return ret;
3604 }
3605 
/* Filter-type bit combinations (RTE_ETH_TUNNEL_FILTER_* flags OR'ed
 * together) accepted by the tunnel filter parsers; consulted by
 * i40e_check_tunnel_filter_type().
 */
static uint16_t i40e_supported_tunnel_filter_types[] = {
	RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_TENID |
	RTE_ETH_TUNNEL_FILTER_IVLAN,
	RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_IVLAN,
	RTE_ETH_TUNNEL_FILTER_IMAC | RTE_ETH_TUNNEL_FILTER_TENID,
	RTE_ETH_TUNNEL_FILTER_OMAC | RTE_ETH_TUNNEL_FILTER_TENID |
	RTE_ETH_TUNNEL_FILTER_IMAC,
	RTE_ETH_TUNNEL_FILTER_IMAC,
};
3615 
3616 static int
3617 i40e_check_tunnel_filter_type(uint8_t filter_type)
3618 {
3619 	uint8_t i;
3620 
3621 	for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
3622 		if (filter_type == i40e_supported_tunnel_filter_types[i])
3623 			return 0;
3624 	}
3625 
3626 	return -1;
3627 }
3628 
/* 1. The "last" field of an item should be NULL as range is not supported.
 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
 *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
 * 3. Mask of fields which need to be matched should be
 *    filled with 1.
 * 4. Mask of fields which need not be matched should be
 *    filled with 0.
 */
static int
i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
			      const struct rte_flow_item *pattern,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	/* Accumulates RTE_ETH_TUNNEL_FILTER_* bits as items are parsed;
	 * validated against the supported combinations at the end.
	 */
	uint8_t filter_type = 0;
	bool is_vni_masked = 0;
	/* A VNI mask of all-ones means "match the full 24-bit VNI". */
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	enum rte_flow_item_type item_type;
	/* Set once the VXLAN item has been seen: an ETH item before it
	 * describes the outer MAC, after it the inner MAC.
	 */
	bool vxlan_flag = 0;
	uint32_t tenant_id_be = 0;
	int ret;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Ranged matching ("last") is not supported. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			/* Check if ETH item is used for place holder.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!eth_spec && eth_mask) ||
			    (eth_spec && !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
				return -rte_errno;
			}

			if (eth_spec && eth_mask) {
				/* DST address of inner MAC shouldn't be masked.
				 * SRC address of Inner MAC should be masked.
				 */
				if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
				    !rte_is_zero_ether_addr(&eth_mask->src) ||
				    eth_mask->type) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
					return -rte_errno;
				}

				/* Before the VXLAN item this ETH is the outer
				 * header, afterwards it is the inner one.
				 */
				if (!vxlan_flag) {
					rte_memcpy(&filter->outer_mac,
						   &eth_spec->dst,
						   RTE_ETHER_ADDR_LEN);
					filter_type |= RTE_ETH_TUNNEL_FILTER_OMAC;
				} else {
					rte_memcpy(&filter->inner_mac,
						   &eth_spec->dst,
						   RTE_ETHER_ADDR_LEN);
					filter_type |= RTE_ETH_TUNNEL_FILTER_IMAC;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			if (!(vlan_spec && vlan_mask) ||
			    vlan_mask->inner_type) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vlan item");
				return -rte_errno;
			}

			if (vlan_spec && vlan_mask) {
				/* Only a fully-masked TCI is stored; a partial
				 * mask leaves inner_vlan untouched but still
				 * marks the IVLAN filter-type bit.
				 */
				if (vlan_mask->tci ==
				    rte_cpu_to_be_16(I40E_VLAN_TCI_MASK))
					filter->inner_vlan =
					      rte_be_to_cpu_16(vlan_spec->tci) &
					      I40E_VLAN_TCI_MASK;
				filter_type |= RTE_ETH_TUNNEL_FILTER_IVLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
			/* IPv6 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* UDP is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid UDP item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
				/* Only an all-ones (full) VNI mask is valid. */
				is_vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_DIM(vni_mask));
				if (is_vni_masked) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VNI mask");
					return -rte_errno;
				}

				/* Copy the 3-byte VNI into bytes 1..3 of the
				 * big-endian 32-bit value, then convert to
				 * CPU order to get the numeric tenant id.
				 */
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
				filter->tenant_id =
					rte_be_to_cpu_32(tenant_id_be);
				filter_type |= RTE_ETH_TUNNEL_FILTER_TENID;
			}

			vxlan_flag = 1;
			break;
		default:
			break;
		}
	}

	/* The accumulated bits must form a supported combination. */
	ret = i40e_check_tunnel_filter_type(filter_type);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL,
				   "Invalid filter type");
		return -rte_errno;
	}
	filter->filter_type = filter_type;

	filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;

	return 0;
}
3828 
3829 static int
3830 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
3831 			     const struct rte_flow_attr *attr,
3832 			     const struct rte_flow_item pattern[],
3833 			     const struct rte_flow_action actions[],
3834 			     struct rte_flow_error *error,
3835 			     union i40e_filter_t *filter)
3836 {
3837 	struct i40e_tunnel_filter_conf *tunnel_filter =
3838 		&filter->consistent_tunnel_filter;
3839 	int ret;
3840 
3841 	ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
3842 					    error, tunnel_filter);
3843 	if (ret)
3844 		return ret;
3845 
3846 	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3847 	if (ret)
3848 		return ret;
3849 
3850 	ret = i40e_flow_parse_attr(attr, error);
3851 	if (ret)
3852 		return ret;
3853 
3854 	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3855 
3856 	return ret;
3857 }
3858 
3859 /* 1. Last in item should be NULL as range is not supported.
3860  * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
3861  *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
3862  * 3. Mask of fields which need to be matched should be
3863  *    filled with 1.
 * 4. Mask of fields which need not be matched should be
3865  *    filled with 0.
3866  */
static int
i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
			      const struct rte_flow_item *pattern,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	enum rte_flow_item_type item_type;
	uint8_t filter_type = 0;	/* OR-ed RTE_ETH_TUNNEL_FILTER_* bits */
	bool is_tni_masked = 0;
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};	/* full 24-bit TNI mask */
	bool nvgre_flag = 0;	/* set once the NVGRE item has been seen */
	uint32_t tenant_id_be = 0;	/* TNI assembled in big-endian order */
	int ret;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Ranged matching (item->last) is not supported. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			/* Check if ETH item is used for place holder.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!eth_spec && eth_mask) ||
			    (eth_spec && !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
				return -rte_errno;
			}

			if (eth_spec && eth_mask) {
				/* DST address of inner MAC shouldn't be masked.
				 * SRC address of Inner MAC should be masked.
				 */
				if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
				    !rte_is_zero_ether_addr(&eth_mask->src) ||
				    eth_mask->type) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
					return -rte_errno;
				}

				/* An ETH item before the NVGRE item matches
				 * the outer MAC; after it, the inner MAC.
				 */
				if (!nvgre_flag) {
					rte_memcpy(&filter->outer_mac,
						   &eth_spec->dst,
						   RTE_ETHER_ADDR_LEN);
					filter_type |= RTE_ETH_TUNNEL_FILTER_OMAC;
				} else {
					rte_memcpy(&filter->inner_mac,
						   &eth_spec->dst,
						   RTE_ETHER_ADDR_LEN);
					filter_type |= RTE_ETH_TUNNEL_FILTER_IMAC;
				}
			}

			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			/* VLAN spec and mask must both be present and must not
			 * match on the inner ethertype.
			 */
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			if (!(vlan_spec && vlan_mask) ||
			    vlan_mask->inner_type) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vlan item");
				return -rte_errno;
			}

			if (vlan_spec && vlan_mask) {
				/* Only a fully-masked TCI is stored. */
				if (vlan_mask->tci ==
				    rte_cpu_to_be_16(I40E_VLAN_TCI_MASK))
					filter->inner_vlan =
					      rte_be_to_cpu_16(vlan_spec->tci) &
					      I40E_VLAN_TCI_MASK;
				filter_type |= RTE_ETH_TUNNEL_FILTER_IVLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
			/* IPv6 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (nvgre_spec && nvgre_mask) {
				/* The 24-bit TNI must be fully masked. */
				is_tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_DIM(tni_mask));
				if (is_tni_masked) {
					rte_flow_error_set(error, EINVAL,
						       RTE_FLOW_ERROR_TYPE_ITEM,
						       item,
						       "Invalid TNI mask");
					return -rte_errno;
				}
				/* If matched at all, the protocol field must
				 * be matched exactly (mask 0xFFFF).
				 */
				if (nvgre_mask->protocol &&
					nvgre_mask->protocol != 0xFFFF) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid NVGRE item");
					return -rte_errno;
				}
				/* Same rule for the C/K/S/version word. */
				if (nvgre_mask->c_k_s_rsvd0_ver &&
					nvgre_mask->c_k_s_rsvd0_ver !=
					rte_cpu_to_be_16(0xFFFF)) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
					return -rte_errno;
				}
				/* When matched, the flags word must be 0x2000,
				 * i.e. only the K (key present) bit set.
				 */
				if (nvgre_spec->c_k_s_rsvd0_ver !=
					rte_cpu_to_be_16(0x2000) &&
					nvgre_mask->c_k_s_rsvd0_ver) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
					return -rte_errno;
				}
				/* When matched, the protocol must be 0x6558
				 * (Transparent Ethernet Bridging).
				 */
				if (nvgre_mask->protocol &&
					nvgre_spec->protocol !=
					rte_cpu_to_be_16(0x6558)) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
					return -rte_errno;
				}
				/* Place the 3 TNI bytes in the low 3 bytes of
				 * the big-endian word, then convert to CPU
				 * order for the filter.
				 */
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   nvgre_spec->tni, 3);
				filter->tenant_id =
					rte_be_to_cpu_32(tenant_id_be);
				filter_type |= RTE_ETH_TUNNEL_FILTER_TENID;
			}

			nvgre_flag = 1;
			break;
		default:
			break;
		}
	}

	/* Reject unsupported combinations of matched fields. */
	ret = i40e_check_tunnel_filter_type(filter_type);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL,
				   "Invalid filter type");
		return -rte_errno;
	}
	filter->filter_type = filter_type;

	filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;

	return 0;
}
4080 
4081 static int
4082 i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
4083 			     const struct rte_flow_attr *attr,
4084 			     const struct rte_flow_item pattern[],
4085 			     const struct rte_flow_action actions[],
4086 			     struct rte_flow_error *error,
4087 			     union i40e_filter_t *filter)
4088 {
4089 	struct i40e_tunnel_filter_conf *tunnel_filter =
4090 		&filter->consistent_tunnel_filter;
4091 	int ret;
4092 
4093 	ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
4094 					    error, tunnel_filter);
4095 	if (ret)
4096 		return ret;
4097 
4098 	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4099 	if (ret)
4100 		return ret;
4101 
4102 	ret = i40e_flow_parse_attr(attr, error);
4103 	if (ret)
4104 		return ret;
4105 
4106 	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4107 
4108 	return ret;
4109 }
4110 
4111 /* 1. Last in item should be NULL as range is not supported.
4112  * 2. Supported filter types: MPLS label.
4113  * 3. Mask of fields which need to be matched should be
4114  *    filled with 1.
 * 4. Mask of fields which need not be matched should be
4116  *    filled with 0.
4117  */
static int
i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
			     const struct rte_flow_item *pattern,
			     struct rte_flow_error *error,
			     struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_mpls *mpls_spec;
	const struct rte_flow_item_mpls *mpls_mask;
	enum rte_flow_item_type item_type;
	bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
	/* 20-bit label: top 20 bits of the 24-bit label_tc_s field. */
	const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
	uint32_t label_be = 0;	/* label assembled in big-endian order */

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Ranged matching (item->last) is not supported. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			/* ETH is a placeholder only; no matching allowed. */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ETH item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
			/* IPv6 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* UDP is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP item");
				return -rte_errno;
			}
			/* A UDP item selects the MPLSoUDP tunnel type. */
			is_mplsoudp = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			/* GRE is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GRE item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			mpls_spec = item->spec;
			mpls_mask = item->mask;

			/* The MPLS item must be matched on. */
			if (!mpls_spec || !mpls_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid MPLS item");
				return -rte_errno;
			}

			/* Exactly the 20 label bits must be masked. */
			if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid MPLS label mask");
				return -rte_errno;
			}
			/* Copy the 3 label/TC/S bytes into the low bytes of a
			 * big-endian word, convert to CPU order, then shift
			 * out the 4 TC/S bits to keep the 20-bit label.
			 */
			rte_memcpy(((uint8_t *)&label_be + 1),
				   mpls_spec->label_tc_s, 3);
			filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
			break;
		default:
			break;
		}
	}

	if (is_mplsoudp)
		filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
	else
		filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;

	return 0;
}
4237 
4238 static int
4239 i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
4240 			    const struct rte_flow_attr *attr,
4241 			    const struct rte_flow_item pattern[],
4242 			    const struct rte_flow_action actions[],
4243 			    struct rte_flow_error *error,
4244 			    union i40e_filter_t *filter)
4245 {
4246 	struct i40e_tunnel_filter_conf *tunnel_filter =
4247 		&filter->consistent_tunnel_filter;
4248 	int ret;
4249 
4250 	ret = i40e_flow_parse_mpls_pattern(dev, pattern,
4251 					   error, tunnel_filter);
4252 	if (ret)
4253 		return ret;
4254 
4255 	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4256 	if (ret)
4257 		return ret;
4258 
4259 	ret = i40e_flow_parse_attr(attr, error);
4260 	if (ret)
4261 		return ret;
4262 
4263 	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4264 
4265 	return ret;
4266 }
4267 
4268 /* 1. Last in item should be NULL as range is not supported.
4269  * 2. Supported filter types: GTP TEID.
4270  * 3. Mask of fields which need to be matched should be
4271  *    filled with 1.
 * 4. Mask of fields which need not be matched should be
4273  *    filled with 0.
4274  * 5. GTP profile supports GTPv1 only.
4275  * 6. GTP-C response message ('source_port' = 2123) is not supported.
4276  */
static int
i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
			    const struct rte_flow_item *pattern,
			    struct rte_flow_error *error,
			    struct i40e_tunnel_filter_conf *filter)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_gtp *gtp_spec;
	const struct rte_flow_item_gtp *gtp_mask;
	enum rte_flow_item_type item_type;

	/* GTP filtering requires a dedicated firmware profile to be
	 * loaded (pf->gtp_support); reject the rule otherwise.
	 */
	if (!pf->gtp_support) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "GTP is not supported by default.");
		return -rte_errno;
	}

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Ranged matching (item->last) is not supported. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			/* ETH is a placeholder only; no matching allowed. */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ETH item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* UDP is a protocol placeholder as well. */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_GTPC:
		case RTE_FLOW_ITEM_TYPE_GTPU:
			gtp_spec = item->spec;
			gtp_mask = item->mask;

			/* The GTP item must be matched on. */
			if (!gtp_spec || !gtp_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GTP item");
				return -rte_errno;
			}

			/* Only the TEID may be matched, and it must be
			 * matched in full (mask == UINT32_MAX).
			 */
			if (gtp_mask->v_pt_rsv_flags ||
			    gtp_mask->msg_type ||
			    gtp_mask->msg_len ||
			    gtp_mask->teid != UINT32_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GTP mask");
				return -rte_errno;
			}

			/* The item type selects GTP-C vs GTP-U tunneling. */
			if (item_type == RTE_FLOW_ITEM_TYPE_GTPC)
				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
			else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU)
				filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;

			/* TEID is carried as the tunnel tenant id (CPU order). */
			filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);

			break;
		default:
			break;
		}
	}

	return 0;
}
4377 
4378 static int
4379 i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
4380 			   const struct rte_flow_attr *attr,
4381 			   const struct rte_flow_item pattern[],
4382 			   const struct rte_flow_action actions[],
4383 			   struct rte_flow_error *error,
4384 			   union i40e_filter_t *filter)
4385 {
4386 	struct i40e_tunnel_filter_conf *tunnel_filter =
4387 		&filter->consistent_tunnel_filter;
4388 	int ret;
4389 
4390 	ret = i40e_flow_parse_gtp_pattern(dev, pattern,
4391 					  error, tunnel_filter);
4392 	if (ret)
4393 		return ret;
4394 
4395 	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4396 	if (ret)
4397 		return ret;
4398 
4399 	ret = i40e_flow_parse_attr(attr, error);
4400 	if (ret)
4401 		return ret;
4402 
4403 	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4404 
4405 	return ret;
4406 }
4407 
4408 /* 1. Last in item should be NULL as range is not supported.
4409  * 2. Supported filter types: QINQ.
4410  * 3. Mask of fields which need to be matched should be
4411  *    filled with 1.
 * 4. Mask of fields which need not be matched should be
4413  *    filled with 0.
4414  */
static int
i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
			      const struct rte_flow_item *pattern,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_vlan *vlan_spec = NULL;
	const struct rte_flow_item_vlan *vlan_mask = NULL;
	const struct rte_flow_item_vlan *i_vlan_spec = NULL;	/* inner VLAN */
	const struct rte_flow_item_vlan *i_vlan_mask = NULL;
	const struct rte_flow_item_vlan *o_vlan_spec = NULL;	/* outer VLAN */
	const struct rte_flow_item_vlan *o_vlan_mask = NULL;

	enum rte_flow_item_type item_type;
	bool vlan_flag = 0;	/* toggles outer/inner as VLAN items appear */

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		/* Ranged matching (item->last) is not supported. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			/* ETH is a placeholder only; no matching allowed. */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ETH item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;

			/* Both spec and mask are required; matching on the
			 * inner ethertype is not allowed.
			 */
			if (!(vlan_spec && vlan_mask) ||
			    vlan_mask->inner_type) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid vlan item");
				return -rte_errno;
			}

			/* First VLAN item is the outer tag, second the inner.
			 * NOTE(review): vlan_flag toggles back to 0 after the
			 * second item, so a third VLAN item would overwrite
			 * the outer tag — pattern tables presumably limit
			 * rules to two VLAN items; verify against callers.
			 */
			if (!vlan_flag) {
				o_vlan_spec = vlan_spec;
				o_vlan_mask = vlan_mask;
				vlan_flag = 1;
			} else {
				i_vlan_spec = vlan_spec;
				i_vlan_mask = vlan_mask;
				vlan_flag = 0;
			}
			break;

		default:
			break;
		}
	}

	/* Get filter specification: both tags must have been supplied. */
	if (o_vlan_mask != NULL &&  i_vlan_mask != NULL) {
		filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci);
		filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci);
	} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   NULL,
					   "Invalid filter type");
			return -rte_errno;
	}

	filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
	return 0;
}
4495 
4496 static int
4497 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
4498 			      const struct rte_flow_attr *attr,
4499 			      const struct rte_flow_item pattern[],
4500 			      const struct rte_flow_action actions[],
4501 			      struct rte_flow_error *error,
4502 			      union i40e_filter_t *filter)
4503 {
4504 	struct i40e_tunnel_filter_conf *tunnel_filter =
4505 		&filter->consistent_tunnel_filter;
4506 	int ret;
4507 
4508 	ret = i40e_flow_parse_qinq_pattern(dev, pattern,
4509 					     error, tunnel_filter);
4510 	if (ret)
4511 		return ret;
4512 
4513 	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4514 	if (ret)
4515 		return ret;
4516 
4517 	ret = i40e_flow_parse_attr(attr, error);
4518 	if (ret)
4519 		return ret;
4520 
4521 	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4522 
4523 	return ret;
4524 }
4525 
/* Validate a flow rule and, as a side effect, leave the parsed filter in
 * the global cons_filter / cons_filter_type for i40e_flow_create() to
 * consume. RSS rules are dispatched to the hash module; all other rules
 * are matched against the supported-pattern table, retrying successive
 * parser candidates until one accepts the rule.
 */
static int
i40e_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct rte_flow_item *items; /* internal pattern w/o VOID items */
	parse_filter_t parse_filter;
	uint32_t item_num = 0; /* non-void item number of pattern*/
	uint32_t i = 0;
	bool flag = false;	/* set after the first parser candidate ran */
	int ret = I40E_NOT_SUPPORTED;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}
	memset(&cons_filter, 0, sizeof(cons_filter));

	/* Get the non-void item of action */
	while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
		i++;

	/* An RSS action is handled entirely by the hash module. */
	if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
		ret = i40e_flow_parse_attr(attr, error);
		if (ret)
			return ret;

		cons_filter_type = RTE_ETH_FILTER_HASH;
		return i40e_hash_parse(dev, pattern, actions + i,
				       &cons_filter.rss_conf, error);
	}

	i = 0;
	/* Get the non-void item number of pattern */
	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
			item_num++;
		i++;
	}
	item_num++;	/* one extra slot for the END item */

	/* Reuse the static pattern buffer when it is large enough,
	 * otherwise fall back to a heap allocation.
	 */
	if (item_num <= ARRAY_SIZE(g_items)) {
		items = g_items;
	} else {
		items = rte_zmalloc("i40e_pattern",
				    item_num * sizeof(struct rte_flow_item), 0);
		if (!items) {
			rte_flow_error_set(error, ENOMEM,
					RTE_FLOW_ERROR_TYPE_ITEM_NUM,
					NULL,
					"No memory for PMD internal items.");
			return -ENOMEM;
		}
	}

	/* Compact the pattern: strip VOID items into 'items'. */
	i40e_pattern_skip_void_item(items, pattern);

	i = 0;
	/* Try each matching parser in turn ('i' advances through the
	 * supported-pattern table) until one succeeds or the table is
	 * exhausted. Failing on the very first lookup (flag == false)
	 * means the pattern itself is unsupported.
	 */
	do {
		parse_filter = i40e_find_parse_filter_func(items, &i);
		if (!parse_filter && !flag) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   pattern, "Unsupported pattern");

			if (items != g_items)
				rte_free(items);
			return -rte_errno;
		}

		if (parse_filter)
			ret = parse_filter(dev, attr, items, actions,
					   error, &cons_filter);

		flag = true;
	} while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));

	/* Only free the pattern copy if it was heap-allocated. */
	if (items != g_items)
		rte_free(items);

	return ret;
}
4625 
/* Create a flow rule: validate it (which fills the global cons_filter),
 * allocate a flow handle (from the FDIR entry pool for FDIR rules, from
 * the heap otherwise), program the filter into hardware/software lists,
 * and link the new flow onto the PF's flow list. Returns NULL and sets
 * 'error' on failure.
 */
static struct rte_flow *
i40e_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_flow *flow = NULL;
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	int ret;

	ret = i40e_flow_validate(dev, attr, pattern, actions, error);
	if (ret < 0)
		return NULL;

	/* FDIR flows come from a pre-allocated entry pool; all other
	 * flow handles are heap-allocated.
	 */
	if (cons_filter_type == RTE_ETH_FILTER_FDIR) {
		flow = i40e_fdir_entry_pool_get(fdir_info);
		if (flow == NULL) {
			rte_flow_error_set(error, ENOBUFS,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Fdir space full");

			return flow;
		}
	} else {
		flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
		if (!flow) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Failed to allocate memory");
			return flow;
		}
	}

	/* Program the filter, then record the rule node just appended to
	 * the corresponding list (TAILQ_LAST) as this flow's rule handle.
	 */
	switch (cons_filter_type) {
	case RTE_ETH_FILTER_ETHERTYPE:
		ret = i40e_ethertype_filter_set(pf,
					&cons_filter.ethertype_filter, 1);
		if (ret)
			goto free_flow;
		flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
					i40e_ethertype_filter_list);
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = i40e_flow_add_del_fdir_filter(dev,
			       &cons_filter.fdir_filter, 1);
		if (ret)
			goto free_flow;
		flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
					i40e_fdir_filter_list);
		break;
	case RTE_ETH_FILTER_TUNNEL:
		ret = i40e_dev_consistent_tunnel_filter_set(pf,
			    &cons_filter.consistent_tunnel_filter, 1);
		if (ret)
			goto free_flow;
		flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
					i40e_tunnel_filter_list);
		break;
	case RTE_ETH_FILTER_HASH:
		ret = i40e_hash_filter_create(pf, &cons_filter.rss_conf);
		if (ret)
			goto free_flow;
		flow->rule = TAILQ_LAST(&pf->rss_config_list,
					i40e_rss_conf_list);
		break;
	default:
		goto free_flow;
	}

	flow->filter_type = cons_filter_type;
	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
	return flow;

free_flow:
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");

	/* Return the handle to wherever it came from. */
	if (cons_filter_type != RTE_ETH_FILTER_FDIR)
		rte_free(flow);
	else
		i40e_fdir_entry_pool_put(fdir_info, flow);

	return NULL;
}
4713 
/* Destroy a single flow rule: remove the underlying filter according to
 * its type, then unlink the flow from the PF's flow list and release the
 * handle (back to the FDIR pool for FDIR flows, otherwise to the heap).
 */
static int
i40e_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum rte_filter_type filter_type = flow->filter_type;
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	int ret = 0;

	switch (filter_type) {
	case RTE_ETH_FILTER_ETHERTYPE:
		ret = i40e_flow_destroy_ethertype_filter(pf,
			 (struct i40e_ethertype_filter *)flow->rule);
		break;
	case RTE_ETH_FILTER_TUNNEL:
		ret = i40e_flow_destroy_tunnel_filter(pf,
			      (struct i40e_tunnel_filter *)flow->rule);
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = i40e_flow_add_del_fdir_filter(dev,
				&((struct i40e_fdir_filter *)flow->rule)->fdir,
				0);

		/* If the last flow is destroyed, disable fdir. */
		if (!ret && TAILQ_EMPTY(&pf->fdir.fdir_list)) {
			i40e_fdir_rx_proc_enable(dev, 0);
		}
		break;
	case RTE_ETH_FILTER_HASH:
		ret = i40e_hash_filter_destroy(pf, flow->rule);
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	/* Only release the handle if the filter was actually removed. */
	if (!ret) {
		TAILQ_REMOVE(&pf->flow_list, flow, node);
		if (filter_type == RTE_ETH_FILTER_FDIR)
			i40e_fdir_entry_pool_put(fdir_info, flow);
		else
			rte_free(flow);

	} else
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");

	return ret;
}
4767 
4768 static int
4769 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
4770 				   struct i40e_ethertype_filter *filter)
4771 {
4772 	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4773 	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
4774 	struct i40e_ethertype_filter *node;
4775 	struct i40e_control_filter_stats stats;
4776 	uint16_t flags = 0;
4777 	int ret = 0;
4778 
4779 	if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
4780 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
4781 	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
4782 		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
4783 	flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
4784 
4785 	memset(&stats, 0, sizeof(stats));
4786 	ret = i40e_aq_add_rem_control_packet_filter(hw,
4787 				    filter->input.mac_addr.addr_bytes,
4788 				    filter->input.ether_type,
4789 				    flags, pf->main_vsi->seid,
4790 				    filter->queue, 0, &stats, NULL);
4791 	if (ret < 0)
4792 		return ret;
4793 
4794 	node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
4795 	if (!node)
4796 		return -EINVAL;
4797 
4798 	ret = i40e_sw_ethertype_filter_del(pf, &node->input);
4799 
4800 	return ret;
4801 }
4802 
/* Remove a cloud (tunnel) filter: rebuild the admin-queue element from
 * the stored filter input, pick the target VSI (main VSI or a VF's VSI),
 * choose the regular or big-buffer AQ removal command based on the
 * filter flags, then drop the matching node from the software list.
 */
static int
i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
				struct i40e_tunnel_filter *filter)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi;
	struct i40e_pf_vf *vf;
	struct i40e_aqc_cloud_filters_element_bb cld_filter;
	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
	struct i40e_tunnel_filter *node;
	bool big_buffer = 0;
	int ret = 0;

	/* Reconstruct the AQ element the filter was added with. */
	memset(&cld_filter, 0, sizeof(cld_filter));
	rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.outer_mac,
			(struct rte_ether_addr *)&cld_filter.element.outer_mac);
	rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.inner_mac,
			(struct rte_ether_addr *)&cld_filter.element.inner_mac);
	cld_filter.element.inner_vlan = filter->input.inner_vlan;
	cld_filter.element.flags = filter->input.flags;
	cld_filter.element.tenant_id = filter->input.tenant_id;
	cld_filter.element.queue_number = filter->queue;
	rte_memcpy(cld_filter.general_fields,
		   filter->input.general_fields,
		   sizeof(cld_filter.general_fields));

	/* Filters destined to a VF target that VF's VSI. */
	if (!filter->is_to_vf)
		vsi = pf->main_vsi;
	else {
		vf = &pf->vfs[filter->vf_id];
		vsi = vf->vsi;
	}

	/* Filter types 0x10/0x11/0x12 were added with the big-buffer
	 * command and must be removed the same way.
	 */
	if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
	    I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
	    I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
	    I40E_AQC_ADD_CLOUD_FILTER_0X10))
		big_buffer = 1;

	if (big_buffer)
		ret = i40e_aq_rem_cloud_filters_bb(hw, vsi->seid,
						&cld_filter, 1);
	else
		ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
						&cld_filter.element, 1);
	if (ret < 0)
		return -ENOTSUP;

	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
	if (!node)
		return -EINVAL;

	ret = i40e_sw_tunnel_filter_del(pf, &node->input);

	return ret;
}
4861 
4862 static int
4863 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
4864 {
4865 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4866 	int ret;
4867 
4868 	ret = i40e_flow_flush_fdir_filter(pf);
4869 	if (ret) {
4870 		rte_flow_error_set(error, -ret,
4871 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4872 				   "Failed to flush FDIR flows.");
4873 		return -rte_errno;
4874 	}
4875 
4876 	ret = i40e_flow_flush_ethertype_filter(pf);
4877 	if (ret) {
4878 		rte_flow_error_set(error, -ret,
4879 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4880 				   "Failed to ethertype flush flows.");
4881 		return -rte_errno;
4882 	}
4883 
4884 	ret = i40e_flow_flush_tunnel_filter(pf);
4885 	if (ret) {
4886 		rte_flow_error_set(error, -ret,
4887 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4888 				   "Failed to flush tunnel flows.");
4889 		return -rte_errno;
4890 	}
4891 
4892 	ret = i40e_hash_filter_flush(pf);
4893 	if (ret)
4894 		rte_flow_error_set(error, -ret,
4895 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4896 				   "Failed to flush RSS flows.");
4897 	return ret;
4898 }
4899 
/* Flush all flow-director (FDIR) filters and reset the driver's FDIR
 * bookkeeping to its initial state.
 *
 * i40e_fdir_flush() is attempted first; only if it succeeds (returns 0)
 * is the software state torn down, so software and hardware stay in sync
 * on failure.  Returns the result of the flush, or a negative value if a
 * software-list deletion fails.
 */
static int
i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
{
	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	struct i40e_fdir_filter *fdir_filter;
	enum i40e_filter_pctype pctype;
	struct rte_flow *flow;
	void *temp;
	int ret;
	uint32_t i = 0;

	ret = i40e_fdir_flush(dev);
	if (!ret) {
		/* Delete FDIR filters in FDIR list. */
		while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
			ret = i40e_sw_fdir_filter_del(pf,
						      &fdir_filter->fdir.input);
			if (ret < 0)
				return ret;
		}

		/* Delete FDIR flows in flow list.  The flow objects are only
		 * unlinked, not rte_free'd: FDIR flows come from
		 * fdir_flow_pool, which is re-armed wholesale just below.
		 */
		RTE_TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
			if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
				TAILQ_REMOVE(&pf->flow_list, flow, node);
			}
		}

		/* reset bitmap: mark every pool slot free again and restore
		 * each entry's index.
		 */
		rte_bitmap_reset(fdir_info->fdir_flow_pool.bitmap);
		for (i = 0; i < fdir_info->fdir_space_size; i++) {
			fdir_info->fdir_flow_pool.pool[i].idx = i;
			rte_bitmap_set(fdir_info->fdir_flow_pool.bitmap, i);
		}

		/* Restore capacity accounting and clear the filter array. */
		fdir_info->fdir_actual_cnt = 0;
		fdir_info->fdir_guarantee_free_space =
			fdir_info->fdir_guarantee_total_space;
		memset(fdir_info->fdir_filter_array,
			0,
			sizeof(struct i40e_fdir_filter) *
			I40E_MAX_FDIR_FILTER_NUM);

		/* Zero the per-packet-classifier-type counters and flex
		 * mask flags for every pctype in the FDIR range.
		 */
		for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
		     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
			pf->fdir.flow_count[pctype] = 0;
			pf->fdir.flex_mask_flag[pctype] = 0;
		}

		/* Clear flexible payload configuration flags per layer. */
		for (i = 0; i < I40E_MAX_FLXPLD_LAYER; i++)
			pf->fdir.flex_pit_flag[i] = 0;

		/* Disable FDIR processing as all FDIR rules are now flushed */
		i40e_fdir_rx_proc_enable(dev, 0);
	}

	return ret;
}
4959 
4960 /* Flush all ethertype filters */
4961 static int
4962 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
4963 {
4964 	struct i40e_ethertype_filter_list
4965 		*ethertype_list = &pf->ethertype.ethertype_list;
4966 	struct i40e_ethertype_filter *filter;
4967 	struct rte_flow *flow;
4968 	void *temp;
4969 	int ret = 0;
4970 
4971 	while ((filter = TAILQ_FIRST(ethertype_list))) {
4972 		ret = i40e_flow_destroy_ethertype_filter(pf, filter);
4973 		if (ret)
4974 			return ret;
4975 	}
4976 
4977 	/* Delete ethertype flows in flow list. */
4978 	RTE_TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
4979 		if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
4980 			TAILQ_REMOVE(&pf->flow_list, flow, node);
4981 			rte_free(flow);
4982 		}
4983 	}
4984 
4985 	return ret;
4986 }
4987 
4988 /* Flush all tunnel filters */
4989 static int
4990 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
4991 {
4992 	struct i40e_tunnel_filter_list
4993 		*tunnel_list = &pf->tunnel.tunnel_list;
4994 	struct i40e_tunnel_filter *filter;
4995 	struct rte_flow *flow;
4996 	void *temp;
4997 	int ret = 0;
4998 
4999 	while ((filter = TAILQ_FIRST(tunnel_list))) {
5000 		ret = i40e_flow_destroy_tunnel_filter(pf, filter);
5001 		if (ret)
5002 			return ret;
5003 	}
5004 
5005 	/* Delete tunnel flows in flow list. */
5006 	RTE_TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5007 		if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
5008 			TAILQ_REMOVE(&pf->flow_list, flow, node);
5009 			rte_free(flow);
5010 		}
5011 	}
5012 
5013 	return ret;
5014 }
5015 
5016 static int
5017 i40e_flow_query(struct rte_eth_dev *dev __rte_unused,
5018 		struct rte_flow *flow,
5019 		const struct rte_flow_action *actions,
5020 		void *data, struct rte_flow_error *error)
5021 {
5022 	struct i40e_rss_filter *rss_rule = (struct i40e_rss_filter *)flow->rule;
5023 	enum rte_filter_type filter_type = flow->filter_type;
5024 	struct rte_flow_action_rss *rss_conf = data;
5025 
5026 	if (!rss_rule) {
5027 		rte_flow_error_set(error, EINVAL,
5028 				   RTE_FLOW_ERROR_TYPE_HANDLE,
5029 				   NULL, "Invalid rule");
5030 		return -rte_errno;
5031 	}
5032 
5033 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5034 		switch (actions->type) {
5035 		case RTE_FLOW_ACTION_TYPE_VOID:
5036 			break;
5037 		case RTE_FLOW_ACTION_TYPE_RSS:
5038 			if (filter_type != RTE_ETH_FILTER_HASH) {
5039 				rte_flow_error_set(error, ENOTSUP,
5040 						   RTE_FLOW_ERROR_TYPE_ACTION,
5041 						   actions,
5042 						   "action not supported");
5043 				return -rte_errno;
5044 			}
5045 			rte_memcpy(rss_conf,
5046 				   &rss_rule->rss_filter_info.conf,
5047 				   sizeof(struct rte_flow_action_rss));
5048 			break;
5049 		default:
5050 			return rte_flow_error_set(error, ENOTSUP,
5051 						  RTE_FLOW_ERROR_TYPE_ACTION,
5052 						  actions,
5053 						  "action not supported");
5054 		}
5055 	}
5056 
5057 	return 0;
5058 }
5059