/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "iavf.h"
#include "iavf_generic_flow.h"

static struct iavf_engine_list engine_list =
		TAILQ_HEAD_INITIALIZER(engine_list);

static int iavf_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
static struct rte_flow *iavf_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
static int iavf_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error);
static int iavf_flow_query(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		const struct rte_flow_action *actions,
		void *data,
		struct rte_flow_error *error);

const struct rte_flow_ops iavf_flow_ops = {
	.validate = iavf_flow_validate,
	.create = iavf_flow_create,
	.destroy = iavf_flow_destroy,
	.flush = iavf_flow_flush,
	.query = iavf_flow_query,
};

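/*
 * These callbacks are not called directly by applications; they are reached
 * through the generic rte_flow API once the ethdev layer hands back
 * &iavf_flow_ops from the driver's flow-ops query hook. A minimal
 * application-side sketch (illustrative only; port_id and the queue index
 * are placeholders, not values defined in this file):
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */
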
/* empty */
enum rte_flow_item_type iavf_pattern_empty[] = {
	RTE_FLOW_ITEM_TYPE_END,
};

/* L2 */
enum rte_flow_item_type iavf_pattern_ethertype[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_ethertype_vlan[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_ethertype_qinq[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

/* ARP */
enum rte_flow_item_type iavf_pattern_eth_arp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

/* non-tunnel IPv4 */
enum rte_flow_item_type iavf_pattern_eth_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* non-tunnel IPv6 */
enum rte_flow_item_type iavf_pattern_eth_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_icmp6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_icmp6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_icmp6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP6,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPv4 GTPC */
enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpc[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPC,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPV4 GTPU (EH) */
enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPv6 GTPC */
enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpc[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPC,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPV6 GTPU (EH) */
enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPV4 GTPU IPv4 */
enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPV4 GTPU IPv6 */
enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv6_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPV6 GTPU IPv4 */
enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPV6 GTPU IPv6 */
enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_ipv6_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPV4 GTPU EH IPv4 */
enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPV4 GTPU EH IPv6 */
enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv6_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPV6 GTPU EH IPv4 */
enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* IPV6 GTPU EH IPv6 */
enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_gtpu_eh_ipv6_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* ESP */
enum rte_flow_item_type iavf_pattern_eth_ipv4_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_udp_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* AH */
enum rte_flow_item_type iavf_pattern_eth_ipv4_ah[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_AH,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_ah[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_AH,
	RTE_FLOW_ITEM_TYPE_END,
};

/* L2TPV3 */
enum rte_flow_item_type iavf_pattern_eth_ipv4_l2tpv3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_l2tpv3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* PFCP */
enum rte_flow_item_type iavf_pattern_eth_ipv4_pfcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_PFCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_pfcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_PFCP,
	RTE_FLOW_ITEM_TYPE_END,
};

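/*
 * Each template above lists only the item types a parser can accept; the
 * spec/last/mask values come from the application's pattern at run time.
 * For example, a rule matching UDP destination port 4789 over IPv4 would be
 * expressed against iavf_pattern_eth_ipv4_udp roughly as follows (values are
 * illustrative, not taken from this file):
 *
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr = { .dst_port = RTE_BE16(4789) },
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr = { .dst_port = RTE_BE16(0xffff) },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */
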
typedef struct iavf_flow_engine * (*parse_engine_t)(struct iavf_adapter *ad,
		struct rte_flow *flow,
		struct iavf_parser_list *parser_list,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);

void
iavf_register_flow_engine(struct iavf_flow_engine *engine)
{
	TAILQ_INSERT_TAIL(&engine_list, engine, node);
}

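/*
 * Engines are expected to register themselves early (typically from an
 * RTE_INIT constructor) so that engine_list is populated before any adapter
 * calls iavf_flow_init(). A minimal sketch, assuming an engine instance and
 * callbacks named example_* defined elsewhere (the names are placeholders):
 *
 *	static struct iavf_flow_engine example_engine = {
 *		.init = example_engine_init,
 *		.uninit = example_engine_uninit,
 *		.create = example_engine_create,
 *		.destroy = example_engine_destroy,
 *		.type = IAVF_FLOW_ENGINE_FDIR,
 *	};
 *
 *	RTE_INIT(example_engine_register)
 *	{
 *		iavf_register_flow_engine(&example_engine);
 *	}
 */
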
int
iavf_flow_init(struct iavf_adapter *ad)
{
	int ret;
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	void *temp;
	struct iavf_flow_engine *engine;

	TAILQ_INIT(&vf->flow_list);
	TAILQ_INIT(&vf->rss_parser_list);
	TAILQ_INIT(&vf->dist_parser_list);
	rte_spinlock_init(&vf->flow_ops_lock);

	TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
		if (engine->init == NULL) {
			PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
				     engine->type);
			return -ENOTSUP;
		}

		ret = engine->init(ad);
		if (ret && ret != -ENOTSUP) {
			PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
				     engine->type);
			return ret;
		}
	}
	return 0;
}

void
iavf_flow_uninit(struct iavf_adapter *ad)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	struct iavf_flow_engine *engine;
	struct rte_flow *p_flow;
	struct iavf_flow_parser_node *p_parser;
	void *temp;

	TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
		if (engine->uninit)
			engine->uninit(ad);
	}

	/* Remove all flows */
	while ((p_flow = TAILQ_FIRST(&vf->flow_list))) {
		TAILQ_REMOVE(&vf->flow_list, p_flow, node);
		if (p_flow->engine->free)
			p_flow->engine->free(p_flow);
		rte_free(p_flow);
	}

	/* Cleanup parser list */
	while ((p_parser = TAILQ_FIRST(&vf->rss_parser_list))) {
		TAILQ_REMOVE(&vf->rss_parser_list, p_parser, node);
		rte_free(p_parser);
	}

	while ((p_parser = TAILQ_FIRST(&vf->dist_parser_list))) {
		TAILQ_REMOVE(&vf->dist_parser_list, p_parser, node);
		rte_free(p_parser);
	}
}

int
iavf_register_parser(struct iavf_flow_parser *parser,
		     struct iavf_adapter *ad)
{
	struct iavf_parser_list *list = NULL;
	struct iavf_flow_parser_node *parser_node;
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);

	parser_node = rte_zmalloc("iavf_parser", sizeof(*parser_node), 0);
	if (parser_node == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory.");
		return -ENOMEM;
	}
	parser_node->parser = parser;

	if (parser->engine->type == IAVF_FLOW_ENGINE_HASH) {
		list = &vf->rss_parser_list;
		TAILQ_INSERT_TAIL(list, parser_node, node);
	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
		list = &vf->dist_parser_list;
		TAILQ_INSERT_HEAD(list, parser_node, node);
	} else {
		/* Unknown engine type: free the node to avoid leaking it. */
		rte_free(parser_node);
		return -EINVAL;
	}

	return 0;
}

void
iavf_unregister_parser(struct iavf_flow_parser *parser,
		       struct iavf_adapter *ad)
{
	struct iavf_parser_list *list = NULL;
	struct iavf_flow_parser_node *p_parser;
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	void *temp;

	if (parser->engine->type == IAVF_FLOW_ENGINE_HASH)
		list = &vf->rss_parser_list;
	else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR)
		list = &vf->dist_parser_list;

	if (list == NULL)
		return;

	TAILQ_FOREACH_SAFE(p_parser, list, node, temp) {
		if (p_parser->parser->engine->type == parser->engine->type) {
			TAILQ_REMOVE(list, p_parser, node);
			rte_free(p_parser);
		}
	}
}

static int
iavf_flow_valid_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}

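/*
 * Only plain ingress rules pass the checks above; the only attribute layout
 * this driver accepts is therefore equivalent to:
 *
 *	const struct rte_flow_attr attr = {
 *		.ingress = 1,
 *		.egress = 0,
 *		.priority = 0,
 *		.group = 0,
 *	};
 */
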
/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
iavf_find_first_item(const struct rte_flow_item *item, bool is_void)
{
	bool is_find;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (is_void)
			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
		else
			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
		if (is_find)
			break;
		item++;
	}
	return item;
}

/* Skip all VOID items of the pattern */
static void
iavf_pattern_skip_void_item(struct rte_flow_item *items,
			const struct rte_flow_item *pattern)
{
	uint32_t cpy_count = 0;
	const struct rte_flow_item *pb = pattern, *pe = pattern;

	for (;;) {
		/* Find a non-void item first */
		pb = iavf_find_first_item(pb, false);
		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
			pe = pb;
			break;
		}

		/* Find a void item */
		pe = iavf_find_first_item(pb + 1, true);

		cpy_count = pe - pb;
		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

		items += cpy_count;

		if (pe->type == RTE_FLOW_ITEM_TYPE_END)
			break;

		pb = pe + 1;
	}
	/* Copy the END item. */
	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}

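/*
 * Example of the compaction performed above (item types only; the spec/mask
 * pointers of the kept items are preserved by the copy): an input pattern of
 *
 *	ETH, VOID, IPV4, VOID, VOID, UDP, END
 *
 * is rewritten into the caller-provided items[] array as
 *
 *	ETH, IPV4, UDP, END
 *
 * which can then be compared against the iavf_pattern_* templates by
 * iavf_match_pattern() below.
 */
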
/* Check if the pattern matches a supported item type array */
static bool
iavf_match_pattern(enum rte_flow_item_type *item_array,
		   const struct rte_flow_item *pattern)
{
	const struct rte_flow_item *item = pattern;

	while ((*item_array == item->type) &&
	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
		item_array++;
		item++;
	}

	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
		item->type == RTE_FLOW_ITEM_TYPE_END);
}

struct iavf_pattern_match_item *
iavf_search_pattern_match_item(const struct rte_flow_item pattern[],
		struct iavf_pattern_match_item *array,
		uint32_t array_len,
		struct rte_flow_error *error)
{
	uint16_t i = 0;
	struct iavf_pattern_match_item *pattern_match_item;
	/* need free by each filter */
	struct rte_flow_item *items; /* used for pattern without VOID items */
	uint32_t item_num = 0; /* non-void item number */

	/* Get the non-void item number of pattern */
	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
			item_num++;
		i++;
	}
	item_num++;

	items = rte_zmalloc("iavf_pattern",
			    item_num * sizeof(struct rte_flow_item), 0);
	if (!items) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "No memory for PMD internal items.");
		return NULL;
	}
	pattern_match_item = rte_zmalloc("iavf_pattern_match_item",
				sizeof(struct iavf_pattern_match_item), 0);
	if (!pattern_match_item) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Failed to allocate memory.");
		/* Free the temporary item array to avoid leaking it. */
		rte_free(items);
		return NULL;
	}

	iavf_pattern_skip_void_item(items, pattern);

	for (i = 0; i < array_len; i++)
		if (iavf_match_pattern(array[i].pattern_list,
				       items)) {
			pattern_match_item->input_set_mask =
				array[i].input_set_mask;
			pattern_match_item->pattern_list =
				array[i].pattern_list;
			pattern_match_item->meta = array[i].meta;
			rte_free(items);
			return pattern_match_item;
		}
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			   pattern, "Unsupported pattern");

	rte_free(items);
	rte_free(pattern_match_item);
	return NULL;
}

static struct iavf_flow_engine *
iavf_parse_engine_create(struct iavf_adapter *ad,
		struct rte_flow *flow,
		struct iavf_parser_list *parser_list,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct iavf_flow_engine *engine = NULL;
	struct iavf_flow_parser_node *parser_node;
	void *temp;
	void *meta = NULL;

	TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
		if (parser_node->parser->parse_pattern_action(ad,
				parser_node->parser->array,
				parser_node->parser->array_len,
				pattern, actions, &meta, error) < 0)
			continue;

		engine = parser_node->parser->engine;

		RTE_ASSERT(engine->create != NULL);
		if (!(engine->create(ad, flow, meta, error)))
			return engine;
	}
	return NULL;
}

static struct iavf_flow_engine *
iavf_parse_engine_validate(struct iavf_adapter *ad,
		struct rte_flow *flow,
		struct iavf_parser_list *parser_list,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct iavf_flow_engine *engine = NULL;
	struct iavf_flow_parser_node *parser_node;
	void *temp;
	void *meta = NULL;

	TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
		if (parser_node->parser->parse_pattern_action(ad,
				parser_node->parser->array,
				parser_node->parser->array_len,
				pattern, actions, &meta, error) < 0)
			continue;

		engine = parser_node->parser->engine;
		if (engine->validation == NULL) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Validation not support");
			continue;
		}

		if (engine->validation(ad, flow, meta, error)) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Validation failed");
			break;
		}
	}
	return engine;
}

static int
iavf_flow_process_filter(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct iavf_flow_engine **engine,
		parse_engine_t iavf_parse_engine,
		struct rte_flow_error *error)
{
	int ret = IAVF_ERR_CONFIG;
	struct iavf_adapter *ad =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	ret = iavf_flow_valid_attr(attr, error);
	if (ret)
		return ret;

	*engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list, pattern,
				    actions, error);
	if (*engine)
		return 0;

	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
				    actions, error);

	if (!*engine) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create parser engine.");
		return -rte_errno;
	}

	return 0;
}

static int
iavf_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct iavf_flow_engine *engine;

	return iavf_flow_process_filter(dev, NULL, attr, pattern, actions,
			&engine, iavf_parse_engine_validate, error);
}

static struct rte_flow *
iavf_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct iavf_adapter *ad =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	struct iavf_flow_engine *engine = NULL;
	struct rte_flow *flow = NULL;
	int ret;

	flow = rte_zmalloc("iavf_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return flow;
	}

	/* Take the flow ops lock; it is released at the free_flow label. */
	rte_spinlock_lock(&vf->flow_ops_lock);

	ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
			&engine, iavf_parse_engine_create, error);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to create flow");
		rte_free(flow);
		flow = NULL;
		goto free_flow;
	}

	flow->engine = engine;
	TAILQ_INSERT_TAIL(&vf->flow_list, flow, node);
	PMD_DRV_LOG(INFO, "Succeeded to create (%d) flow", engine->type);

free_flow:
	rte_spinlock_unlock(&vf->flow_ops_lock);
	return flow;
}

static bool
iavf_flow_is_valid(struct rte_flow *flow)
{
	struct iavf_flow_engine *engine;
	void *temp;

	if (flow && flow->engine) {
		TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
			if (engine == flow->engine)
				return true;
		}
	}

	return false;
}

static int
iavf_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct iavf_adapter *ad =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	int ret = 0;

	if (!iavf_flow_is_valid(flow) || !flow->engine->destroy) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Invalid flow destroy");
		return -rte_errno;
	}

	rte_spinlock_lock(&vf->flow_ops_lock);

	ret = flow->engine->destroy(ad, flow, error);

	if (!ret) {
		TAILQ_REMOVE(&vf->flow_list, flow, node);
		rte_free(flow);
	} else {
		PMD_DRV_LOG(ERR, "Failed to destroy flow");
	}

	rte_spinlock_unlock(&vf->flow_ops_lock);

	return ret;
}

int
iavf_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	struct iavf_adapter *ad =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	struct rte_flow *p_flow;
	void *temp;
	int ret = 0;

	TAILQ_FOREACH_SAFE(p_flow, &vf->flow_list, node, temp) {
		ret = iavf_flow_destroy(dev, p_flow, error);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to flush flows");
			return -EINVAL;
		}
	}

	return ret;
}

static int
iavf_flow_query(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		const struct rte_flow_action *actions,
		void *data,
		struct rte_flow_error *error)
{
	int ret = -EINVAL;
	struct iavf_adapter *ad =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_flow_query_count *count = data;

	if (!iavf_flow_is_valid(flow) || !flow->engine->query_count) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Invalid flow query");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow->engine->query_count(ad, flow, count, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION,
					actions,
					"action not supported");
		}
	}
	return ret;
}

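/*
 * Illustrative use of the query callback through the public API, assuming a
 * previously created flow whose engine implements query_count (port_id and
 * flow are placeholders). The loop above walks the action list until END, so
 * an END-terminated array is passed here:
 *
 *	struct rte_flow_query_count count = { .reset = 1 };
 *	struct rte_flow_action query_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_query(port_id, flow, query_actions, &count, &err) == 0 &&
 *	    count.hits_set)
 *		printf("hits: %llu\n", (unsigned long long)count.hits);
 */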