xref: /f-stack/dpdk/drivers/net/iavf/iavf_fdir.c (revision 2d9fd380)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_malloc.h>
16 #include <rte_tailq.h>
17 
18 #include "iavf.h"
19 #include "iavf_generic_flow.h"
20 #include "virtchnl.h"
21 #include "iavf_rxtx.h"
22 
23 #define IAVF_FDIR_MAX_QREGION_SIZE 128
24 
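/* The IPv6 Traffic Class occupies bits 27:20 of the vtc_flow word
 * (version:4 | TC:8 | flow label:20), hence the 20-bit offset below.
 */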
25 #define IAVF_FDIR_IPV6_TC_OFFSET 20
26 #define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
27 
28 #define IAVF_FDIR_INSET_ETH (\
29 	IAVF_INSET_ETHERTYPE)
30 
31 #define IAVF_FDIR_INSET_ETH_IPV4 (\
32 	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
33 	IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
34 	IAVF_INSET_IPV4_TTL)
35 
36 #define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
37 	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
38 	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
39 	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
40 
41 #define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
42 	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
43 	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
44 	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
45 
46 #define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
47 	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
48 	IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
49 	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
50 
51 #define IAVF_FDIR_INSET_ETH_IPV6 (\
52 	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
53 	IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
54 	IAVF_INSET_IPV6_HOP_LIMIT)
55 
56 #define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
57 	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
58 	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
59 	IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
60 
61 #define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
62 	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
63 	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
64 	IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
65 
66 #define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
67 	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
68 	IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
69 	IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
70 
71 #define IAVF_FDIR_INSET_IPV4_GTPU (\
72 	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
73 	IAVF_INSET_GTPU_TEID)
74 
75 #define IAVF_FDIR_INSET_IPV4_GTPU_EH (\
76 	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
77 	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
78 
79 #define IAVF_FDIR_INSET_IPV6_GTPU (\
80 	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
81 	IAVF_INSET_GTPU_TEID)
82 
83 #define IAVF_FDIR_INSET_IPV6_GTPU_EH (\
84 	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
85 	IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
86 
87 #define IAVF_FDIR_INSET_L2TPV3OIP (\
88 	IAVF_L2TPV3OIP_SESSION_ID)
89 
90 #define IAVF_FDIR_INSET_ESP (\
91 	IAVF_INSET_ESP_SPI)
92 
93 #define IAVF_FDIR_INSET_AH (\
94 	IAVF_INSET_AH_SPI)
95 
96 #define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
97 	IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
98 	IAVF_INSET_ESP_SPI)
99 
100 #define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
101 	IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
102 	IAVF_INSET_ESP_SPI)
103 
104 #define IAVF_FDIR_INSET_PFCP (\
105 	IAVF_INSET_PFCP_S_FIELD)
106 
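/* Supported FDIR patterns, each paired with the input-set mask that
 * iavf_fdir_parse() validates the parsed input set against.
 */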
107 static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
108 	{iavf_pattern_ethertype,		IAVF_FDIR_INSET_ETH,			IAVF_INSET_NONE},
109 	{iavf_pattern_eth_ipv4,			IAVF_FDIR_INSET_ETH_IPV4,		IAVF_INSET_NONE},
110 	{iavf_pattern_eth_ipv4_udp,		IAVF_FDIR_INSET_ETH_IPV4_UDP,		IAVF_INSET_NONE},
111 	{iavf_pattern_eth_ipv4_tcp,		IAVF_FDIR_INSET_ETH_IPV4_TCP,		IAVF_INSET_NONE},
112 	{iavf_pattern_eth_ipv4_sctp,		IAVF_FDIR_INSET_ETH_IPV4_SCTP,		IAVF_INSET_NONE},
113 	{iavf_pattern_eth_ipv6,			IAVF_FDIR_INSET_ETH_IPV6,		IAVF_INSET_NONE},
114 	{iavf_pattern_eth_ipv6_udp,		IAVF_FDIR_INSET_ETH_IPV6_UDP,		IAVF_INSET_NONE},
115 	{iavf_pattern_eth_ipv6_tcp,		IAVF_FDIR_INSET_ETH_IPV6_TCP,		IAVF_INSET_NONE},
116 	{iavf_pattern_eth_ipv6_sctp,		IAVF_FDIR_INSET_ETH_IPV6_SCTP,		IAVF_INSET_NONE},
117 	{iavf_pattern_eth_ipv4_gtpu,		IAVF_FDIR_INSET_IPV4_GTPU,		IAVF_INSET_NONE},
118 	{iavf_pattern_eth_ipv4_gtpu_eh,		IAVF_FDIR_INSET_IPV4_GTPU_EH,		IAVF_INSET_NONE},
119 	{iavf_pattern_eth_ipv6_gtpu,		IAVF_FDIR_INSET_IPV6_GTPU,		IAVF_INSET_NONE},
120 	{iavf_pattern_eth_ipv6_gtpu_eh,		IAVF_FDIR_INSET_IPV6_GTPU_EH,		IAVF_INSET_NONE},
121 	{iavf_pattern_eth_ipv4_l2tpv3,		IAVF_FDIR_INSET_L2TPV3OIP,		IAVF_INSET_NONE},
122 	{iavf_pattern_eth_ipv6_l2tpv3,		IAVF_FDIR_INSET_L2TPV3OIP,		IAVF_INSET_NONE},
123 	{iavf_pattern_eth_ipv4_esp,		IAVF_FDIR_INSET_ESP,			IAVF_INSET_NONE},
124 	{iavf_pattern_eth_ipv6_esp,		IAVF_FDIR_INSET_ESP,			IAVF_INSET_NONE},
125 	{iavf_pattern_eth_ipv4_ah,		IAVF_FDIR_INSET_AH,			IAVF_INSET_NONE},
126 	{iavf_pattern_eth_ipv6_ah,		IAVF_FDIR_INSET_AH,			IAVF_INSET_NONE},
127 	{iavf_pattern_eth_ipv4_udp_esp,		IAVF_FDIR_INSET_IPV4_NATT_ESP,		IAVF_INSET_NONE},
128 	{iavf_pattern_eth_ipv6_udp_esp,		IAVF_FDIR_INSET_IPV6_NATT_ESP,		IAVF_INSET_NONE},
129 	{iavf_pattern_eth_ipv4_pfcp,		IAVF_FDIR_INSET_PFCP,			IAVF_INSET_NONE},
130 	{iavf_pattern_eth_ipv6_pfcp,		IAVF_FDIR_INSET_PFCP,			IAVF_INSET_NONE},
131 };
132 
133 static struct iavf_flow_parser iavf_fdir_parser;
134 
135 static int
136 iavf_fdir_init(struct iavf_adapter *ad)
137 {
138 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
139 	struct iavf_flow_parser *parser;
140 
141 	if (!vf->vf_res)
142 		return -EINVAL;
143 
144 	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
145 		parser = &iavf_fdir_parser;
146 	else
147 		return -ENOTSUP;
148 
149 	return iavf_register_parser(parser, ad);
150 }
151 
152 static void
153 iavf_fdir_uninit(struct iavf_adapter *ad)
154 {
155 	iavf_unregister_parser(&iavf_fdir_parser, ad);
156 }
157 
158 static int
159 iavf_fdir_create(struct iavf_adapter *ad,
160 		struct rte_flow *flow,
161 		void *meta,
162 		struct rte_flow_error *error)
163 {
164 	struct iavf_fdir_conf *filter = meta;
165 	struct iavf_fdir_conf *rule;
166 	int ret;
167 
168 	rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
169 	if (!rule) {
170 		rte_flow_error_set(error, ENOMEM,
171 				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
172 				"Failed to allocate memory for fdir rule");
173 		return -rte_errno;
174 	}
175 
176 	ret = iavf_fdir_add(ad, filter);
177 	if (ret) {
178 		rte_flow_error_set(error, -ret,
179 				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
180 				"Failed to add filter rule.");
181 		goto free_entry;
182 	}
183 
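	/* Rules carrying a MARK action need the Rx path to deliver the
	 * FDIR ID with the packet, so enable that processing along with
	 * the rule.
	 */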
184 	if (filter->mark_flag == 1)
185 		iavf_fdir_rx_proc_enable(ad, 1);
186 
187 	rte_memcpy(rule, filter, sizeof(*rule));
188 	flow->rule = rule;
189 
190 	return 0;
191 
192 free_entry:
193 	rte_free(rule);
194 	return -rte_errno;
195 }
196 
197 static int
198 iavf_fdir_destroy(struct iavf_adapter *ad,
199 		struct rte_flow *flow,
200 		struct rte_flow_error *error)
201 {
202 	struct iavf_fdir_conf *filter;
203 	int ret;
204 
205 	filter = (struct iavf_fdir_conf *)flow->rule;
206 
207 	ret = iavf_fdir_del(ad, filter);
208 	if (ret) {
209 		rte_flow_error_set(error, -ret,
210 				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
211 				"Failed to delete filter rule.");
212 		return -rte_errno;
213 	}
214 
215 	if (filter->mark_flag == 1)
216 		iavf_fdir_rx_proc_enable(ad, 0);
217 
218 	flow->rule = NULL;
219 	rte_free(filter);
220 
221 	return 0;
222 }
223 
224 static int
225 iavf_fdir_validation(struct iavf_adapter *ad,
226 		__rte_unused struct rte_flow *flow,
227 		void *meta,
228 		struct rte_flow_error *error)
229 {
230 	struct iavf_fdir_conf *filter = meta;
231 	int ret;
232 
233 	ret = iavf_fdir_check(ad, filter);
234 	if (ret) {
235 		rte_flow_error_set(error, -ret,
236 				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
237 				"Failed to validate filter rule.");
238 		return -rte_errno;
239 	}
240 
241 	return 0;
242 }
243 
244 static struct iavf_flow_engine iavf_fdir_engine = {
245 	.init = iavf_fdir_init,
246 	.uninit = iavf_fdir_uninit,
247 	.create = iavf_fdir_create,
248 	.destroy = iavf_fdir_destroy,
249 	.validation = iavf_fdir_validation,
250 	.type = IAVF_FLOW_ENGINE_FDIR,
251 };
252 
253 static int
254 iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
255 			struct rte_flow_error *error,
256 			const struct rte_flow_action *act,
257 			struct virtchnl_filter_action *filter_action)
258 {
259 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
260 	const struct rte_flow_action_rss *rss = act->conf;
261 	uint32_t i;
262 
263 	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
264 		rte_flow_error_set(error, EINVAL,
265 				RTE_FLOW_ERROR_TYPE_ACTION, act,
266 				"Invalid action.");
267 		return -rte_errno;
268 	}
269 
270 	if (rss->queue_num <= 1) {
271 		rte_flow_error_set(error, EINVAL,
272 				RTE_FLOW_ERROR_TYPE_ACTION, act,
273 				"Queue region size can't be 0 or 1.");
274 		return -rte_errno;
275 	}
276 
277 	/* check that the queue indexes for the queue region are contiguous */
278 	for (i = 0; i < rss->queue_num - 1; i++) {
279 		if (rss->queue[i + 1] != rss->queue[i] + 1) {
280 			rte_flow_error_set(error, EINVAL,
281 					RTE_FLOW_ERROR_TYPE_ACTION, act,
282 					"Discontinuous queue region");
283 			return -rte_errno;
284 		}
285 	}
286 
287 	if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
288 		rte_flow_error_set(error, EINVAL,
289 				RTE_FLOW_ERROR_TYPE_ACTION, act,
290 				"Invalid queue region indexes.");
291 		return -rte_errno;
292 	}
293 
294 	if (!(rte_is_power_of_2(rss->queue_num) &&
295 		rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE)) {
296 		rte_flow_error_set(error, EINVAL,
297 				RTE_FLOW_ERROR_TYPE_ACTION, act,
298 				"The region size should be any of the following values: "
299 				"2, 4, 8, 16, 32, 64 or 128, as long as the total number "
300 				"of queues does not exceed the VSI allocation.");
301 		return -rte_errno;
302 	}
303 
304 	if (rss->queue_num > vf->max_rss_qregion) {
305 		rte_flow_error_set(error, EINVAL,
306 				RTE_FLOW_ERROR_TYPE_ACTION, act,
307 				"The region size cannot be larger than the supported max RSS queue region");
308 		return -rte_errno;
309 	}
310 
311 	filter_action->act_conf.queue.index = rss->queue[0];
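	/* queue_num is a power of two at this point, so
	 * rte_fls_u32(queue_num) - 1 is log2(queue_num): an 8-queue
	 * region, for example, is encoded as 3.
	 */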
312 	filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;
313 
314 	return 0;
315 }
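
/*
 * Editorial sketch, not part of the driver: an RSS action describing a
 * contiguous, power-of-two queue region (queues 4..7) that satisfies the
 * checks above.  The queue numbers are arbitrary example values.
 */
#ifdef IAVF_FDIR_EDITORIAL_EXAMPLES
static const uint16_t ex_region_queues[] = { 4, 5, 6, 7 };
static const struct rte_flow_action_rss ex_region_act = {
	.queue_num = RTE_DIM(ex_region_queues),
	.queue = ex_region_queues,
};
#endif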
316 
317 static int
318 iavf_fdir_parse_action(struct iavf_adapter *ad,
319 			const struct rte_flow_action actions[],
320 			struct rte_flow_error *error,
321 			struct iavf_fdir_conf *filter)
322 {
323 	const struct rte_flow_action_queue *act_q;
324 	const struct rte_flow_action_mark *mark_spec = NULL;
325 	uint32_t dest_num = 0;
326 	uint32_t mark_num = 0;
327 	int ret;
328 
329 	int number = 0;
330 	struct virtchnl_filter_action *filter_action;
331 
332 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
333 		switch (actions->type) {
334 		case RTE_FLOW_ACTION_TYPE_VOID:
335 			break;
336 
337 		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
338 			dest_num++;
339 
340 			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
341 
342 			filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
343 
344 			filter->add_fltr.rule_cfg.action_set.count = ++number;
345 			break;
346 
347 		case RTE_FLOW_ACTION_TYPE_DROP:
348 			dest_num++;
349 
350 			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
351 
352 			filter_action->type = VIRTCHNL_ACTION_DROP;
353 
354 			filter->add_fltr.rule_cfg.action_set.count = ++number;
355 			break;
356 
357 		case RTE_FLOW_ACTION_TYPE_QUEUE:
358 			dest_num++;
359 
360 			act_q = actions->conf;
361 			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
362 
363 			filter_action->type = VIRTCHNL_ACTION_QUEUE;
364 			filter_action->act_conf.queue.index = act_q->index;
365 
366 			if (filter_action->act_conf.queue.index >=
367 				ad->eth_dev->data->nb_rx_queues) {
368 				rte_flow_error_set(error, EINVAL,
369 					RTE_FLOW_ERROR_TYPE_ACTION,
370 					actions, "Invalid queue for FDIR.");
371 				return -rte_errno;
372 			}
373 
374 			filter->add_fltr.rule_cfg.action_set.count = ++number;
375 			break;
376 
377 		case RTE_FLOW_ACTION_TYPE_RSS:
378 			dest_num++;
379 
380 			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
381 
382 			filter_action->type = VIRTCHNL_ACTION_Q_REGION;
383 
384 			ret = iavf_fdir_parse_action_qregion(ad,
385 						error, actions, filter_action);
386 			if (ret)
387 				return ret;
388 
389 			filter->add_fltr.rule_cfg.action_set.count = ++number;
390 			break;
391 
392 		case RTE_FLOW_ACTION_TYPE_MARK:
393 			mark_num++;
394 
395 			filter->mark_flag = 1;
396 			mark_spec = actions->conf;
397 			filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
398 
399 			filter_action->type = VIRTCHNL_ACTION_MARK;
400 			filter_action->act_conf.mark_id = mark_spec->id;
401 
402 			filter->add_fltr.rule_cfg.action_set.count = ++number;
403 			break;
404 
405 		default:
406 			rte_flow_error_set(error, EINVAL,
407 					RTE_FLOW_ERROR_TYPE_ACTION, actions,
408 					"Invalid action.");
409 			return -rte_errno;
410 		}
411 	}
412 
413 	if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
414 		rte_flow_error_set(error, EINVAL,
415 			RTE_FLOW_ERROR_TYPE_ACTION, actions,
416 			"Number of actions exceeds the maximum allowed");
417 		return -rte_errno;
418 	}
419 
420 	if (dest_num >= 2) {
421 		rte_flow_error_set(error, EINVAL,
422 			RTE_FLOW_ERROR_TYPE_ACTION, actions,
423 			"Unsupported action combination");
424 		return -rte_errno;
425 	}
426 
427 	if (mark_num >= 2) {
428 		rte_flow_error_set(error, EINVAL,
429 			RTE_FLOW_ERROR_TYPE_ACTION, actions,
430 			"Too many mark actions");
431 		return -rte_errno;
432 	}
433 
434 	if (dest_num + mark_num == 0) {
435 		rte_flow_error_set(error, EINVAL,
436 			RTE_FLOW_ERROR_TYPE_ACTION, actions,
437 			"Empty action");
438 		return -rte_errno;
439 	}
440 
441 	/* A mark-only rule is treated as mark + passthru. */
442 	if (dest_num == 0) {
443 		filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
444 		filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
445 		filter->add_fltr.rule_cfg.action_set.count = ++number;
446 	}
447 
448 	return 0;
449 }
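
/*
 * Editorial sketch, not part of the driver: an action list this parser
 * accepts -- exactly one destination action (QUEUE) plus one MARK.
 * Queue index 3 and mark id 0x1234 are arbitrary example values.
 */
#ifdef IAVF_FDIR_EDITORIAL_EXAMPLES
static const struct rte_flow_action_queue ex_queue_act = { .index = 3 };
static const struct rte_flow_action_mark ex_mark_act = { .id = 0x1234 };
static const struct rte_flow_action ex_fdir_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &ex_queue_act },
	{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &ex_mark_act },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif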
450 
451 static int
452 iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
453 			const struct rte_flow_item pattern[],
454 			struct rte_flow_error *error,
455 			struct iavf_fdir_conf *filter)
456 {
457 	const struct rte_flow_item *item = pattern;
458 	enum rte_flow_item_type item_type;
459 	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
460 	const struct rte_flow_item_eth *eth_spec, *eth_mask;
461 	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
462 	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
463 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
464 	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
465 	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
466 	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
467 	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
468 	const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
469 	const struct rte_flow_item_esp *esp_spec, *esp_mask;
470 	const struct rte_flow_item_ah *ah_spec, *ah_mask;
471 	const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
472 	uint64_t input_set = IAVF_INSET_NONE;
473 
474 	enum rte_flow_item_type next_type;
475 	uint16_t ether_type;
476 
477 	int layer = 0;
478 	struct virtchnl_proto_hdr *hdr;
479 
480 	uint8_t  ipv6_addr_mask[16] = {
481 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
482 		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
483 	};
484 
485 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
486 		if (item->last) {
487 			rte_flow_error_set(error, EINVAL,
488 				RTE_FLOW_ERROR_TYPE_ITEM, item, "Range not supported");
489 			return -rte_errno;
490 		}
491 
492 		item_type = item->type;
493 
494 		switch (item_type) {
495 		case RTE_FLOW_ITEM_TYPE_ETH:
496 			eth_spec = item->spec;
497 			eth_mask = item->mask;
498 			next_type = (item + 1)->type;
499 
500 			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
501 
502 			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
503 
504 			if (next_type == RTE_FLOW_ITEM_TYPE_END &&
505 				(!eth_spec || !eth_mask)) {
506 				rte_flow_error_set(error, EINVAL,
507 						RTE_FLOW_ERROR_TYPE_ITEM,
508 						item, "NULL eth spec/mask.");
509 				return -rte_errno;
510 			}
511 
512 			if (eth_spec && eth_mask) {
513 				if (!rte_is_zero_ether_addr(&eth_mask->src) ||
514 				    !rte_is_zero_ether_addr(&eth_mask->dst)) {
515 					rte_flow_error_set(error, EINVAL,
516 						RTE_FLOW_ERROR_TYPE_ITEM, item,
517 						"Invalid MAC_addr mask.");
518 					return -rte_errno;
519 				}
520 			}
521 
522 			if (eth_spec && eth_mask && eth_mask->type) {
523 				if (eth_mask->type != RTE_BE16(0xffff)) {
524 					rte_flow_error_set(error, EINVAL,
525 						RTE_FLOW_ERROR_TYPE_ITEM,
526 						item, "Invalid type mask.");
527 					return -rte_errno;
528 				}
529 
530 				ether_type = rte_be_to_cpu_16(eth_spec->type);
531 				if (ether_type == RTE_ETHER_TYPE_IPV4 ||
532 					ether_type == RTE_ETHER_TYPE_IPV6) {
533 					rte_flow_error_set(error, EINVAL,
534 						RTE_FLOW_ERROR_TYPE_ITEM,
535 						item,
536 						"Unsupported ether_type.");
537 					return -rte_errno;
538 				}
539 
540 				input_set |= IAVF_INSET_ETHERTYPE;
541 				VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);
542 
543 				rte_memcpy(hdr->buffer,
544 					eth_spec, sizeof(struct rte_ether_hdr));
545 			}
546 
547 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
548 			break;
549 
550 		case RTE_FLOW_ITEM_TYPE_IPV4:
551 			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
552 			ipv4_spec = item->spec;
553 			ipv4_mask = item->mask;
554 
555 			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
556 
557 			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
558 
559 			if (ipv4_spec && ipv4_mask) {
560 				if (ipv4_mask->hdr.version_ihl ||
561 					ipv4_mask->hdr.total_length ||
562 					ipv4_mask->hdr.packet_id ||
563 					ipv4_mask->hdr.fragment_offset ||
564 					ipv4_mask->hdr.hdr_checksum) {
565 					rte_flow_error_set(error, EINVAL,
566 						RTE_FLOW_ERROR_TYPE_ITEM,
567 						item, "Invalid IPv4 mask.");
568 					return -rte_errno;
569 				}
570 
571 				if (ipv4_mask->hdr.type_of_service ==
572 								UINT8_MAX) {
573 					input_set |= IAVF_INSET_IPV4_TOS;
574 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
575 				}
576 				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
577 					input_set |= IAVF_INSET_IPV4_PROTO;
578 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
579 				}
580 				if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
581 					input_set |= IAVF_INSET_IPV4_TTL;
582 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
583 				}
584 				if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
585 					input_set |= IAVF_INSET_IPV4_SRC;
586 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
587 				}
588 				if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
589 					input_set |= IAVF_INSET_IPV4_DST;
590 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
591 				}
592 
593 				rte_memcpy(hdr->buffer,
594 					&ipv4_spec->hdr,
595 					sizeof(ipv4_spec->hdr));
596 			}
597 
598 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
599 			break;
600 
601 		case RTE_FLOW_ITEM_TYPE_IPV6:
602 			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
603 			ipv6_spec = item->spec;
604 			ipv6_mask = item->mask;
605 
606 			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
607 
608 			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
609 
610 			if (ipv6_spec && ipv6_mask) {
611 				if (ipv6_mask->hdr.payload_len) {
612 					rte_flow_error_set(error, EINVAL,
613 						RTE_FLOW_ERROR_TYPE_ITEM,
614 						item, "Invalid IPv6 mask");
615 					return -rte_errno;
616 				}
617 
618 				if ((ipv6_mask->hdr.vtc_flow &
619 					rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
620 					== rte_cpu_to_be_32(IAVF_IPV6_TC_MASK)) {
621 					input_set |= IAVF_INSET_IPV6_TC;
622 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
623 				}
624 				if (ipv6_mask->hdr.proto == UINT8_MAX) {
625 					input_set |= IAVF_INSET_IPV6_NEXT_HDR;
626 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
627 				}
628 				if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
629 					input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
630 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
631 				}
632 				if (!memcmp(ipv6_mask->hdr.src_addr,
633 					ipv6_addr_mask,
634 					RTE_DIM(ipv6_mask->hdr.src_addr))) {
635 					input_set |= IAVF_INSET_IPV6_SRC;
636 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
637 				}
638 				if (!memcmp(ipv6_mask->hdr.dst_addr,
639 					ipv6_addr_mask,
640 					RTE_DIM(ipv6_mask->hdr.dst_addr))) {
641 					input_set |= IAVF_INSET_IPV6_DST;
642 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
643 				}
644 
645 				rte_memcpy(hdr->buffer,
646 					&ipv6_spec->hdr,
647 					sizeof(ipv6_spec->hdr));
648 			}
649 
650 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
651 			break;
652 
653 		case RTE_FLOW_ITEM_TYPE_UDP:
654 			udp_spec = item->spec;
655 			udp_mask = item->mask;
656 
657 			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
658 
659 			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
660 
661 			if (udp_spec && udp_mask) {
662 				if (udp_mask->hdr.dgram_len ||
663 					udp_mask->hdr.dgram_cksum) {
664 					rte_flow_error_set(error, EINVAL,
665 						RTE_FLOW_ERROR_TYPE_ITEM, item,
666 						"Invalid UDP mask");
667 					return -rte_errno;
668 				}
669 
670 				if (udp_mask->hdr.src_port == UINT16_MAX) {
671 					input_set |= IAVF_INSET_UDP_SRC_PORT;
672 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
673 				}
674 				if (udp_mask->hdr.dst_port == UINT16_MAX) {
675 					input_set |= IAVF_INSET_UDP_DST_PORT;
676 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
677 				}
678 
679 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
680 					rte_memcpy(hdr->buffer,
681 						&udp_spec->hdr,
682 						sizeof(udp_spec->hdr));
683 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
684 					rte_memcpy(hdr->buffer,
685 						&udp_spec->hdr,
686 						sizeof(udp_spec->hdr));
687 			}
688 
689 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
690 			break;
691 
692 		case RTE_FLOW_ITEM_TYPE_TCP:
693 			tcp_spec = item->spec;
694 			tcp_mask = item->mask;
695 
696 			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
697 
698 			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
699 
700 			if (tcp_spec && tcp_mask) {
701 				if (tcp_mask->hdr.sent_seq ||
702 					tcp_mask->hdr.recv_ack ||
703 					tcp_mask->hdr.data_off ||
704 					tcp_mask->hdr.tcp_flags ||
705 					tcp_mask->hdr.rx_win ||
706 					tcp_mask->hdr.cksum ||
707 					tcp_mask->hdr.tcp_urp) {
708 					rte_flow_error_set(error, EINVAL,
709 						RTE_FLOW_ERROR_TYPE_ITEM, item,
710 						"Invalid TCP mask");
711 					return -rte_errno;
712 				}
713 
714 				if (tcp_mask->hdr.src_port == UINT16_MAX) {
715 					input_set |= IAVF_INSET_TCP_SRC_PORT;
716 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
717 				}
718 				if (tcp_mask->hdr.dst_port == UINT16_MAX) {
719 					input_set |= IAVF_INSET_TCP_DST_PORT;
720 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
721 				}
722 
723 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
724 					rte_memcpy(hdr->buffer,
725 						&tcp_spec->hdr,
726 						sizeof(tcp_spec->hdr));
727 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
728 					rte_memcpy(hdr->buffer,
729 						&tcp_spec->hdr,
730 						sizeof(tcp_spec->hdr));
731 			}
732 
733 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
734 			break;
735 
736 		case RTE_FLOW_ITEM_TYPE_SCTP:
737 			sctp_spec = item->spec;
738 			sctp_mask = item->mask;
739 
740 			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
741 
742 			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
743 
744 			if (sctp_spec && sctp_mask) {
745 				if (sctp_mask->hdr.cksum) {
746 					rte_flow_error_set(error, EINVAL,
747 						RTE_FLOW_ERROR_TYPE_ITEM, item,
748 						"Invalid SCTP mask");
749 					return -rte_errno;
750 				}
751 
752 				if (sctp_mask->hdr.src_port == UINT16_MAX) {
753 					input_set |= IAVF_INSET_SCTP_SRC_PORT;
754 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
755 				}
756 				if (sctp_mask->hdr.dst_port == UINT16_MAX) {
757 					input_set |= IAVF_INSET_SCTP_DST_PORT;
758 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
759 				}
760 
761 				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
762 					rte_memcpy(hdr->buffer,
763 						&sctp_spec->hdr,
764 						sizeof(sctp_spec->hdr));
765 				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
766 					rte_memcpy(hdr->buffer,
767 						&sctp_spec->hdr,
768 						sizeof(sctp_spec->hdr));
769 			}
770 
771 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
772 			break;
773 
774 		case RTE_FLOW_ITEM_TYPE_GTPU:
775 			gtp_spec = item->spec;
776 			gtp_mask = item->mask;
777 
778 			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
779 
780 			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);
781 
782 			if (gtp_spec && gtp_mask) {
783 				if (gtp_mask->v_pt_rsv_flags ||
784 					gtp_mask->msg_type ||
785 					gtp_mask->msg_len) {
786 					rte_flow_error_set(error, EINVAL,
787 						RTE_FLOW_ERROR_TYPE_ITEM,
788 						item, "Invalid GTP mask");
789 					return -rte_errno;
790 				}
791 
792 				if (gtp_mask->teid == UINT32_MAX) {
793 					input_set |= IAVF_INSET_GTPU_TEID;
794 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
795 				}
796 
797 				rte_memcpy(hdr->buffer,
798 					gtp_spec, sizeof(*gtp_spec));
799 			}
800 
801 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
802 			break;
803 
804 		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
805 			gtp_psc_spec = item->spec;
806 			gtp_psc_mask = item->mask;
807 
808 			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
809 
810 			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
811 
812 			if (gtp_psc_spec && gtp_psc_mask) {
813 				if (gtp_psc_mask->qfi == UINT8_MAX) {
814 					input_set |= IAVF_INSET_GTPU_QFI;
815 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
816 				}
817 
818 				rte_memcpy(hdr->buffer, gtp_psc_spec,
819 					sizeof(*gtp_psc_spec));
820 			}
821 
822 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
823 			break;
824 
825 		case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
826 			l2tpv3oip_spec = item->spec;
827 			l2tpv3oip_mask = item->mask;
828 
829 			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
830 
831 			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);
832 
833 			if (l2tpv3oip_spec && l2tpv3oip_mask) {
834 				if (l2tpv3oip_mask->session_id == UINT32_MAX) {
835 					input_set |= IAVF_L2TPV3OIP_SESSION_ID;
836 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
837 				}
838 
839 				rte_memcpy(hdr->buffer, l2tpv3oip_spec,
840 					sizeof(*l2tpv3oip_spec));
841 			}
842 
843 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
844 			break;
845 
846 		case RTE_FLOW_ITEM_TYPE_ESP:
847 			esp_spec = item->spec;
848 			esp_mask = item->mask;
849 
850 			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
851 
852 			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
853 
854 			if (esp_spec && esp_mask) {
855 				if (esp_mask->hdr.spi == UINT32_MAX) {
856 					input_set |= IAVF_INSET_ESP_SPI;
857 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
858 				}
859 
860 				rte_memcpy(hdr->buffer, &esp_spec->hdr,
861 					sizeof(esp_spec->hdr));
862 			}
863 
864 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
865 			break;
866 
867 		case RTE_FLOW_ITEM_TYPE_AH:
868 			ah_spec = item->spec;
869 			ah_mask = item->mask;
870 
871 			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
872 
873 			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);
874 
875 			if (ah_spec && ah_mask) {
876 				if (ah_mask->spi == UINT32_MAX) {
877 					input_set |= IAVF_INSET_AH_SPI;
878 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
879 				}
880 
881 				rte_memcpy(hdr->buffer, ah_spec,
882 					sizeof(*ah_spec));
883 			}
884 
885 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
886 			break;
887 
888 		case RTE_FLOW_ITEM_TYPE_PFCP:
889 			pfcp_spec = item->spec;
890 			pfcp_mask = item->mask;
891 
892 			hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
893 
894 			VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);
895 
896 			if (pfcp_spec && pfcp_mask) {
897 				if (pfcp_mask->s_field == UINT8_MAX) {
898 					input_set |= IAVF_INSET_PFCP_S_FIELD;
899 					VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
900 				}
901 
902 				rte_memcpy(hdr->buffer, pfcp_spec,
903 					sizeof(*pfcp_spec));
904 			}
905 
906 			filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
907 			break;
908 
909 		case RTE_FLOW_ITEM_TYPE_VOID:
910 			break;
911 
912 		default:
913 			rte_flow_error_set(error, EINVAL,
914 					RTE_FLOW_ERROR_TYPE_ITEM, item,
915 					"Invalid pattern item.");
916 			return -rte_errno;
917 		}
918 	}
919 
920 	if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
921 		rte_flow_error_set(error, EINVAL,
922 			RTE_FLOW_ERROR_TYPE_ITEM, item,
923 			"Protocol header layers exceed the maximum value");
924 		return -rte_errno;
925 	}
926 
927 	filter->input_set = input_set;
928 
929 	return 0;
930 }
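
/*
 * Editorial sketch, not part of the driver: a pattern this parser resolves
 * to IAVF_FDIR_INSET_ETH_IPV4_UDP -- fully masked IPv4 addresses and UDP
 * ports.  The addresses and ports are arbitrary example values.
 */
#ifdef IAVF_FDIR_EDITORIAL_EXAMPLES
static const struct rte_flow_item_ipv4 ex_ipv4_spec = {
	.hdr = {
		.src_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
		.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 2)),
	},
};
static const struct rte_flow_item_ipv4 ex_ipv4_mask = {
	.hdr = {
		.src_addr = RTE_BE32(UINT32_MAX),
		.dst_addr = RTE_BE32(UINT32_MAX),
	},
};
static const struct rte_flow_item_udp ex_udp_spec = {
	.hdr = {
		.src_port = RTE_BE16(4500),
		.dst_port = RTE_BE16(4500),
	},
};
static const struct rte_flow_item_udp ex_udp_mask = {
	.hdr = {
		.src_port = RTE_BE16(UINT16_MAX),
		.dst_port = RTE_BE16(UINT16_MAX),
	},
};
static const struct rte_flow_item ex_fdir_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
	  .spec = &ex_ipv4_spec, .mask = &ex_ipv4_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP,
	  .spec = &ex_udp_spec, .mask = &ex_udp_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif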
931 
932 static int
933 iavf_fdir_parse(struct iavf_adapter *ad,
934 		struct iavf_pattern_match_item *array,
935 		uint32_t array_len,
936 		const struct rte_flow_item pattern[],
937 		const struct rte_flow_action actions[],
938 		void **meta,
939 		struct rte_flow_error *error)
940 {
941 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
942 	struct iavf_fdir_conf *filter = &vf->fdir.conf;
943 	struct iavf_pattern_match_item *item = NULL;
944 	uint64_t input_set;
945 	int ret;
946 
947 	memset(filter, 0, sizeof(*filter));
948 
949 	item = iavf_search_pattern_match_item(pattern, array, array_len, error);
950 	if (!item)
951 		return -rte_errno;
952 
953 	ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
954 	if (ret)
955 		goto error;
956 
957 	input_set = filter->input_set;
958 	if (!input_set || input_set & ~item->input_set_mask) {
959 		rte_flow_error_set(error, EINVAL,
960 				RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
961 				"Invalid input set");
962 		ret = -rte_errno;
963 		goto error;
964 	}
965 
966 	ret = iavf_fdir_parse_action(ad, actions, error, filter);
967 	if (ret)
968 		goto error;
969 
970 	if (meta)
971 		*meta = filter;
972 
973 error:
974 	rte_free(item);
975 	return ret;
976 }
977 
978 static struct iavf_flow_parser iavf_fdir_parser = {
979 	.engine = &iavf_fdir_engine,
980 	.array = iavf_fdir_pattern,
981 	.array_len = RTE_DIM(iavf_fdir_pattern),
982 	.parse_pattern_action = iavf_fdir_parse,
983 	.stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
984 };
985 
986 RTE_INIT(iavf_fdir_engine_register)
987 {
988 	iavf_register_flow_engine(&iavf_fdir_engine);
989 }
990
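/*
 * Editorial end-to-end sketch, not part of the driver: rules handled by
 * this engine are installed through the generic rte_flow API, so that
 * validation reaches iavf_fdir_validation() and creation reaches
 * iavf_fdir_create().  port_id, pattern and actions are caller-supplied
 * example parameters.
 */
#ifdef IAVF_FDIR_EDITORIAL_EXAMPLES
static int
ex_install_fdir_rule(uint16_t port_id,
		     const struct rte_flow_item pattern[],
		     const struct rte_flow_action actions[])
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_error err;
	struct rte_flow *flow;

	if (rte_flow_validate(port_id, &attr, pattern, actions, &err))
		return -rte_errno;

	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
	return flow ? 0 : -rte_errno;
}
#endif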