/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include "otx2_ethdev.h"
#include "otx2_flow.h"

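/*
 * NPC protocol layers handled by the parsers below:
 *   LA - Ether / HIGIG2 headers
 *   LB - VLAN / E-TAG
 *   LC - outer IPv4/IPv6, MPLS, ARP
 *   LD - ICMP/ICMP6, TCP, UDP, SCTP, GRE, NVGRE
 *   LE - UDP-encapsulated tunnels (VXLAN, VXLAN-GPE, GENEVE, GTP, ESP),
 *        MPLS-in-UDP
 *   LF - tunnel Ether
 *   LG - tunnel IPv4/IPv6
 *   LH - tunnel TCP/UDP/SCTP/ESP
 */
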
const struct rte_flow_item *
otx2_flow_skip_void_and_any_items(const struct rte_flow_item *pattern)
{
	while ((pattern->type == RTE_FLOW_ITEM_TYPE_VOID) ||
	       (pattern->type == RTE_FLOW_ITEM_TYPE_ANY))
		pattern++;

	return pattern;
}

/*
 * Tunnel+ESP, Tunnel+ICMP4/6, Tunnel+TCP, Tunnel+UDP,
 * Tunnel+SCTP
 */
int
otx2_flow_parse_lh(struct otx2_parse_state *pst)
{
	struct otx2_flow_item_info info;
	char hw_mask[64];
	int lid, lt;
	int rc;

	if (!pst->tunnel)
		return 0;

	info.hw_mask = &hw_mask;
	info.spec = NULL;
	info.mask = NULL;
	info.hw_hdr_len = 0;
	lid = NPC_LID_LH;

	switch (pst->pattern->type) {
	case RTE_FLOW_ITEM_TYPE_UDP:
		lt = NPC_LT_LH_TU_UDP;
		info.def_mask = &rte_flow_item_udp_mask;
		info.len = sizeof(struct rte_flow_item_udp);
		break;
	case RTE_FLOW_ITEM_TYPE_TCP:
		lt = NPC_LT_LH_TU_TCP;
		info.def_mask = &rte_flow_item_tcp_mask;
		info.len = sizeof(struct rte_flow_item_tcp);
		break;
	case RTE_FLOW_ITEM_TYPE_SCTP:
		lt = NPC_LT_LH_TU_SCTP;
		info.def_mask = &rte_flow_item_sctp_mask;
		info.len = sizeof(struct rte_flow_item_sctp);
		break;
	case RTE_FLOW_ITEM_TYPE_ESP:
		lt = NPC_LT_LH_TU_ESP;
		info.def_mask = &rte_flow_item_esp_mask;
		info.len = sizeof(struct rte_flow_item_esp);
		break;
	default:
		return 0;
	}

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
}

/* Tunnel+IPv4, Tunnel+IPv6 */
int
otx2_flow_parse_lg(struct otx2_parse_state *pst)
{
	struct otx2_flow_item_info info;
	char hw_mask[64];
	int lid, lt;
	int rc;

	if (!pst->tunnel)
		return 0;

	info.hw_mask = &hw_mask;
	info.spec = NULL;
	info.mask = NULL;
	info.hw_hdr_len = 0;
	lid = NPC_LID_LG;

	if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		lt = NPC_LT_LG_TU_IP;
		info.def_mask = &rte_flow_item_ipv4_mask;
		info.len = sizeof(struct rte_flow_item_ipv4);
	} else if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		lt = NPC_LT_LG_TU_IP6;
		info.def_mask = &rte_flow_item_ipv6_mask;
		info.len = sizeof(struct rte_flow_item_ipv6);
	} else {
		/* There is no tunneled IP header */
		return 0;
	}

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
}

/* Tunnel+Ether */
int
otx2_flow_parse_lf(struct otx2_parse_state *pst)
{
	const struct rte_flow_item *pattern, *last_pattern;
	struct rte_flow_item_eth hw_mask;
	struct otx2_flow_item_info info;
	int lid, lt, lflags;
	int nr_vlans = 0;
	int rc;

	/* We hit this layer if there is a tunneling protocol */
	if (!pst->tunnel)
		return 0;

	if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_ETH)
		return 0;

	lid = NPC_LID_LF;
	lt = NPC_LT_LF_TU_ETHER;
	lflags = 0;

	info.def_mask = &rte_flow_item_vlan_mask;
	/* No match support for vlan tags */
	info.hw_mask = NULL;
	info.len = sizeof(struct rte_flow_item_vlan);
	info.spec = NULL;
	info.mask = NULL;
	info.hw_hdr_len = 0;

	/* Look ahead and find out any VLAN tags. These can be
	 * detected but no data matching is available.
	 */
	last_pattern = pst->pattern;
	pattern = pst->pattern + 1;
	pattern = otx2_flow_skip_void_and_any_items(pattern);
	while (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		nr_vlans++;
		rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);
		if (rc != 0)
			return rc;
		last_pattern = pattern;
		pattern++;
		pattern = otx2_flow_skip_void_and_any_items(pattern);
	}
	otx2_npc_dbg("Nr_vlans = %d", nr_vlans);
	switch (nr_vlans) {
	case 0:
		break;
	case 1:
		lflags = NPC_F_TU_ETHER_CTAG;
		break;
	case 2:
		lflags = NPC_F_TU_ETHER_STAG_CTAG;
		break;
	default:
		rte_flow_error_set(pst->error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   last_pattern,
				   "more than 2 vlans with tunneled Ethernet "
				   "not supported");
		return -rte_errno;
	}

	info.def_mask = &rte_flow_item_eth_mask;
	info.hw_mask = &hw_mask;
	info.len = sizeof(struct rte_flow_item_eth);
	info.hw_hdr_len = 0;
	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	info.spec = NULL;
	info.mask = NULL;

	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

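	/* Point pst->pattern at the last item consumed (the final VLAN,
	 * if any) so that parsing resumes from there.
	 */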
	pst->pattern = last_pattern;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
}

int
otx2_flow_parse_le(struct otx2_parse_state *pst)
{
	/*
	 * We are positioned at UDP. Scan ahead and look for
	 * UDP encapsulated tunnel protocols. If available,
	 * parse them. In that case handle this:
	 *	- RTE spec assumes we point to tunnel header.
	 *	- NPC parser provides offset from UDP header.
	 */

	/*
	 * Note: Add support to GENEVE, VXLAN_GPE when we
	 * upgrade DPDK
	 *
	 * Note: Better to split flags into two nibbles:
	 *	- Higher nibble can have flags
	 *	- Lower nibble to further enumerate protocols
	 *	  and have flags based extraction
	 */
	const struct rte_flow_item *pattern = pst->pattern;
	struct otx2_flow_item_info info;
	int lid, lt, lflags;
	char hw_mask[64];
	int rc;

	if (pst->tunnel)
		return 0;

	if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
		return otx2_flow_parse_mpls(pst, NPC_LID_LE);

	info.spec = NULL;
	info.mask = NULL;
	info.hw_mask = NULL;
	info.def_mask = NULL;
	info.len = 0;
	info.hw_hdr_len = 0;
	lid = NPC_LID_LE;
	lflags = 0;

	/* Ensure we are not matching anything in UDP */
	rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);
	if (rc)
		return rc;

	info.hw_mask = &hw_mask;
	pattern = otx2_flow_skip_void_and_any_items(pattern);
	otx2_npc_dbg("Pattern->type = %d", pattern->type);
	switch (pattern->type) {
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		lflags = NPC_F_UDP_VXLAN;
		info.def_mask = &rte_flow_item_vxlan_mask;
		info.len = sizeof(struct rte_flow_item_vxlan);
		lt = NPC_LT_LE_VXLAN;
		break;
	case RTE_FLOW_ITEM_TYPE_ESP:
		lt = NPC_LT_LE_ESP;
		info.def_mask = &rte_flow_item_esp_mask;
		info.len = sizeof(struct rte_flow_item_esp);
		break;
	case RTE_FLOW_ITEM_TYPE_GTPC:
		lflags = NPC_F_UDP_GTP_GTPC;
		info.def_mask = &rte_flow_item_gtp_mask;
		info.len = sizeof(struct rte_flow_item_gtp);
		lt = NPC_LT_LE_GTPC;
		break;
	case RTE_FLOW_ITEM_TYPE_GTPU:
		lflags = NPC_F_UDP_GTP_GTPU_G_PDU;
		info.def_mask = &rte_flow_item_gtp_mask;
		info.len = sizeof(struct rte_flow_item_gtp);
		lt = NPC_LT_LE_GTPU;
		break;
	case RTE_FLOW_ITEM_TYPE_GENEVE:
		lflags = NPC_F_UDP_GENEVE;
		info.def_mask = &rte_flow_item_geneve_mask;
		info.len = sizeof(struct rte_flow_item_geneve);
		lt = NPC_LT_LE_GENEVE;
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		lflags = NPC_F_UDP_VXLANGPE;
		info.def_mask = &rte_flow_item_vxlan_gpe_mask;
		info.len = sizeof(struct rte_flow_item_vxlan_gpe);
		lt = NPC_LT_LE_VXLANGPE;
		break;
	default:
		return 0;
	}

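	/* Items that follow the UDP-encapsulated tunnel header are
	 * parsed as tunneled (inner) headers.
	 */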
	pst->tunnel = 1;

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
}

static int
flow_parse_mpls_label_stack(struct otx2_parse_state *pst, int *flag)
{
	int nr_labels = 0;
	const struct rte_flow_item *pattern = pst->pattern;
	struct otx2_flow_item_info info;
	int rc;
	uint8_t flag_list[] = {0, NPC_F_MPLS_2_LABELS,
		NPC_F_MPLS_3_LABELS, NPC_F_MPLS_4_LABELS};

	/*
	 * pst->pattern points to first MPLS label. We only check
	 * that subsequent labels do not have anything to match.
	 */
	info.def_mask = &rte_flow_item_mpls_mask;
	info.hw_mask = NULL;
	info.len = sizeof(struct rte_flow_item_mpls);
	info.spec = NULL;
	info.mask = NULL;
	info.hw_hdr_len = 0;

	while (pattern->type == RTE_FLOW_ITEM_TYPE_MPLS) {
		nr_labels++;

		/* Basic validation of 2nd/3rd/4th mpls item */
		if (nr_labels > 1) {
			rc = otx2_flow_parse_item_basic(pattern, &info,
							pst->error);
			if (rc != 0)
				return rc;
		}
		pst->last_pattern = pattern;
		pattern++;
		pattern = otx2_flow_skip_void_and_any_items(pattern);
	}

	if (nr_labels > 4) {
		rte_flow_error_set(pst->error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   pst->last_pattern,
				   "more than 4 mpls labels not supported");
		return -rte_errno;
	}

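	/* flag_list[] maps the label count (1..4) to lflags; a single
	 * label needs no flag.
	 */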
	*flag = flag_list[nr_labels - 1];
	return 0;
}

int
otx2_flow_parse_mpls(struct otx2_parse_state *pst, int lid)
{
	/* Find number of MPLS labels */
	struct rte_flow_item_mpls hw_mask;
	struct otx2_flow_item_info info;
	int lt, lflags;
	int rc;

	lflags = 0;

	if (lid == NPC_LID_LC)
		lt = NPC_LT_LC_MPLS;
	else if (lid == NPC_LID_LD)
		lt = NPC_LT_LD_TU_MPLS_IN_IP;
	else
		lt = NPC_LT_LE_TU_MPLS_IN_UDP;

	/* Prepare for parsing the first item */
	info.def_mask = &rte_flow_item_mpls_mask;
	info.hw_mask = &hw_mask;
	info.len = sizeof(struct rte_flow_item_mpls);
	info.spec = NULL;
	info.mask = NULL;
	info.hw_hdr_len = 0;

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	/*
	 * Parse for more labels.
	 * This sets lflags and pst->last_pattern correctly.
	 */
	rc = flow_parse_mpls_label_stack(pst, &lflags);
	if (rc != 0)
		return rc;

	pst->tunnel = 1;
	pst->pattern = pst->last_pattern;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
}

/*
 * ICMP, ICMP6, UDP, TCP, SCTP, VXLAN, GRE, NVGRE,
 * GTP, GTPC, GTPU, ESP
 *
 * Note: UDP tunnel protocols are identified by flags.
 *       LPTR for these protocol still points to UDP
 *       header. Need flag based extraction to support
 *       this.
 */
int
otx2_flow_parse_ld(struct otx2_parse_state *pst)
{
	char hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
	uint32_t gre_key_mask = 0xffffffff;
	struct otx2_flow_item_info info;
	int lid, lt, lflags;
	int rc;

	if (pst->tunnel) {
		/* We have already parsed MPLS or IPv4/v6 followed
		 * by MPLS or IPv4/v6. Subsequent TCP/UDP etc
		 * would be parsed as tunneled versions. Skip
		 * this layer, except for tunneled MPLS. If LC is
		 * MPLS, we have anyway skipped all stacked MPLS
		 * labels.
		 */
		if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
			return otx2_flow_parse_mpls(pst, NPC_LID_LD);
		return 0;
	}
	info.hw_mask = &hw_mask;
	info.spec = NULL;
	info.mask = NULL;
	info.def_mask = NULL;
	info.len = 0;
	info.hw_hdr_len = 0;

	lid = NPC_LID_LD;
	lflags = 0;

	otx2_npc_dbg("Pst->pattern->type = %d", pst->pattern->type);
	switch (pst->pattern->type) {
	case RTE_FLOW_ITEM_TYPE_ICMP:
		if (pst->lt[NPC_LID_LC] == NPC_LT_LC_IP6)
			lt = NPC_LT_LD_ICMP6;
		else
			lt = NPC_LT_LD_ICMP;
		info.def_mask = &rte_flow_item_icmp_mask;
		info.len = sizeof(struct rte_flow_item_icmp);
		break;
	case RTE_FLOW_ITEM_TYPE_UDP:
		lt = NPC_LT_LD_UDP;
		info.def_mask = &rte_flow_item_udp_mask;
		info.len = sizeof(struct rte_flow_item_udp);
		break;
	case RTE_FLOW_ITEM_TYPE_TCP:
		lt = NPC_LT_LD_TCP;
		info.def_mask = &rte_flow_item_tcp_mask;
		info.len = sizeof(struct rte_flow_item_tcp);
		break;
	case RTE_FLOW_ITEM_TYPE_SCTP:
		lt = NPC_LT_LD_SCTP;
		info.def_mask = &rte_flow_item_sctp_mask;
		info.len = sizeof(struct rte_flow_item_sctp);
		break;
	case RTE_FLOW_ITEM_TYPE_GRE:
		lt = NPC_LT_LD_GRE;
		info.def_mask = &rte_flow_item_gre_mask;
		info.len = sizeof(struct rte_flow_item_gre);
		break;
	case RTE_FLOW_ITEM_TYPE_GRE_KEY:
		lt = NPC_LT_LD_GRE;
		info.def_mask = &gre_key_mask;
		info.len = sizeof(gre_key_mask);
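		/* The 32-bit key follows the 4-byte base GRE header */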
		info.hw_hdr_len = 4;
		break;
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		lt = NPC_LT_LD_NVGRE;
		lflags = NPC_F_GRE_NVGRE;
		info.def_mask = &rte_flow_item_nvgre_mask;
		info.len = sizeof(struct rte_flow_item_nvgre);
		/* Further IP/Ethernet are parsed as tunneled */
		pst->tunnel = 1;
		break;
	default:
		return 0;
	}

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
}

static inline void
flow_check_lc_ip_tunnel(struct otx2_parse_state *pst)
{
	const struct rte_flow_item *pattern = pst->pattern + 1;

	pattern = otx2_flow_skip_void_and_any_items(pattern);
	if (pattern->type == RTE_FLOW_ITEM_TYPE_MPLS ||
	    pattern->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    pattern->type == RTE_FLOW_ITEM_TYPE_IPV6)
		pst->tunnel = 1;
}

/* Outer IPv4, Outer IPv6, MPLS, ARP */
int
otx2_flow_parse_lc(struct otx2_parse_state *pst)
{
	uint8_t hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
	struct otx2_flow_item_info info;
	int lid, lt;
	int rc;

	if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
		return otx2_flow_parse_mpls(pst, NPC_LID_LC);

	info.hw_mask = &hw_mask;
	info.spec = NULL;
	info.mask = NULL;
	info.hw_hdr_len = 0;
	lid = NPC_LID_LC;

	switch (pst->pattern->type) {
	case RTE_FLOW_ITEM_TYPE_IPV4:
		lt = NPC_LT_LC_IP;
		info.def_mask = &rte_flow_item_ipv4_mask;
		info.len = sizeof(struct rte_flow_item_ipv4);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		lid = NPC_LID_LC;
		lt = NPC_LT_LC_IP6;
		info.def_mask = &rte_flow_item_ipv6_mask;
		info.len = sizeof(struct rte_flow_item_ipv6);
		break;
	case RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4:
		lt = NPC_LT_LC_ARP;
		info.def_mask = &rte_flow_item_arp_eth_ipv4_mask;
		info.len = sizeof(struct rte_flow_item_arp_eth_ipv4);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6_EXT:
		lid = NPC_LID_LC;
		lt = NPC_LT_LC_IP6_EXT;
		info.def_mask = &rte_flow_item_ipv6_ext_mask;
		info.len = sizeof(struct rte_flow_item_ipv6_ext);
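		/* The extension header follows the 40-byte fixed IPv6 header */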
		info.hw_hdr_len = 40;
		break;
	default:
		/* No match at this layer */
		return 0;
	}

	/* Check whether this IP header tunnels MPLS or IPv4/IPv6 */
	flow_check_lc_ip_tunnel(pst);

	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
}

/* VLAN, ETAG */
int
otx2_flow_parse_lb(struct otx2_parse_state *pst)
{
	const struct rte_flow_item *pattern = pst->pattern;
	const struct rte_flow_item *last_pattern;
	char hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
	struct otx2_flow_item_info info;
	int lid, lt, lflags;
	int nr_vlans = 0;
	int rc;

	info.spec = NULL;
	info.mask = NULL;
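	/* The TPID bytes precede the data matched at this layer */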
	info.hw_hdr_len = NPC_TPID_LENGTH;

	lid = NPC_LID_LB;
	lflags = 0;
	last_pattern = pattern;

	if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		/* An RTE VLAN item is either 802.1q or 802.1ad and
		 * maps to CTAG or STAG; decide based on the number
		 * of VLANs present. Matching is supported on the
		 * first tag only.
		 */
		info.def_mask = &rte_flow_item_vlan_mask;
		info.hw_mask = NULL;
		info.len = sizeof(struct rte_flow_item_vlan);

		pattern = pst->pattern;
		while (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
			nr_vlans++;

			/* Basic validation of 2nd/3rd vlan item */
			if (nr_vlans > 1) {
				otx2_npc_dbg("Vlans = %d", nr_vlans);
				rc = otx2_flow_parse_item_basic(pattern, &info,
								pst->error);
				if (rc != 0)
					return rc;
			}
			last_pattern = pattern;
			pattern++;
			pattern = otx2_flow_skip_void_and_any_items(pattern);
		}

		switch (nr_vlans) {
		case 1:
			lt = NPC_LT_LB_CTAG;
			break;
		case 2:
			lt = NPC_LT_LB_STAG_QINQ;
			lflags = NPC_F_STAG_CTAG;
			break;
		case 3:
			lt = NPC_LT_LB_STAG_QINQ;
			lflags = NPC_F_STAG_STAG_CTAG;
			break;
		default:
			rte_flow_error_set(pst->error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   last_pattern,
					   "more than 3 vlans not supported");
			return -rte_errno;
		}
	} else if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_E_TAG) {
		/* We can support an E-TAG and detect a subsequent CTAG,
		 * but no data matching is available on the CTAG.
		 */
		lt = NPC_LT_LB_ETAG;
		lflags = 0;

		last_pattern = pst->pattern;
		pattern = otx2_flow_skip_void_and_any_items(pst->pattern + 1);
		if (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
			info.def_mask = &rte_flow_item_vlan_mask;
			/* set supported mask to NULL for vlan tag */
			info.hw_mask = NULL;
			info.len = sizeof(struct rte_flow_item_vlan);
			rc = otx2_flow_parse_item_basic(pattern, &info,
							pst->error);
			if (rc != 0)
				return rc;

			lflags = NPC_F_ETAG_CTAG;
			last_pattern = pattern;
		}

		info.def_mask = &rte_flow_item_e_tag_mask;
		info.len = sizeof(struct rte_flow_item_e_tag);
	} else {
		return 0;
	}

	info.hw_mask = &hw_mask;
	info.spec = NULL;
	info.mask = NULL;
	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);

	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc != 0)
		return rc;

	/* Point pattern to last item consumed */
	pst->pattern = last_pattern;
	return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
}

int
otx2_flow_parse_la(struct otx2_parse_state *pst)
{
	struct rte_flow_item_eth hw_mask;
	struct otx2_flow_item_info info;
	int lid, lt;
	int rc;

	/* Identify the pattern type into lid, lt */
	if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_ETH)
		return 0;

	lid = NPC_LID_LA;
	lt = NPC_LT_LA_ETHER;
	info.hw_hdr_len = 0;

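	/* On the TX interface, the NIX internal header (IH) precedes the
	 * Ethernet header; a HIGIG2 switch header adds to that length.
	 */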
	if (pst->flow->nix_intf == NIX_INTF_TX) {
		lt = NPC_LT_LA_IH_NIX_ETHER;
		info.hw_hdr_len = NPC_IH_LENGTH;
		if (pst->npc->switch_header_type == OTX2_PRIV_FLAGS_HIGIG) {
			lt = NPC_LT_LA_IH_NIX_HIGIG2_ETHER;
			info.hw_hdr_len += NPC_HIGIG2_LENGTH;
		}
	} else {
		if (pst->npc->switch_header_type == OTX2_PRIV_FLAGS_HIGIG) {
			lt = NPC_LT_LA_HIGIG2_ETHER;
			info.hw_hdr_len = NPC_HIGIG2_LENGTH;
		}
	}

	/* Prepare for parsing the item */
	info.def_mask = &rte_flow_item_eth_mask;
	info.hw_mask = &hw_mask;
	info.len = sizeof(struct rte_flow_item_eth);
	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	info.spec = NULL;
	info.mask = NULL;

	/* Basic validation of item parameters */
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc)
		return rc;

	/* Update pst if not validate only? clash check? */
	return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
}

int
otx2_flow_parse_higig2_hdr(struct otx2_parse_state *pst)
{
	struct rte_flow_item_higig2_hdr hw_mask;
	struct otx2_flow_item_info info;
	int lid, lt;
	int rc;

	/* Identify the pattern type into lid, lt */
	if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_HIGIG2)
		return 0;

	lid = NPC_LID_LA;
	lt = NPC_LT_LA_HIGIG2_ETHER;
	info.hw_hdr_len = 0;

	if (pst->flow->nix_intf == NIX_INTF_TX) {
		lt = NPC_LT_LA_IH_NIX_HIGIG2_ETHER;
		info.hw_hdr_len = NPC_IH_LENGTH;
	}

	/* Prepare for parsing the item */
	info.def_mask = &rte_flow_item_higig2_hdr_mask;
	info.hw_mask = &hw_mask;
	info.len = sizeof(struct rte_flow_item_higig2_hdr);
	otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
	info.spec = NULL;
	info.mask = NULL;

	/* Basic validation of item parameters */
	rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
	if (rc)
		return rc;

	/* Update pst if not validate only? clash check? */
	return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
}

static int
parse_rss_action(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_action *act,
		 struct rte_flow_error *error)
{
	struct otx2_eth_dev *hw = dev->data->dev_private;
	struct otx2_rss_info *rss_info = &hw->rss_info;
	const struct rte_flow_action_rss *rss;
	uint32_t i;

	rss = (const struct rte_flow_action_rss *)act->conf;

	/* Not supported */
	if (attr->egress) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  attr, "No support of RSS in egress");
	}

	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  act, "multi-queue mode is disabled");

	/* Parse RSS related parameters from configuration */
	if (!rss || !rss->queue_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  act, "no valid queues");

	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, act,
					  "non-default RSS hash functions"
					  " are not supported");

	if (rss->key_len && rss->key_len > RTE_DIM(rss_info->key))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, act,
					  "RSS hash key too large");

	if (rss->queue_num > rss_info->rss_size)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "too many queues for RSS context");

	for (i = 0; i < rss->queue_num; i++) {
		if (rss->queue[i] >= dev->data->nb_rx_queues)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  act,
						  "queue id > max number"
						  " of queues");
	}

	return 0;
}

int
otx2_flow_parse_actions(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error,
			struct rte_flow *flow)
{
	struct otx2_eth_dev *hw = dev->data->dev_private;
	struct otx2_npc_flow_info *npc = &hw->npc_flow;
	const struct rte_flow_action_count *act_count;
	const struct rte_flow_action_mark *act_mark;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *vf_act;
	bool vlan_insert_action = false;
	const char *errmsg = NULL;
	int sel_act, req_act = 0;
	uint16_t pf_func, vf_id;
	int errcode = 0;
	int mark = 0;
	int rq = 0;

	/* Initialize actions */
	flow->ctr_id = NPC_COUNTER_NONE;
	pf_func = otx2_pfvf_func(hw->pf, hw->vf);

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		otx2_npc_dbg("Action type = %d", actions->type);

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			act_mark =
			    (const struct rte_flow_action_mark *)actions->conf;

			/* We have only 16 bits. Use highest val for flag */
			if (act_mark->id > (OTX2_FLOW_FLAG_VAL - 2)) {
				errmsg = "mark value must be < 0xfffe";
				errcode = ENOTSUP;
				goto err_exit;
			}
			mark = act_mark->id + 1;
			req_act |= OTX2_FLOW_ACT_MARK;
			rte_atomic32_inc(&npc->mark_actions);
			break;

		case RTE_FLOW_ACTION_TYPE_FLAG:
			mark = OTX2_FLOW_FLAG_VAL;
			req_act |= OTX2_FLOW_ACT_FLAG;
			rte_atomic32_inc(&npc->mark_actions);
			break;

		case RTE_FLOW_ACTION_TYPE_COUNT:
			act_count =
				(const struct rte_flow_action_count *)
				actions->conf;

			if (act_count->shared == 1) {
				errmsg = "Shared Counters not supported";
				errcode = ENOTSUP;
				goto err_exit;
			}
			/* Indicates that a counter is needed */
			flow->ctr_id = 1;
			req_act |= OTX2_FLOW_ACT_COUNT;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			req_act |= OTX2_FLOW_ACT_DROP;
			break;

		case RTE_FLOW_ACTION_TYPE_PF:
			req_act |= OTX2_FLOW_ACT_PF;
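			/* Clear the function bits of pf_func, keeping only the PF */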
			pf_func &= (0xfc00);
			break;

		case RTE_FLOW_ACTION_TYPE_VF:
			vf_act = (const struct rte_flow_action_vf *)
				actions->conf;
			req_act |= OTX2_FLOW_ACT_VF;
			if (vf_act->original == 0) {
				vf_id = vf_act->id & RVU_PFVF_FUNC_MASK;
				if (vf_id >= hw->maxvf) {
					errmsg = "invalid vf specified";
					errcode = EINVAL;
					goto err_exit;
				}
				pf_func &= (0xfc00);
				pf_func = (pf_func | (vf_id + 1));
			}
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			/* Applicable only to ingress flow */
			act_q = (const struct rte_flow_action_queue *)
				actions->conf;
			rq = act_q->index;
			if (rq >= dev->data->nb_rx_queues) {
				errmsg = "invalid queue index";
				errcode = EINVAL;
				goto err_exit;
			}
			req_act |= OTX2_FLOW_ACT_QUEUE;
			break;

		case RTE_FLOW_ACTION_TYPE_RSS:
			errcode = parse_rss_action(dev, attr, actions, error);
			if (errcode)
				return -rte_errno;

			req_act |= OTX2_FLOW_ACT_RSS;
			break;

		case RTE_FLOW_ACTION_TYPE_SECURITY:
			/* Assumes user has already configured security
			 * session for this flow. Associated conf is
			 * opaque. When RTE security is implemented for otx2,
			 * we need to verify that for specified security
			 * session:
			 *  action_type ==
			 *    RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL &&
			 *  session_protocol ==
			 *    RTE_SECURITY_PROTOCOL_IPSEC
			 *
			 * RSS is not supported with inline ipsec. Get the
			 * rq from associated conf, or make
			 * RTE_FLOW_ACTION_TYPE_QUEUE compulsory with this
			 * action.
			 * Currently, rq = 0 is assumed.
			 */
			req_act |= OTX2_FLOW_ACT_SEC;
			rq = 0;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
			req_act |= OTX2_FLOW_ACT_VLAN_INSERT;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
			req_act |= OTX2_FLOW_ACT_VLAN_STRIP;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
			req_act |= OTX2_FLOW_ACT_VLAN_ETHTYPE_INSERT;
			break;
		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
			req_act |= OTX2_FLOW_ACT_VLAN_PCP_INSERT;
			break;
		default:
			errmsg = "Unsupported action specified";
			errcode = ENOTSUP;
			goto err_exit;
		}
	}

	if (req_act &
	    (OTX2_FLOW_ACT_VLAN_INSERT | OTX2_FLOW_ACT_VLAN_ETHTYPE_INSERT |
	     OTX2_FLOW_ACT_VLAN_PCP_INSERT))
		vlan_insert_action = true;

	if ((req_act &
	     (OTX2_FLOW_ACT_VLAN_INSERT | OTX2_FLOW_ACT_VLAN_ETHTYPE_INSERT |
	      OTX2_FLOW_ACT_VLAN_PCP_INSERT)) ==
	    OTX2_FLOW_ACT_VLAN_PCP_INSERT) {
		errmsg = "PCP insert action cannot be supported alone";
		errcode = ENOTSUP;
		goto err_exit;
	}

	/* VLAN strip and insert actions are not supported together */
	if (vlan_insert_action && (req_act & OTX2_FLOW_ACT_VLAN_STRIP)) {
		errmsg = "Both VLAN insert and strip actions not supported"
			" together";
		errcode = ENOTSUP;
		goto err_exit;
	}

	/* Check if actions specified are compatible */
	if (attr->egress) {
		if (req_act & OTX2_FLOW_ACT_VLAN_STRIP) {
			errmsg = "VLAN pop action is not supported on Egress";
			errcode = ENOTSUP;
			goto err_exit;
		}

		if (req_act & OTX2_FLOW_ACT_DROP) {
			flow->npc_action = NIX_TX_ACTIONOP_DROP;
		} else if ((req_act & OTX2_FLOW_ACT_COUNT) ||
			   vlan_insert_action) {
			flow->npc_action = NIX_TX_ACTIONOP_UCAST_DEFAULT;
		} else {
			errmsg = "Unsupported action for egress";
			errcode = EINVAL;
			goto err_exit;
		}
		goto set_pf_func;
	}

	/* We have already verified the attr, this is ingress.
	 * - Exactly one terminating action is supported
	 * - Exactly one of MARK or FLAG is supported
	 * - If terminating action is DROP, only count is valid.
	 */
	sel_act = req_act & OTX2_FLOW_ACT_TERM;
	if ((sel_act & (sel_act - 1)) != 0) {
		errmsg = "Only one terminating action supported";
		errcode = EINVAL;
		goto err_exit;
	}

	if (req_act & OTX2_FLOW_ACT_DROP) {
		sel_act = req_act & ~OTX2_FLOW_ACT_COUNT;
		if ((sel_act & (sel_act - 1)) != 0) {
			errmsg = "Only COUNT action is supported "
				"with DROP ingress action";
			errcode = ENOTSUP;
			goto err_exit;
		}
	}

	if ((req_act & (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK))
	    == (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK)) {
		errmsg = "Only one of FLAG or MARK action is supported";
		errcode = ENOTSUP;
		goto err_exit;
	}

	if (vlan_insert_action) {
		errmsg = "VLAN push/Insert action is not supported on Ingress";
		errcode = ENOTSUP;
		goto err_exit;
	}

	if (req_act & OTX2_FLOW_ACT_VLAN_STRIP)
		npc->vtag_actions++;

	/* Only VLAN action is provided */
	if (req_act == OTX2_FLOW_ACT_VLAN_STRIP)
		flow->npc_action = NIX_RX_ACTIONOP_UCAST;
	/* Set NIX_RX_ACTIONOP */
	else if (req_act & (OTX2_FLOW_ACT_PF | OTX2_FLOW_ACT_VF)) {
		flow->npc_action = NIX_RX_ACTIONOP_UCAST;
		if (req_act & OTX2_FLOW_ACT_QUEUE)
			flow->npc_action |= (uint64_t)rq << 20;
	} else if (req_act & OTX2_FLOW_ACT_DROP) {
		flow->npc_action = NIX_RX_ACTIONOP_DROP;
	} else if (req_act & OTX2_FLOW_ACT_QUEUE) {
		flow->npc_action = NIX_RX_ACTIONOP_UCAST;
		flow->npc_action |= (uint64_t)rq << 20;
	} else if (req_act & OTX2_FLOW_ACT_RSS) {
		/* When a user adds an RSS rule, we first install the
		 * rule in the MCAM and update the action only once the
		 * FLOW_KEY_ALG index is known. Until the action is
		 * updated with the flow_key_alg index, set it to drop.
		 */
		if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
			flow->npc_action = NIX_RX_ACTIONOP_DROP;
		else
			flow->npc_action = NIX_RX_ACTIONOP_UCAST;
	} else if (req_act & OTX2_FLOW_ACT_SEC) {
		flow->npc_action = NIX_RX_ACTIONOP_UCAST_IPSEC;
		flow->npc_action |= (uint64_t)rq << 20;
	} else if (req_act & (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK)) {
		flow->npc_action = NIX_RX_ACTIONOP_UCAST;
	} else if (req_act & OTX2_FLOW_ACT_COUNT) {
		/* Keep OTX2_FLOW_ACT_COUNT always at the end.
		 * This is the default action when the user specifies
		 * only the COUNT action.
		 */
		flow->npc_action = NIX_RX_ACTIONOP_UCAST;
	} else {
		/* Should never reach here */
		errmsg = "Invalid action specified";
		errcode = EINVAL;
		goto err_exit;
	}

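	/* MARK/FLAG value (at most 16 bits) occupies bits 40-55 of the
	 * RX action.
	 */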
	if (mark)
		flow->npc_action |= (uint64_t)mark << 40;

	if (rte_atomic32_read(&npc->mark_actions) == 1) {
		hw->rx_offload_flags |=
			NIX_RX_OFFLOAD_MARK_UPDATE_F;
		otx2_eth_set_rx_function(dev);
	}

	if (npc->vtag_actions == 1) {
		hw->rx_offload_flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
		otx2_eth_set_rx_function(dev);
	}

set_pf_func:
	/* Ideally AF must ensure that correct pf_func is set */
	flow->npc_action |= (uint64_t)pf_func << 4;

	return 0;

err_exit:
	rte_flow_error_set(error, errcode,
			   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
			   errmsg);
	return -rte_errno;
}