xref: /f-stack/dpdk/drivers/net/bnxt/bnxt_flow.c (revision 2d9fd380)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2018 Broadcom
3  * All rights reserved.
4  */
5 
6 #include <sys/queue.h>
7 
8 #include <rte_log.h>
9 #include <rte_malloc.h>
10 #include <rte_flow.h>
11 #include <rte_flow_driver.h>
12 #include <rte_tailq.h>
13 #include <rte_alarm.h>
14 #include <rte_cycles.h>
15 
16 #include "bnxt.h"
17 #include "bnxt_filter.h"
18 #include "bnxt_hwrm.h"
19 #include "bnxt_ring.h"
20 #include "bnxt_rxq.h"
21 #include "bnxt_rxr.h"
22 #include "bnxt_vnic.h"
23 #include "hsi_struct_def_dpdk.h"
24 
25 static int
26 bnxt_flow_args_validate(const struct rte_flow_attr *attr,
27 			const struct rte_flow_item pattern[],
28 			const struct rte_flow_action actions[],
29 			struct rte_flow_error *error)
30 {
31 	if (!pattern) {
32 		rte_flow_error_set(error,
33 				   EINVAL,
34 				   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
35 				   NULL,
36 				   "NULL pattern.");
37 		return -rte_errno;
38 	}
39 
40 	if (!actions) {
41 		rte_flow_error_set(error,
42 				   EINVAL,
43 				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
44 				   NULL,
45 				   "NULL action.");
46 		return -rte_errno;
47 	}
48 
49 	if (!attr) {
50 		rte_flow_error_set(error,
51 				   EINVAL,
52 				   RTE_FLOW_ERROR_TYPE_ATTR,
53 				   NULL,
54 				   "NULL attribute.");
55 		return -rte_errno;
56 	}
57 
58 	return 0;
59 }
60 
61 static const struct rte_flow_item *
62 bnxt_flow_non_void_item(const struct rte_flow_item *cur)
63 {
64 	while (1) {
65 		if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
66 			return cur;
67 		cur++;
68 	}
69 }
70 
71 static const struct rte_flow_action *
72 bnxt_flow_non_void_action(const struct rte_flow_action *cur)
73 {
74 	while (1) {
75 		if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
76 			return cur;
77 		cur++;
78 	}
79 }
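/*
 * The two helpers above skip RTE_FLOW_ITEM_TYPE_VOID / RTE_FLOW_ACTION_TYPE_VOID
 * entries and rely on the caller terminating the array with an END entry.
 * A minimal sketch of a pattern they would walk (the array below is
 * illustrative only, not taken from this driver):
 *
 *   const struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_VOID },  // skipped by bnxt_flow_non_void_item()
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH },   // first item actually returned
 *       { .type = RTE_FLOW_ITEM_TYPE_END },   // terminates the walk in the callers
 *   };
 */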
80 
81 static int
82 bnxt_filter_type_check(const struct rte_flow_item pattern[],
83 		       struct rte_flow_error *error)
84 {
85 	const struct rte_flow_item *item =
86 		bnxt_flow_non_void_item(pattern);
87 	int use_ntuple = 1;
88 	bool has_vlan = 0;
89 
90 	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
91 		switch (item->type) {
92 		case RTE_FLOW_ITEM_TYPE_ANY:
93 		case RTE_FLOW_ITEM_TYPE_ETH:
94 			use_ntuple = 0;
95 			break;
96 		case RTE_FLOW_ITEM_TYPE_VLAN:
97 			use_ntuple = 0;
98 			has_vlan = 1;
99 			break;
100 		case RTE_FLOW_ITEM_TYPE_IPV4:
101 		case RTE_FLOW_ITEM_TYPE_IPV6:
102 		case RTE_FLOW_ITEM_TYPE_TCP:
103 		case RTE_FLOW_ITEM_TYPE_UDP:
104 			/* FALLTHROUGH */
105 			/* need ntuple match, reset exact match */
106 			use_ntuple |= 1;
107 			break;
108 		default:
109 			PMD_DRV_LOG(DEBUG, "Unknown Flow type\n");
110 			use_ntuple |= 0;
111 		}
112 		item++;
113 	}
114 
115 	if (has_vlan && use_ntuple) {
116 		PMD_DRV_LOG(ERR,
117 			    "VLAN flow cannot use NTUPLE filter\n");
118 		rte_flow_error_set(error, EINVAL,
119 				   RTE_FLOW_ERROR_TYPE_ITEM,
120 				   item,
121 				   "Cannot use VLAN with NTUPLE");
122 		return -rte_errno;
123 	}
124 
125 	return use_ntuple;
126 }
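/*
 * A rough summary of how the classification above behaves (illustrative
 * combinations only; the final HWRM filter type also depends on the masks
 * and actions parsed later in this file):
 *
 *   ETH / ANY items only          -> use_ntuple = 0 -> HWRM_CFA_L2_FILTER
 *   ETH + VLAN                    -> use_ntuple = 0 -> L2 filter with OVLAN match
 *   IPV4/IPV6 and/or TCP/UDP      -> use_ntuple = 1 -> HWRM_CFA_NTUPLE_FILTER
 *   VLAN combined with L3/L4      -> rejected above with -EINVAL
 */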
127 
128 static int
129 bnxt_validate_and_parse_flow_type(struct bnxt *bp,
130 				  const struct rte_flow_attr *attr,
131 				  const struct rte_flow_item pattern[],
132 				  struct rte_flow_error *error,
133 				  struct bnxt_filter_info *filter)
134 {
135 	const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
136 	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
137 	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
138 	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
139 	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
140 	const struct rte_flow_item_udp *udp_spec, *udp_mask;
141 	const struct rte_flow_item_eth *eth_spec, *eth_mask;
142 	const struct rte_ether_addr *dst, *src;
143 	const struct rte_flow_item_nvgre *nvgre_spec;
144 	const struct rte_flow_item_nvgre *nvgre_mask;
145 	const struct rte_flow_item_gre *gre_spec;
146 	const struct rte_flow_item_gre *gre_mask;
147 	const struct rte_flow_item_vxlan *vxlan_spec;
148 	const struct rte_flow_item_vxlan *vxlan_mask;
149 	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
150 	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
151 	const struct rte_flow_item_vf *vf_spec;
152 	uint32_t tenant_id_be = 0, valid_flags = 0;
153 	bool vni_masked = 0;
154 	bool tni_masked = 0;
155 	uint32_t en_ethertype;
156 	uint8_t inner = 0;
157 	uint32_t vf = 0;
158 	uint32_t en = 0;
159 	int use_ntuple;
160 	int dflt_vnic;
161 
162 	use_ntuple = bnxt_filter_type_check(pattern, error);
163 	if (use_ntuple < 0)
164 		return use_ntuple;
165 	PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
166 
167 	filter->filter_type = use_ntuple ?
168 		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_L2_FILTER;
169 	en_ethertype = use_ntuple ?
170 		NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
171 		EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
172 
173 	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
174 		if (item->last) {
175 			/* last or range is NOT supported as match criteria */
176 			rte_flow_error_set(error, EINVAL,
177 					   RTE_FLOW_ERROR_TYPE_ITEM,
178 					   item,
179 					   "No support for range");
180 			return -rte_errno;
181 		}
182 
183 		switch (item->type) {
184 		case RTE_FLOW_ITEM_TYPE_ANY:
185 			inner =
186 			((const struct rte_flow_item_any *)item->spec)->num > 3;
187 			if (inner)
188 				PMD_DRV_LOG(DEBUG, "Parse inner header\n");
189 			break;
190 		case RTE_FLOW_ITEM_TYPE_ETH:
191 			if (!item->spec || !item->mask)
192 				break;
193 
194 			eth_spec = item->spec;
195 			eth_mask = item->mask;
196 
197 			/* Source MAC address mask cannot be partially set.
198 			 * Should be All 0's or all 1's.
199 			 * Destination MAC address mask must not be partially
200 			 * set. Should be all 1's or all 0's.
201 			 */
202 			if ((!rte_is_zero_ether_addr(&eth_mask->src) &&
203 			     !rte_is_broadcast_ether_addr(&eth_mask->src)) ||
204 			    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
205 			     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
206 				rte_flow_error_set(error,
207 						   EINVAL,
208 						   RTE_FLOW_ERROR_TYPE_ITEM,
209 						   item,
210 						   "MAC_addr mask not valid");
211 				return -rte_errno;
212 			}
213 
214 			/* Mask is not allowed. Only exact matches are */
215 			if (eth_mask->type &&
216 			    eth_mask->type != RTE_BE16(0xffff)) {
217 				rte_flow_error_set(error, EINVAL,
218 						   RTE_FLOW_ERROR_TYPE_ITEM,
219 						   item,
220 						   "ethertype mask not valid");
221 				return -rte_errno;
222 			}
223 
224 			if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
225 				dst = &eth_spec->dst;
226 				if (!rte_is_valid_assigned_ether_addr(dst)) {
227 					rte_flow_error_set(error,
228 							   EINVAL,
229 							   RTE_FLOW_ERROR_TYPE_ITEM,
230 							   item,
231 							   "DMAC is invalid");
232 					PMD_DRV_LOG(ERR,
233 						    "DMAC is invalid!\n");
234 					return -rte_errno;
235 				}
236 				rte_memcpy(filter->dst_macaddr,
237 					   &eth_spec->dst, RTE_ETHER_ADDR_LEN);
238 				en |= use_ntuple ?
239 					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
240 					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
241 				valid_flags |= inner ?
242 					BNXT_FLOW_L2_INNER_DST_VALID_FLAG :
243 					BNXT_FLOW_L2_DST_VALID_FLAG;
244 				filter->priority = attr->priority;
245 				PMD_DRV_LOG(DEBUG,
246 					    "Creating a priority flow\n");
247 			}
248 			if (rte_is_broadcast_ether_addr(&eth_mask->src)) {
249 				src = &eth_spec->src;
250 				if (!rte_is_valid_assigned_ether_addr(src)) {
251 					rte_flow_error_set(error,
252 							   EINVAL,
253 							   RTE_FLOW_ERROR_TYPE_ITEM,
254 							   item,
255 							   "SMAC is invalid");
256 					PMD_DRV_LOG(ERR,
257 						    "SMAC is invalid!\n");
258 					return -rte_errno;
259 				}
260 				rte_memcpy(filter->src_macaddr,
261 					   &eth_spec->src, RTE_ETHER_ADDR_LEN);
262 				en |= use_ntuple ?
263 					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
264 					EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
265 				valid_flags |= inner ?
266 					BNXT_FLOW_L2_INNER_SRC_VALID_FLAG :
267 					BNXT_FLOW_L2_SRC_VALID_FLAG;
268 			} /*
269 			   * else {
270 			   *  PMD_DRV_LOG(ERR, "Handle this condition\n");
271 			   * }
272 			   */
273 			if (eth_mask->type) {
274 				filter->ethertype =
275 					rte_be_to_cpu_16(eth_spec->type);
276 				en |= en_ethertype;
277 			}
278 			if (inner)
279 				valid_flags |= BNXT_FLOW_PARSE_INNER_FLAG;
280 
281 			break;
282 		case RTE_FLOW_ITEM_TYPE_VLAN:
283 			vlan_spec = item->spec;
284 			vlan_mask = item->mask;
285 			if (en & en_ethertype) {
286 				rte_flow_error_set(error, EINVAL,
287 						   RTE_FLOW_ERROR_TYPE_ITEM,
288 						   item,
289 						   "VLAN TPID matching is not"
290 						   " supported");
291 				return -rte_errno;
292 			}
293 			if (vlan_mask->tci &&
294 			    vlan_mask->tci == RTE_BE16(0x0fff)) {
295 				/* Only the VLAN ID can be matched. */
296 				filter->l2_ovlan =
297 					rte_be_to_cpu_16(vlan_spec->tci &
298 							 RTE_BE16(0x0fff));
299 				en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
300 			} else {
301 				rte_flow_error_set(error,
302 						   EINVAL,
303 						   RTE_FLOW_ERROR_TYPE_ITEM,
304 						   item,
305 						   "VLAN mask is invalid");
306 				return -rte_errno;
307 			}
308 			if (vlan_mask->inner_type &&
309 			    vlan_mask->inner_type != RTE_BE16(0xffff)) {
310 				rte_flow_error_set(error, EINVAL,
311 						   RTE_FLOW_ERROR_TYPE_ITEM,
312 						   item,
313 						   "inner ethertype mask not"
314 						   " valid");
315 				return -rte_errno;
316 			}
317 			if (vlan_mask->inner_type) {
318 				filter->ethertype =
319 					rte_be_to_cpu_16(vlan_spec->inner_type);
320 				en |= en_ethertype;
321 			}
322 
323 			break;
324 		case RTE_FLOW_ITEM_TYPE_IPV4:
325 			/* If mask is not involved, we could use EM filters. */
326 			ipv4_spec = item->spec;
327 			ipv4_mask = item->mask;
328 
329 			if (!item->spec || !item->mask)
330 				break;
331 
332 			/* Only IP DST and SRC fields are maskable. */
333 			if (ipv4_mask->hdr.version_ihl ||
334 			    ipv4_mask->hdr.type_of_service ||
335 			    ipv4_mask->hdr.total_length ||
336 			    ipv4_mask->hdr.packet_id ||
337 			    ipv4_mask->hdr.fragment_offset ||
338 			    ipv4_mask->hdr.time_to_live ||
339 			    ipv4_mask->hdr.next_proto_id ||
340 			    ipv4_mask->hdr.hdr_checksum) {
341 				rte_flow_error_set(error,
342 						   EINVAL,
343 						   RTE_FLOW_ERROR_TYPE_ITEM,
344 						   item,
345 						   "Invalid IPv4 mask.");
346 				return -rte_errno;
347 			}
348 
349 			filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
350 			filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
351 
352 			if (use_ntuple)
353 				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
354 					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
355 			else
356 				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
357 					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
358 
359 			if (ipv4_mask->hdr.src_addr) {
360 				filter->src_ipaddr_mask[0] =
361 					ipv4_mask->hdr.src_addr;
362 				en |= !use_ntuple ? 0 :
363 				     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
364 			}
365 
366 			if (ipv4_mask->hdr.dst_addr) {
367 				filter->dst_ipaddr_mask[0] =
368 					ipv4_mask->hdr.dst_addr;
369 				en |= !use_ntuple ? 0 :
370 				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
371 			}
372 
373 			filter->ip_addr_type = use_ntuple ?
374 			 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
375 			 HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
376 
377 			if (ipv4_spec->hdr.next_proto_id) {
378 				filter->ip_protocol =
379 					ipv4_spec->hdr.next_proto_id;
380 				if (use_ntuple)
381 					en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
382 				else
383 					en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
384 			}
385 			break;
386 		case RTE_FLOW_ITEM_TYPE_IPV6:
387 			ipv6_spec = item->spec;
388 			ipv6_mask = item->mask;
389 
390 			if (!item->spec || !item->mask)
391 				break;
392 
393 			/* Only IP DST and SRC fields are maskable. */
394 			if (ipv6_mask->hdr.vtc_flow ||
395 			    ipv6_mask->hdr.payload_len ||
396 			    ipv6_mask->hdr.proto ||
397 			    ipv6_mask->hdr.hop_limits) {
398 				rte_flow_error_set(error,
399 						   EINVAL,
400 						   RTE_FLOW_ERROR_TYPE_ITEM,
401 						   item,
402 						   "Invalid IPv6 mask.");
403 				return -rte_errno;
404 			}
405 
406 			if (use_ntuple)
407 				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
408 					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
409 			else
410 				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
411 					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
412 
413 			rte_memcpy(filter->src_ipaddr,
414 				   ipv6_spec->hdr.src_addr, 16);
415 			rte_memcpy(filter->dst_ipaddr,
416 				   ipv6_spec->hdr.dst_addr, 16);
417 
418 			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
419 						   16)) {
420 				rte_memcpy(filter->src_ipaddr_mask,
421 					   ipv6_mask->hdr.src_addr, 16);
422 				en |= !use_ntuple ? 0 :
423 				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
424 			}
425 
426 			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
427 						   16)) {
428 				rte_memcpy(filter->dst_ipaddr_mask,
429 					   ipv6_mask->hdr.dst_addr, 16);
430 				en |= !use_ntuple ? 0 :
431 				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
432 			}
433 
434 			filter->ip_addr_type = use_ntuple ?
435 				NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
436 				EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
437 			break;
438 		case RTE_FLOW_ITEM_TYPE_TCP:
439 			tcp_spec = item->spec;
440 			tcp_mask = item->mask;
441 
442 			if (!item->spec || !item->mask)
443 				break;
444 
445 			/* Check TCP mask. Only DST & SRC ports are maskable */
446 			if (tcp_mask->hdr.sent_seq ||
447 			    tcp_mask->hdr.recv_ack ||
448 			    tcp_mask->hdr.data_off ||
449 			    tcp_mask->hdr.tcp_flags ||
450 			    tcp_mask->hdr.rx_win ||
451 			    tcp_mask->hdr.cksum ||
452 			    tcp_mask->hdr.tcp_urp) {
453 				rte_flow_error_set(error,
454 						   EINVAL,
455 						   RTE_FLOW_ERROR_TYPE_ITEM,
456 						   item,
457 						   "Invalid TCP mask");
458 				return -rte_errno;
459 			}
460 
461 			filter->src_port = tcp_spec->hdr.src_port;
462 			filter->dst_port = tcp_spec->hdr.dst_port;
463 
464 			if (use_ntuple)
465 				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
466 					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
467 			else
468 				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
469 					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
470 
471 			if (tcp_mask->hdr.dst_port) {
472 				filter->dst_port_mask = tcp_mask->hdr.dst_port;
473 				en |= !use_ntuple ? 0 :
474 				  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
475 			}
476 
477 			if (tcp_mask->hdr.src_port) {
478 				filter->src_port_mask = tcp_mask->hdr.src_port;
479 				en |= !use_ntuple ? 0 :
480 				  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
481 			}
482 			break;
483 		case RTE_FLOW_ITEM_TYPE_UDP:
484 			udp_spec = item->spec;
485 			udp_mask = item->mask;
486 
487 			if (!item->spec || !item->mask)
488 				break;
489 
490 			if (udp_mask->hdr.dgram_len ||
491 			    udp_mask->hdr.dgram_cksum) {
492 				rte_flow_error_set(error,
493 						   EINVAL,
494 						   RTE_FLOW_ERROR_TYPE_ITEM,
495 						   item,
496 						   "Invalid UDP mask");
497 				return -rte_errno;
498 			}
499 
500 			filter->src_port = udp_spec->hdr.src_port;
501 			filter->dst_port = udp_spec->hdr.dst_port;
502 
503 			if (use_ntuple)
504 				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
505 					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
506 			else
507 				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
508 					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
509 
510 			if (udp_mask->hdr.dst_port) {
511 				filter->dst_port_mask = udp_mask->hdr.dst_port;
512 				en |= !use_ntuple ? 0 :
513 				  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
514 			}
515 
516 			if (udp_mask->hdr.src_port) {
517 				filter->src_port_mask = udp_mask->hdr.src_port;
518 				en |= !use_ntuple ? 0 :
519 				  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
520 			}
521 			break;
522 		case RTE_FLOW_ITEM_TYPE_VXLAN:
523 			vxlan_spec = item->spec;
524 			vxlan_mask = item->mask;
525 			/* Check if VXLAN item is used to describe protocol.
526 			 * If yes, both spec and mask should be NULL.
527 			 * If no, both spec and mask shouldn't be NULL.
528 			 */
529 			if ((!vxlan_spec && vxlan_mask) ||
530 			    (vxlan_spec && !vxlan_mask)) {
531 				rte_flow_error_set(error,
532 						   EINVAL,
533 						   RTE_FLOW_ERROR_TYPE_ITEM,
534 						   item,
535 						   "Invalid VXLAN item");
536 				return -rte_errno;
537 			}
538 
539 			if (!vxlan_spec && !vxlan_mask) {
540 				filter->tunnel_type =
541 				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
542 				break;
543 			}
544 
545 			if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
546 			    vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
547 			    vxlan_spec->flags != 0x8) {
548 				rte_flow_error_set(error,
549 						   EINVAL,
550 						   RTE_FLOW_ERROR_TYPE_ITEM,
551 						   item,
552 						   "Invalid VXLAN item");
553 				return -rte_errno;
554 			}
555 
556 			/* Check if VNI is masked. */
557 			if (vxlan_mask != NULL) {
558 				vni_masked =
559 					!!memcmp(vxlan_mask->vni, vni_mask,
560 						 RTE_DIM(vni_mask));
561 				if (vni_masked) {
562 					rte_flow_error_set
563 						(error,
564 						 EINVAL,
565 						 RTE_FLOW_ERROR_TYPE_ITEM,
566 						 item,
567 						 "Invalid VNI mask");
568 					return -rte_errno;
569 				}
570 
571 				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
572 					   vxlan_spec->vni, 3);
573 				filter->vni =
574 					rte_be_to_cpu_32(tenant_id_be);
575 				filter->tunnel_type =
576 				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
577 			}
578 			break;
579 		case RTE_FLOW_ITEM_TYPE_NVGRE:
580 			nvgre_spec = item->spec;
581 			nvgre_mask = item->mask;
582 			/* Check if NVGRE item is used to describe protocol.
583 			 * If yes, both spec and mask should be NULL.
584 			 * If no, both spec and mask shouldn't be NULL.
585 			 */
586 			if ((!nvgre_spec && nvgre_mask) ||
587 			    (nvgre_spec && !nvgre_mask)) {
588 				rte_flow_error_set(error,
589 						   EINVAL,
590 						   RTE_FLOW_ERROR_TYPE_ITEM,
591 						   item,
592 						   "Invalid NVGRE item");
593 				return -rte_errno;
594 			}
595 
596 			if (!nvgre_spec && !nvgre_mask) {
597 				filter->tunnel_type =
598 				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
599 				break;
600 			}
601 
602 			if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
603 			    nvgre_spec->protocol != 0x6558) {
604 				rte_flow_error_set(error,
605 						   EINVAL,
606 						   RTE_FLOW_ERROR_TYPE_ITEM,
607 						   item,
608 						   "Invalid NVGRE item");
609 				return -rte_errno;
610 			}
611 
612 			if (nvgre_spec && nvgre_mask) {
613 				tni_masked =
614 					!!memcmp(nvgre_mask->tni, tni_mask,
615 						 RTE_DIM(tni_mask));
616 				if (tni_masked) {
617 					rte_flow_error_set
618 						(error,
619 						 EINVAL,
620 						 RTE_FLOW_ERROR_TYPE_ITEM,
621 						 item,
622 						 "Invalid TNI mask");
623 					return -rte_errno;
624 				}
625 				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
626 					   nvgre_spec->tni, 3);
627 				filter->vni =
628 					rte_be_to_cpu_32(tenant_id_be);
629 				filter->tunnel_type =
630 				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
631 			}
632 			break;
633 
634 		case RTE_FLOW_ITEM_TYPE_GRE:
635 			gre_spec = (const struct rte_flow_item_gre *)item->spec;
636 			gre_mask = (const struct rte_flow_item_gre *)item->mask;
637 
638 			/*
639 			 * Check if GRE item is used to describe protocol.
640 			 * If yes, both spec and mask should be NULL.
641 			 * If no, both spec and mask shouldn't be NULL.
642 			 */
643 			if (!!gre_spec ^ !!gre_mask) {
644 				rte_flow_error_set(error, EINVAL,
645 						   RTE_FLOW_ERROR_TYPE_ITEM,
646 						   item,
647 						   "Invalid GRE item");
648 				return -rte_errno;
649 			}
650 
651 			if (!gre_spec && !gre_mask) {
652 				filter->tunnel_type =
653 				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE;
654 				break;
655 			}
656 			break;
657 
658 		case RTE_FLOW_ITEM_TYPE_VF:
659 			vf_spec = item->spec;
660 			vf = vf_spec->id;
661 			if (!BNXT_PF(bp)) {
662 				rte_flow_error_set(error,
663 						   EINVAL,
664 						   RTE_FLOW_ERROR_TYPE_ITEM,
665 						   item,
666 						   "Configuring on a VF!");
667 				return -rte_errno;
668 			}
669 
670 			if (vf >= bp->pdev->max_vfs) {
671 				rte_flow_error_set(error,
672 						   EINVAL,
673 						   RTE_FLOW_ERROR_TYPE_ITEM,
674 						   item,
675 						   "Incorrect VF id!");
676 				return -rte_errno;
677 			}
678 
679 			if (!attr->transfer) {
680 				rte_flow_error_set(error,
681 						   ENOTSUP,
682 						   RTE_FLOW_ERROR_TYPE_ITEM,
683 						   item,
684 						   "Matching VF traffic without"
685 						   " affecting it (transfer attribute)"
686 						   " is unsupported");
687 				return -rte_errno;
688 			}
689 
690 			filter->mirror_vnic_id =
691 			dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
692 			if (dflt_vnic < 0) {
693 				/* This simply indicates there's no driver
694 				 * loaded. This is not an error.
695 				 */
696 				rte_flow_error_set
697 					(error,
698 					 EINVAL,
699 					 RTE_FLOW_ERROR_TYPE_ITEM,
700 					 item,
701 					 "Unable to get default VNIC for VF");
702 				return -rte_errno;
703 			}
704 
705 			filter->mirror_vnic_id = dflt_vnic;
706 			en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
707 			break;
708 		default:
709 			break;
710 		}
711 		item++;
712 	}
713 	filter->enables = en;
714 	filter->valid_flags = valid_flags;
715 
716 	return 0;
717 }
718 
719 /* Parse attributes */
720 static int
721 bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
722 		     struct rte_flow_error *error)
723 {
724 	/* Must be input direction */
725 	if (!attr->ingress) {
726 		rte_flow_error_set(error,
727 				   EINVAL,
728 				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
729 				   attr,
730 				   "Only support ingress.");
731 		return -rte_errno;
732 	}
733 
734 	/* Not supported */
735 	if (attr->egress) {
736 		rte_flow_error_set(error,
737 				   EINVAL,
738 				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
739 				   attr,
740 				   "No support for egress.");
741 		return -rte_errno;
742 	}
743 
744 	return 0;
745 }
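/*
 * Only ingress rules are accepted by this driver.  A minimal attribute
 * block that passes this check could look like the following (illustrative
 * values; group and priority are consumed elsewhere in this file):
 *
 *   const struct rte_flow_attr attr = {
 *       .ingress  = 1,   // mandatory
 *       .egress   = 0,   // must be clear
 *       .group    = 0,
 *       .priority = 0,
 *   };
 */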
746 
747 static struct bnxt_filter_info *
748 bnxt_find_matching_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
749 {
750 	struct bnxt_filter_info *mf, *f0;
751 	struct bnxt_vnic_info *vnic0;
752 	int i;
753 
754 	vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
755 	f0 = STAILQ_FIRST(&vnic0->filter);
756 
757 	/* This flow has same DST MAC as the port/l2 filter. */
758 	if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0)
759 		return f0;
760 
761 	for (i = bp->max_vnics - 1; i >= 0; i--) {
762 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
763 
764 		if (vnic->fw_vnic_id == INVALID_VNIC_ID)
765 			continue;
766 
767 		STAILQ_FOREACH(mf, &vnic->filter, next) {
768 
769 			if (mf->matching_l2_fltr_ptr)
770 				continue;
771 
772 			if (mf->ethertype == nf->ethertype &&
773 			    mf->l2_ovlan == nf->l2_ovlan &&
774 			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
775 			    mf->l2_ivlan == nf->l2_ivlan &&
776 			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
777 			    !memcmp(mf->src_macaddr, nf->src_macaddr,
778 				    RTE_ETHER_ADDR_LEN) &&
779 			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
780 				    RTE_ETHER_ADDR_LEN))
781 				return mf;
782 		}
783 	}
784 	return NULL;
785 }
786 
787 static struct bnxt_filter_info *
788 bnxt_create_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
789 		      struct bnxt_vnic_info *vnic)
790 {
791 	struct bnxt_filter_info *filter1;
792 	int rc;
793 
794 	/* Alloc new L2 filter.
795 	 * This flow needs MAC filter which does not match any existing
796 	 * L2 filters.
797 	 */
798 	filter1 = bnxt_get_unused_filter(bp);
799 	if (filter1 == NULL)
800 		return NULL;
801 
802 	memcpy(filter1, nf, sizeof(*filter1));
803 
804 	filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE;
805 	filter1->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
806 	if (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
807 	    nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG) {
808 		filter1->flags |=
809 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
810 		PMD_DRV_LOG(DEBUG, "Create Outer filter\n");
811 	}
812 
813 	if (nf->filter_type == HWRM_CFA_L2_FILTER &&
814 	    (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
815 	     nf->valid_flags & BNXT_FLOW_L2_INNER_SRC_VALID_FLAG)) {
816 		PMD_DRV_LOG(DEBUG, "Create L2 filter for SRC MAC\n");
817 		filter1->flags |=
818 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_SOURCE_VALID;
819 		memcpy(filter1->l2_addr, nf->src_macaddr, RTE_ETHER_ADDR_LEN);
820 	} else {
821 		PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
822 		memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);
823 	}
824 
825 	if (nf->priority &&
826 	    (nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG ||
827 	     nf->valid_flags & BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
828 		/* Tell the FW where to place the filter in the table. */
829 		if (nf->priority > 65535) {
830 			filter1->pri_hint =
831 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER;
832 			/* This will place the filter in TCAM */
833 			filter1->l2_filter_id_hint = (uint64_t)-1;
834 		}
835 	}
836 
837 	if (nf->valid_flags & (BNXT_FLOW_L2_DST_VALID_FLAG |
838 			       BNXT_FLOW_L2_SRC_VALID_FLAG |
839 			       BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
840 			       BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
841 		filter1->enables =
842 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
843 			L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
844 		memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
845 	}
846 
847 	if (nf->valid_flags & BNXT_FLOW_L2_DROP_FLAG) {
848 		filter1->flags |=
849 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP;
850 		if (nf->ethertype == RTE_ETHER_TYPE_IPV4) {
851 			/* Num VLANs for drop filter will/should be 0.
852 			 * If the req is memset to 0, then the count will
853 			 * be automatically set to 0.
854 			 */
855 			if (nf->valid_flags & BNXT_FLOW_PARSE_INNER_FLAG) {
856 				filter1->enables |=
857 					L2_FILTER_ALLOC_INPUT_EN_T_NUM_VLANS;
858 			} else {
859 				filter1->enables |=
860 					L2_FILTER_ALLOC_INPUT_EN_NUM_VLANS;
861 				filter1->flags |=
862 				HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
863 			}
864 		}
865 	}
866 
867 	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
868 				     filter1);
869 	if (rc) {
870 		bnxt_free_filter(bp, filter1);
871 		return NULL;
872 	}
873 	return filter1;
874 }
875 
876 struct bnxt_filter_info *
877 bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
878 		   struct bnxt_vnic_info *vnic)
879 {
880 	struct bnxt_filter_info *l2_filter = NULL;
881 
882 	l2_filter = bnxt_find_matching_l2_filter(bp, nf);
883 	if (l2_filter) {
884 		l2_filter->l2_ref_cnt++;
885 	} else {
886 		l2_filter = bnxt_create_l2_filter(bp, nf, vnic);
887 		if (l2_filter) {
888 			STAILQ_INSERT_TAIL(&vnic->filter, l2_filter, next);
889 			l2_filter->vnic = vnic;
890 		}
891 	}
892 	nf->matching_l2_fltr_ptr = l2_filter;
893 
894 	return l2_filter;
895 }
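/*
 * bnxt_get_l2_filter() either reuses an L2 filter that already matches the
 * flow (bumping its l2_ref_cnt) or creates and programs a new one on the
 * given VNIC.  In both cases nf->matching_l2_fltr_ptr records which L2
 * filter the flow is parented to, so the driver can track how many flows
 * share that L2 filter.
 */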
896 
897 static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic)
898 {
899 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
900 	uint64_t rx_offloads = dev_conf->rxmode.offloads;
901 	int rc;
902 
903 	rc = bnxt_vnic_grp_alloc(bp, vnic);
904 	if (rc)
905 		goto ret;
906 
907 	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
908 	if (rc) {
909 		PMD_DRV_LOG(ERR, "HWRM vnic alloc failure rc: %x\n", rc);
910 		goto ret;
911 	}
912 	bp->nr_vnics++;
913 
914 	/* RSS context is required only when there is more than one RSS ring */
915 	if (vnic->rx_queue_cnt > 1) {
916 		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0 /* ctx_idx 0 */);
917 		if (rc) {
918 			PMD_DRV_LOG(ERR,
919 				    "HWRM vnic ctx alloc failure: %x\n", rc);
920 			goto ret;
921 		}
922 	} else {
923 		PMD_DRV_LOG(DEBUG, "No RSS context required\n");
924 	}
925 
926 	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
927 		vnic->vlan_strip = true;
928 	else
929 		vnic->vlan_strip = false;
930 
931 	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
932 	if (rc)
933 		goto ret;
934 
935 	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
936 
937 ret:
938 	return rc;
939 }
940 
941 static int match_vnic_rss_cfg(struct bnxt *bp,
942 			      struct bnxt_vnic_info *vnic,
943 			      const struct rte_flow_action_rss *rss)
944 {
945 	unsigned int match = 0, i;
946 
947 	if (vnic->rx_queue_cnt != rss->queue_num)
948 		return -EINVAL;
949 
950 	for (i = 0; i < rss->queue_num; i++) {
951 		if (!bp->rx_queues[rss->queue[i]]->vnic->rx_queue_cnt &&
952 		    !bp->rx_queues[rss->queue[i]]->rx_started)
953 			return -EINVAL;
954 	}
955 
956 	for (i = 0; i < vnic->rx_queue_cnt; i++) {
957 		int j;
958 
959 		for (j = 0; j < vnic->rx_queue_cnt; j++) {
960 			if (bp->grp_info[rss->queue[i]].fw_grp_id ==
961 			    vnic->fw_grp_ids[j])
962 				match++;
963 		}
964 	}
965 
966 	if (match != vnic->rx_queue_cnt) {
967 		PMD_DRV_LOG(ERR,
968 			    "VNIC queue count %d vs queues matched %d\n",
969 			    match, vnic->rx_queue_cnt);
970 		return -EINVAL;
971 	}
972 
973 	return 0;
974 }
975 
976 static void
977 bnxt_update_filter_flags_en(struct bnxt_filter_info *filter,
978 			    struct bnxt_filter_info *filter1,
979 			    int use_ntuple)
980 {
981 	if (!use_ntuple &&
982 	    !(filter->valid_flags &
983 	      ~(BNXT_FLOW_L2_DST_VALID_FLAG |
984 		BNXT_FLOW_L2_SRC_VALID_FLAG |
985 		BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
986 		BNXT_FLOW_L2_INNER_DST_VALID_FLAG |
987 		BNXT_FLOW_L2_DROP_FLAG |
988 		BNXT_FLOW_PARSE_INNER_FLAG))) {
989 		filter->flags = filter1->flags;
990 		filter->enables = filter1->enables;
991 		filter->filter_type = HWRM_CFA_L2_FILTER;
992 		memcpy(filter->l2_addr, filter1->l2_addr, RTE_ETHER_ADDR_LEN);
993 		memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
994 		filter->pri_hint = filter1->pri_hint;
995 		filter->l2_filter_id_hint = filter1->l2_filter_id_hint;
996 	}
997 	filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
998 	filter->l2_ref_cnt = filter1->l2_ref_cnt;
999 	filter->flow_id = filter1->flow_id;
1000 	PMD_DRV_LOG(DEBUG,
1001 		"l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u\n",
1002 		filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt);
1003 }
1004 
1005 static int
1006 bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
1007 			     const struct rte_flow_item pattern[],
1008 			     const struct rte_flow_action actions[],
1009 			     const struct rte_flow_attr *attr,
1010 			     struct rte_flow_error *error,
1011 			     struct bnxt_filter_info *filter)
1012 {
1013 	const struct rte_flow_action *act =
1014 		bnxt_flow_non_void_action(actions);
1015 	struct bnxt *bp = dev->data->dev_private;
1016 	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1017 	struct bnxt_vnic_info *vnic = NULL, *vnic0 = NULL;
1018 	const struct rte_flow_action_queue *act_q;
1019 	const struct rte_flow_action_vf *act_vf;
1020 	struct bnxt_filter_info *filter1 = NULL;
1021 	const struct rte_flow_action_rss *rss;
1022 	struct bnxt_rx_queue *rxq = NULL;
1023 	int dflt_vnic, vnic_id;
1024 	unsigned int rss_idx;
1025 	uint32_t vf = 0, i;
1026 	int rc, use_ntuple;
1027 
1028 	rc =
1029 	bnxt_validate_and_parse_flow_type(bp, attr, pattern, error, filter);
1030 	if (rc != 0)
1031 		goto ret;
1032 
1033 	rc = bnxt_flow_parse_attr(attr, error);
1034 	if (rc != 0)
1035 		goto ret;
1036 
1037 	/* Since we support ingress attribute only - right now. */
1038 	if (filter->filter_type == HWRM_CFA_EM_FILTER)
1039 		filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;
1040 
1041 	use_ntuple = bnxt_filter_type_check(pattern, error);
1042 
1043 start:
1044 	switch (act->type) {
1045 	case RTE_FLOW_ACTION_TYPE_QUEUE:
1046 		/* Allow this flow. Redirect to a VNIC. */
1047 		act_q = (const struct rte_flow_action_queue *)act->conf;
1048 		if (!act_q->index || act_q->index >= bp->rx_nr_rings) {
1049 			rte_flow_error_set(error,
1050 					   EINVAL,
1051 					   RTE_FLOW_ERROR_TYPE_ACTION,
1052 					   act,
1053 					   "Invalid queue ID.");
1054 			rc = -rte_errno;
1055 			goto ret;
1056 		}
1057 		PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);
1058 
1059 		vnic_id = attr->group;
1060 		if (!vnic_id) {
1061 			PMD_DRV_LOG(DEBUG, "Group id is 0\n");
1062 			vnic_id = act_q->index;
1063 		}
1064 
1065 		BNXT_VALID_VNIC_OR_RET(bp, vnic_id);
1066 
1067 		vnic = &bp->vnic_info[vnic_id];
1068 		if (vnic->rx_queue_cnt) {
1069 			if (vnic->start_grp_id != act_q->index) {
1070 				PMD_DRV_LOG(ERR,
1071 					    "VNIC already in use\n");
1072 				rte_flow_error_set(error,
1073 						   EINVAL,
1074 						   RTE_FLOW_ERROR_TYPE_ACTION,
1075 						   act,
1076 						   "VNIC already in use");
1077 				rc = -rte_errno;
1078 				goto ret;
1079 			}
1080 			goto use_vnic;
1081 		}
1082 
1083 		rxq = bp->rx_queues[act_q->index];
1084 
1085 		if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
1086 		    vnic->fw_vnic_id != INVALID_HW_RING_ID)
1087 			goto use_vnic;
1088 
1089 		if (!rxq) {
1090 			PMD_DRV_LOG(ERR,
1091 				    "Queue invalid or used with other VNIC\n");
1092 			rte_flow_error_set(error,
1093 					   EINVAL,
1094 					   RTE_FLOW_ERROR_TYPE_ACTION,
1095 					   act,
1096 					   "Queue invalid or in use");
1097 			rc = -rte_errno;
1098 			goto ret;
1099 		}
1100 
1101 		rxq->vnic = vnic;
1102 		rxq->rx_started = 1;
1103 		vnic->rx_queue_cnt++;
1104 		vnic->start_grp_id = act_q->index;
1105 		vnic->end_grp_id = act_q->index;
1106 		vnic->func_default = 0;	//This is not a default VNIC.
1107 
1108 		PMD_DRV_LOG(DEBUG, "VNIC found\n");
1109 
1110 		rc = bnxt_vnic_prep(bp, vnic);
1111 		if (rc)  {
1112 			rte_flow_error_set(error,
1113 					   EINVAL,
1114 					   RTE_FLOW_ERROR_TYPE_ACTION,
1115 					   act,
1116 					   "VNIC prep fail");
1117 			rc = -rte_errno;
1118 			goto ret;
1119 		}
1120 
1121 		PMD_DRV_LOG(DEBUG,
1122 			    "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
1123 			    act_q->index, vnic, vnic->fw_grp_ids);
1124 
1125 use_vnic:
1126 		vnic->ff_pool_idx = vnic_id;
1127 		PMD_DRV_LOG(DEBUG,
1128 			    "Setting vnic ff_idx %d\n", vnic->ff_pool_idx);
1129 		filter->dst_id = vnic->fw_vnic_id;
1130 
1131 		/* For ntuple filter, create the L2 filter with default VNIC.
1132 		 * The user specified redirect queue will be set while creating
1133 		 * the ntuple filter in hardware.
1134 		 */
1135 		vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
1136 		if (use_ntuple)
1137 			filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1138 		else
1139 			filter1 = bnxt_get_l2_filter(bp, filter, vnic);
1140 		if (filter1 == NULL) {
1141 			rte_flow_error_set(error,
1142 					   ENOSPC,
1143 					   RTE_FLOW_ERROR_TYPE_ACTION,
1144 					   act,
1145 					   "Filter not available");
1146 			rc = -rte_errno;
1147 			goto ret;
1148 		}
1149 
1150 		PMD_DRV_LOG(DEBUG, "new fltr: %p l2fltr: %p l2_ref_cnt: %d\n",
1151 			    filter, filter1, filter1->l2_ref_cnt);
1152 		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
1153 		break;
1154 	case RTE_FLOW_ACTION_TYPE_DROP:
1155 		vnic0 = &bp->vnic_info[0];
1156 		filter->dst_id = vnic0->fw_vnic_id;
1157 		filter->valid_flags |= BNXT_FLOW_L2_DROP_FLAG;
1158 		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1159 		if (filter1 == NULL) {
1160 			rte_flow_error_set(error,
1161 					   ENOSPC,
1162 					   RTE_FLOW_ERROR_TYPE_ACTION,
1163 					   act,
1164 					   "Filter not available");
1165 			rc = -rte_errno;
1166 			goto ret;
1167 		}
1168 
1169 		if (filter->filter_type == HWRM_CFA_EM_FILTER)
1170 			filter->flags =
1171 				HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
1172 		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1173 			filter->flags =
1174 				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
1175 
1176 		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
1177 		break;
1178 	case RTE_FLOW_ACTION_TYPE_COUNT:
1179 		vnic0 = &bp->vnic_info[0];
1180 		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1181 		if (filter1 == NULL) {
1182 			rte_flow_error_set(error,
1183 					   ENOSPC,
1184 					   RTE_FLOW_ERROR_TYPE_ACTION,
1185 					   act,
1186 					   "New filter not available");
1187 			rc = -rte_errno;
1188 			goto ret;
1189 		}
1190 
1191 		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
1192 		filter->flow_id = filter1->flow_id;
1193 		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
1194 		break;
1195 	case RTE_FLOW_ACTION_TYPE_VF:
1196 		act_vf = (const struct rte_flow_action_vf *)act->conf;
1197 		vf = act_vf->id;
1198 
1199 		if (filter->tunnel_type ==
1200 		    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN ||
1201 		    filter->tunnel_type ==
1202 		    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) {
1203 			/* If issued on a VF, ensure id is 0 and is trusted */
1204 			if (BNXT_VF(bp)) {
1205 				if (!BNXT_VF_IS_TRUSTED(bp) || vf) {
1206 					rte_flow_error_set(error, EINVAL,
1207 						RTE_FLOW_ERROR_TYPE_ACTION,
1208 						act,
1209 						"Incorrect VF");
1210 					rc = -rte_errno;
1211 					goto ret;
1212 				}
1213 			}
1214 
1215 			filter->enables |= filter->tunnel_type;
1216 			filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER;
1217 			goto done;
1218 		}
1219 
1220 		if (vf >= bp->pdev->max_vfs) {
1221 			rte_flow_error_set(error,
1222 					   EINVAL,
1223 					   RTE_FLOW_ERROR_TYPE_ACTION,
1224 					   act,
1225 					   "Incorrect VF id!");
1226 			rc = -rte_errno;
1227 			goto ret;
1228 		}
1229 
1230 		filter->mirror_vnic_id =
1231 		dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
1232 		if (dflt_vnic < 0) {
1233 			/* This simply indicates there's no driver loaded.
1234 			 * This is not an error.
1235 			 */
1236 			rte_flow_error_set(error,
1237 					   EINVAL,
1238 					   RTE_FLOW_ERROR_TYPE_ACTION,
1239 					   act,
1240 					   "Unable to get default VNIC for VF");
1241 			rc = -rte_errno;
1242 			goto ret;
1243 		}
1244 
1245 		filter->mirror_vnic_id = dflt_vnic;
1246 		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
1247 
1248 		vnic0 = &bp->vnic_info[0];
1249 		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1250 		if (filter1 == NULL) {
1251 			rte_flow_error_set(error,
1252 					   ENOSPC,
1253 					   RTE_FLOW_ERROR_TYPE_ACTION,
1254 					   act,
1255 					   "New filter not available");
1256 			rc = -rte_errno;
1257 			goto ret;
1258 		}
1259 
1260 		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
1261 		filter->flow_id = filter1->flow_id;
1262 		break;
1263 	case RTE_FLOW_ACTION_TYPE_RSS:
1264 		rss = (const struct rte_flow_action_rss *)act->conf;
1265 
1266 		vnic_id = attr->group;
1267 
1268 		BNXT_VALID_VNIC_OR_RET(bp, vnic_id);
1269 		vnic = &bp->vnic_info[vnic_id];
1270 
1271 		/* Check if requested RSS config matches RSS config of VNIC
1272 		 * only if it is not a fresh VNIC configuration.
1273 		 * Otherwise the existing VNIC configuration can be used.
1274 		 */
1275 		if (vnic->rx_queue_cnt) {
1276 			rc = match_vnic_rss_cfg(bp, vnic, rss);
1277 			if (rc) {
1278 				PMD_DRV_LOG(ERR,
1279 					    "VNIC and RSS config mismatch\n");
1280 				rte_flow_error_set(error,
1281 						   EINVAL,
1282 						   RTE_FLOW_ERROR_TYPE_ACTION,
1283 						   act,
1284 						   "VNIC and RSS cfg mismatch");
1285 				rc = -rte_errno;
1286 				goto ret;
1287 			}
1288 			goto vnic_found;
1289 		}
1290 
1291 		for (i = 0; i < rss->queue_num; i++) {
1292 			PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n",
1293 				    rss->queue[i]);
1294 
1295 			if (!rss->queue[i] ||
1296 			    rss->queue[i] >= bp->rx_nr_rings ||
1297 			    !bp->rx_queues[rss->queue[i]]) {
1298 				rte_flow_error_set(error,
1299 						   EINVAL,
1300 						   RTE_FLOW_ERROR_TYPE_ACTION,
1301 						   act,
1302 						   "Invalid queue ID for RSS");
1303 				rc = -rte_errno;
1304 				goto ret;
1305 			}
1306 			rxq = bp->rx_queues[rss->queue[i]];
1307 
1308 			if (bp->vnic_info[0].fw_grp_ids[rss->queue[i]] !=
1309 			    INVALID_HW_RING_ID) {
1310 				PMD_DRV_LOG(ERR,
1311 					    "queue active with other VNIC\n");
1312 				rte_flow_error_set(error,
1313 						   EINVAL,
1314 						   RTE_FLOW_ERROR_TYPE_ACTION,
1315 						   act,
1316 						   "Invalid queue ID for RSS");
1317 				rc = -rte_errno;
1318 				goto ret;
1319 			}
1320 
1321 			rxq->vnic = vnic;
1322 			rxq->rx_started = 1;
1323 			vnic->rx_queue_cnt++;
1324 		}
1325 
1326 		vnic->start_grp_id = rss->queue[0];
1327 		vnic->end_grp_id = rss->queue[rss->queue_num - 1];
1328 		vnic->func_default = 0;	//This is not a default VNIC.
1329 
1330 		rc = bnxt_vnic_prep(bp, vnic);
1331 		if (rc) {
1332 			rte_flow_error_set(error,
1333 					   EINVAL,
1334 					   RTE_FLOW_ERROR_TYPE_ACTION,
1335 					   act,
1336 					   "VNIC prep fail");
1337 			rc = -rte_errno;
1338 			goto ret;
1339 		}
1340 
1341 		PMD_DRV_LOG(DEBUG,
1342 			    "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
1343 			    vnic_id, vnic, vnic->fw_grp_ids);
1344 
1345 		vnic->ff_pool_idx = vnic_id;
1346 		PMD_DRV_LOG(DEBUG,
1347 			    "Setting vnic ff_pool_idx %d\n", vnic->ff_pool_idx);
1348 
1349 		/* This can be done only after vnic_grp_alloc is done. */
1350 		for (i = 0; i < vnic->rx_queue_cnt; i++) {
1351 			vnic->fw_grp_ids[i] =
1352 				bp->grp_info[rss->queue[i]].fw_grp_id;
1353 			/* Make sure vnic0 does not use these rings. */
1354 			bp->vnic_info[0].fw_grp_ids[rss->queue[i]] =
1355 				INVALID_HW_RING_ID;
1356 		}
1357 
1358 		for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; ) {
1359 			for (i = 0; i < vnic->rx_queue_cnt; i++)
1360 				vnic->rss_table[rss_idx++] =
1361 					vnic->fw_grp_ids[i];
1362 		}
1363 
1364 		/* Configure RSS only if the queue count is > 1 */
1365 		if (vnic->rx_queue_cnt > 1) {
1366 			vnic->hash_type =
1367 				bnxt_rte_to_hwrm_hash_types(rss->types);
1368 			vnic->hash_mode =
1369 			bnxt_rte_to_hwrm_hash_level(bp, rss->types, rss->level);
1370 
1371 			if (!rss->key_len) {
1372 				/* If hash key has not been specified,
1373 				 * use random hash key.
1374 				 */
1375 				prandom_bytes(vnic->rss_hash_key,
1376 					      HW_HASH_KEY_SIZE);
1377 			} else {
1378 				if (rss->key_len > HW_HASH_KEY_SIZE)
1379 					memcpy(vnic->rss_hash_key,
1380 					       rss->key,
1381 					       HW_HASH_KEY_SIZE);
1382 				else
1383 					memcpy(vnic->rss_hash_key,
1384 					       rss->key,
1385 					       rss->key_len);
1386 			}
1387 			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1388 		} else {
1389 			PMD_DRV_LOG(DEBUG, "No RSS config required\n");
1390 		}
1391 
1392 vnic_found:
1393 		filter->dst_id = vnic->fw_vnic_id;
1394 		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
1395 		if (filter1 == NULL) {
1396 			rte_flow_error_set(error,
1397 					   ENOSPC,
1398 					   RTE_FLOW_ERROR_TYPE_ACTION,
1399 					   act,
1400 					   "New filter not available");
1401 			rc = -rte_errno;
1402 			goto ret;
1403 		}
1404 
1405 		PMD_DRV_LOG(DEBUG, "L2 filter created\n");
1406 		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
1407 		break;
1408 	case RTE_FLOW_ACTION_TYPE_MARK:
1409 		if (bp->mark_table == NULL) {
1410 			rte_flow_error_set(error,
1411 					   ENOMEM,
1412 					   RTE_FLOW_ERROR_TYPE_ACTION,
1413 					   act,
1414 					   "Mark table not allocated.");
1415 			rc = -rte_errno;
1416 			goto ret;
1417 		}
1418 
1419 		if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) {
1420 			PMD_DRV_LOG(DEBUG,
1421 				    "Disabling vector processing for mark\n");
1422 			bp->eth_dev->rx_pkt_burst = bnxt_recv_pkts;
1423 			bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
1424 		}
1425 
1426 		filter->valid_flags |= BNXT_FLOW_MARK_FLAG;
1427 		filter->mark = ((const struct rte_flow_action_mark *)
1428 				act->conf)->id;
1429 		PMD_DRV_LOG(DEBUG, "Mark the flow %d\n", filter->mark);
1430 		break;
1431 	default:
1432 		rte_flow_error_set(error,
1433 				   EINVAL,
1434 				   RTE_FLOW_ERROR_TYPE_ACTION,
1435 				   act,
1436 				   "Invalid action.");
1437 		rc = -rte_errno;
1438 		goto ret;
1439 	}
1440 
1441 done:
1442 	act = bnxt_flow_non_void_action(++act);
1443 	while (act->type != RTE_FLOW_ACTION_TYPE_END)
1444 		goto start;
1445 
1446 	return rc;
1447 ret:
1448 
1449 	if (filter1) {
1450 		bnxt_hwrm_clear_l2_filter(bp, filter1);
1451 		bnxt_free_filter(bp, filter1);
1452 	}
1453 
1454 	if (rte_errno)  {
1455 		if (vnic && STAILQ_EMPTY(&vnic->filter))
1456 			vnic->rx_queue_cnt = 0;
1457 
1458 		if (rxq && !vnic->rx_queue_cnt)
1459 			rxq->vnic = &bp->vnic_info[0];
1460 	}
1461 	return -rte_errno;
1462 }
1463 
1464 static
1465 struct bnxt_vnic_info *find_matching_vnic(struct bnxt *bp,
1466 					  struct bnxt_filter_info *filter)
1467 {
1468 	struct bnxt_vnic_info *vnic = NULL;
1469 	unsigned int i;
1470 
1471 	for (i = 0; i < bp->max_vnics; i++) {
1472 		vnic = &bp->vnic_info[i];
1473 		if (vnic->fw_vnic_id != INVALID_VNIC_ID &&
1474 		    filter->dst_id == vnic->fw_vnic_id) {
1475 			PMD_DRV_LOG(DEBUG, "Found matching VNIC Id %d\n",
1476 				    vnic->ff_pool_idx);
1477 			return vnic;
1478 		}
1479 	}
1480 	return NULL;
1481 }
1482 
1483 static int
1484 bnxt_flow_validate(struct rte_eth_dev *dev,
1485 		   const struct rte_flow_attr *attr,
1486 		   const struct rte_flow_item pattern[],
1487 		   const struct rte_flow_action actions[],
1488 		   struct rte_flow_error *error)
1489 {
1490 	struct bnxt *bp = dev->data->dev_private;
1491 	struct bnxt_vnic_info *vnic = NULL;
1492 	struct bnxt_filter_info *filter;
1493 	int ret = 0;
1494 
1495 	bnxt_acquire_flow_lock(bp);
1496 	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
1497 	if (ret != 0) {
1498 		bnxt_release_flow_lock(bp);
1499 		return ret;
1500 	}
1501 
1502 	filter = bnxt_get_unused_filter(bp);
1503 	if (filter == NULL) {
1504 		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
1505 		bnxt_release_flow_lock(bp);
1506 		return -ENOMEM;
1507 	}
1508 
1509 	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
1510 					   error, filter);
1511 	if (ret)
1512 		goto exit;
1513 
1514 	vnic = find_matching_vnic(bp, filter);
1515 	if (vnic) {
1516 		if (STAILQ_EMPTY(&vnic->filter)) {
1517 			rte_free(vnic->fw_grp_ids);
1518 			bnxt_hwrm_vnic_ctx_free(bp, vnic);
1519 			bnxt_hwrm_vnic_free(bp, vnic);
1520 			vnic->rx_queue_cnt = 0;
1521 			PMD_DRV_LOG(DEBUG, "Free VNIC\n");
1522 		}
1523 	}
1524 
1525 	if (filter->filter_type == HWRM_CFA_EM_FILTER)
1526 		bnxt_hwrm_clear_em_filter(bp, filter);
1527 	else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1528 		bnxt_hwrm_clear_ntuple_filter(bp, filter);
1529 	else
1530 		bnxt_hwrm_clear_l2_filter(bp, filter);
1531 
1532 exit:
1533 	/* No need to hold on to this filter if we are just validating flow */
1534 	bnxt_free_filter(bp, filter);
1535 	bnxt_release_flow_lock(bp);
1536 
1537 	return ret;
1538 }
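/*
 * From the application side this entry point is reached through the generic
 * rte_flow API.  A minimal usage sketch (illustrative values; port_id and
 * flow are assumed to be declared by the caller):
 *
 *   struct rte_flow_error err;
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 1 };
 *   struct rte_flow_action actions[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *
 *   if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *       flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *
 * Note that the QUEUE action handling above rejects queue index 0, hence
 * .index = 1 in this sketch.
 */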
1539 
1540 static void
1541 bnxt_update_filter(struct bnxt *bp, struct bnxt_filter_info *old_filter,
1542 		   struct bnxt_filter_info *new_filter)
1543 {
1544 	/* Clear the new L2 filter that was created in the previous step in
1545 	 * bnxt_validate_and_parse_flow. For L2 filters, we will use the new
1546 	 * filter which points to the new destination queue and so we clear
1547 	 * the previous L2 filter. For ntuple filters, we are going to reuse
1548 	 * the old L2 filter and create new NTUPLE filter with this new
1549 	 * destination queue subsequently during bnxt_flow_create. So we
1550 	 * decrement the ref cnt of the L2 filter that would've been bumped
1551 	 * up previously in bnxt_validate_and_parse_flow as the old n-tuple
1552 	 * filter that was referencing it will be deleted now.
1553 	 */
1554 	bnxt_hwrm_clear_l2_filter(bp, old_filter);
1555 	if (new_filter->filter_type == HWRM_CFA_L2_FILTER) {
1556 		bnxt_hwrm_set_l2_filter(bp, new_filter->dst_id, new_filter);
1557 	} else {
1558 		if (new_filter->filter_type == HWRM_CFA_EM_FILTER)
1559 			bnxt_hwrm_clear_em_filter(bp, old_filter);
1560 		if (new_filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1561 			bnxt_hwrm_clear_ntuple_filter(bp, old_filter);
1562 	}
1563 }
1564 
1565 static int
1566 bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
1567 {
1568 	struct bnxt_filter_info *mf;
1569 	struct rte_flow *flow;
1570 	int i;
1571 
1572 	for (i = bp->max_vnics - 1; i >= 0; i--) {
1573 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1574 
1575 		if (vnic->fw_vnic_id == INVALID_VNIC_ID)
1576 			continue;
1577 
1578 		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1579 			mf = flow->filter;
1580 
1581 			if (mf->filter_type == nf->filter_type &&
1582 			    mf->flags == nf->flags &&
1583 			    mf->src_port == nf->src_port &&
1584 			    mf->src_port_mask == nf->src_port_mask &&
1585 			    mf->dst_port == nf->dst_port &&
1586 			    mf->dst_port_mask == nf->dst_port_mask &&
1587 			    mf->ip_protocol == nf->ip_protocol &&
1588 			    mf->ip_addr_type == nf->ip_addr_type &&
1589 			    mf->ethertype == nf->ethertype &&
1590 			    mf->vni == nf->vni &&
1591 			    mf->tunnel_type == nf->tunnel_type &&
1592 			    mf->l2_ovlan == nf->l2_ovlan &&
1593 			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
1594 			    mf->l2_ivlan == nf->l2_ivlan &&
1595 			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
1596 			    !memcmp(mf->l2_addr, nf->l2_addr,
1597 				    RTE_ETHER_ADDR_LEN) &&
1598 			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
1599 				    RTE_ETHER_ADDR_LEN) &&
1600 			    !memcmp(mf->src_macaddr, nf->src_macaddr,
1601 				    RTE_ETHER_ADDR_LEN) &&
1602 			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
1603 				    RTE_ETHER_ADDR_LEN) &&
1604 			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
1605 				    sizeof(nf->src_ipaddr)) &&
1606 			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
1607 				    sizeof(nf->src_ipaddr_mask)) &&
1608 			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
1609 				    sizeof(nf->dst_ipaddr)) &&
1610 			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
1611 				    sizeof(nf->dst_ipaddr_mask))) {
1612 				if (mf->dst_id == nf->dst_id)
1613 					return -EEXIST;
1614 				/* Free the old filter, update flow
1615 				 * with new filter
1616 				 */
1617 				bnxt_update_filter(bp, mf, nf);
1618 				STAILQ_REMOVE(&vnic->filter, mf,
1619 					      bnxt_filter_info, next);
1620 				STAILQ_INSERT_TAIL(&vnic->filter, nf, next);
1621 				bnxt_free_filter(bp, mf);
1622 				flow->filter = nf;
1623 				return -EXDEV;
1624 			}
1625 		}
1626 	}
1627 	return 0;
1628 }
1629 
1630 static void
1631 bnxt_setup_flow_counter(struct bnxt *bp)
1632 {
1633 	if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS &&
1634 	    !(bp->flags & BNXT_FLAG_FC_THREAD) && BNXT_FLOW_XSTATS_EN(bp)) {
1635 		rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER,
1636 				  bnxt_flow_cnt_alarm_cb,
1637 				  (void *)bp);
1638 		bp->flags |= BNXT_FLAG_FC_THREAD;
1639 	}
1640 }
1641 
1642 void bnxt_flow_cnt_alarm_cb(void *arg)
1643 {
1644 	int rc = 0;
1645 	struct bnxt *bp = arg;
1646 
1647 	if (!bp->flow_stat->rx_fc_out_tbl.va) {
1648 		PMD_DRV_LOG(ERR, "bp->flow_stat->rx_fc_out_tbl.va is NULL?\n");
1649 		bnxt_cancel_fc_thread(bp);
1650 		return;
1651 	}
1652 
1653 	if (!bp->flow_stat->flow_count) {
1654 		bnxt_cancel_fc_thread(bp);
1655 		return;
1656 	}
1657 
1658 	if (!bp->eth_dev->data->dev_started) {
1659 		bnxt_cancel_fc_thread(bp);
1660 		return;
1661 	}
1662 
1663 	rc = bnxt_flow_stats_req(bp);
1664 	if (rc) {
1665 		PMD_DRV_LOG(ERR, "Flow stat alarm not rescheduled.\n");
1666 		return;
1667 	}
1668 
1669 	rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER,
1670 			  bnxt_flow_cnt_alarm_cb,
1671 			  (void *)bp);
1672 }
1673 
1674 
1675 static struct rte_flow *
1676 bnxt_flow_create(struct rte_eth_dev *dev,
1677 		 const struct rte_flow_attr *attr,
1678 		 const struct rte_flow_item pattern[],
1679 		 const struct rte_flow_action actions[],
1680 		 struct rte_flow_error *error)
1681 {
1682 	struct bnxt *bp = dev->data->dev_private;
1683 	struct bnxt_vnic_info *vnic = NULL;
1684 	struct bnxt_filter_info *filter;
1685 	bool update_flow = false;
1686 	struct rte_flow *flow;
1687 	int ret = 0;
1688 	uint32_t tun_type, flow_id;
1689 
1690 	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1691 		rte_flow_error_set(error, EINVAL,
1692 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1693 				   "Failed to create flow, Not a Trusted VF!");
1694 		return NULL;
1695 	}
1696 
1697 	if (!dev->data->dev_started) {
1698 		rte_flow_error_set(error,
1699 				   EINVAL,
1700 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1701 				   NULL,
1702 				   "Device must be started");
1703 		return NULL;
1704 	}
1705 
1706 	flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
1707 	if (!flow) {
1708 		rte_flow_error_set(error, ENOMEM,
1709 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1710 				   "Failed to allocate memory");
1711 		return flow;
1712 	}
1713 
1714 	bnxt_acquire_flow_lock(bp);
1715 	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
1716 	if (ret != 0) {
1717 		PMD_DRV_LOG(ERR, "Not a valid flow.\n");
1718 		goto free_flow;
1719 	}
1720 
1721 	filter = bnxt_get_unused_filter(bp);
1722 	if (filter == NULL) {
1723 		rte_flow_error_set(error, ENOSPC,
1724 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1725 				   "Not enough resources for a new flow");
1726 		goto free_flow;
1727 	}
1728 
1729 	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
1730 					   error, filter);
1731 	if (ret != 0)
1732 		goto free_filter;
1733 
1734 	ret = bnxt_match_filter(bp, filter);
1735 	if (ret == -EEXIST) {
1736 		PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
1737 		/* Clear the filter that was created as part of
1738 		 * validate_and_parse_flow() above
1739 		 */
1740 		bnxt_hwrm_clear_l2_filter(bp, filter);
1741 		goto free_filter;
1742 	} else if (ret == -EXDEV) {
1743 		PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
1744 		PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
1745 		update_flow = true;
1746 	}
1747 
1748 	/* If tunnel redirection to a VF/PF is specified then only tunnel_type
1749 	 * is set and enable is set to the tunnel type. Issue hwrm cmd directly
1750 	 * in such a case.
1751 	 */
1752 	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
1753 	    filter->enables == filter->tunnel_type) {
1754 		ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
1755 		if (ret) {
1756 			rte_flow_error_set(error, -ret,
1757 					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1758 					   "Unable to query tunnel to VF");
1759 			goto free_filter;
1760 		}
1761 		if (tun_type == (1U << filter->tunnel_type)) {
1762 			ret =
1763 			bnxt_hwrm_tunnel_redirect_free(bp,
1764 						       filter->tunnel_type);
1765 			if (ret) {
1766 				PMD_DRV_LOG(ERR,
1767 					    "Unable to free existing tunnel\n");
1768 				rte_flow_error_set(error, -ret,
1769 						   RTE_FLOW_ERROR_TYPE_HANDLE,
1770 						   NULL,
1771 						   "Unable to free preexisting "
1772 						   "tunnel on VF");
1773 				goto free_filter;
1774 			}
1775 		}
1776 		ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type);
1777 		if (ret) {
1778 			rte_flow_error_set(error, -ret,
1779 					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1780 					   "Unable to redirect tunnel to VF");
1781 			goto free_filter;
1782 		}
1783 		vnic = &bp->vnic_info[0];
1784 		goto done;
1785 	}
1786 
1787 	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
1788 		filter->enables |=
1789 			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1790 		ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
1791 		if (ret != 0) {
1792 			rte_flow_error_set(error, -ret,
1793 					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1794 					   "Failed to create EM filter");
1795 			goto free_filter;
1796 		}
1797 	}
1798 
1799 	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
1800 		filter->enables |=
1801 			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1802 		ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
1803 		if (ret != 0) {
1804 			rte_flow_error_set(error, -ret,
1805 					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1806 					   "Failed to create ntuple filter");
1807 			goto free_filter;
1808 		}
1809 	}
1810 
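	/* Look up the VNIC serving the filter's destination (queue/RSS group). */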
1811 	vnic = find_matching_vnic(bp, filter);
1812 done:
1813 	if (!ret || update_flow) {
1814 		flow->filter = filter;
1815 		flow->vnic = vnic;
1816 		if (update_flow) {
1817 			ret = -EXDEV;
1818 			goto free_flow;
1819 		}
1820 
1821 		if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
1822 			PMD_DRV_LOG(DEBUG,
1823 				    "Mark action: mark id 0x%x, flow id 0x%x\n",
1824 				    filter->mark, filter->flow_id);
1825 
1826 			/* Flow IDs for TCAM and EM filters are expected to
1827 			 * fit in 16 bits; other modes are not supported.
1828 			 */
1829 			flow_id = filter->flow_id & BNXT_FLOW_ID_MASK;
1830 			if (bp->mark_table[flow_id].valid) {
1831 				rte_flow_error_set(error, EEXIST,
1832 						   RTE_FLOW_ERROR_TYPE_HANDLE,
1833 						   NULL,
1834 						   "Flow with mark id exists");
1835 				bnxt_clear_one_vnic_filter(bp, filter);
1836 				goto free_filter;
1837 			}
1838 			bp->mark_table[flow_id].valid = true;
1839 			bp->mark_table[flow_id].mark_id = filter->mark;
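			/* The Rx path can later use this table to translate
			 * the HW flow id reported in the Rx completion back
			 * to the user-visible mark value.
			 */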
1840 		}
1841 
1842 		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
1843 		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
1844 
1845 		if (BNXT_FLOW_XSTATS_EN(bp))
1846 			bp->flow_stat->flow_count++;
1847 		bnxt_release_flow_lock(bp);
1848 		bnxt_setup_flow_counter(bp);
1849 		PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
1850 		return flow;
1851 	}
1852 
1853 free_filter:
1854 	bnxt_free_filter(bp, filter);
1855 free_flow:
1856 	if (ret == -EEXIST)
1857 		rte_flow_error_set(error, ret,
1858 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1859 				   "Matching Flow exists.");
1860 	else if (ret == -EXDEV)
1861 		rte_flow_error_set(error, 0,
1862 				   RTE_FLOW_ERROR_TYPE_NONE, NULL,
1863 				   "Flow with pattern exists, updating destination queue");
1864 	else if (!rte_errno)
1865 		rte_flow_error_set(error, -ret,
1866 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1867 				   "Failed to create flow.");
1868 	rte_free(flow);
1869 	flow = NULL;
1870 	bnxt_release_flow_lock(bp);
1871 	return flow;
1872 }
1873 
bnxt_handle_tunnel_redirect_destroy(struct bnxt * bp,struct bnxt_filter_info * filter,struct rte_flow_error * error)1874 static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
1875 					       struct bnxt_filter_info *filter,
1876 					       struct rte_flow_error *error)
1877 {
1878 	uint16_t tun_dst_fid;
1879 	uint32_t tun_type;
1880 	int ret = 0;
1881 
1882 	ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
1883 	if (ret) {
1884 		rte_flow_error_set(error, -ret,
1885 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1886 				   "Unable to query tunnel to VF");
1887 		return ret;
1888 	}
1889 	if (tun_type == (1U << filter->tunnel_type)) {
1890 		ret = bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type,
1891 						     &tun_dst_fid);
1892 		if (ret) {
1893 			rte_flow_error_set(error, -ret,
1894 					   RTE_FLOW_ERROR_TYPE_HANDLE,
1895 					   NULL,
1896 					   "tunnel_redirect info cmd fail");
1897 			return ret;
1898 		}
1899 		PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x vf->fid = %x\n",
1900 			    tun_dst_fid + bp->first_vf_id, bp->fw_fid);
1901 
1902 		/* The tunnel doesn't belong to this VF, so don't send the HWRM
1903 		 * cmd; just delete the flow from the driver.
1904 		 */
1905 		if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id))
1906 			PMD_DRV_LOG(ERR,
1907 				    "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n");
1908 		else
1909 			ret = bnxt_hwrm_tunnel_redirect_free(bp,
1910 							filter->tunnel_type);
1911 	}
1912 	return ret;
1913 }
1914 
1915 static int
_bnxt_flow_destroy(struct bnxt * bp,struct rte_flow * flow,struct rte_flow_error * error)1916 _bnxt_flow_destroy(struct bnxt *bp,
1917 		   struct rte_flow *flow,
1918 		   struct rte_flow_error *error)
1919 {
1920 	struct bnxt_filter_info *filter;
1921 	struct bnxt_vnic_info *vnic;
1922 	int ret = 0;
1923 	uint32_t flow_id;
1924 
1925 	filter = flow->filter;
1926 	vnic = flow->vnic;
1927 
1928 	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
1929 	    filter->enables == filter->tunnel_type) {
1930 		ret = bnxt_handle_tunnel_redirect_destroy(bp, filter, error);
1931 		if (!ret)
1932 			goto done;
1933 		else
1934 			return ret;
1935 	}
1936 
1937 	ret = bnxt_match_filter(bp, filter);
1938 	if (ret == 0)
1939 		PMD_DRV_LOG(ERR, "Could not find matching flow\n");
1940 
1941 	if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
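		/* Invalidate the mark table entry so the Rx path stops
		 * reporting this mark.
		 */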
1942 		flow_id = filter->flow_id & BNXT_FLOW_ID_MASK;
1943 		memset(&bp->mark_table[flow_id], 0,
1944 		       sizeof(bp->mark_table[flow_id]));
1945 		filter->flow_id = 0;
1946 	}
1947 
1948 	ret = bnxt_clear_one_vnic_filter(bp, filter);
1949 
1950 done:
1951 	if (!ret) {
1952 		/* If it is an L2 drop filter, the FW updates the BC/MC
1953 		 * records when the filter is created.
1954 		 * Once this filter is removed, issue the set_rx_mask command
1955 		 * to restore the BC/MC records in the HW to the settings
1956 		 * that existed before the drop filter was created.
1957 		 */
1958 		if (filter->valid_flags & BNXT_FLOW_L2_DROP_FLAG)
1959 			bnxt_set_rx_mask_no_vlan(bp, &bp->vnic_info[0]);
1960 
1961 		STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
1962 		bnxt_free_filter(bp, filter);
1963 		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
1964 		rte_free(flow);
1965 		if (BNXT_FLOW_XSTATS_EN(bp))
1966 			bp->flow_stat->flow_count--;
1967 
1968 		/* If this was the last flow associated with this vnic,
1969 		 * switch the queue back to the RSS pool.
1970 		 */
1971 		if (vnic && !vnic->func_default &&
1972 		    STAILQ_EMPTY(&vnic->flow_list)) {
1973 			rte_free(vnic->fw_grp_ids);
1974 			if (vnic->rx_queue_cnt > 1)
1975 				bnxt_hwrm_vnic_ctx_free(bp, vnic);
1976 
1977 			bnxt_hwrm_vnic_free(bp, vnic);
1978 			vnic->rx_queue_cnt = 0;
1979 		}
1980 	} else {
1981 		rte_flow_error_set(error, -ret,
1982 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1983 				   "Failed to destroy flow.");
1984 	}
1985 
1986 	return ret;
1987 }
1988 
1989 static int
bnxt_flow_destroy(struct rte_eth_dev * dev,struct rte_flow * flow,struct rte_flow_error * error)1990 bnxt_flow_destroy(struct rte_eth_dev *dev,
1991 		  struct rte_flow *flow,
1992 		  struct rte_flow_error *error)
1993 {
1994 	struct bnxt *bp = dev->data->dev_private;
1995 	int ret = 0;
1996 
1997 	bnxt_acquire_flow_lock(bp);
1998 	if (!flow) {
1999 		rte_flow_error_set(error, EINVAL,
2000 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2001 				   "Invalid flow: failed to destroy flow.");
2002 		bnxt_release_flow_lock(bp);
2003 		return -EINVAL;
2004 	}
2005 
2006 	if (!flow->filter) {
2007 		rte_flow_error_set(error, EINVAL,
2008 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2009 				   "Invalid flow: failed to destroy flow.");
2010 		bnxt_release_flow_lock(bp);
2011 		return -EINVAL;
2012 	}
2013 	ret = _bnxt_flow_destroy(bp, flow, error);
2014 	bnxt_release_flow_lock(bp);
2015 
2016 	return ret;
2017 }
2018 
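/* Stop the background flow-counter refresh: clear the flag checked by the
 * alarm callback and cancel any pending bnxt_flow_cnt_alarm_cb alarm.
 */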
bnxt_cancel_fc_thread(struct bnxt * bp)2019 void bnxt_cancel_fc_thread(struct bnxt *bp)
2020 {
2021 	bp->flags &= ~BNXT_FLAG_FC_THREAD;
2022 	rte_eal_alarm_cancel(bnxt_flow_cnt_alarm_cb, (void *)bp);
2023 }
2024 
2025 static int
bnxt_flow_flush(struct rte_eth_dev * dev,struct rte_flow_error * error)2026 bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
2027 {
2028 	struct bnxt *bp = dev->data->dev_private;
2029 	struct bnxt_vnic_info *vnic;
2030 	struct rte_flow *flow;
2031 	unsigned int i;
2032 	int ret = 0;
2033 
2034 	bnxt_acquire_flow_lock(bp);
2035 	for (i = 0; i < bp->max_vnics; i++) {
2036 		vnic = &bp->vnic_info[i];
2037 		if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID)
2038 			continue;
2039 
2040 		while (!STAILQ_EMPTY(&vnic->flow_list)) {
2041 			flow = STAILQ_FIRST(&vnic->flow_list);
2042 
2043 			if (!flow->filter)
2044 				continue;
2045 
2046 			ret = _bnxt_flow_destroy(bp, flow, error);
2047 			if (ret)
2048 				break;
2049 		}
2050 	}
2051 
2052 	bnxt_cancel_fc_thread(bp);
2053 	bnxt_release_flow_lock(bp);
2054 
2055 	return ret;
2056 }
2057 
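/* Callbacks dispatched by the generic rte_flow API for bnxt ports. */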
2058 const struct rte_flow_ops bnxt_flow_ops = {
2059 	.validate = bnxt_flow_validate,
2060 	.create = bnxt_flow_create,
2061 	.destroy = bnxt_flow_destroy,
2062 	.flush = bnxt_flow_flush,
2063 };
2064
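
/*
 * Illustrative sketch only (not part of the driver): how an application would
 * reach bnxt_flow_ops.create above through the generic rte_flow API. Assumes
 * port_id is a started bnxt port and Rx queue 1 exists; the function name and
 * guard macro below are hypothetical.
 */
#ifdef BNXT_FLOW_EXAMPLE
static struct rte_flow *
example_ipv4_to_queue(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	/* These calls dispatch to bnxt_flow_validate()/bnxt_flow_create(). */
	if (rte_flow_validate(port_id, &attr, pattern, actions, &error) != 0)
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, &error);
}
#endif /* BNXT_FLOW_EXAMPLE */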