/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include "bnxt.h"
#include "bnxt_tf_common.h"
#include "ulp_rte_parser.h"
#include "ulp_matcher.h"
#include "ulp_flow_db.h"
#include "ulp_mapper.h"
#include "ulp_fc_mgr.h"
#include "ulp_port_db.h"
#include "ulp_ha_mgr.h"
#include "ulp_tun.h"
#include <rte_malloc.h>

static int32_t
bnxt_ulp_flow_validate_args(const struct rte_flow_attr *attr,
			    const struct rte_flow_item pattern[],
			    const struct rte_flow_action actions[],
			    struct rte_flow_error *error)
{
	/* Validate that none of the required arguments are NULL */
	if (!error)
		return BNXT_TF_RC_ERROR;

	if (!pattern) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL,
				   "NULL pattern.");
		return BNXT_TF_RC_ERROR;
	}

	if (!actions) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL,
				   "NULL action.");
		return BNXT_TF_RC_ERROR;
	}

	if (!attr) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL,
				   "NULL attribute.");
		return BNXT_TF_RC_ERROR;
	}

	if (attr->egress && attr->ingress) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   attr,
				   "EGRESS AND INGRESS UNSUPPORTED");
		return BNXT_TF_RC_ERROR;
	}
	return BNXT_TF_RC_SUCCESS;
}

static inline void
bnxt_ulp_set_dir_attributes(struct ulp_rte_parser_params *params,
			    const struct rte_flow_attr *attr)
{
	/* Set the flow attributes */
	if (attr->egress)
		params->dir_attr |= BNXT_ULP_FLOW_ATTR_EGRESS;
	if (attr->ingress)
		params->dir_attr |= BNXT_ULP_FLOW_ATTR_INGRESS;
#if RTE_VERSION_NUM(17, 11, 10, 16) < RTE_VERSION
	if (attr->transfer)
		params->dir_attr |= BNXT_ULP_FLOW_ATTR_TRANSFER;
#endif
}

void
bnxt_ulp_init_mapper_params(struct bnxt_ulp_mapper_create_parms *mapper_cparms,
			    struct ulp_rte_parser_params *params,
			    enum bnxt_ulp_fdb_type flow_type)
{
	uint32_t ulp_flags = 0;

	memset(mapper_cparms, 0, sizeof(*mapper_cparms));
	mapper_cparms->flow_type = flow_type;
	mapper_cparms->app_priority = params->priority;
	mapper_cparms->dir_attr = params->dir_attr;
	mapper_cparms->class_tid = params->class_id;
	mapper_cparms->act_tid = params->act_tmpl;
	mapper_cparms->func_id = params->func_id;
	mapper_cparms->hdr_bitmap = &params->hdr_bitmap;
	mapper_cparms->enc_hdr_bitmap = &params->enc_hdr_bitmap;
	mapper_cparms->hdr_field = params->hdr_field;
	mapper_cparms->enc_field = params->enc_field;
	mapper_cparms->comp_fld = params->comp_fld;
	mapper_cparms->act = &params->act_bitmap;
	mapper_cparms->act_prop = &params->act_prop;
	mapper_cparms->flow_id = params->fid;
	mapper_cparms->parent_flow = params->parent_flow;
	mapper_cparms->child_flow = params->child_flow;
	mapper_cparms->fld_bitmap = &params->fld_bitmap;
	mapper_cparms->flow_pattern_id = params->flow_pattern_id;
	mapper_cparms->act_pattern_id = params->act_pattern_id;
	mapper_cparms->app_id = params->app_id;
	mapper_cparms->port_id = params->port_id;
	mapper_cparms->tun_idx = params->tun_idx;

	/* update the signature fields into the computed field list */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_HDR_SIG_ID,
			    params->hdr_sig_id);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FLOW_SIG_ID,
			    params->flow_sig_id);

	if (bnxt_ulp_cntxt_ptr2_ulp_flags_get(params->ulp_ctx, &ulp_flags))
		return;

	/* update the WC Priority flag */
	if (ULP_HIGH_AVAIL_IS_ENABLED(ulp_flags)) {
		enum ulp_ha_mgr_region region = ULP_HA_REGION_LOW;
		int32_t rc;

		rc = ulp_ha_mgr_region_get(params->ulp_ctx, &region);
		if (rc)
			BNXT_TF_DBG(ERR, "Unable to get WC region\n");
		if (region == ULP_HA_REGION_HI)
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_WC_IS_HA_HIGH_REG,
					    1);
	}

	/* Update the socket direct flag */
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_SVIF_IGNORE)) {
		uint32_t ifindex;
		uint16_t vport;

		/* Get the port db ifindex */
		if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
						      params->port_id,
						      &ifindex)) {
			BNXT_TF_DBG(ERR, "Invalid port id %u\n",
				    params->port_id);
			return;
		}
		/* Update the phy port of the other interface */
		if (ulp_port_db_vport_get(params->ulp_ctx, ifindex, &vport)) {
			BNXT_TF_DBG(ERR, "Invalid port if index %u\n", ifindex);
			return;
		}
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SOCKET_DIRECT_VPORT,
				    (vport == 1) ? 2 : 1);
	}
}

/* Function to create the rte flow. */
static struct rte_flow *
bnxt_ulp_flow_create(struct rte_eth_dev *dev,
		     const struct rte_flow_attr *attr,
		     const struct rte_flow_item pattern[],
		     const struct rte_flow_action actions[],
		     struct rte_flow_error *error)
{
	struct bnxt_ulp_mapper_create_parms mapper_cparms = { 0 };
	struct ulp_rte_parser_params params;
	struct bnxt_ulp_context *ulp_ctx;
	int rc, ret = BNXT_TF_RC_ERROR;
	struct rte_flow *flow_id;
	uint16_t func_id;
	uint32_t fid;

	if (bnxt_ulp_flow_validate_args(attr,
					pattern, actions,
					error) == BNXT_TF_RC_ERROR) {
		BNXT_TF_DBG(ERR, "Invalid arguments being passed\n");
		goto flow_error;
	}

	ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev);
	if (!ulp_ctx) {
		BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
		goto flow_error;
	}

	/* Initialize the parser params */
	memset(&params, 0, sizeof(struct ulp_rte_parser_params));
	params.ulp_ctx = ulp_ctx;

	if (bnxt_ulp_cntxt_app_id_get(params.ulp_ctx, &params.app_id)) {
		BNXT_TF_DBG(ERR, "failed to get the app id\n");
		goto flow_error;
	}

	/* Set the flow attributes */
	bnxt_ulp_set_dir_attributes(&params, attr);

	/* copy the device port id and direction for further processing */
	ULP_COMP_FLD_IDX_WR(&params, BNXT_ULP_CF_IDX_INCOMING_IF,
			    dev->data->port_id);
	ULP_COMP_FLD_IDX_WR(&params, BNXT_ULP_CF_IDX_DEV_PORT_ID,
			    dev->data->port_id);
	ULP_COMP_FLD_IDX_WR(&params, BNXT_ULP_CF_IDX_SVIF_FLAG,
			    BNXT_ULP_INVALID_SVIF_VAL);

	/* Get the function id */
	if (ulp_port_db_port_func_id_get(ulp_ctx,
					 dev->data->port_id,
					 &func_id)) {
		BNXT_TF_DBG(ERR, "conversion of port to func id failed\n");
		goto flow_error;
	}

	/* Protect flow creation */
	if (bnxt_ulp_cntxt_acquire_fdb_lock(ulp_ctx)) {
		BNXT_TF_DBG(ERR, "Flow db lock acquire failed\n");
		goto flow_error;
	}

	/* Allocate a flow id to which all resources of the flow are attached.
	 * Once allocated, every error path must walk the flow's resource list
	 * and free each entry.
	 */
	rc = ulp_flow_db_fid_alloc(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR,
				   func_id, &fid);
	if (rc) {
		BNXT_TF_DBG(ERR, "Unable to allocate flow table entry\n");
		goto release_lock;
	}

	/* Parse the rte flow pattern */
	ret = bnxt_ulp_rte_parser_hdr_parse(pattern, &params);
	if (ret != BNXT_TF_RC_SUCCESS)
		goto free_fid;

	/* Parse the rte flow action */
	ret = bnxt_ulp_rte_parser_act_parse(actions, &params);
	if (ret != BNXT_TF_RC_SUCCESS)
		goto free_fid;

	params.fid = fid;
	params.func_id = func_id;
	params.priority = attr->priority;
	params.port_id = dev->data->port_id;

	/* Perform the rte flow post process */
	bnxt_ulp_rte_parser_post_process(&params);

	/* do the tunnel offload process if any */
	ret = ulp_tunnel_offload_process(&params);
	if (ret == BNXT_TF_RC_ERROR)
		goto free_fid;

	ret = ulp_matcher_pattern_match(&params, &params.class_id);
	if (ret != BNXT_TF_RC_SUCCESS)
		goto free_fid;

	ret = ulp_matcher_action_match(&params, &params.act_tmpl);
	if (ret != BNXT_TF_RC_SUCCESS)
		goto free_fid;

	bnxt_ulp_init_mapper_params(&mapper_cparms, &params,
				    BNXT_ULP_FDB_TYPE_REGULAR);
	/* Call the ulp mapper to create the flow in the hardware. */
	ret = ulp_mapper_flow_create(ulp_ctx, &mapper_cparms);
	if (ret)
		goto free_fid;

	bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx);

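	/* The 32-bit flow id doubles as the rte_flow handle returned to the
	 * application; bnxt_ulp_flow_destroy() recovers it with the reverse
	 * cast.
	 */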
	flow_id = (struct rte_flow *)((uintptr_t)fid);
	return flow_id;

free_fid:
	ulp_flow_db_fid_free(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR, fid);
release_lock:
	bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx);
flow_error:
	rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	return NULL;
}

/* Function to validate the rte flow. */
static int
bnxt_ulp_flow_validate(struct rte_eth_dev *dev,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_flow_error *error)
{
	struct ulp_rte_parser_params params;
	struct bnxt_ulp_context *ulp_ctx;
	uint32_t class_id, act_tmpl;
	int ret = BNXT_TF_RC_ERROR;

	if (bnxt_ulp_flow_validate_args(attr,
					pattern, actions,
					error) == BNXT_TF_RC_ERROR) {
		BNXT_TF_DBG(ERR, "Invalid arguments being passed\n");
		goto parse_error;
	}

	ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev);
	if (!ulp_ctx) {
		BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
		goto parse_error;
	}

	/* Initialize the parser params */
	memset(&params, 0, sizeof(struct ulp_rte_parser_params));
	params.ulp_ctx = ulp_ctx;

	if (bnxt_ulp_cntxt_app_id_get(params.ulp_ctx, &params.app_id)) {
		BNXT_TF_DBG(ERR, "failed to get the app id\n");
		goto parse_error;
	}

	/* Set the flow attributes */
	bnxt_ulp_set_dir_attributes(&params, attr);

	/* Parse the rte flow pattern */
	ret = bnxt_ulp_rte_parser_hdr_parse(pattern, &params);
	if (ret != BNXT_TF_RC_SUCCESS)
		goto parse_error;

	/* Parse the rte flow action */
	ret = bnxt_ulp_rte_parser_act_parse(actions, &params);
	if (ret != BNXT_TF_RC_SUCCESS)
		goto parse_error;

	/* Perform the rte flow post process */
	bnxt_ulp_rte_parser_post_process(&params);

	/* do the tunnel offload process if any */
	ret = ulp_tunnel_offload_process(&params);
	if (ret == BNXT_TF_RC_ERROR)
		goto parse_error;

	ret = ulp_matcher_pattern_match(&params, &class_id);
	if (ret != BNXT_TF_RC_SUCCESS)
		goto parse_error;

	ret = ulp_matcher_action_match(&params, &act_tmpl);
	if (ret != BNXT_TF_RC_SUCCESS)
		goto parse_error;

	/* all good, return success */
	return ret;

parse_error:
	rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to validate flow.");
	return -EINVAL;
}

/* Function to destroy the rte flow. */
int
bnxt_ulp_flow_destroy(struct rte_eth_dev *dev,
		      struct rte_flow *flow,
		      struct rte_flow_error *error)
{
	struct bnxt_ulp_context *ulp_ctx;
	uint32_t flow_id;
	uint16_t func_id;
	int ret;

	ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(dev);
	if (!ulp_ctx) {
		BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
		if (error)
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Failed to destroy flow.");
		return -EINVAL;
	}

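	/* The rte_flow handle is the flow id encoded at create time */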
	flow_id = (uint32_t)(uintptr_t)flow;

	if (ulp_port_db_port_func_id_get(ulp_ctx,
					 dev->data->port_id,
					 &func_id)) {
		BNXT_TF_DBG(ERR, "conversion of port to func id failed\n");
		if (error)
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Failed to destroy flow.");
		return -EINVAL;
	}

	if (ulp_flow_db_validate_flow_func(ulp_ctx, flow_id, func_id) ==
	    false) {
		BNXT_TF_DBG(ERR, "Incorrect device params\n");
		if (error)
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Failed to destroy flow.");
		return -EINVAL;
	}

	if (bnxt_ulp_cntxt_acquire_fdb_lock(ulp_ctx)) {
		BNXT_TF_DBG(ERR, "Flow db lock acquire failed\n");
		return -EINVAL;
	}
	ret = ulp_mapper_flow_destroy(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR,
				      flow_id);
	if (ret) {
		BNXT_TF_DBG(ERR, "Failed to destroy flow.\n");
		if (error)
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Failed to destroy flow.");
	}
	bnxt_ulp_cntxt_release_fdb_lock(ulp_ctx);

	return ret;
}

/* Function to destroy the rte flows. */
static int32_t
bnxt_ulp_flow_flush(struct rte_eth_dev *eth_dev,
		    struct rte_flow_error *error)
{
	struct bnxt_ulp_context *ulp_ctx;
	int32_t ret = 0;
	uint16_t func_id;

	ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
	if (!ulp_ctx)
		return ret;

	/* Free the resources for the last device */
	if (ulp_ctx_deinit_allowed(ulp_ctx)) {
		ret = ulp_flow_db_session_flow_flush(ulp_ctx);
	} else if (bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctx)) {
		ret = ulp_port_db_port_func_id_get(ulp_ctx,
						   eth_dev->data->port_id,
						   &func_id);
		if (!ret)
			ret = ulp_flow_db_function_flow_flush(ulp_ctx, func_id);
		else
			BNXT_TF_DBG(ERR, "convert port to func id failed\n");
	}
	if (ret)
		rte_flow_error_set(error, ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to flush flow.");
	return ret;
}

/* Function to query the rte flows. */
static int32_t
bnxt_ulp_flow_query(struct rte_eth_dev *eth_dev,
		    struct rte_flow *flow,
		    const struct rte_flow_action *action,
		    void *data,
		    struct rte_flow_error *error)
{
	int rc = 0;
	struct bnxt_ulp_context *ulp_ctx;
	struct rte_flow_query_count *count;
	uint32_t flow_id;

	ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
	if (!ulp_ctx) {
		BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to query flow.");
		return -EINVAL;
	}

	flow_id = (uint32_t)(uintptr_t)flow;

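	/* Only the COUNT action can be queried; counters are read through the
	 * ULP flow counter manager.
	 */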
	switch (action->type) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		count = data;
		rc = ulp_fc_mgr_query_count_get(ulp_ctx, flow_id, count);
		if (rc) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Failed to query flow.");
		}
		break;
	default:
		rc = -EINVAL;
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "Unsupported action item");
	}

	return rc;
}

/* Tunnel offload APIs */
#define BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS	1

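/*
 * tunnel_decap_set callback: resolve the tunnel to its application tunnel
 * entry and return the PMD-private decap action that the application adds
 * to its tunnel decap flow rules.
 */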
static int
bnxt_ulp_tunnel_decap_set(struct rte_eth_dev *eth_dev,
			  struct rte_flow_tunnel *tunnel,
			  struct rte_flow_action **pmd_actions,
			  uint32_t *num_of_actions,
			  struct rte_flow_error *error)
{
	struct bnxt_ulp_context *ulp_ctx;
	struct bnxt_flow_app_tun_ent *tun_entry;
	int32_t rc = 0;

	ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
	if (ulp_ctx == NULL) {
		BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "ULP context uninitialized");
		return -EINVAL;
	}

	if (tunnel == NULL) {
		BNXT_TF_DBG(ERR, "No tunnel specified\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "no tunnel specified");
		return -EINVAL;
	}

	if (tunnel->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
		BNXT_TF_DBG(ERR, "Tunnel type unsupported\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "tunnel type unsupported");
		return -EINVAL;
	}

	rc = ulp_app_tun_search_entry(ulp_ctx, tunnel, &tun_entry);
	if (rc < 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "tunnel decap set failed");
		return -EINVAL;
	}

	rc = ulp_app_tun_entry_set_decap_action(tun_entry);
	if (rc < 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "tunnel decap set failed");
		return -EINVAL;
	}

	*pmd_actions = &tun_entry->action;
	*num_of_actions = BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS;
	return 0;
}

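/*
 * tunnel_match callback: return the PMD-private item that the application
 * adds to the pattern of flow rules matching packets of this offloaded
 * VXLAN tunnel.
 */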
static int
bnxt_ulp_tunnel_match(struct rte_eth_dev *eth_dev,
		      struct rte_flow_tunnel *tunnel,
		      struct rte_flow_item **pmd_items,
		      uint32_t *num_of_items,
		      struct rte_flow_error *error)
{
	struct bnxt_ulp_context *ulp_ctx;
	struct bnxt_flow_app_tun_ent *tun_entry;
	int32_t rc = 0;

	ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
	if (ulp_ctx == NULL) {
		BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "ULP context uninitialized");
		return -EINVAL;
	}

	if (tunnel == NULL) {
		BNXT_TF_DBG(ERR, "No tunnel specified\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "no tunnel specified");
		return -EINVAL;
	}

	if (tunnel->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
		BNXT_TF_DBG(ERR, "Tunnel type unsupported\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "tunnel type unsupported");
		return -EINVAL;
	}

	rc = ulp_app_tun_search_entry(ulp_ctx, tunnel, &tun_entry);
	if (rc < 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "tunnel match set failed");
		return -EINVAL;
	}

	rc = ulp_app_tun_entry_set_decap_item(tun_entry);
	if (rc < 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "tunnel match set failed");
		return -EINVAL;
	}

	*pmd_items = &tun_entry->item;
	*num_of_items = BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS;
	return 0;
}

static int
bnxt_ulp_tunnel_decap_release(struct rte_eth_dev *eth_dev,
			      struct rte_flow_action *pmd_actions,
			      uint32_t num_actions,
			      struct rte_flow_error *error)
{
	struct bnxt_ulp_context *ulp_ctx;
	struct bnxt_flow_app_tun_ent *tun_entry;
	const struct rte_flow_action *action_item = pmd_actions;

	ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
	if (ulp_ctx == NULL) {
		BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "ULP context uninitialized");
		return -EINVAL;
	}
	if (num_actions != BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS) {
		BNXT_TF_DBG(ERR, "num actions is invalid\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "num actions is invalid");
		return -EINVAL;
	}
	while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
		if (action_item->type == (typeof(tun_entry->action.type))
		    BNXT_RTE_FLOW_ACTION_TYPE_VXLAN_DECAP) {
			tun_entry = ulp_app_tun_match_entry(ulp_ctx,
							    action_item->conf);
			ulp_app_tun_entry_delete(tun_entry);
		}
		action_item++;
	}
	return 0;
}

static int
bnxt_ulp_tunnel_item_release(struct rte_eth_dev *eth_dev,
			     struct rte_flow_item *pmd_items,
			     uint32_t num_items,
			     struct rte_flow_error *error)
{
	struct bnxt_ulp_context *ulp_ctx;
	struct bnxt_flow_app_tun_ent *tun_entry;

	ulp_ctx = bnxt_ulp_eth_dev_ptr2_cntxt_get(eth_dev);
	if (ulp_ctx == NULL) {
		BNXT_TF_DBG(ERR, "ULP context is not initialized\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "ULP context uninitialized");
		return -EINVAL;
	}
	if (num_items != BNXT_ULP_TUNNEL_OFFLOAD_NUM_ITEMS) {
		BNXT_TF_DBG(ERR, "num items is invalid\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "num items is invalid");
		return -EINVAL;
	}

	tun_entry = ulp_app_tun_match_entry(ulp_ctx, pmd_items->spec);
	ulp_app_tun_entry_delete(tun_entry);
	return 0;
}

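/* rte_flow operations exposed for the TruFlow (ULP) offload path */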
const struct rte_flow_ops bnxt_ulp_rte_flow_ops = {
	.validate = bnxt_ulp_flow_validate,
	.create = bnxt_ulp_flow_create,
	.destroy = bnxt_ulp_flow_destroy,
	.flush = bnxt_ulp_flow_flush,
	.query = bnxt_ulp_flow_query,
	.isolate = NULL,
	/* Tunnel offload callbacks */
	.tunnel_decap_set = bnxt_ulp_tunnel_decap_set,
	.tunnel_match = bnxt_ulp_tunnel_match,
	.tunnel_action_decap_release = bnxt_ulp_tunnel_decap_release,
	.tunnel_item_release = bnxt_ulp_tunnel_item_release,
	.get_restore_info = NULL
};