xref: /dpdk/drivers/net/bnxt/tf_ulp/ulp_tun.c (revision bdf4a3c6)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include "bnxt.h"
#include "bnxt_ulp.h"
#include "ulp_tun.h"
#include "ulp_utils.h"

/* Returns a negative errno on failure, 1 if a new entry was allocated or
 * 0 if an existing entry was reused.
 */
int32_t
ulp_app_tun_search_entry(struct bnxt_ulp_context *ulp_ctx,
			 struct rte_flow_tunnel *app_tunnel,
			 struct bnxt_flow_app_tun_ent **tun_entry)
{
	struct bnxt_flow_app_tun_ent *tun_ent_list;
	int32_t i, rc = 0, free_entry = -1;

	tun_ent_list = bnxt_ulp_cntxt_ptr2_app_tun_list_get(ulp_ctx);
	if (!tun_ent_list) {
		BNXT_TF_DBG(ERR, "unable to get the app tunnel list\n");
		return -EINVAL;
	}

	for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
		if (!tun_ent_list[i].ref_cnt) {
			if (free_entry < 0)
				free_entry = i;
		} else {
			if (!memcmp(&tun_ent_list[i].app_tunnel,
				    app_tunnel,
				    sizeof(struct rte_flow_tunnel))) {
				*tun_entry = &tun_ent_list[i];
				/* Take a reference on the matched slot, not
				 * on free_entry, which may still be -1 here.
				 */
				tun_ent_list[i].ref_cnt++;
				return rc;
			}
		}
	}
	if (free_entry >= 0) {
		*tun_entry = &tun_ent_list[free_entry];
		memcpy(&tun_ent_list[free_entry].app_tunnel, app_tunnel,
		       sizeof(struct rte_flow_tunnel));
		tun_ent_list[free_entry].ref_cnt = 1;
		rc = 1;
	} else {
		BNXT_TF_DBG(ERR, "ulp app tunnel list is full\n");
		return -ENOMEM;
	}

	return rc;
}
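
/*
 * Minimal usage sketch (hypothetical caller, not part of this driver),
 * assuming only the two helpers above and the declarations in ulp_tun.h:
 * acquire a cached entry for an application tunnel and drop the reference
 * again when it is no longer needed.  Per the contract above, a negative
 * return is an error, 1 means a fresh slot was allocated and 0 means an
 * existing slot was reused.
 */
#ifdef BNXT_ULP_TUN_USAGE_SKETCH
static int32_t
example_app_tun_acquire(struct bnxt_ulp_context *ulp_ctx,
			struct rte_flow_tunnel *app_tunnel)
{
	struct bnxt_flow_app_tun_ent *tun_ent = NULL;
	int32_t rc;

	rc = ulp_app_tun_search_entry(ulp_ctx, app_tunnel, &tun_ent);
	if (rc < 0)
		return rc;	/* list unavailable (-EINVAL) or full (-ENOMEM) */

	/* ... use tun_ent; on a later failure release the reference ... */
	ulp_app_tun_entry_delete(tun_ent);
	return 0;
}
#endif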

void
ulp_app_tun_entry_delete(struct bnxt_flow_app_tun_ent *tun_entry)
{
	if (tun_entry) {
		if (tun_entry->ref_cnt) {
			tun_entry->ref_cnt--;
			if (!tun_entry->ref_cnt)
				memset(tun_entry, 0,
				       sizeof(struct bnxt_flow_app_tun_ent));
		}
	}
}

int32_t
ulp_app_tun_entry_set_decap_action(struct bnxt_flow_app_tun_ent *tun_entry)
{
	if (!tun_entry)
		return -EINVAL;

	tun_entry->action.type = (typeof(tun_entry->action.type))
			      BNXT_RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
	tun_entry->action.conf = tun_entry;
	return 0;
}

int32_t
ulp_app_tun_entry_set_decap_item(struct bnxt_flow_app_tun_ent *tun_entry)
{
	if (!tun_entry)
		return -EINVAL;

	tun_entry->item.type = (typeof(tun_entry->item.type))
			      BNXT_RTE_FLOW_ITEM_TYPE_VXLAN_DECAP;
	tun_entry->item.spec = tun_entry;
	tun_entry->item.last = NULL;
	tun_entry->item.mask = NULL;
	return 0;
}
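
/*
 * Sketch of how the two setters above could be combined by a hypothetical
 * helper (not part of this driver).  It assumes, as the field accesses above
 * suggest, that the tunnel entry embeds an rte_flow_action and an
 * rte_flow_item that stay valid while the entry is referenced, so their
 * addresses can be handed back to the application, e.g. for rte_flow
 * tunnel-offload style decap-set/match processing.
 */
#ifdef BNXT_ULP_TUN_USAGE_SKETCH
static int32_t
example_app_tun_decap_prepare(struct bnxt_ulp_context *ulp_ctx,
			      struct rte_flow_tunnel *app_tunnel,
			      struct rte_flow_action **action,
			      struct rte_flow_item **item)
{
	struct bnxt_flow_app_tun_ent *tun_ent = NULL;
	int32_t rc;

	rc = ulp_app_tun_search_entry(ulp_ctx, app_tunnel, &tun_ent);
	if (rc < 0)
		return rc;

	/* Populate the pinned decap action and item held by the entry. */
	ulp_app_tun_entry_set_decap_action(tun_ent);
	ulp_app_tun_entry_set_decap_item(tun_ent);
	*action = &tun_ent->action;
	*item = &tun_ent->item;
	return 0;
}
#endif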

struct bnxt_flow_app_tun_ent *
ulp_app_tun_match_entry(struct bnxt_ulp_context *ulp_ctx,
			const void *ctx)
{
	struct bnxt_flow_app_tun_ent *tun_ent_list;
	int32_t i;

	tun_ent_list = bnxt_ulp_cntxt_ptr2_app_tun_list_get(ulp_ctx);
	if (!tun_ent_list) {
		BNXT_TF_DBG(ERR, "unable to get the app tunnel list\n");
		return NULL;
	}

	for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
		if (&tun_ent_list[i] == ctx)
			return &tun_ent_list[i];
	}
	return NULL;
}

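/* Look up the tunnel cache entry keyed by the flow's outer destination IP
 * (IPv4 or IPv6).  If no entry matches, the first free slot is claimed and
 * populated with the destination IP and DMAC taken from the parsed header
 * fields.
 */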
static int32_t
ulp_get_tun_entry(struct ulp_rte_parser_params *params,
		  struct bnxt_tun_cache_entry **tun_entry,
		  uint16_t *tun_idx)
{
	int32_t i, first_free_entry = BNXT_ULP_TUN_ENTRY_INVALID;
	struct bnxt_tun_cache_entry *tun_tbl;
	uint32_t dip_idx, dmac_idx, use_ipv4 = 0;

	tun_tbl = bnxt_ulp_cntxt_ptr2_tun_tbl_get(params->ulp_ctx);
	if (!tun_tbl) {
		BNXT_TF_DBG(ERR, "Error: could not get Tunnel table\n");
		return BNXT_TF_RC_ERROR;
	}

	/* get the outer destination ip field index */
	dip_idx = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID);
	dmac_idx = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID);
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
		use_ipv4 = 1;

	for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
		if (!tun_tbl[i].t_dst_ip_valid) {
			if (first_free_entry == BNXT_ULP_TUN_ENTRY_INVALID)
				first_free_entry = i;
			continue;
		}
		/* match on the destination ip of the tunnel */
		if ((use_ipv4 && !memcmp(&tun_tbl[i].t_dst_ip,
					 params->hdr_field[dip_idx].spec,
					 sizeof(rte_be32_t))) ||
		    (!use_ipv4 &&
		     !memcmp(tun_tbl[i].t_dst_ip6,
			     params->hdr_field[dip_idx].spec,
			     sizeof(((struct bnxt_tun_cache_entry *)
				     NULL)->t_dst_ip6)))) {
			*tun_entry = &tun_tbl[i];
			*tun_idx = i;
			return 0;
		}
	}
	if (first_free_entry == BNXT_ULP_TUN_ENTRY_INVALID) {
		BNXT_TF_DBG(ERR, "Error: No entry available in tunnel table\n");
		return BNXT_TF_RC_ERROR;
	}

	*tun_idx = first_free_entry;
	*tun_entry = &tun_tbl[first_free_entry];
	tun_tbl[first_free_entry].t_dst_ip_valid = true;

	/* Update the destination ip and mac */
	if (use_ipv4)
		memcpy(&tun_tbl[first_free_entry].t_dst_ip,
		       params->hdr_field[dip_idx].spec, sizeof(rte_be32_t));
	else
		memcpy(tun_tbl[first_free_entry].t_dst_ip6,
		       params->hdr_field[dip_idx].spec,
		       sizeof(((struct bnxt_tun_cache_entry *)
			       NULL)->t_dst_ip6));
	memcpy(tun_tbl[first_free_entry].t_dmac,
	       params->hdr_field[dmac_idx].spec, RTE_ETHER_ADDR_LEN);

	return 0;
}

/* Tunnel API to delete the tunnel entry */
void
ulp_tunnel_offload_entry_clear(struct bnxt_tun_cache_entry *tun_tbl,
			       uint8_t tun_idx)
{
	memset(&tun_tbl[tun_idx], 0, sizeof(struct bnxt_tun_cache_entry));
}

/* Tunnel API to perform the tunnel offload process for F1/F2 flows */
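/* F1 denotes the outer tunnel flow and F2 the inner flow that terminates
 * the tunnel: F1 is installed as the parent flow (its JUMP action is not
 * offloaded), while F2 is installed as a child flow with an implicit
 * VXLAN decap action, as the branches below show.
 */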
int32_t
ulp_tunnel_offload_process(struct ulp_rte_parser_params *params)
{
	struct bnxt_tun_cache_entry *tun_entry;
	uint16_t tun_idx;
	int32_t rc = BNXT_TF_RC_SUCCESS;

	/* Perform the tunnel offload only for F1 and F2 flows */
	if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			      BNXT_ULP_HDR_BIT_F1) &&
	    !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			      BNXT_ULP_HDR_BIT_F2))
		return rc;

	/* search for the tunnel entry; if not found, create one */
	rc = ulp_get_tun_entry(params, &tun_entry, &tun_idx);
	if (rc == BNXT_TF_RC_ERROR)
		return rc;

	/* Tunnel offload for the outer Tunnel flow */
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_F1)) {
		/* Reset the JUMP action bit in the action bitmap as we don't
		 * offload this action.
		 */
		ULP_BITMAP_RESET(params->act_bitmap.bits,
				 BNXT_ULP_ACT_BIT_JUMP);
		params->parent_flow = true;
		params->tun_idx = tun_idx;
		tun_entry->outer_tun_flow_id = params->fid;
	} else if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
				    BNXT_ULP_HDR_BIT_F2)) {
		ULP_BITMAP_RESET(params->hdr_bitmap.bits,
				 BNXT_ULP_HDR_BIT_F2);
		/* add the vxlan decap action for F2 flows */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_VXLAN_DECAP);
		params->child_flow = true;
		params->tun_idx = tun_idx;
		params->parent_flow = false;
	}
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUNNEL_ID, tun_idx);
	return rc;
}