/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2020 Broadcom
 * All rights reserved.
 */

#include <rte_malloc.h>

#include "ulp_tun.h"
#include "ulp_rte_parser.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "ulp_matcher.h"
#include "ulp_mapper.h"
#include "ulp_flow_db.h"

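/*
 * Tunnel decap offload arrives as two flow requests: F1, the outer
 * tunnel flow, and F2, the inner tunnel flow. An F1 and its related
 * F2s are correlated by the tunnel destination IP address, which is
 * cached in the tunnel cache table. An F2 that arrives before its F1
 * cannot be programmed immediately, so it is cached and installed
 * later as part of F1 processing.
 */
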
/* This function programs the outer tunnel flow in the hardware. */
static int32_t
ulp_install_outer_tun_flow(struct ulp_rte_parser_params *params,
			   struct bnxt_tun_cache_entry *tun_entry,
			   uint16_t tun_idx)
{
	struct bnxt_ulp_mapper_create_parms mparms = { 0 };
	int ret;

	/* Reset the JUMP action bit in the action bitmap as we don't
	 * offload this action.
	 */
	ULP_BITMAP_RESET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_JUMP);

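	/* Mark this as a tunnel F1 (outer) flow in the header bitmap;
	 * the matcher consults the header bitmap when resolving the
	 * class template below.
	 */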
	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);

	ret = ulp_matcher_pattern_match(params, &params->class_id);
	if (ret != BNXT_TF_RC_SUCCESS)
		goto err;

	ret = ulp_matcher_action_match(params, &params->act_tmpl);
	if (ret != BNXT_TF_RC_SUCCESS)
		goto err;

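	/* F1 is the parent flow; deferred F2 flows are linked back to it
	 * via parent_fid when they are installed.
	 */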
	params->parent_flow = true;
	bnxt_ulp_init_mapper_params(&mparms, params,
				    BNXT_ULP_FDB_TYPE_REGULAR);
	mparms.tun_idx = tun_idx;

	/* Call the ulp mapper to create the flow in the hardware. */
	ret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);
	if (ret)
		goto err;

	/* Store the tunnel dmac in the tunnel cache table and use it while
	 * programming tunnel flow F2.
	 */
	memcpy(tun_entry->t_dmac,
	       &params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX].spec,
	       RTE_ETHER_ADDR_LEN);

	tun_entry->valid = true;
	tun_entry->tun_flow_info[params->port_id].state =
				BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
	tun_entry->outer_tun_flow_id = params->fid;

	/* F1 and its related F2s are correlated based on the tunnel
	 * destination IP address.
	 */
	if (tun_entry->t_dst_ip_valid)
		goto done;
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
		memcpy(&tun_entry->t_dst_ip,
		       &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
		       sizeof(rte_be32_t));
	else
		memcpy(tun_entry->t_dst_ip6,
		       &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
		       sizeof(tun_entry->t_dst_ip6));
	tun_entry->t_dst_ip_valid = true;

done:
	return BNXT_TF_RC_FID;

err:
	memset(tun_entry, 0, sizeof(struct bnxt_tun_cache_entry));
	return BNXT_TF_RC_ERROR;
}

/* This function programs the inner tunnel flow in the hardware. */
static void
ulp_install_inner_tun_flow(struct bnxt_tun_cache_entry *tun_entry,
			   struct ulp_rte_parser_params *tun_o_params)
{
	struct bnxt_ulp_mapper_create_parms mparms = { 0 };
	struct ulp_per_port_flow_info *flow_info;
	struct ulp_rte_parser_params *params;
	int ret;

	/* F2 does not carry the tunnel dmac, so use the tunnel dmac that
	 * was stored during F1 programming.
	 */
	flow_info = &tun_entry->tun_flow_info[tun_o_params->port_id];
	params = &flow_info->first_inner_tun_params;
	memcpy(&params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
	       tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
	params->parent_fid = tun_entry->outer_tun_flow_id;
	params->fid = flow_info->first_tun_i_fid;

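	/* Create the deferred F2 flow from the parser parameters cached
	 * when the inner flow was first requested.
	 */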
	bnxt_ulp_init_mapper_params(&mparms, params,
				    BNXT_ULP_FDB_TYPE_REGULAR);

	ret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);
	if (ret)
		PMD_DRV_LOG(ERR, "Failed to create F2 flow.");
}

/* This function installs either both the outer and the inner tunnel
 * flows, or just the outer tunnel flow, based on the flow state.
 */
static int32_t
ulp_post_process_outer_tun_flow(struct ulp_rte_parser_params *params,
				struct bnxt_tun_cache_entry *tun_entry,
				uint16_t tun_idx)
{
	enum bnxt_ulp_tun_flow_state flow_state;
	int ret;

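	/* Capture the per-port flow state before F1 installation, which
	 * advances it to BNXT_ULP_FLOW_STATE_TUN_O_OFFLD.
	 */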
	flow_state = tun_entry->tun_flow_info[params->port_id].state;
	ret = ulp_install_outer_tun_flow(params, tun_entry, tun_idx);
	if (ret == BNXT_TF_RC_ERROR) {
		PMD_DRV_LOG(ERR, "Failed to create outer tunnel flow.");
		return ret;
	}

	/* If flow_state was BNXT_ULP_FLOW_STATE_NORMAL before F1 was
	 * installed, no F2 was deferred, so there is nothing more to
	 * install.
	 */
	if (flow_state != BNXT_ULP_FLOW_STATE_NORMAL)
		ulp_install_inner_tun_flow(tun_entry, params);

	return BNXT_TF_RC_FID;
}

/* This function will be called if an inner tunnel flow request comes
 * before the outer tunnel flow request.
 */
static int32_t
ulp_post_process_first_inner_tun_flow(struct ulp_rte_parser_params *params,
				      struct bnxt_tun_cache_entry *tun_entry)
{
	struct ulp_per_port_flow_info *flow_info;
	int ret;

	ret = ulp_matcher_pattern_match(params, &params->class_id);
	if (ret != BNXT_TF_RC_SUCCESS)
		return BNXT_TF_RC_ERROR;

	ret = ulp_matcher_action_match(params, &params->act_tmpl);
	if (ret != BNXT_TF_RC_SUCCESS)
		return BNXT_TF_RC_ERROR;

	/* If the tunnel F2 flow comes first, it cannot be installed in the
	 * hardware because the F2 flow will not have the L2 context
	 * information yet. So just cache the F2 information and program it
	 * in the context of the F1 flow installation.
	 */
	flow_info = &tun_entry->tun_flow_info[params->port_id];
	memcpy(&flow_info->first_inner_tun_params, params,
	       sizeof(struct ulp_rte_parser_params));

	flow_info->first_tun_i_fid = params->fid;
	flow_info->state = BNXT_ULP_FLOW_STATE_TUN_I_CACHED;

	/* F1 and its related F2s are correlated based on the tunnel
	 * destination IP address. It could already be set if another
	 * flow on this tunnel was offloaded first.
	 */
	if (tun_entry->t_dst_ip_valid)
		goto done;
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
		memcpy(&tun_entry->t_dst_ip,
		       &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
		       sizeof(rte_be32_t));
	else
		memcpy(tun_entry->t_dst_ip6,
		       &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
		       sizeof(tun_entry->t_dst_ip6));
	tun_entry->t_dst_ip_valid = true;

done:
	return BNXT_TF_RC_FID;
}

/* This function will be called if an inner tunnel flow request comes
 * after the outer tunnel flow request.
 */
static int32_t
ulp_post_process_inner_tun_flow(struct ulp_rte_parser_params *params,
				struct bnxt_tun_cache_entry *tun_entry)
{
	memcpy(&params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
	       tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);

	params->parent_fid = tun_entry->outer_tun_flow_id;

	return BNXT_TF_RC_NORMAL;
}

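/* Find the tunnel cache entry whose tunnel destination IP (v4 or v6)
 * matches this flow; if none matches, return the first free slot so
 * the caller can claim it for a new tunnel.
 */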
static int32_t
ulp_get_tun_entry(struct ulp_rte_parser_params *params,
		  struct bnxt_tun_cache_entry **tun_entry,
		  uint16_t *tun_idx)
{
	int i, first_free_entry = BNXT_ULP_TUN_ENTRY_INVALID;
	struct bnxt_tun_cache_entry *tun_tbl;
	bool tun_entry_found = false, free_entry_found = false;

	tun_tbl = bnxt_ulp_cntxt_ptr2_tun_tbl_get(params->ulp_ctx);
	if (!tun_tbl)
		return BNXT_TF_RC_ERROR;

	for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
		if (!memcmp(&tun_tbl[i].t_dst_ip,
			    &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
			    sizeof(rte_be32_t)) ||
		    !memcmp(&tun_tbl[i].t_dst_ip6,
			    &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
			    sizeof(tun_tbl[i].t_dst_ip6))) {
			tun_entry_found = true;
			break;
		}

		if (!tun_tbl[i].t_dst_ip_valid && !free_entry_found) {
			first_free_entry = i;
			free_entry_found = true;
		}
	}

	if (tun_entry_found) {
		*tun_entry = &tun_tbl[i];
		*tun_idx = i;
	} else {
		if (first_free_entry == BNXT_ULP_TUN_ENTRY_INVALID)
			return BNXT_TF_RC_ERROR;
		*tun_entry = &tun_tbl[first_free_entry];
		*tun_idx = first_free_entry;
	}

	return 0;
}

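/* Post-process a tunnel flow request. Return codes, as used by the
 * dispatch below: BNXT_TF_RC_NORMAL tells the caller to proceed with
 * regular flow creation (params may have been patched with the cached
 * tunnel dmac and parent fid), BNXT_TF_RC_FID means the flow was
 * installed or cached here, and BNXT_TF_RC_ERROR means it was rejected.
 */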
int32_t
ulp_post_process_tun_flow(struct ulp_rte_parser_params *params)
{
	bool outer_tun_sig, inner_tun_sig, first_inner_tun_flow;
	bool outer_tun_reject, inner_tun_reject, outer_tun_flow, inner_tun_flow;
	enum bnxt_ulp_tun_flow_state flow_state;
	struct bnxt_tun_cache_entry *tun_entry;
	uint32_t l3_tun, l3_tun_decap;
	uint16_t tun_idx;
	int rc;

	/* Computational fields that indicate it's a TUNNEL DECAP flow */
	l3_tun = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN);
	l3_tun_decap = ULP_COMP_FLD_IDX_RD(params,
					   BNXT_ULP_CF_IDX_L3_TUN_DECAP);
	if (!l3_tun)
		return BNXT_TF_RC_NORMAL;

	rc = ulp_get_tun_entry(params, &tun_entry, &tun_idx);
	if (rc == BNXT_TF_RC_ERROR)
		return rc;

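	/* Classify the request against the per-port flow state: install
	 * it now (outer), continue as a normal flow linked to its F1
	 * (inner), cache it until F1 arrives (first inner), or reject it.
	 */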
	flow_state = tun_entry->tun_flow_info[params->port_id].state;
	/* Outer tunnel flow validation */
	outer_tun_sig = BNXT_OUTER_TUN_SIGNATURE(l3_tun, params);
	outer_tun_flow = BNXT_OUTER_TUN_FLOW(outer_tun_sig);
	outer_tun_reject = BNXT_REJECT_OUTER_TUN_FLOW(flow_state,
						      outer_tun_sig);

	/* Inner tunnel flow validation */
	inner_tun_sig = BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap, params);
	first_inner_tun_flow = BNXT_FIRST_INNER_TUN_FLOW(flow_state,
							 inner_tun_sig);
	inner_tun_flow = BNXT_INNER_TUN_FLOW(flow_state, inner_tun_sig);
	inner_tun_reject = BNXT_REJECT_INNER_TUN_FLOW(flow_state,
						      inner_tun_sig);

	if (outer_tun_reject) {
		tun_entry->outer_tun_rej_cnt++;
		BNXT_TF_DBG(ERR,
			    "Tunnel F1 flow rejected, COUNT: %d\n",
			    tun_entry->outer_tun_rej_cnt);
	/* An inner tunnel flow is rejected if it arrives between the first
	 * inner tunnel flow request and the outer flow request.
	 */
	} else if (inner_tun_reject) {
		tun_entry->inner_tun_rej_cnt++;
		BNXT_TF_DBG(ERR,
			    "Tunnel F2 flow rejected, COUNT: %d\n",
			    tun_entry->inner_tun_rej_cnt);
	}

	if (outer_tun_reject || inner_tun_reject)
		return BNXT_TF_RC_ERROR;
	else if (first_inner_tun_flow)
		return ulp_post_process_first_inner_tun_flow(params, tun_entry);
	else if (outer_tun_flow)
		return ulp_post_process_outer_tun_flow(params, tun_entry,
						       tun_idx);
	else if (inner_tun_flow)
		return ulp_post_process_inner_tun_flow(params, tun_entry);
	else
		return BNXT_TF_RC_NORMAL;
}

void
ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx)
{
	memset(&tun_tbl[tun_idx], 0,
	       sizeof(struct bnxt_tun_cache_entry));
}

/* When a DPDK application offloads the same tunnel inner flow
 * on all the uplink ports, a tunnel inner flow entry is cached
 * even if it is not for the right uplink port. Such tunnel
 * inner flows will eventually get aged out, as there won't be
 * any traffic on those ports. When such a flow destroy is
 * called, clean up the tunnel inner flow entry.
 */
void
ulp_clear_tun_inner_entry(struct bnxt_tun_cache_entry *tun_tbl, uint32_t fid)
{
	struct ulp_per_port_flow_info *flow_info;
	int i, j;

	for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
		for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
			flow_info = &tun_tbl[i].tun_flow_info[j];
			if (flow_info->first_tun_i_fid == fid &&
			    flow_info->state == BNXT_ULP_FLOW_STATE_TUN_I_CACHED)
				memset(flow_info, 0, sizeof(*flow_info));
		}
	}
}