1*2d9fd380Sjfb8856606 /* SPDX-License-Identifier: BSD-3-Clause
2*2d9fd380Sjfb8856606 * Copyright(c) 2014-2020 Broadcom
3*2d9fd380Sjfb8856606 * All rights reserved.
4*2d9fd380Sjfb8856606 */
5*2d9fd380Sjfb8856606
6*2d9fd380Sjfb8856606 #include <rte_vxlan.h>
7*2d9fd380Sjfb8856606 #include "bnxt.h"
8*2d9fd380Sjfb8856606 #include "ulp_template_db_enum.h"
9*2d9fd380Sjfb8856606 #include "ulp_template_struct.h"
10*2d9fd380Sjfb8856606 #include "bnxt_ulp.h"
11*2d9fd380Sjfb8856606 #include "bnxt_tf_common.h"
12*2d9fd380Sjfb8856606 #include "ulp_rte_parser.h"
13*2d9fd380Sjfb8856606 #include "ulp_matcher.h"
14*2d9fd380Sjfb8856606 #include "ulp_utils.h"
15*2d9fd380Sjfb8856606 #include "tfp.h"
16*2d9fd380Sjfb8856606 #include "ulp_port_db.h"
17*2d9fd380Sjfb8856606 #include "ulp_flow_db.h"
18*2d9fd380Sjfb8856606 #include "ulp_mapper.h"
19*2d9fd380Sjfb8856606 #include "ulp_tun.h"
20*2d9fd380Sjfb8856606
21*2d9fd380Sjfb8856606 /* Local defines for the parsing functions */
22*2d9fd380Sjfb8856606 #define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
23*2d9fd380Sjfb8856606 #define ULP_VLAN_PRIORITY_MASK 0x700
24*2d9fd380Sjfb8856606 #define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/
25*2d9fd380Sjfb8856606 #define ULP_UDP_PORT_VXLAN 4789
26*2d9fd380Sjfb8856606
27*2d9fd380Sjfb8856606 /* Utility function to skip the void items. */
28*2d9fd380Sjfb8856606 static inline int32_t
ulp_rte_item_skip_void(const struct rte_flow_item ** item,uint32_t increment)29*2d9fd380Sjfb8856606 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
30*2d9fd380Sjfb8856606 {
31*2d9fd380Sjfb8856606 if (!*item)
32*2d9fd380Sjfb8856606 return 0;
33*2d9fd380Sjfb8856606 if (increment)
34*2d9fd380Sjfb8856606 (*item)++;
35*2d9fd380Sjfb8856606 while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
36*2d9fd380Sjfb8856606 (*item)++;
37*2d9fd380Sjfb8856606 if (*item)
38*2d9fd380Sjfb8856606 return 1;
39*2d9fd380Sjfb8856606 return 0;
40*2d9fd380Sjfb8856606 }
41*2d9fd380Sjfb8856606
42*2d9fd380Sjfb8856606 /* Utility function to update the field_bitmap */
43*2d9fd380Sjfb8856606 static void
ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params * params,uint32_t idx)44*2d9fd380Sjfb8856606 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
45*2d9fd380Sjfb8856606 uint32_t idx)
46*2d9fd380Sjfb8856606 {
47*2d9fd380Sjfb8856606 struct ulp_rte_hdr_field *field;
48*2d9fd380Sjfb8856606
49*2d9fd380Sjfb8856606 field = ¶ms->hdr_field[idx];
50*2d9fd380Sjfb8856606 if (ulp_bitmap_notzero(field->mask, field->size)) {
51*2d9fd380Sjfb8856606 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
52*2d9fd380Sjfb8856606 /* Not exact match */
53*2d9fd380Sjfb8856606 if (!ulp_bitmap_is_ones(field->mask, field->size))
54*2d9fd380Sjfb8856606 ULP_BITMAP_SET(params->fld_bitmap.bits,
55*2d9fd380Sjfb8856606 BNXT_ULP_MATCH_TYPE_BITMASK_WM);
56*2d9fd380Sjfb8856606 } else {
57*2d9fd380Sjfb8856606 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
58*2d9fd380Sjfb8856606 }
59*2d9fd380Sjfb8856606 }
60*2d9fd380Sjfb8856606
61*2d9fd380Sjfb8856606 /* Utility function to copy field spec items */
62*2d9fd380Sjfb8856606 static struct ulp_rte_hdr_field *
ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field * field,const void * buffer,uint32_t size)63*2d9fd380Sjfb8856606 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
64*2d9fd380Sjfb8856606 const void *buffer,
65*2d9fd380Sjfb8856606 uint32_t size)
66*2d9fd380Sjfb8856606 {
67*2d9fd380Sjfb8856606 field->size = size;
68*2d9fd380Sjfb8856606 memcpy(field->spec, buffer, field->size);
69*2d9fd380Sjfb8856606 field++;
70*2d9fd380Sjfb8856606 return field;
71*2d9fd380Sjfb8856606 }
72*2d9fd380Sjfb8856606
73*2d9fd380Sjfb8856606 /* Utility function to copy field masks items */
74*2d9fd380Sjfb8856606 static void
ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params * params,uint32_t * idx,const void * buffer,uint32_t size)75*2d9fd380Sjfb8856606 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
76*2d9fd380Sjfb8856606 uint32_t *idx,
77*2d9fd380Sjfb8856606 const void *buffer,
78*2d9fd380Sjfb8856606 uint32_t size)
79*2d9fd380Sjfb8856606 {
80*2d9fd380Sjfb8856606 struct ulp_rte_hdr_field *field = ¶ms->hdr_field[*idx];
81*2d9fd380Sjfb8856606
82*2d9fd380Sjfb8856606 memcpy(field->mask, buffer, size);
83*2d9fd380Sjfb8856606 ulp_rte_parser_field_bitmap_update(params, *idx);
84*2d9fd380Sjfb8856606 *idx = *idx + 1;
85*2d9fd380Sjfb8856606 }
86*2d9fd380Sjfb8856606
87*2d9fd380Sjfb8856606 /* Utility function to ignore field masks items */
88*2d9fd380Sjfb8856606 static void
ulp_rte_prsr_mask_ignore(struct ulp_rte_parser_params * params __rte_unused,uint32_t * idx,const void * buffer __rte_unused,uint32_t size __rte_unused)89*2d9fd380Sjfb8856606 ulp_rte_prsr_mask_ignore(struct ulp_rte_parser_params *params __rte_unused,
90*2d9fd380Sjfb8856606 uint32_t *idx,
91*2d9fd380Sjfb8856606 const void *buffer __rte_unused,
92*2d9fd380Sjfb8856606 uint32_t size __rte_unused)
93*2d9fd380Sjfb8856606 {
94*2d9fd380Sjfb8856606 *idx = *idx + 1;
95*2d9fd380Sjfb8856606 }
96*2d9fd380Sjfb8856606
97*2d9fd380Sjfb8856606 /*
98*2d9fd380Sjfb8856606 * Function to handle the parsing of RTE Flows and placing
99*2d9fd380Sjfb8856606 * the RTE flow items into the ulp structures.
100*2d9fd380Sjfb8856606 */
101*2d9fd380Sjfb8856606 int32_t
bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],struct ulp_rte_parser_params * params)102*2d9fd380Sjfb8856606 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
103*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
104*2d9fd380Sjfb8856606 {
105*2d9fd380Sjfb8856606 const struct rte_flow_item *item = pattern;
106*2d9fd380Sjfb8856606 struct bnxt_ulp_rte_hdr_info *hdr_info;
107*2d9fd380Sjfb8856606
108*2d9fd380Sjfb8856606 params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
109*2d9fd380Sjfb8856606
110*2d9fd380Sjfb8856606 /* Set the computed flags for no vlan tags before parsing */
111*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
112*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
113*2d9fd380Sjfb8856606
114*2d9fd380Sjfb8856606 /* Parse all the items in the pattern */
115*2d9fd380Sjfb8856606 while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
116*2d9fd380Sjfb8856606 /* get the header information from the flow_hdr_info table */
117*2d9fd380Sjfb8856606 hdr_info = &ulp_hdr_info[item->type];
118*2d9fd380Sjfb8856606 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
119*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR,
120*2d9fd380Sjfb8856606 "Truflow parser does not support type %d\n",
121*2d9fd380Sjfb8856606 item->type);
122*2d9fd380Sjfb8856606 return BNXT_TF_RC_PARSE_ERR;
123*2d9fd380Sjfb8856606 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
124*2d9fd380Sjfb8856606 /* call the registered callback handler */
125*2d9fd380Sjfb8856606 if (hdr_info->proto_hdr_func) {
126*2d9fd380Sjfb8856606 if (hdr_info->proto_hdr_func(item, params) !=
127*2d9fd380Sjfb8856606 BNXT_TF_RC_SUCCESS) {
128*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
129*2d9fd380Sjfb8856606 }
130*2d9fd380Sjfb8856606 }
131*2d9fd380Sjfb8856606 }
132*2d9fd380Sjfb8856606 item++;
133*2d9fd380Sjfb8856606 }
134*2d9fd380Sjfb8856606 /* update the implied SVIF */
135*2d9fd380Sjfb8856606 return ulp_rte_parser_implicit_match_port_process(params);
136*2d9fd380Sjfb8856606 }
137*2d9fd380Sjfb8856606
138*2d9fd380Sjfb8856606 /*
139*2d9fd380Sjfb8856606 * Function to handle the parsing of RTE Flows and placing
140*2d9fd380Sjfb8856606 * the RTE flow actions into the ulp structures.
141*2d9fd380Sjfb8856606 */
142*2d9fd380Sjfb8856606 int32_t
bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],struct ulp_rte_parser_params * params)143*2d9fd380Sjfb8856606 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
144*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
145*2d9fd380Sjfb8856606 {
146*2d9fd380Sjfb8856606 const struct rte_flow_action *action_item = actions;
147*2d9fd380Sjfb8856606 struct bnxt_ulp_rte_act_info *hdr_info;
148*2d9fd380Sjfb8856606
149*2d9fd380Sjfb8856606 /* Parse all the items in the pattern */
150*2d9fd380Sjfb8856606 while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
151*2d9fd380Sjfb8856606 /* get the header information from the flow_hdr_info table */
152*2d9fd380Sjfb8856606 hdr_info = &ulp_act_info[action_item->type];
153*2d9fd380Sjfb8856606 if (hdr_info->act_type ==
154*2d9fd380Sjfb8856606 BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
155*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR,
156*2d9fd380Sjfb8856606 "Truflow parser does not support act %u\n",
157*2d9fd380Sjfb8856606 action_item->type);
158*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
159*2d9fd380Sjfb8856606 } else if (hdr_info->act_type ==
160*2d9fd380Sjfb8856606 BNXT_ULP_ACT_TYPE_SUPPORTED) {
161*2d9fd380Sjfb8856606 /* call the registered callback handler */
162*2d9fd380Sjfb8856606 if (hdr_info->proto_act_func) {
163*2d9fd380Sjfb8856606 if (hdr_info->proto_act_func(action_item,
164*2d9fd380Sjfb8856606 params) !=
165*2d9fd380Sjfb8856606 BNXT_TF_RC_SUCCESS) {
166*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
167*2d9fd380Sjfb8856606 }
168*2d9fd380Sjfb8856606 }
169*2d9fd380Sjfb8856606 }
170*2d9fd380Sjfb8856606 action_item++;
171*2d9fd380Sjfb8856606 }
172*2d9fd380Sjfb8856606 /* update the implied port details */
173*2d9fd380Sjfb8856606 ulp_rte_parser_implicit_act_port_process(params);
174*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
175*2d9fd380Sjfb8856606 }
176*2d9fd380Sjfb8856606
/*
 * Function to handle the post processing of the computed
 * fields for the interface.
 *
 * Resolves the incoming dpdk port id to a port-db ifindex and, based
 * on the computed flow direction, writes the relevant PARIF computed
 * fields:
 *   - ingress: the physical port PARIF.
 *   - egress to a VF representor: the VF function PARIF, the VF-rep
 *     match flag and the loopback PARIF.
 *   - egress otherwise: the driver function PARIF.
 * On any port-db lookup failure the function logs and returns without
 * updating the fields (best effort, no error propagated to callers).
 */
static void
bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
{
	uint32_t ifindex;
	uint16_t port_id, parif;
	uint32_t mtype;
	enum bnxt_ulp_direction_type dir;

	/* get the direction details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);

	/* read the port id details */
	port_id = ULP_COMP_FLD_IDX_RD(params,
				      BNXT_ULP_CF_IDX_INCOMING_IF);
	/* map dpdk port id to the bnxt port database index */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
					      port_id,
					      &ifindex)) {
		BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
		return;
	}

	if (dir == BNXT_ULP_DIR_INGRESS) {
		/* Set port PARIF */
		if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
					  BNXT_ULP_PHY_PORT_PARIF, &parif)) {
			BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
			return;
		}
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
				    parif);
	} else {
		/* Get the match port type */
		mtype = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
		if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
			/* flag that the matched port is a VF representor */
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
					    1);
			/* Set VF func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_VF_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
					    parif);

			/* populate the loopback parif */
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_LOOPBACK_PARIF,
					    BNXT_ULP_SYM_VF_FUNC_PARIF);

		} else {
			/* Set DRV func PARIF */
			if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
						  BNXT_ULP_DRV_FUNC_PARIF,
						  &parif)) {
				BNXT_TF_DBG(ERR,
					    "ParseErr:ifindex is not valid\n");
				return;
			}
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
					    parif);
		}
	}
}
251*2d9fd380Sjfb8856606
/*
 * Post process a normal (non-tunnel-terminated) flow.
 *
 * Reads the computed direction and port types, then:
 *   - stamps the egress direction bit on both the header and action
 *     bitmaps for egress flows,
 *   - sets the VF-to-VF computed flag when both match and action ports
 *     are VF representors,
 *   - translates ACTION_BIT_DEC_TTL into either the tunnel-ttl or
 *     plain-ttl computed field depending on whether a VXLAN header is
 *     matched without a VXLAN decap action,
 *   - merges the fast-path header bits into the main header bitmap and
 *     updates the interface PARIF computed fields.
 * Always returns 0 (flow rejection handling is a TBD, see below).
 */
static int32_t
ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
{
	enum bnxt_ulp_intf_type match_port_type, act_port_type;
	enum bnxt_ulp_direction_type dir;
	uint32_t act_port_set;

	/* Get the computed details */
	dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
	match_port_type = ULP_COMP_FLD_IDX_RD(params,
					      BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
	act_port_type = ULP_COMP_FLD_IDX_RD(params,
					    BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
	act_port_set = ULP_COMP_FLD_IDX_RD(params,
					   BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);

	/* set the flow direction in the proto and action header */
	if (dir == BNXT_ULP_DIR_EGRESS) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_FLOW_DIR_BITMASK_EGR);
	}

	/* calculate the VF to VF flag */
	if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
	    match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);

	/* Update the decrement ttl computational fields */
	if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
			     BNXT_ULP_ACTION_BIT_DEC_TTL)) {
		/*
		 * Check that vxlan proto is included and vxlan decap
		 * action is not set then decrement tunnel ttl.
		 * Similarly add GRE and NVGRE in future.
		 */
		if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
				      BNXT_ULP_HDR_BIT_T_VXLAN) &&
		    !ULP_BITMAP_ISSET(params->act_bitmap.bits,
				      BNXT_ULP_ACTION_BIT_VXLAN_DECAP))) {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
		} else {
			ULP_COMP_FLD_IDX_WR(params,
					    BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
		}
	}

	/* Merge the hdr_fp_bit into the proto header bit */
	params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;

	/* Update the computed interface parameters */
	bnxt_ulp_comp_fld_intf_update(params);

	/* TBD: Handle the flow rejection scenarios */
	return 0;
}
310*2d9fd380Sjfb8856606
311*2d9fd380Sjfb8856606 /*
312*2d9fd380Sjfb8856606 * Function to handle the post processing of the parsing details
313*2d9fd380Sjfb8856606 */
314*2d9fd380Sjfb8856606 int32_t
bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params * params)315*2d9fd380Sjfb8856606 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
316*2d9fd380Sjfb8856606 {
317*2d9fd380Sjfb8856606 ulp_post_process_normal_flow(params);
318*2d9fd380Sjfb8856606 return ulp_post_process_tun_flow(params);
319*2d9fd380Sjfb8856606 }
320*2d9fd380Sjfb8856606
321*2d9fd380Sjfb8856606 /*
322*2d9fd380Sjfb8856606 * Function to compute the flow direction based on the match port details
323*2d9fd380Sjfb8856606 */
324*2d9fd380Sjfb8856606 static void
bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params * params)325*2d9fd380Sjfb8856606 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
326*2d9fd380Sjfb8856606 {
327*2d9fd380Sjfb8856606 enum bnxt_ulp_intf_type match_port_type;
328*2d9fd380Sjfb8856606
329*2d9fd380Sjfb8856606 /* Get the match port type */
330*2d9fd380Sjfb8856606 match_port_type = ULP_COMP_FLD_IDX_RD(params,
331*2d9fd380Sjfb8856606 BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
332*2d9fd380Sjfb8856606
333*2d9fd380Sjfb8856606 /* If ingress flow and matchport is vf rep then dir is egress*/
334*2d9fd380Sjfb8856606 if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
335*2d9fd380Sjfb8856606 match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
336*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
337*2d9fd380Sjfb8856606 BNXT_ULP_DIR_EGRESS);
338*2d9fd380Sjfb8856606 } else {
339*2d9fd380Sjfb8856606 /* Assign the input direction */
340*2d9fd380Sjfb8856606 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
341*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
342*2d9fd380Sjfb8856606 BNXT_ULP_DIR_INGRESS);
343*2d9fd380Sjfb8856606 else
344*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
345*2d9fd380Sjfb8856606 BNXT_ULP_DIR_EGRESS);
346*2d9fd380Sjfb8856606 }
347*2d9fd380Sjfb8856606 }
348*2d9fd380Sjfb8856606
349*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow item PF Header. */
350*2d9fd380Sjfb8856606 static int32_t
ulp_rte_parser_svif_set(struct ulp_rte_parser_params * params,uint32_t ifindex,uint16_t mask)351*2d9fd380Sjfb8856606 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
352*2d9fd380Sjfb8856606 uint32_t ifindex,
353*2d9fd380Sjfb8856606 uint16_t mask)
354*2d9fd380Sjfb8856606 {
355*2d9fd380Sjfb8856606 uint16_t svif;
356*2d9fd380Sjfb8856606 enum bnxt_ulp_direction_type dir;
357*2d9fd380Sjfb8856606 struct ulp_rte_hdr_field *hdr_field;
358*2d9fd380Sjfb8856606 enum bnxt_ulp_svif_type svif_type;
359*2d9fd380Sjfb8856606 enum bnxt_ulp_intf_type port_type;
360*2d9fd380Sjfb8856606
361*2d9fd380Sjfb8856606 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
362*2d9fd380Sjfb8856606 BNXT_ULP_INVALID_SVIF_VAL) {
363*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR,
364*2d9fd380Sjfb8856606 "SVIF already set,multiple source not support'd\n");
365*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
366*2d9fd380Sjfb8856606 }
367*2d9fd380Sjfb8856606
368*2d9fd380Sjfb8856606 /* Get port type details */
369*2d9fd380Sjfb8856606 port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
370*2d9fd380Sjfb8856606 if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
371*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "Invalid port type\n");
372*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
373*2d9fd380Sjfb8856606 }
374*2d9fd380Sjfb8856606
375*2d9fd380Sjfb8856606 /* Update the match port type */
376*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
377*2d9fd380Sjfb8856606
378*2d9fd380Sjfb8856606 /* compute the direction */
379*2d9fd380Sjfb8856606 bnxt_ulp_rte_parser_direction_compute(params);
380*2d9fd380Sjfb8856606
381*2d9fd380Sjfb8856606 /* Get the computed direction */
382*2d9fd380Sjfb8856606 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
383*2d9fd380Sjfb8856606 if (dir == BNXT_ULP_DIR_INGRESS) {
384*2d9fd380Sjfb8856606 svif_type = BNXT_ULP_PHY_PORT_SVIF;
385*2d9fd380Sjfb8856606 } else {
386*2d9fd380Sjfb8856606 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
387*2d9fd380Sjfb8856606 svif_type = BNXT_ULP_VF_FUNC_SVIF;
388*2d9fd380Sjfb8856606 else
389*2d9fd380Sjfb8856606 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
390*2d9fd380Sjfb8856606 }
391*2d9fd380Sjfb8856606 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
392*2d9fd380Sjfb8856606 &svif);
393*2d9fd380Sjfb8856606 svif = rte_cpu_to_be_16(svif);
394*2d9fd380Sjfb8856606 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
395*2d9fd380Sjfb8856606 memcpy(hdr_field->spec, &svif, sizeof(svif));
396*2d9fd380Sjfb8856606 memcpy(hdr_field->mask, &mask, sizeof(mask));
397*2d9fd380Sjfb8856606 hdr_field->size = sizeof(svif);
398*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
399*2d9fd380Sjfb8856606 rte_be_to_cpu_16(svif));
400*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
401*2d9fd380Sjfb8856606 }
402*2d9fd380Sjfb8856606
403*2d9fd380Sjfb8856606 /* Function to handle the parsing of the RTE port id */
404*2d9fd380Sjfb8856606 int32_t
ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params * params)405*2d9fd380Sjfb8856606 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
406*2d9fd380Sjfb8856606 {
407*2d9fd380Sjfb8856606 uint16_t port_id = 0;
408*2d9fd380Sjfb8856606 uint16_t svif_mask = 0xFFFF;
409*2d9fd380Sjfb8856606 uint32_t ifindex;
410*2d9fd380Sjfb8856606 int32_t rc = BNXT_TF_RC_ERROR;
411*2d9fd380Sjfb8856606
412*2d9fd380Sjfb8856606 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
413*2d9fd380Sjfb8856606 BNXT_ULP_INVALID_SVIF_VAL)
414*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
415*2d9fd380Sjfb8856606
416*2d9fd380Sjfb8856606 /* SVIF not set. So get the port id */
417*2d9fd380Sjfb8856606 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
418*2d9fd380Sjfb8856606
419*2d9fd380Sjfb8856606 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
420*2d9fd380Sjfb8856606 port_id,
421*2d9fd380Sjfb8856606 &ifindex)) {
422*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
423*2d9fd380Sjfb8856606 return rc;
424*2d9fd380Sjfb8856606 }
425*2d9fd380Sjfb8856606
426*2d9fd380Sjfb8856606 /* Update the SVIF details */
427*2d9fd380Sjfb8856606 rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
428*2d9fd380Sjfb8856606 return rc;
429*2d9fd380Sjfb8856606 }
430*2d9fd380Sjfb8856606
431*2d9fd380Sjfb8856606 /* Function to handle the implicit action port id */
432*2d9fd380Sjfb8856606 int32_t
ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params * params)433*2d9fd380Sjfb8856606 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
434*2d9fd380Sjfb8856606 {
435*2d9fd380Sjfb8856606 struct rte_flow_action action_item = {0};
436*2d9fd380Sjfb8856606 struct rte_flow_action_port_id port_id = {0};
437*2d9fd380Sjfb8856606
438*2d9fd380Sjfb8856606 /* Read the action port set bit */
439*2d9fd380Sjfb8856606 if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
440*2d9fd380Sjfb8856606 /* Already set, so just exit */
441*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
442*2d9fd380Sjfb8856606 }
443*2d9fd380Sjfb8856606 port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
444*2d9fd380Sjfb8856606 action_item.conf = &port_id;
445*2d9fd380Sjfb8856606
446*2d9fd380Sjfb8856606 /* Update the action port based on incoming port */
447*2d9fd380Sjfb8856606 ulp_rte_port_id_act_handler(&action_item, params);
448*2d9fd380Sjfb8856606
449*2d9fd380Sjfb8856606 /* Reset the action port set bit */
450*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
451*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
452*2d9fd380Sjfb8856606 }
453*2d9fd380Sjfb8856606
454*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow item PF Header. */
455*2d9fd380Sjfb8856606 int32_t
ulp_rte_pf_hdr_handler(const struct rte_flow_item * item __rte_unused,struct ulp_rte_parser_params * params)456*2d9fd380Sjfb8856606 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
457*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
458*2d9fd380Sjfb8856606 {
459*2d9fd380Sjfb8856606 uint16_t port_id = 0;
460*2d9fd380Sjfb8856606 uint16_t svif_mask = 0xFFFF;
461*2d9fd380Sjfb8856606 uint32_t ifindex;
462*2d9fd380Sjfb8856606
463*2d9fd380Sjfb8856606 /* Get the implicit port id */
464*2d9fd380Sjfb8856606 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
465*2d9fd380Sjfb8856606
466*2d9fd380Sjfb8856606 /* perform the conversion from dpdk port to bnxt ifindex */
467*2d9fd380Sjfb8856606 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
468*2d9fd380Sjfb8856606 port_id,
469*2d9fd380Sjfb8856606 &ifindex)) {
470*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
471*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
472*2d9fd380Sjfb8856606 }
473*2d9fd380Sjfb8856606
474*2d9fd380Sjfb8856606 /* Update the SVIF details */
475*2d9fd380Sjfb8856606 return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
476*2d9fd380Sjfb8856606 }
477*2d9fd380Sjfb8856606
478*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow item VF Header. */
479*2d9fd380Sjfb8856606 int32_t
ulp_rte_vf_hdr_handler(const struct rte_flow_item * item,struct ulp_rte_parser_params * params)480*2d9fd380Sjfb8856606 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
481*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
482*2d9fd380Sjfb8856606 {
483*2d9fd380Sjfb8856606 const struct rte_flow_item_vf *vf_spec = item->spec;
484*2d9fd380Sjfb8856606 const struct rte_flow_item_vf *vf_mask = item->mask;
485*2d9fd380Sjfb8856606 uint16_t mask = 0;
486*2d9fd380Sjfb8856606 uint32_t ifindex;
487*2d9fd380Sjfb8856606 int32_t rc = BNXT_TF_RC_PARSE_ERR;
488*2d9fd380Sjfb8856606
489*2d9fd380Sjfb8856606 /* Get VF rte_flow_item for Port details */
490*2d9fd380Sjfb8856606 if (!vf_spec) {
491*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
492*2d9fd380Sjfb8856606 return rc;
493*2d9fd380Sjfb8856606 }
494*2d9fd380Sjfb8856606 if (!vf_mask) {
495*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
496*2d9fd380Sjfb8856606 return rc;
497*2d9fd380Sjfb8856606 }
498*2d9fd380Sjfb8856606 mask = vf_mask->id;
499*2d9fd380Sjfb8856606
500*2d9fd380Sjfb8856606 /* perform the conversion from VF Func id to bnxt ifindex */
501*2d9fd380Sjfb8856606 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
502*2d9fd380Sjfb8856606 vf_spec->id,
503*2d9fd380Sjfb8856606 &ifindex)) {
504*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
505*2d9fd380Sjfb8856606 return rc;
506*2d9fd380Sjfb8856606 }
507*2d9fd380Sjfb8856606 /* Update the SVIF details */
508*2d9fd380Sjfb8856606 return ulp_rte_parser_svif_set(params, ifindex, mask);
509*2d9fd380Sjfb8856606 }
510*2d9fd380Sjfb8856606
511*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow item port id Header. */
512*2d9fd380Sjfb8856606 int32_t
ulp_rte_port_id_hdr_handler(const struct rte_flow_item * item,struct ulp_rte_parser_params * params)513*2d9fd380Sjfb8856606 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
514*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
515*2d9fd380Sjfb8856606 {
516*2d9fd380Sjfb8856606 const struct rte_flow_item_port_id *port_spec = item->spec;
517*2d9fd380Sjfb8856606 const struct rte_flow_item_port_id *port_mask = item->mask;
518*2d9fd380Sjfb8856606 uint16_t mask = 0;
519*2d9fd380Sjfb8856606 int32_t rc = BNXT_TF_RC_PARSE_ERR;
520*2d9fd380Sjfb8856606 uint32_t ifindex;
521*2d9fd380Sjfb8856606
522*2d9fd380Sjfb8856606 if (!port_spec) {
523*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
524*2d9fd380Sjfb8856606 return rc;
525*2d9fd380Sjfb8856606 }
526*2d9fd380Sjfb8856606 if (!port_mask) {
527*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
528*2d9fd380Sjfb8856606 return rc;
529*2d9fd380Sjfb8856606 }
530*2d9fd380Sjfb8856606 mask = port_mask->id;
531*2d9fd380Sjfb8856606
532*2d9fd380Sjfb8856606 /* perform the conversion from dpdk port to bnxt ifindex */
533*2d9fd380Sjfb8856606 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
534*2d9fd380Sjfb8856606 port_spec->id,
535*2d9fd380Sjfb8856606 &ifindex)) {
536*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
537*2d9fd380Sjfb8856606 return rc;
538*2d9fd380Sjfb8856606 }
539*2d9fd380Sjfb8856606 /* Update the SVIF details */
540*2d9fd380Sjfb8856606 return ulp_rte_parser_svif_set(params, ifindex, mask);
541*2d9fd380Sjfb8856606 }
542*2d9fd380Sjfb8856606
543*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow item phy port Header. */
544*2d9fd380Sjfb8856606 int32_t
ulp_rte_phy_port_hdr_handler(const struct rte_flow_item * item,struct ulp_rte_parser_params * params)545*2d9fd380Sjfb8856606 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
546*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
547*2d9fd380Sjfb8856606 {
548*2d9fd380Sjfb8856606 const struct rte_flow_item_phy_port *port_spec = item->spec;
549*2d9fd380Sjfb8856606 const struct rte_flow_item_phy_port *port_mask = item->mask;
550*2d9fd380Sjfb8856606 uint16_t mask = 0;
551*2d9fd380Sjfb8856606 int32_t rc = BNXT_TF_RC_ERROR;
552*2d9fd380Sjfb8856606 uint16_t svif;
553*2d9fd380Sjfb8856606 enum bnxt_ulp_direction_type dir;
554*2d9fd380Sjfb8856606 struct ulp_rte_hdr_field *hdr_field;
555*2d9fd380Sjfb8856606
556*2d9fd380Sjfb8856606 /* Copy the rte_flow_item for phy port into hdr_field */
557*2d9fd380Sjfb8856606 if (!port_spec) {
558*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
559*2d9fd380Sjfb8856606 return rc;
560*2d9fd380Sjfb8856606 }
561*2d9fd380Sjfb8856606 if (!port_mask) {
562*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
563*2d9fd380Sjfb8856606 return rc;
564*2d9fd380Sjfb8856606 }
565*2d9fd380Sjfb8856606 mask = port_mask->index;
566*2d9fd380Sjfb8856606
567*2d9fd380Sjfb8856606 /* Update the match port type */
568*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
569*2d9fd380Sjfb8856606 BNXT_ULP_INTF_TYPE_PHY_PORT);
570*2d9fd380Sjfb8856606
571*2d9fd380Sjfb8856606 /* Compute the Hw direction */
572*2d9fd380Sjfb8856606 bnxt_ulp_rte_parser_direction_compute(params);
573*2d9fd380Sjfb8856606
574*2d9fd380Sjfb8856606 /* Direction validation */
575*2d9fd380Sjfb8856606 dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
576*2d9fd380Sjfb8856606 if (dir == BNXT_ULP_DIR_EGRESS) {
577*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR,
578*2d9fd380Sjfb8856606 "Parse Err:Phy ports are valid only for ingress\n");
579*2d9fd380Sjfb8856606 return BNXT_TF_RC_PARSE_ERR;
580*2d9fd380Sjfb8856606 }
581*2d9fd380Sjfb8856606
582*2d9fd380Sjfb8856606 /* Get the physical port details from port db */
583*2d9fd380Sjfb8856606 rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
584*2d9fd380Sjfb8856606 &svif);
585*2d9fd380Sjfb8856606 if (rc) {
586*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "Failed to get port details\n");
587*2d9fd380Sjfb8856606 return BNXT_TF_RC_PARSE_ERR;
588*2d9fd380Sjfb8856606 }
589*2d9fd380Sjfb8856606
590*2d9fd380Sjfb8856606 /* Update the SVIF details */
591*2d9fd380Sjfb8856606 svif = rte_cpu_to_be_16(svif);
592*2d9fd380Sjfb8856606 hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
593*2d9fd380Sjfb8856606 memcpy(hdr_field->spec, &svif, sizeof(svif));
594*2d9fd380Sjfb8856606 memcpy(hdr_field->mask, &mask, sizeof(mask));
595*2d9fd380Sjfb8856606 hdr_field->size = sizeof(svif);
596*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
597*2d9fd380Sjfb8856606 rte_be_to_cpu_16(svif));
598*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
599*2d9fd380Sjfb8856606 }
600*2d9fd380Sjfb8856606
601*2d9fd380Sjfb8856606 /* Function to handle the update of proto header based on field values */
602*2d9fd380Sjfb8856606 static void
ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params * param,uint16_t type,uint32_t in_flag)603*2d9fd380Sjfb8856606 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
604*2d9fd380Sjfb8856606 uint16_t type, uint32_t in_flag)
605*2d9fd380Sjfb8856606 {
606*2d9fd380Sjfb8856606 if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
607*2d9fd380Sjfb8856606 if (in_flag) {
608*2d9fd380Sjfb8856606 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
609*2d9fd380Sjfb8856606 BNXT_ULP_HDR_BIT_I_IPV4);
610*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
611*2d9fd380Sjfb8856606 } else {
612*2d9fd380Sjfb8856606 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
613*2d9fd380Sjfb8856606 BNXT_ULP_HDR_BIT_O_IPV4);
614*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
615*2d9fd380Sjfb8856606 }
616*2d9fd380Sjfb8856606 } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
617*2d9fd380Sjfb8856606 if (in_flag) {
618*2d9fd380Sjfb8856606 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
619*2d9fd380Sjfb8856606 BNXT_ULP_HDR_BIT_I_IPV6);
620*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
621*2d9fd380Sjfb8856606 } else {
622*2d9fd380Sjfb8856606 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
623*2d9fd380Sjfb8856606 BNXT_ULP_HDR_BIT_O_IPV6);
624*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
625*2d9fd380Sjfb8856606 }
626*2d9fd380Sjfb8856606 }
627*2d9fd380Sjfb8856606 }
628*2d9fd380Sjfb8856606
629*2d9fd380Sjfb8856606 /* Internal Function to identify broadcast or multicast packets */
630*2d9fd380Sjfb8856606 static int32_t
ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr * eth_addr)631*2d9fd380Sjfb8856606 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
632*2d9fd380Sjfb8856606 {
633*2d9fd380Sjfb8856606 if (rte_is_multicast_ether_addr(eth_addr) ||
634*2d9fd380Sjfb8856606 rte_is_broadcast_ether_addr(eth_addr)) {
635*2d9fd380Sjfb8856606 BNXT_TF_DBG(DEBUG,
636*2d9fd380Sjfb8856606 "No support for bcast or mcast addr offload\n");
637*2d9fd380Sjfb8856606 return 1;
638*2d9fd380Sjfb8856606 }
639*2d9fd380Sjfb8856606 return 0;
640*2d9fd380Sjfb8856606 }
641*2d9fd380Sjfb8856606
/*
 * Function to handle the parsing of RTE Flow item Ethernet Header.
 *
 * item   [in]     Flow item of type RTE_FLOW_ITEM_TYPE_ETH.
 * params [in/out] Parser state; hdr_field[], hdr_bitmap, field_idx and
 *                 vlan_idx are advanced/updated as a side effect.
 *
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_PARSE_ERR when the spec
 * carries a broadcast/multicast MAC (offload not supported for those).
 */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_eth *eth_spec = item->spec;
	const struct rte_flow_item_eth *eth_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint16_t eth_type = 0;
	uint32_t inner_flag = 0;

	/*
	 * Copy the rte_flow_item for eth into hdr_field using ethernet
	 * header fields.  The copy order (dst MAC, src MAC, ethertype)
	 * must match the template field layout starting at field_idx.
	 */
	if (eth_spec) {
		size = sizeof(eth_spec->dst.addr_bytes);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						eth_spec->dst.addr_bytes,
						size);
		/* Todo: work around to avoid multicast and broadcast addr */
		if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
			return BNXT_TF_RC_PARSE_ERR;

		size = sizeof(eth_spec->src.addr_bytes);
		field = ulp_rte_parser_fld_copy(field,
						eth_spec->src.addr_bytes,
						size);
		/* Todo: work around to avoid multicast and broadcast addr */
		if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
			return BNXT_TF_RC_PARSE_ERR;

		field = ulp_rte_parser_fld_copy(field,
						&eth_spec->type,
						sizeof(eth_spec->type));
		/* Kept big-endian, as stored in the spec by rte_flow. */
		eth_type = eth_spec->type;
	}
	if (eth_mask) {
		/* Mask copies advance idx internally; same order as spec. */
		ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
				       sizeof(eth_mask->dst.addr_bytes));
		ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
				       sizeof(eth_mask->src.addr_bytes));
		ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
				       sizeof(eth_mask->type));
	}
	/*
	 * Add number of vlan header elements: reserve the VLAN field slots
	 * now so a following VLAN item writes at vlan_idx.
	 */
	params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
	params->vlan_idx = params->field_idx;
	params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;

	/*
	 * Update the protocol hdr bitmap.  If any outer L2/L3/L4 header was
	 * already seen, this ETH item belongs to the inner (tunneled) frame.
	 */
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_ETH) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_IPV6) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
	}
	/* Update the field protocol hdr bitmap from the spec's ethertype. */
	ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);

	return BNXT_TF_RC_SUCCESS;
}
715*2d9fd380Sjfb8856606
716*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow item Vlan Header. */
717*2d9fd380Sjfb8856606 int32_t
ulp_rte_vlan_hdr_handler(const struct rte_flow_item * item,struct ulp_rte_parser_params * params)718*2d9fd380Sjfb8856606 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
719*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
720*2d9fd380Sjfb8856606 {
721*2d9fd380Sjfb8856606 const struct rte_flow_item_vlan *vlan_spec = item->spec;
722*2d9fd380Sjfb8856606 const struct rte_flow_item_vlan *vlan_mask = item->mask;
723*2d9fd380Sjfb8856606 struct ulp_rte_hdr_field *field;
724*2d9fd380Sjfb8856606 struct ulp_rte_hdr_bitmap *hdr_bit;
725*2d9fd380Sjfb8856606 uint32_t idx = params->vlan_idx;
726*2d9fd380Sjfb8856606 uint16_t vlan_tag, priority;
727*2d9fd380Sjfb8856606 uint32_t outer_vtag_num;
728*2d9fd380Sjfb8856606 uint32_t inner_vtag_num;
729*2d9fd380Sjfb8856606 uint16_t eth_type = 0;
730*2d9fd380Sjfb8856606 uint32_t inner_flag = 0;
731*2d9fd380Sjfb8856606
732*2d9fd380Sjfb8856606 /*
733*2d9fd380Sjfb8856606 * Copy the rte_flow_item for vlan into hdr_field using Vlan
734*2d9fd380Sjfb8856606 * header fields
735*2d9fd380Sjfb8856606 */
736*2d9fd380Sjfb8856606 if (vlan_spec) {
737*2d9fd380Sjfb8856606 vlan_tag = ntohs(vlan_spec->tci);
738*2d9fd380Sjfb8856606 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
739*2d9fd380Sjfb8856606 vlan_tag &= ULP_VLAN_TAG_MASK;
740*2d9fd380Sjfb8856606 vlan_tag = htons(vlan_tag);
741*2d9fd380Sjfb8856606
742*2d9fd380Sjfb8856606 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
743*2d9fd380Sjfb8856606 &priority,
744*2d9fd380Sjfb8856606 sizeof(priority));
745*2d9fd380Sjfb8856606 field = ulp_rte_parser_fld_copy(field,
746*2d9fd380Sjfb8856606 &vlan_tag,
747*2d9fd380Sjfb8856606 sizeof(vlan_tag));
748*2d9fd380Sjfb8856606 field = ulp_rte_parser_fld_copy(field,
749*2d9fd380Sjfb8856606 &vlan_spec->inner_type,
750*2d9fd380Sjfb8856606 sizeof(vlan_spec->inner_type));
751*2d9fd380Sjfb8856606 eth_type = vlan_spec->inner_type;
752*2d9fd380Sjfb8856606 }
753*2d9fd380Sjfb8856606
754*2d9fd380Sjfb8856606 if (vlan_mask) {
755*2d9fd380Sjfb8856606 vlan_tag = ntohs(vlan_mask->tci);
756*2d9fd380Sjfb8856606 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
757*2d9fd380Sjfb8856606 vlan_tag &= 0xfff;
758*2d9fd380Sjfb8856606
759*2d9fd380Sjfb8856606 /*
760*2d9fd380Sjfb8856606 * the storage for priority and vlan tag is 2 bytes
761*2d9fd380Sjfb8856606 * The mask of priority which is 3 bits if it is all 1's
762*2d9fd380Sjfb8856606 * then make the rest bits 13 bits as 1's
763*2d9fd380Sjfb8856606 * so that it is matched as exact match.
764*2d9fd380Sjfb8856606 */
765*2d9fd380Sjfb8856606 if (priority == ULP_VLAN_PRIORITY_MASK)
766*2d9fd380Sjfb8856606 priority |= ~ULP_VLAN_PRIORITY_MASK;
767*2d9fd380Sjfb8856606 if (vlan_tag == ULP_VLAN_TAG_MASK)
768*2d9fd380Sjfb8856606 vlan_tag |= ~ULP_VLAN_TAG_MASK;
769*2d9fd380Sjfb8856606 vlan_tag = htons(vlan_tag);
770*2d9fd380Sjfb8856606
771*2d9fd380Sjfb8856606 /*
772*2d9fd380Sjfb8856606 * The priority field is ignored since OVS is setting it as
773*2d9fd380Sjfb8856606 * wild card match and it is not supported. This is a work
774*2d9fd380Sjfb8856606 * around and shall be addressed in the future.
775*2d9fd380Sjfb8856606 */
776*2d9fd380Sjfb8856606 ulp_rte_prsr_mask_ignore(params, &idx, &priority,
777*2d9fd380Sjfb8856606 sizeof(priority));
778*2d9fd380Sjfb8856606
779*2d9fd380Sjfb8856606 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
780*2d9fd380Sjfb8856606 sizeof(vlan_tag));
781*2d9fd380Sjfb8856606 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
782*2d9fd380Sjfb8856606 sizeof(vlan_mask->inner_type));
783*2d9fd380Sjfb8856606 }
784*2d9fd380Sjfb8856606 /* Set the vlan index to new incremented value */
785*2d9fd380Sjfb8856606 params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
786*2d9fd380Sjfb8856606
787*2d9fd380Sjfb8856606 /* Get the outer tag and inner tag counts */
788*2d9fd380Sjfb8856606 outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
789*2d9fd380Sjfb8856606 BNXT_ULP_CF_IDX_O_VTAG_NUM);
790*2d9fd380Sjfb8856606 inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
791*2d9fd380Sjfb8856606 BNXT_ULP_CF_IDX_I_VTAG_NUM);
792*2d9fd380Sjfb8856606
793*2d9fd380Sjfb8856606 /* Update the hdr_bitmap of the vlans */
794*2d9fd380Sjfb8856606 hdr_bit = ¶ms->hdr_bitmap;
795*2d9fd380Sjfb8856606 if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
796*2d9fd380Sjfb8856606 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
797*2d9fd380Sjfb8856606 !outer_vtag_num) {
798*2d9fd380Sjfb8856606 /* Update the vlan tag num */
799*2d9fd380Sjfb8856606 outer_vtag_num++;
800*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
801*2d9fd380Sjfb8856606 outer_vtag_num);
802*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
803*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
804*2d9fd380Sjfb8856606 ULP_BITMAP_SET(params->hdr_bitmap.bits,
805*2d9fd380Sjfb8856606 BNXT_ULP_HDR_BIT_OO_VLAN);
806*2d9fd380Sjfb8856606 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
807*2d9fd380Sjfb8856606 !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
808*2d9fd380Sjfb8856606 outer_vtag_num == 1) {
809*2d9fd380Sjfb8856606 /* update the vlan tag num */
810*2d9fd380Sjfb8856606 outer_vtag_num++;
811*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
812*2d9fd380Sjfb8856606 outer_vtag_num);
813*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
814*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
815*2d9fd380Sjfb8856606 ULP_BITMAP_SET(params->hdr_bitmap.bits,
816*2d9fd380Sjfb8856606 BNXT_ULP_HDR_BIT_OI_VLAN);
817*2d9fd380Sjfb8856606 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
818*2d9fd380Sjfb8856606 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
819*2d9fd380Sjfb8856606 !inner_vtag_num) {
820*2d9fd380Sjfb8856606 /* update the vlan tag num */
821*2d9fd380Sjfb8856606 inner_vtag_num++;
822*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
823*2d9fd380Sjfb8856606 inner_vtag_num);
824*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
825*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
826*2d9fd380Sjfb8856606 ULP_BITMAP_SET(params->hdr_bitmap.bits,
827*2d9fd380Sjfb8856606 BNXT_ULP_HDR_BIT_IO_VLAN);
828*2d9fd380Sjfb8856606 inner_flag = 1;
829*2d9fd380Sjfb8856606 } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
830*2d9fd380Sjfb8856606 ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
831*2d9fd380Sjfb8856606 inner_vtag_num == 1) {
832*2d9fd380Sjfb8856606 /* update the vlan tag num */
833*2d9fd380Sjfb8856606 inner_vtag_num++;
834*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
835*2d9fd380Sjfb8856606 inner_vtag_num);
836*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
837*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
838*2d9fd380Sjfb8856606 ULP_BITMAP_SET(params->hdr_bitmap.bits,
839*2d9fd380Sjfb8856606 BNXT_ULP_HDR_BIT_II_VLAN);
840*2d9fd380Sjfb8856606 inner_flag = 1;
841*2d9fd380Sjfb8856606 } else {
842*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
843*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
844*2d9fd380Sjfb8856606 }
845*2d9fd380Sjfb8856606 /* Update the field protocol hdr bitmap */
846*2d9fd380Sjfb8856606 ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
847*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
848*2d9fd380Sjfb8856606 }
849*2d9fd380Sjfb8856606
850*2d9fd380Sjfb8856606 /* Function to handle the update of proto header based on field values */
851*2d9fd380Sjfb8856606 static void
ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params * param,uint8_t proto,uint32_t in_flag)852*2d9fd380Sjfb8856606 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
853*2d9fd380Sjfb8856606 uint8_t proto, uint32_t in_flag)
854*2d9fd380Sjfb8856606 {
855*2d9fd380Sjfb8856606 if (proto == IPPROTO_UDP) {
856*2d9fd380Sjfb8856606 if (in_flag) {
857*2d9fd380Sjfb8856606 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
858*2d9fd380Sjfb8856606 BNXT_ULP_HDR_BIT_I_UDP);
859*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
860*2d9fd380Sjfb8856606 } else {
861*2d9fd380Sjfb8856606 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
862*2d9fd380Sjfb8856606 BNXT_ULP_HDR_BIT_O_UDP);
863*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
864*2d9fd380Sjfb8856606 }
865*2d9fd380Sjfb8856606 } else if (proto == IPPROTO_TCP) {
866*2d9fd380Sjfb8856606 if (in_flag) {
867*2d9fd380Sjfb8856606 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
868*2d9fd380Sjfb8856606 BNXT_ULP_HDR_BIT_I_TCP);
869*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
870*2d9fd380Sjfb8856606 } else {
871*2d9fd380Sjfb8856606 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
872*2d9fd380Sjfb8856606 BNXT_ULP_HDR_BIT_O_TCP);
873*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
874*2d9fd380Sjfb8856606 }
875*2d9fd380Sjfb8856606 }
876*2d9fd380Sjfb8856606 }
877*2d9fd380Sjfb8856606
/*
 * Function to handle the parsing of RTE Flow item IPV4 Header.
 *
 * item   [in]     Flow item of type RTE_FLOW_ITEM_TYPE_IPV4.
 * params [in/out] Parser state; hdr_field[], hdr_bitmap, field_idx and
 *                 the L3 computed fields are updated as a side effect.
 *
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR when a third L3
 * header is encountered (only two levels are supported).
 */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
	const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint8_t proto = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate there are no 3rd L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			      BNXT_ULP_HDR_BIT_O_ETH) &&
	    !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			      BNXT_ULP_HDR_BIT_I_ETH)) {
		/* Since F2 flow does not include eth item, when parser detects
		 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
		 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
		 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
		 * This will allow the parser post processor to update the
		 * t_dmac in hdr_field[o_eth.dmac]
		 */
		idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
			BNXT_ULP_PROTO_HDR_VLAN_NUM);
		params->field_idx = idx;
	}

	/*
	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
	 * header fields.  The copy order mirrors the on-wire IPv4 header
	 * layout and must match the template field layout at field_idx.
	 */
	if (ipv4_spec) {
		size = sizeof(ipv4_spec->hdr.version_ihl);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&ipv4_spec->hdr.version_ihl,
						size);
		size = sizeof(ipv4_spec->hdr.type_of_service);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.type_of_service,
						size);
		size = sizeof(ipv4_spec->hdr.total_length);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.total_length,
						size);
		size = sizeof(ipv4_spec->hdr.packet_id);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.packet_id,
						size);
		size = sizeof(ipv4_spec->hdr.fragment_offset);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.fragment_offset,
						size);
		size = sizeof(ipv4_spec->hdr.time_to_live);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.time_to_live,
						size);
		size = sizeof(ipv4_spec->hdr.next_proto_id);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.next_proto_id,
						size);
		/* Remember the L4 protocol for the bitmap update below. */
		proto = ipv4_spec->hdr.next_proto_id;
		size = sizeof(ipv4_spec->hdr.hdr_checksum);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.hdr_checksum,
						size);
		size = sizeof(ipv4_spec->hdr.src_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.src_addr,
						size);
		size = sizeof(ipv4_spec->hdr.dst_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv4_spec->hdr.dst_addr,
						size);
	}
	if (ipv4_mask) {
		/* Mask copies advance idx internally; same order as spec. */
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.version_ihl,
				       sizeof(ipv4_mask->hdr.version_ihl));
		/*
		 * The tos field is ignored since OVS is setting it as wild card
		 * match and it is not supported. This is a work around and
		 * shall be addressed in the future.
		 */
		ulp_rte_prsr_mask_ignore(params, &idx,
					 &ipv4_mask->hdr.type_of_service,
					 sizeof(ipv4_mask->hdr.type_of_service)
					 );

		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.total_length,
				       sizeof(ipv4_mask->hdr.total_length));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.packet_id,
				       sizeof(ipv4_mask->hdr.packet_id));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.fragment_offset,
				       sizeof(ipv4_mask->hdr.fragment_offset));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.time_to_live,
				       sizeof(ipv4_mask->hdr.time_to_live));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.next_proto_id,
				       sizeof(ipv4_mask->hdr.next_proto_id));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.hdr_checksum,
				       sizeof(ipv4_mask->hdr.hdr_checksum));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.src_addr,
				       sizeof(ipv4_mask->hdr.src_addr));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv4_mask->hdr.dst_addr,
				       sizeof(ipv4_mask->hdr.dst_addr));
	}
	/* Add the number of ipv4 header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;

	/* Set the ipv4 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
	}

	/* Some of the PMD applications may set the protocol field
	 * in the IPv4 spec but don't set the mask. So, consider
	 * the mask in the proto value calculation.
	 */
	if (ipv4_mask)
		proto &= ipv4_mask->hdr.next_proto_id;

	/* Update the field protocol hdr bitmap */
	ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
1028*2d9fd380Sjfb8856606
/*
 * Function to handle the parsing of RTE Flow item IPV6 Header.
 *
 * item   [in]     Flow item of type RTE_FLOW_ITEM_TYPE_IPV6.
 * params [in/out] Parser state; hdr_field[], hdr_bitmap, field_idx and
 *                 the L3 computed fields are updated as a side effect.
 *
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR when a third L3
 * header is encountered (only two levels are supported).
 */
int32_t
ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
	const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint32_t vtcf, vtcf_mask;
	uint8_t proto = 0;
	uint32_t inner_flag = 0;
	uint32_t cnt;

	/* validate there are no 3rd L3 header */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			      BNXT_ULP_HDR_BIT_O_ETH) &&
	    !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			      BNXT_ULP_HDR_BIT_I_ETH)) {
		/* Since F2 flow does not include eth item, when parser detects
		 * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
		 * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
		 * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
		 * This will allow the parser post processor to update the
		 * t_dmac in hdr_field[o_eth.dmac]
		 */
		idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
			BNXT_ULP_PROTO_HDR_VLAN_NUM);
		params->field_idx = idx;
	}

	/*
	 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
	 * header fields.  vtc_flow is split into three separate template
	 * fields (version, traffic class, flow label), each copied with
	 * the full 4-byte vtc_flow size.
	 */
	if (ipv6_spec) {
		size = sizeof(ipv6_spec->hdr.vtc_flow);

		vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&vtcf,
						size);

		vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
		field = ulp_rte_parser_fld_copy(field,
						&vtcf,
						size);

		vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
		field = ulp_rte_parser_fld_copy(field,
						&vtcf,
						size);

		size = sizeof(ipv6_spec->hdr.payload_len);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.payload_len,
						size);
		size = sizeof(ipv6_spec->hdr.proto);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.proto,
						size);
		/* Remember the L4 protocol for the bitmap update below. */
		proto = ipv6_spec->hdr.proto;
		size = sizeof(ipv6_spec->hdr.hop_limits);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.hop_limits,
						size);
		size = sizeof(ipv6_spec->hdr.src_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.src_addr,
						size);
		size = sizeof(ipv6_spec->hdr.dst_addr);
		field = ulp_rte_parser_fld_copy(field,
						&ipv6_spec->hdr.dst_addr,
						size);
	}
	if (ipv6_mask) {
		size = sizeof(ipv6_mask->hdr.vtc_flow);

		vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
		ulp_rte_prsr_mask_copy(params, &idx,
				       &vtcf_mask,
				       size);
		/*
		 * The TC and flow label field are ignored since OVS is
		 * setting it for match and it is not supported.
		 * This is a work around and
		 * shall be addressed in the future.
		 */
		vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
		ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
		vtcf_mask =
			BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
		ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);

		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.payload_len,
				       sizeof(ipv6_mask->hdr.payload_len));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.proto,
				       sizeof(ipv6_mask->hdr.proto));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.hop_limits,
				       sizeof(ipv6_mask->hdr.hop_limits));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.src_addr,
				       sizeof(ipv6_mask->hdr.src_addr));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &ipv6_mask->hdr.dst_addr,
				       sizeof(ipv6_mask->hdr.dst_addr));
	}
	/* add number of ipv6 header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;

	/* Set the ipv6 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
		inner_flag = 1;
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
	}

	/* Some of the PMD applications may set the protocol field
	 * in the IPv6 spec but don't set the mask. So, consider
	 * the mask in proto value calculation.
	 */
	if (ipv6_mask)
		proto &= ipv6_mask->hdr.proto;

	/* Update the field protocol hdr bitmap */
	ulp_rte_l3_proto_type_update(params, proto, inner_flag);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);

	return BNXT_TF_RC_SUCCESS;
}
1174*2d9fd380Sjfb8856606
1175*2d9fd380Sjfb8856606 /* Function to handle the update of proto header based on field values */
1176*2d9fd380Sjfb8856606 static void
ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params * param,uint16_t dst_port)1177*2d9fd380Sjfb8856606 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1178*2d9fd380Sjfb8856606 uint16_t dst_port)
1179*2d9fd380Sjfb8856606 {
1180*2d9fd380Sjfb8856606 if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1181*2d9fd380Sjfb8856606 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1182*2d9fd380Sjfb8856606 BNXT_ULP_HDR_BIT_T_VXLAN);
1183*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1184*2d9fd380Sjfb8856606 }
1185*2d9fd380Sjfb8856606 }
1186*2d9fd380Sjfb8856606
/* Function to handle the parsing of RTE Flow item UDP Header.
 * Copies the UDP spec/mask fields into params->hdr_field, updates the
 * header bitmaps (outer vs. inner L4) and bumps the L4 header count.
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
int32_t
ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_udp *udp_spec = item->spec;
	const struct rte_flow_item_udp *udp_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint16_t dst_port = 0;
	uint32_t cnt;

	/* At most two L4 headers (outer + inner) are supported per flow. */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for UDP into hdr_field using UDP
	 * header fields
	 */
	if (udp_spec) {
		size = sizeof(udp_spec->hdr.src_port);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&udp_spec->hdr.src_port,
						size);

		size = sizeof(udp_spec->hdr.dst_port);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dst_port,
						size);
		/* Remember dst port (network order) for VXLAN detection */
		dst_port = udp_spec->hdr.dst_port;
		size = sizeof(udp_spec->hdr.dgram_len);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dgram_len,
						size);
		size = sizeof(udp_spec->hdr.dgram_cksum);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dgram_cksum,
						size);
	}
	/* Masks are copied to the same field slots; the helper advances idx */
	if (udp_mask) {
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.src_port,
				       sizeof(udp_mask->hdr.src_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dst_port,
				       sizeof(udp_mask->hdr.dst_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dgram_len,
				       sizeof(udp_mask->hdr.dgram_len));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dgram_cksum,
				       sizeof(udp_mask->hdr.dgram_cksum));
	}

	/* Add number of UDP header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;

	/* Set the UDP header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
		/* An outer L4 already exists, so this is the inner UDP */
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
		/* Update the field protocol hdr bitmap (VXLAN detection) */
		ulp_rte_l4_proto_type_update(params, dst_port);
	}
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
1263*2d9fd380Sjfb8856606
/* Function to handle the parsing of RTE Flow item TCP Header.
 * Copies the TCP spec/mask fields into params->hdr_field, updates the
 * header bitmaps (outer vs. inner L4) and bumps the L4 header count.
 * Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_tcp *tcp_spec = item->spec;
	const struct rte_flow_item_tcp *tcp_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint32_t cnt;

	/* At most two L4 headers (outer + inner) are supported per flow. */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for TCP into hdr_field using TCP
	 * header fields
	 */
	if (tcp_spec) {
		size = sizeof(tcp_spec->hdr.src_port);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&tcp_spec->hdr.src_port,
						size);
		size = sizeof(tcp_spec->hdr.dst_port);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.dst_port,
						size);
		size = sizeof(tcp_spec->hdr.sent_seq);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.sent_seq,
						size);
		size = sizeof(tcp_spec->hdr.recv_ack);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.recv_ack,
						size);
		size = sizeof(tcp_spec->hdr.data_off);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.data_off,
						size);
		size = sizeof(tcp_spec->hdr.tcp_flags);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.tcp_flags,
						size);
		size = sizeof(tcp_spec->hdr.rx_win);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.rx_win,
						size);
		size = sizeof(tcp_spec->hdr.cksum);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.cksum,
						size);
		size = sizeof(tcp_spec->hdr.tcp_urp);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.tcp_urp,
						size);
	} else {
		/* NOTE(review): when no spec is given, idx is advanced past
		 * the TCP field slots before the mask copy below, while the
		 * UDP handler leaves idx unchanged in the same situation —
		 * confirm which mask placement is intended.
		 */
		idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
	}

	/* Masks are copied to the field slots; the helper advances idx */
	if (tcp_mask) {
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.src_port,
				       sizeof(tcp_mask->hdr.src_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.dst_port,
				       sizeof(tcp_mask->hdr.dst_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.sent_seq,
				       sizeof(tcp_mask->hdr.sent_seq));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.recv_ack,
				       sizeof(tcp_mask->hdr.recv_ack));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.data_off,
				       sizeof(tcp_mask->hdr.data_off));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.tcp_flags,
				       sizeof(tcp_mask->hdr.tcp_flags));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.rx_win,
				       sizeof(tcp_mask->hdr.rx_win));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.cksum,
				       sizeof(tcp_mask->hdr.cksum));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.tcp_urp,
				       sizeof(tcp_mask->hdr.tcp_urp));
	}
	/* add number of TCP header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;

	/* Set the TCP header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
		/* An outer L4 already exists, so this is the inner TCP */
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
	}
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}
1372*2d9fd380Sjfb8856606
1373*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1374*2d9fd380Sjfb8856606 int32_t
ulp_rte_vxlan_hdr_handler(const struct rte_flow_item * item,struct ulp_rte_parser_params * params)1375*2d9fd380Sjfb8856606 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1376*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
1377*2d9fd380Sjfb8856606 {
1378*2d9fd380Sjfb8856606 const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1379*2d9fd380Sjfb8856606 const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1380*2d9fd380Sjfb8856606 struct ulp_rte_hdr_field *field;
1381*2d9fd380Sjfb8856606 struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
1382*2d9fd380Sjfb8856606 uint32_t idx = params->field_idx;
1383*2d9fd380Sjfb8856606 uint32_t size;
1384*2d9fd380Sjfb8856606
1385*2d9fd380Sjfb8856606 /*
1386*2d9fd380Sjfb8856606 * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1387*2d9fd380Sjfb8856606 * header fields
1388*2d9fd380Sjfb8856606 */
1389*2d9fd380Sjfb8856606 if (vxlan_spec) {
1390*2d9fd380Sjfb8856606 size = sizeof(vxlan_spec->flags);
1391*2d9fd380Sjfb8856606 field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
1392*2d9fd380Sjfb8856606 &vxlan_spec->flags,
1393*2d9fd380Sjfb8856606 size);
1394*2d9fd380Sjfb8856606 size = sizeof(vxlan_spec->rsvd0);
1395*2d9fd380Sjfb8856606 field = ulp_rte_parser_fld_copy(field,
1396*2d9fd380Sjfb8856606 &vxlan_spec->rsvd0,
1397*2d9fd380Sjfb8856606 size);
1398*2d9fd380Sjfb8856606 size = sizeof(vxlan_spec->vni);
1399*2d9fd380Sjfb8856606 field = ulp_rte_parser_fld_copy(field,
1400*2d9fd380Sjfb8856606 &vxlan_spec->vni,
1401*2d9fd380Sjfb8856606 size);
1402*2d9fd380Sjfb8856606 size = sizeof(vxlan_spec->rsvd1);
1403*2d9fd380Sjfb8856606 field = ulp_rte_parser_fld_copy(field,
1404*2d9fd380Sjfb8856606 &vxlan_spec->rsvd1,
1405*2d9fd380Sjfb8856606 size);
1406*2d9fd380Sjfb8856606 }
1407*2d9fd380Sjfb8856606 if (vxlan_mask) {
1408*2d9fd380Sjfb8856606 ulp_rte_prsr_mask_copy(params, &idx,
1409*2d9fd380Sjfb8856606 &vxlan_mask->flags,
1410*2d9fd380Sjfb8856606 sizeof(vxlan_mask->flags));
1411*2d9fd380Sjfb8856606 ulp_rte_prsr_mask_copy(params, &idx,
1412*2d9fd380Sjfb8856606 &vxlan_mask->rsvd0,
1413*2d9fd380Sjfb8856606 sizeof(vxlan_mask->rsvd0));
1414*2d9fd380Sjfb8856606 ulp_rte_prsr_mask_copy(params, &idx,
1415*2d9fd380Sjfb8856606 &vxlan_mask->vni,
1416*2d9fd380Sjfb8856606 sizeof(vxlan_mask->vni));
1417*2d9fd380Sjfb8856606 ulp_rte_prsr_mask_copy(params, &idx,
1418*2d9fd380Sjfb8856606 &vxlan_mask->rsvd1,
1419*2d9fd380Sjfb8856606 sizeof(vxlan_mask->rsvd1));
1420*2d9fd380Sjfb8856606 }
1421*2d9fd380Sjfb8856606 /* Add number of vxlan header elements */
1422*2d9fd380Sjfb8856606 params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1423*2d9fd380Sjfb8856606
1424*2d9fd380Sjfb8856606 /* Update the hdr_bitmap with vxlan */
1425*2d9fd380Sjfb8856606 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1426*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
1427*2d9fd380Sjfb8856606 }
1428*2d9fd380Sjfb8856606
/* Function to handle the parsing of RTE Flow item void Header.
 * VOID items carry no match data, so there is nothing to parse.
 */
int32_t
ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}
1436*2d9fd380Sjfb8856606
/* Function to handle the parsing of RTE Flow action void Header.
 * VOID actions have no effect, so there is nothing to parse.
 */
int32_t
ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}
1444*2d9fd380Sjfb8856606
1445*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow action Mark Header. */
1446*2d9fd380Sjfb8856606 int32_t
ulp_rte_mark_act_handler(const struct rte_flow_action * action_item,struct ulp_rte_parser_params * param)1447*2d9fd380Sjfb8856606 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1448*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *param)
1449*2d9fd380Sjfb8856606 {
1450*2d9fd380Sjfb8856606 const struct rte_flow_action_mark *mark;
1451*2d9fd380Sjfb8856606 struct ulp_rte_act_bitmap *act = ¶m->act_bitmap;
1452*2d9fd380Sjfb8856606 uint32_t mark_id;
1453*2d9fd380Sjfb8856606
1454*2d9fd380Sjfb8856606 mark = action_item->conf;
1455*2d9fd380Sjfb8856606 if (mark) {
1456*2d9fd380Sjfb8856606 mark_id = tfp_cpu_to_be_32(mark->id);
1457*2d9fd380Sjfb8856606 memcpy(¶m->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1458*2d9fd380Sjfb8856606 &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1459*2d9fd380Sjfb8856606
1460*2d9fd380Sjfb8856606 /* Update the hdr_bitmap with vxlan */
1461*2d9fd380Sjfb8856606 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1462*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
1463*2d9fd380Sjfb8856606 }
1464*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1465*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
1466*2d9fd380Sjfb8856606 }
1467*2d9fd380Sjfb8856606
1468*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow action RSS Header. */
1469*2d9fd380Sjfb8856606 int32_t
ulp_rte_rss_act_handler(const struct rte_flow_action * action_item,struct ulp_rte_parser_params * param)1470*2d9fd380Sjfb8856606 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1471*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *param)
1472*2d9fd380Sjfb8856606 {
1473*2d9fd380Sjfb8856606 const struct rte_flow_action_rss *rss = action_item->conf;
1474*2d9fd380Sjfb8856606
1475*2d9fd380Sjfb8856606 if (rss) {
1476*2d9fd380Sjfb8856606 /* Update the hdr_bitmap with vxlan */
1477*2d9fd380Sjfb8856606 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1478*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
1479*2d9fd380Sjfb8856606 }
1480*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1481*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
1482*2d9fd380Sjfb8856606 }
1483*2d9fd380Sjfb8856606
1484*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1485*2d9fd380Sjfb8856606 int32_t
ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action * action_item,struct ulp_rte_parser_params * params)1486*2d9fd380Sjfb8856606 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1487*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
1488*2d9fd380Sjfb8856606 {
1489*2d9fd380Sjfb8856606 const struct rte_flow_action_vxlan_encap *vxlan_encap;
1490*2d9fd380Sjfb8856606 const struct rte_flow_item *item;
1491*2d9fd380Sjfb8856606 const struct rte_flow_item_eth *eth_spec;
1492*2d9fd380Sjfb8856606 const struct rte_flow_item_ipv4 *ipv4_spec;
1493*2d9fd380Sjfb8856606 const struct rte_flow_item_ipv6 *ipv6_spec;
1494*2d9fd380Sjfb8856606 struct rte_flow_item_vxlan vxlan_spec;
1495*2d9fd380Sjfb8856606 uint32_t vlan_num = 0, vlan_size = 0;
1496*2d9fd380Sjfb8856606 uint32_t ip_size = 0, ip_type = 0;
1497*2d9fd380Sjfb8856606 uint32_t vxlan_size = 0;
1498*2d9fd380Sjfb8856606 uint8_t *buff;
1499*2d9fd380Sjfb8856606 /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1500*2d9fd380Sjfb8856606 const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1501*2d9fd380Sjfb8856606 0x00, 0x40, 0x11};
1502*2d9fd380Sjfb8856606 /* IPv6 header per byte - vtc-flow,flow,zero,nexthdr-ttl */
1503*2d9fd380Sjfb8856606 const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
1504*2d9fd380Sjfb8856606 0x00, 0x11, 0xf6};
1505*2d9fd380Sjfb8856606 struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
1506*2d9fd380Sjfb8856606 struct ulp_rte_act_prop *ap = ¶ms->act_prop;
1507*2d9fd380Sjfb8856606 const uint8_t *tmp_buff;
1508*2d9fd380Sjfb8856606
1509*2d9fd380Sjfb8856606 vxlan_encap = action_item->conf;
1510*2d9fd380Sjfb8856606 if (!vxlan_encap) {
1511*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1512*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
1513*2d9fd380Sjfb8856606 }
1514*2d9fd380Sjfb8856606
1515*2d9fd380Sjfb8856606 item = vxlan_encap->definition;
1516*2d9fd380Sjfb8856606 if (!item) {
1517*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1518*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
1519*2d9fd380Sjfb8856606 }
1520*2d9fd380Sjfb8856606
1521*2d9fd380Sjfb8856606 if (!ulp_rte_item_skip_void(&item, 0))
1522*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
1523*2d9fd380Sjfb8856606
1524*2d9fd380Sjfb8856606 /* must have ethernet header */
1525*2d9fd380Sjfb8856606 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1526*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1527*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
1528*2d9fd380Sjfb8856606 }
1529*2d9fd380Sjfb8856606 eth_spec = item->spec;
1530*2d9fd380Sjfb8856606 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1531*2d9fd380Sjfb8856606 ulp_encap_buffer_copy(buff,
1532*2d9fd380Sjfb8856606 eth_spec->dst.addr_bytes,
1533*2d9fd380Sjfb8856606 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
1534*2d9fd380Sjfb8856606 ULP_BUFFER_ALIGN_8_BYTE);
1535*2d9fd380Sjfb8856606
1536*2d9fd380Sjfb8856606 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1537*2d9fd380Sjfb8856606 ulp_encap_buffer_copy(buff,
1538*2d9fd380Sjfb8856606 eth_spec->src.addr_bytes,
1539*2d9fd380Sjfb8856606 BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
1540*2d9fd380Sjfb8856606 ULP_BUFFER_ALIGN_8_BYTE);
1541*2d9fd380Sjfb8856606
1542*2d9fd380Sjfb8856606 /* Goto the next item */
1543*2d9fd380Sjfb8856606 if (!ulp_rte_item_skip_void(&item, 1))
1544*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
1545*2d9fd380Sjfb8856606
1546*2d9fd380Sjfb8856606 /* May have vlan header */
1547*2d9fd380Sjfb8856606 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1548*2d9fd380Sjfb8856606 vlan_num++;
1549*2d9fd380Sjfb8856606 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1550*2d9fd380Sjfb8856606 ulp_encap_buffer_copy(buff,
1551*2d9fd380Sjfb8856606 item->spec,
1552*2d9fd380Sjfb8856606 sizeof(struct rte_vlan_hdr),
1553*2d9fd380Sjfb8856606 ULP_BUFFER_ALIGN_8_BYTE);
1554*2d9fd380Sjfb8856606
1555*2d9fd380Sjfb8856606 if (!ulp_rte_item_skip_void(&item, 1))
1556*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
1557*2d9fd380Sjfb8856606 }
1558*2d9fd380Sjfb8856606
1559*2d9fd380Sjfb8856606 /* may have two vlan headers */
1560*2d9fd380Sjfb8856606 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1561*2d9fd380Sjfb8856606 vlan_num++;
1562*2d9fd380Sjfb8856606 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1563*2d9fd380Sjfb8856606 sizeof(struct rte_vlan_hdr)],
1564*2d9fd380Sjfb8856606 item->spec,
1565*2d9fd380Sjfb8856606 sizeof(struct rte_vlan_hdr));
1566*2d9fd380Sjfb8856606 if (!ulp_rte_item_skip_void(&item, 1))
1567*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
1568*2d9fd380Sjfb8856606 }
1569*2d9fd380Sjfb8856606 /* Update the vlan count and size of more than one */
1570*2d9fd380Sjfb8856606 if (vlan_num) {
1571*2d9fd380Sjfb8856606 vlan_size = vlan_num * sizeof(struct rte_vlan_hdr);
1572*2d9fd380Sjfb8856606 vlan_num = tfp_cpu_to_be_32(vlan_num);
1573*2d9fd380Sjfb8856606 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1574*2d9fd380Sjfb8856606 &vlan_num,
1575*2d9fd380Sjfb8856606 sizeof(uint32_t));
1576*2d9fd380Sjfb8856606 vlan_size = tfp_cpu_to_be_32(vlan_size);
1577*2d9fd380Sjfb8856606 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1578*2d9fd380Sjfb8856606 &vlan_size,
1579*2d9fd380Sjfb8856606 sizeof(uint32_t));
1580*2d9fd380Sjfb8856606 }
1581*2d9fd380Sjfb8856606
1582*2d9fd380Sjfb8856606 /* L3 must be IPv4, IPv6 */
1583*2d9fd380Sjfb8856606 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1584*2d9fd380Sjfb8856606 ipv4_spec = item->spec;
1585*2d9fd380Sjfb8856606 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1586*2d9fd380Sjfb8856606
1587*2d9fd380Sjfb8856606 /* copy the ipv4 details */
1588*2d9fd380Sjfb8856606 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1589*2d9fd380Sjfb8856606 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1590*2d9fd380Sjfb8856606 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1591*2d9fd380Sjfb8856606 ulp_encap_buffer_copy(buff,
1592*2d9fd380Sjfb8856606 def_ipv4_hdr,
1593*2d9fd380Sjfb8856606 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1594*2d9fd380Sjfb8856606 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1595*2d9fd380Sjfb8856606 ULP_BUFFER_ALIGN_8_BYTE);
1596*2d9fd380Sjfb8856606 } else {
1597*2d9fd380Sjfb8856606 /* Total length being ignored in the ip hdr. */
1598*2d9fd380Sjfb8856606 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1599*2d9fd380Sjfb8856606 tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1600*2d9fd380Sjfb8856606 ulp_encap_buffer_copy(buff,
1601*2d9fd380Sjfb8856606 tmp_buff,
1602*2d9fd380Sjfb8856606 BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1603*2d9fd380Sjfb8856606 ULP_BUFFER_ALIGN_8_BYTE);
1604*2d9fd380Sjfb8856606 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1605*2d9fd380Sjfb8856606 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1606*2d9fd380Sjfb8856606 ulp_encap_buffer_copy(buff,
1607*2d9fd380Sjfb8856606 &ipv4_spec->hdr.version_ihl,
1608*2d9fd380Sjfb8856606 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
1609*2d9fd380Sjfb8856606 ULP_BUFFER_ALIGN_8_BYTE);
1610*2d9fd380Sjfb8856606 }
1611*2d9fd380Sjfb8856606
1612*2d9fd380Sjfb8856606 /* Update the dst ip address in ip encap buffer */
1613*2d9fd380Sjfb8856606 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1614*2d9fd380Sjfb8856606 BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1615*2d9fd380Sjfb8856606 BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1616*2d9fd380Sjfb8856606 ulp_encap_buffer_copy(buff,
1617*2d9fd380Sjfb8856606 (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1618*2d9fd380Sjfb8856606 sizeof(ipv4_spec->hdr.dst_addr),
1619*2d9fd380Sjfb8856606 ULP_BUFFER_ALIGN_8_BYTE);
1620*2d9fd380Sjfb8856606
1621*2d9fd380Sjfb8856606 /* Update the src ip address */
1622*2d9fd380Sjfb8856606 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
1623*2d9fd380Sjfb8856606 BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
1624*2d9fd380Sjfb8856606 sizeof(ipv4_spec->hdr.src_addr)];
1625*2d9fd380Sjfb8856606 ulp_encap_buffer_copy(buff,
1626*2d9fd380Sjfb8856606 (const uint8_t *)&ipv4_spec->hdr.src_addr,
1627*2d9fd380Sjfb8856606 sizeof(ipv4_spec->hdr.src_addr),
1628*2d9fd380Sjfb8856606 ULP_BUFFER_ALIGN_8_BYTE);
1629*2d9fd380Sjfb8856606
1630*2d9fd380Sjfb8856606 /* Update the ip size details */
1631*2d9fd380Sjfb8856606 ip_size = tfp_cpu_to_be_32(ip_size);
1632*2d9fd380Sjfb8856606 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1633*2d9fd380Sjfb8856606 &ip_size, sizeof(uint32_t));
1634*2d9fd380Sjfb8856606
1635*2d9fd380Sjfb8856606 /* update the ip type */
1636*2d9fd380Sjfb8856606 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1637*2d9fd380Sjfb8856606 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1638*2d9fd380Sjfb8856606 &ip_type, sizeof(uint32_t));
1639*2d9fd380Sjfb8856606
1640*2d9fd380Sjfb8856606 /* update the computed field to notify it is ipv4 header */
1641*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1642*2d9fd380Sjfb8856606 1);
1643*2d9fd380Sjfb8856606
1644*2d9fd380Sjfb8856606 if (!ulp_rte_item_skip_void(&item, 1))
1645*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
1646*2d9fd380Sjfb8856606 } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1647*2d9fd380Sjfb8856606 ipv6_spec = item->spec;
1648*2d9fd380Sjfb8856606 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1649*2d9fd380Sjfb8856606
1650*2d9fd380Sjfb8856606 /* copy the ipv6 details */
1651*2d9fd380Sjfb8856606 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1652*2d9fd380Sjfb8856606 if (ulp_buffer_is_empty(tmp_buff,
1653*2d9fd380Sjfb8856606 BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
1654*2d9fd380Sjfb8856606 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1655*2d9fd380Sjfb8856606 ulp_encap_buffer_copy(buff,
1656*2d9fd380Sjfb8856606 def_ipv6_hdr,
1657*2d9fd380Sjfb8856606 sizeof(def_ipv6_hdr),
1658*2d9fd380Sjfb8856606 ULP_BUFFER_ALIGN_8_BYTE);
1659*2d9fd380Sjfb8856606 } else {
1660*2d9fd380Sjfb8856606 /* The payload length being ignored in the ip hdr. */
1661*2d9fd380Sjfb8856606 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1662*2d9fd380Sjfb8856606 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
1663*2d9fd380Sjfb8856606 ulp_encap_buffer_copy(buff,
1664*2d9fd380Sjfb8856606 tmp_buff,
1665*2d9fd380Sjfb8856606 BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
1666*2d9fd380Sjfb8856606 ULP_BUFFER_ALIGN_8_BYTE);
1667*2d9fd380Sjfb8856606 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1668*2d9fd380Sjfb8856606 BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
1669*2d9fd380Sjfb8856606 BNXT_ULP_ENCAP_IPV6_DO];
1670*2d9fd380Sjfb8856606 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1671*2d9fd380Sjfb8856606 ulp_encap_buffer_copy(buff,
1672*2d9fd380Sjfb8856606 tmp_buff,
1673*2d9fd380Sjfb8856606 BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
1674*2d9fd380Sjfb8856606 ULP_BUFFER_ALIGN_8_BYTE);
1675*2d9fd380Sjfb8856606 }
1676*2d9fd380Sjfb8856606 /* Update the dst ip address in ip encap buffer */
1677*2d9fd380Sjfb8856606 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1678*2d9fd380Sjfb8856606 sizeof(def_ipv6_hdr)];
1679*2d9fd380Sjfb8856606 ulp_encap_buffer_copy(buff,
1680*2d9fd380Sjfb8856606 (const uint8_t *)ipv6_spec->hdr.dst_addr,
1681*2d9fd380Sjfb8856606 sizeof(ipv6_spec->hdr.dst_addr),
1682*2d9fd380Sjfb8856606 ULP_BUFFER_ALIGN_8_BYTE);
1683*2d9fd380Sjfb8856606
1684*2d9fd380Sjfb8856606 /* Update the src ip address */
1685*2d9fd380Sjfb8856606 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1686*2d9fd380Sjfb8856606 ulp_encap_buffer_copy(buff,
1687*2d9fd380Sjfb8856606 (const uint8_t *)ipv6_spec->hdr.src_addr,
1688*2d9fd380Sjfb8856606 sizeof(ipv6_spec->hdr.src_addr),
1689*2d9fd380Sjfb8856606 ULP_BUFFER_ALIGN_16_BYTE);
1690*2d9fd380Sjfb8856606
1691*2d9fd380Sjfb8856606 /* Update the ip size details */
1692*2d9fd380Sjfb8856606 ip_size = tfp_cpu_to_be_32(ip_size);
1693*2d9fd380Sjfb8856606 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1694*2d9fd380Sjfb8856606 &ip_size, sizeof(uint32_t));
1695*2d9fd380Sjfb8856606
1696*2d9fd380Sjfb8856606 /* update the ip type */
1697*2d9fd380Sjfb8856606 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1698*2d9fd380Sjfb8856606 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1699*2d9fd380Sjfb8856606 &ip_type, sizeof(uint32_t));
1700*2d9fd380Sjfb8856606
1701*2d9fd380Sjfb8856606 /* update the computed field to notify it is ipv6 header */
1702*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1703*2d9fd380Sjfb8856606 1);
1704*2d9fd380Sjfb8856606
1705*2d9fd380Sjfb8856606 if (!ulp_rte_item_skip_void(&item, 1))
1706*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
1707*2d9fd380Sjfb8856606 } else {
1708*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1709*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
1710*2d9fd380Sjfb8856606 }
1711*2d9fd380Sjfb8856606
1712*2d9fd380Sjfb8856606 /* L4 is UDP */
1713*2d9fd380Sjfb8856606 if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1714*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1715*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
1716*2d9fd380Sjfb8856606 }
1717*2d9fd380Sjfb8856606 /* copy the udp details */
1718*2d9fd380Sjfb8856606 ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1719*2d9fd380Sjfb8856606 item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
1720*2d9fd380Sjfb8856606 ULP_BUFFER_ALIGN_8_BYTE);
1721*2d9fd380Sjfb8856606
1722*2d9fd380Sjfb8856606 if (!ulp_rte_item_skip_void(&item, 1))
1723*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
1724*2d9fd380Sjfb8856606
1725*2d9fd380Sjfb8856606 /* Finally VXLAN */
1726*2d9fd380Sjfb8856606 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1727*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1728*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
1729*2d9fd380Sjfb8856606 }
1730*2d9fd380Sjfb8856606 vxlan_size = sizeof(struct rte_vxlan_hdr);
1731*2d9fd380Sjfb8856606 /* copy the vxlan details */
1732*2d9fd380Sjfb8856606 memcpy(&vxlan_spec, item->spec, vxlan_size);
1733*2d9fd380Sjfb8856606 vxlan_spec.flags = 0x08;
1734*2d9fd380Sjfb8856606 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
1735*2d9fd380Sjfb8856606 if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
1736*2d9fd380Sjfb8856606 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1737*2d9fd380Sjfb8856606 vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
1738*2d9fd380Sjfb8856606 } else {
1739*2d9fd380Sjfb8856606 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1740*2d9fd380Sjfb8856606 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1741*2d9fd380Sjfb8856606 ulp_encap_buffer_copy(buff + (vxlan_size / 2),
1742*2d9fd380Sjfb8856606 (const uint8_t *)&vxlan_spec.vni,
1743*2d9fd380Sjfb8856606 vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1744*2d9fd380Sjfb8856606 }
1745*2d9fd380Sjfb8856606 vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1746*2d9fd380Sjfb8856606 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1747*2d9fd380Sjfb8856606 &vxlan_size, sizeof(uint32_t));
1748*2d9fd380Sjfb8856606
1749*2d9fd380Sjfb8856606 /* update the hdr_bitmap with vxlan */
1750*2d9fd380Sjfb8856606 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1751*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
1752*2d9fd380Sjfb8856606 }
1753*2d9fd380Sjfb8856606
1754*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow action vxlan_encap Header */
1755*2d9fd380Sjfb8856606 int32_t
ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action * action_item __rte_unused,struct ulp_rte_parser_params * params)1756*2d9fd380Sjfb8856606 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1757*2d9fd380Sjfb8856606 __rte_unused,
1758*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
1759*2d9fd380Sjfb8856606 {
1760*2d9fd380Sjfb8856606 /* update the hdr_bitmap with vxlan */
1761*2d9fd380Sjfb8856606 ULP_BITMAP_SET(params->act_bitmap.bits,
1762*2d9fd380Sjfb8856606 BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1763*2d9fd380Sjfb8856606 /* Update computational field with tunnel decap info */
1764*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
1765*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1766*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
1767*2d9fd380Sjfb8856606 }
1768*2d9fd380Sjfb8856606
1769*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow action drop Header. */
1770*2d9fd380Sjfb8856606 int32_t
ulp_rte_drop_act_handler(const struct rte_flow_action * action_item __rte_unused,struct ulp_rte_parser_params * params)1771*2d9fd380Sjfb8856606 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1772*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
1773*2d9fd380Sjfb8856606 {
1774*2d9fd380Sjfb8856606 /* Update the hdr_bitmap with drop */
1775*2d9fd380Sjfb8856606 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1776*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
1777*2d9fd380Sjfb8856606 }
1778*2d9fd380Sjfb8856606
1779*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow action count. */
1780*2d9fd380Sjfb8856606 int32_t
ulp_rte_count_act_handler(const struct rte_flow_action * action_item,struct ulp_rte_parser_params * params)1781*2d9fd380Sjfb8856606 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1782*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
1783*2d9fd380Sjfb8856606
1784*2d9fd380Sjfb8856606 {
1785*2d9fd380Sjfb8856606 const struct rte_flow_action_count *act_count;
1786*2d9fd380Sjfb8856606 struct ulp_rte_act_prop *act_prop = ¶ms->act_prop;
1787*2d9fd380Sjfb8856606
1788*2d9fd380Sjfb8856606 act_count = action_item->conf;
1789*2d9fd380Sjfb8856606 if (act_count) {
1790*2d9fd380Sjfb8856606 if (act_count->shared) {
1791*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR,
1792*2d9fd380Sjfb8856606 "Parse Error:Shared count not supported\n");
1793*2d9fd380Sjfb8856606 return BNXT_TF_RC_PARSE_ERR;
1794*2d9fd380Sjfb8856606 }
1795*2d9fd380Sjfb8856606 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1796*2d9fd380Sjfb8856606 &act_count->id,
1797*2d9fd380Sjfb8856606 BNXT_ULP_ACT_PROP_SZ_COUNT);
1798*2d9fd380Sjfb8856606 }
1799*2d9fd380Sjfb8856606
1800*2d9fd380Sjfb8856606 /* Update the hdr_bitmap with count */
1801*2d9fd380Sjfb8856606 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1802*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
1803*2d9fd380Sjfb8856606 }
1804*2d9fd380Sjfb8856606
1805*2d9fd380Sjfb8856606 /* Function to handle the parsing of action ports. */
1806*2d9fd380Sjfb8856606 static int32_t
ulp_rte_parser_act_port_set(struct ulp_rte_parser_params * param,uint32_t ifindex)1807*2d9fd380Sjfb8856606 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1808*2d9fd380Sjfb8856606 uint32_t ifindex)
1809*2d9fd380Sjfb8856606 {
1810*2d9fd380Sjfb8856606 enum bnxt_ulp_direction_type dir;
1811*2d9fd380Sjfb8856606 uint16_t pid_s;
1812*2d9fd380Sjfb8856606 uint32_t pid;
1813*2d9fd380Sjfb8856606 struct ulp_rte_act_prop *act = ¶m->act_prop;
1814*2d9fd380Sjfb8856606 enum bnxt_ulp_intf_type port_type;
1815*2d9fd380Sjfb8856606 uint32_t vnic_type;
1816*2d9fd380Sjfb8856606
1817*2d9fd380Sjfb8856606 /* Get the direction */
1818*2d9fd380Sjfb8856606 dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1819*2d9fd380Sjfb8856606 if (dir == BNXT_ULP_DIR_EGRESS) {
1820*2d9fd380Sjfb8856606 /* For egress direction, fill vport */
1821*2d9fd380Sjfb8856606 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1822*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
1823*2d9fd380Sjfb8856606
1824*2d9fd380Sjfb8856606 pid = pid_s;
1825*2d9fd380Sjfb8856606 pid = rte_cpu_to_be_32(pid);
1826*2d9fd380Sjfb8856606 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1827*2d9fd380Sjfb8856606 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1828*2d9fd380Sjfb8856606 } else {
1829*2d9fd380Sjfb8856606 /* For ingress direction, fill vnic */
1830*2d9fd380Sjfb8856606 port_type = ULP_COMP_FLD_IDX_RD(param,
1831*2d9fd380Sjfb8856606 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1832*2d9fd380Sjfb8856606 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1833*2d9fd380Sjfb8856606 vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1834*2d9fd380Sjfb8856606 else
1835*2d9fd380Sjfb8856606 vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1836*2d9fd380Sjfb8856606
1837*2d9fd380Sjfb8856606 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1838*2d9fd380Sjfb8856606 vnic_type, &pid_s))
1839*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
1840*2d9fd380Sjfb8856606
1841*2d9fd380Sjfb8856606 pid = pid_s;
1842*2d9fd380Sjfb8856606 pid = rte_cpu_to_be_32(pid);
1843*2d9fd380Sjfb8856606 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1844*2d9fd380Sjfb8856606 &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1845*2d9fd380Sjfb8856606 }
1846*2d9fd380Sjfb8856606
1847*2d9fd380Sjfb8856606 /* Update the action port set bit */
1848*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1849*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
1850*2d9fd380Sjfb8856606 }
1851*2d9fd380Sjfb8856606
1852*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow action PF. */
1853*2d9fd380Sjfb8856606 int32_t
ulp_rte_pf_act_handler(const struct rte_flow_action * action_item __rte_unused,struct ulp_rte_parser_params * params)1854*2d9fd380Sjfb8856606 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1855*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
1856*2d9fd380Sjfb8856606 {
1857*2d9fd380Sjfb8856606 uint32_t port_id;
1858*2d9fd380Sjfb8856606 uint32_t ifindex;
1859*2d9fd380Sjfb8856606 enum bnxt_ulp_intf_type intf_type;
1860*2d9fd380Sjfb8856606
1861*2d9fd380Sjfb8856606 /* Get the port id of the current device */
1862*2d9fd380Sjfb8856606 port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1863*2d9fd380Sjfb8856606
1864*2d9fd380Sjfb8856606 /* Get the port db ifindex */
1865*2d9fd380Sjfb8856606 if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1866*2d9fd380Sjfb8856606 &ifindex)) {
1867*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "Invalid port id\n");
1868*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
1869*2d9fd380Sjfb8856606 }
1870*2d9fd380Sjfb8856606
1871*2d9fd380Sjfb8856606 /* Check the port is PF port */
1872*2d9fd380Sjfb8856606 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1873*2d9fd380Sjfb8856606 if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1874*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1875*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
1876*2d9fd380Sjfb8856606 }
1877*2d9fd380Sjfb8856606 /* Update the action properties */
1878*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1879*2d9fd380Sjfb8856606 return ulp_rte_parser_act_port_set(params, ifindex);
1880*2d9fd380Sjfb8856606 }
1881*2d9fd380Sjfb8856606
1882*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow action VF. */
1883*2d9fd380Sjfb8856606 int32_t
ulp_rte_vf_act_handler(const struct rte_flow_action * action_item,struct ulp_rte_parser_params * params)1884*2d9fd380Sjfb8856606 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1885*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
1886*2d9fd380Sjfb8856606 {
1887*2d9fd380Sjfb8856606 const struct rte_flow_action_vf *vf_action;
1888*2d9fd380Sjfb8856606 uint32_t ifindex;
1889*2d9fd380Sjfb8856606 enum bnxt_ulp_intf_type intf_type;
1890*2d9fd380Sjfb8856606
1891*2d9fd380Sjfb8856606 vf_action = action_item->conf;
1892*2d9fd380Sjfb8856606 if (!vf_action) {
1893*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1894*2d9fd380Sjfb8856606 return BNXT_TF_RC_PARSE_ERR;
1895*2d9fd380Sjfb8856606 }
1896*2d9fd380Sjfb8856606
1897*2d9fd380Sjfb8856606 if (vf_action->original) {
1898*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1899*2d9fd380Sjfb8856606 return BNXT_TF_RC_PARSE_ERR;
1900*2d9fd380Sjfb8856606 }
1901*2d9fd380Sjfb8856606
1902*2d9fd380Sjfb8856606 /* Check the port is VF port */
1903*2d9fd380Sjfb8856606 if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1904*2d9fd380Sjfb8856606 &ifindex)) {
1905*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1906*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
1907*2d9fd380Sjfb8856606 }
1908*2d9fd380Sjfb8856606 intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1909*2d9fd380Sjfb8856606 if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1910*2d9fd380Sjfb8856606 intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1911*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1912*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
1913*2d9fd380Sjfb8856606 }
1914*2d9fd380Sjfb8856606
1915*2d9fd380Sjfb8856606 /* Update the action properties */
1916*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1917*2d9fd380Sjfb8856606 return ulp_rte_parser_act_port_set(params, ifindex);
1918*2d9fd380Sjfb8856606 }
1919*2d9fd380Sjfb8856606
1920*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow action port_id. */
1921*2d9fd380Sjfb8856606 int32_t
ulp_rte_port_id_act_handler(const struct rte_flow_action * act_item,struct ulp_rte_parser_params * param)1922*2d9fd380Sjfb8856606 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1923*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *param)
1924*2d9fd380Sjfb8856606 {
1925*2d9fd380Sjfb8856606 const struct rte_flow_action_port_id *port_id = act_item->conf;
1926*2d9fd380Sjfb8856606 uint32_t ifindex;
1927*2d9fd380Sjfb8856606 enum bnxt_ulp_intf_type intf_type;
1928*2d9fd380Sjfb8856606
1929*2d9fd380Sjfb8856606 if (!port_id) {
1930*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR,
1931*2d9fd380Sjfb8856606 "ParseErr: Invalid Argument\n");
1932*2d9fd380Sjfb8856606 return BNXT_TF_RC_PARSE_ERR;
1933*2d9fd380Sjfb8856606 }
1934*2d9fd380Sjfb8856606 if (port_id->original) {
1935*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR,
1936*2d9fd380Sjfb8856606 "ParseErr:Portid Original not supported\n");
1937*2d9fd380Sjfb8856606 return BNXT_TF_RC_PARSE_ERR;
1938*2d9fd380Sjfb8856606 }
1939*2d9fd380Sjfb8856606
1940*2d9fd380Sjfb8856606 /* Get the port db ifindex */
1941*2d9fd380Sjfb8856606 if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1942*2d9fd380Sjfb8856606 &ifindex)) {
1943*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "Invalid port id\n");
1944*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
1945*2d9fd380Sjfb8856606 }
1946*2d9fd380Sjfb8856606
1947*2d9fd380Sjfb8856606 /* Get the intf type */
1948*2d9fd380Sjfb8856606 intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1949*2d9fd380Sjfb8856606 if (!intf_type) {
1950*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "Invalid port type\n");
1951*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
1952*2d9fd380Sjfb8856606 }
1953*2d9fd380Sjfb8856606
1954*2d9fd380Sjfb8856606 /* Set the action port */
1955*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1956*2d9fd380Sjfb8856606 return ulp_rte_parser_act_port_set(param, ifindex);
1957*2d9fd380Sjfb8856606 }
1958*2d9fd380Sjfb8856606
1959*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow action phy_port. */
1960*2d9fd380Sjfb8856606 int32_t
ulp_rte_phy_port_act_handler(const struct rte_flow_action * action_item,struct ulp_rte_parser_params * prm)1961*2d9fd380Sjfb8856606 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1962*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *prm)
1963*2d9fd380Sjfb8856606 {
1964*2d9fd380Sjfb8856606 const struct rte_flow_action_phy_port *phy_port;
1965*2d9fd380Sjfb8856606 uint32_t pid;
1966*2d9fd380Sjfb8856606 int32_t rc;
1967*2d9fd380Sjfb8856606 uint16_t pid_s;
1968*2d9fd380Sjfb8856606 enum bnxt_ulp_direction_type dir;
1969*2d9fd380Sjfb8856606
1970*2d9fd380Sjfb8856606 phy_port = action_item->conf;
1971*2d9fd380Sjfb8856606 if (!phy_port) {
1972*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR,
1973*2d9fd380Sjfb8856606 "ParseErr: Invalid Argument\n");
1974*2d9fd380Sjfb8856606 return BNXT_TF_RC_PARSE_ERR;
1975*2d9fd380Sjfb8856606 }
1976*2d9fd380Sjfb8856606
1977*2d9fd380Sjfb8856606 if (phy_port->original) {
1978*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR,
1979*2d9fd380Sjfb8856606 "Parse Err:Port Original not supported\n");
1980*2d9fd380Sjfb8856606 return BNXT_TF_RC_PARSE_ERR;
1981*2d9fd380Sjfb8856606 }
1982*2d9fd380Sjfb8856606 dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
1983*2d9fd380Sjfb8856606 if (dir != BNXT_ULP_DIR_EGRESS) {
1984*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR,
1985*2d9fd380Sjfb8856606 "Parse Err:Phy ports are valid only for egress\n");
1986*2d9fd380Sjfb8856606 return BNXT_TF_RC_PARSE_ERR;
1987*2d9fd380Sjfb8856606 }
1988*2d9fd380Sjfb8856606 /* Get the physical port details from port db */
1989*2d9fd380Sjfb8856606 rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1990*2d9fd380Sjfb8856606 &pid_s);
1991*2d9fd380Sjfb8856606 if (rc) {
1992*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "Failed to get port details\n");
1993*2d9fd380Sjfb8856606 return -EINVAL;
1994*2d9fd380Sjfb8856606 }
1995*2d9fd380Sjfb8856606
1996*2d9fd380Sjfb8856606 pid = pid_s;
1997*2d9fd380Sjfb8856606 pid = rte_cpu_to_be_32(pid);
1998*2d9fd380Sjfb8856606 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1999*2d9fd380Sjfb8856606 &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2000*2d9fd380Sjfb8856606
2001*2d9fd380Sjfb8856606 /* Update the action port set bit */
2002*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2003*2d9fd380Sjfb8856606 ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
2004*2d9fd380Sjfb8856606 BNXT_ULP_INTF_TYPE_PHY_PORT);
2005*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
2006*2d9fd380Sjfb8856606 }
2007*2d9fd380Sjfb8856606
2008*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow action pop vlan. */
2009*2d9fd380Sjfb8856606 int32_t
ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action * a __rte_unused,struct ulp_rte_parser_params * params)2010*2d9fd380Sjfb8856606 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2011*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
2012*2d9fd380Sjfb8856606 {
2013*2d9fd380Sjfb8856606 /* Update the act_bitmap with pop */
2014*2d9fd380Sjfb8856606 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
2015*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
2016*2d9fd380Sjfb8856606 }
2017*2d9fd380Sjfb8856606
2018*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow action push vlan. */
2019*2d9fd380Sjfb8856606 int32_t
ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action * action_item,struct ulp_rte_parser_params * params)2020*2d9fd380Sjfb8856606 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2021*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
2022*2d9fd380Sjfb8856606 {
2023*2d9fd380Sjfb8856606 const struct rte_flow_action_of_push_vlan *push_vlan;
2024*2d9fd380Sjfb8856606 uint16_t ethertype;
2025*2d9fd380Sjfb8856606 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2026*2d9fd380Sjfb8856606
2027*2d9fd380Sjfb8856606 push_vlan = action_item->conf;
2028*2d9fd380Sjfb8856606 if (push_vlan) {
2029*2d9fd380Sjfb8856606 ethertype = push_vlan->ethertype;
2030*2d9fd380Sjfb8856606 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2031*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR,
2032*2d9fd380Sjfb8856606 "Parse Err: Ethertype not supported\n");
2033*2d9fd380Sjfb8856606 return BNXT_TF_RC_PARSE_ERR;
2034*2d9fd380Sjfb8856606 }
2035*2d9fd380Sjfb8856606 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2036*2d9fd380Sjfb8856606 ðertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2037*2d9fd380Sjfb8856606 /* Update the hdr_bitmap with push vlan */
2038*2d9fd380Sjfb8856606 ULP_BITMAP_SET(params->act_bitmap.bits,
2039*2d9fd380Sjfb8856606 BNXT_ULP_ACTION_BIT_PUSH_VLAN);
2040*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
2041*2d9fd380Sjfb8856606 }
2042*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2043*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
2044*2d9fd380Sjfb8856606 }
2045*2d9fd380Sjfb8856606
2046*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow action set vlan id. */
2047*2d9fd380Sjfb8856606 int32_t
ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action * action_item,struct ulp_rte_parser_params * params)2048*2d9fd380Sjfb8856606 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2049*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
2050*2d9fd380Sjfb8856606 {
2051*2d9fd380Sjfb8856606 const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2052*2d9fd380Sjfb8856606 uint32_t vid;
2053*2d9fd380Sjfb8856606 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2054*2d9fd380Sjfb8856606
2055*2d9fd380Sjfb8856606 vlan_vid = action_item->conf;
2056*2d9fd380Sjfb8856606 if (vlan_vid && vlan_vid->vlan_vid) {
2057*2d9fd380Sjfb8856606 vid = vlan_vid->vlan_vid;
2058*2d9fd380Sjfb8856606 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2059*2d9fd380Sjfb8856606 &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2060*2d9fd380Sjfb8856606 /* Update the hdr_bitmap with vlan vid */
2061*2d9fd380Sjfb8856606 ULP_BITMAP_SET(params->act_bitmap.bits,
2062*2d9fd380Sjfb8856606 BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
2063*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
2064*2d9fd380Sjfb8856606 }
2065*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2066*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
2067*2d9fd380Sjfb8856606 }
2068*2d9fd380Sjfb8856606
2069*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2070*2d9fd380Sjfb8856606 int32_t
ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action * action_item,struct ulp_rte_parser_params * params)2071*2d9fd380Sjfb8856606 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2072*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
2073*2d9fd380Sjfb8856606 {
2074*2d9fd380Sjfb8856606 const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2075*2d9fd380Sjfb8856606 uint8_t pcp;
2076*2d9fd380Sjfb8856606 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2077*2d9fd380Sjfb8856606
2078*2d9fd380Sjfb8856606 vlan_pcp = action_item->conf;
2079*2d9fd380Sjfb8856606 if (vlan_pcp) {
2080*2d9fd380Sjfb8856606 pcp = vlan_pcp->vlan_pcp;
2081*2d9fd380Sjfb8856606 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2082*2d9fd380Sjfb8856606 &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2083*2d9fd380Sjfb8856606 /* Update the hdr_bitmap with vlan vid */
2084*2d9fd380Sjfb8856606 ULP_BITMAP_SET(params->act_bitmap.bits,
2085*2d9fd380Sjfb8856606 BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
2086*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
2087*2d9fd380Sjfb8856606 }
2088*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2089*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
2090*2d9fd380Sjfb8856606 }
2091*2d9fd380Sjfb8856606
2092*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2093*2d9fd380Sjfb8856606 int32_t
ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action * action_item,struct ulp_rte_parser_params * params)2094*2d9fd380Sjfb8856606 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2095*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
2096*2d9fd380Sjfb8856606 {
2097*2d9fd380Sjfb8856606 const struct rte_flow_action_set_ipv4 *set_ipv4;
2098*2d9fd380Sjfb8856606 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2099*2d9fd380Sjfb8856606
2100*2d9fd380Sjfb8856606 set_ipv4 = action_item->conf;
2101*2d9fd380Sjfb8856606 if (set_ipv4) {
2102*2d9fd380Sjfb8856606 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2103*2d9fd380Sjfb8856606 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2104*2d9fd380Sjfb8856606 /* Update the hdr_bitmap with set ipv4 src */
2105*2d9fd380Sjfb8856606 ULP_BITMAP_SET(params->act_bitmap.bits,
2106*2d9fd380Sjfb8856606 BNXT_ULP_ACTION_BIT_SET_IPV4_SRC);
2107*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
2108*2d9fd380Sjfb8856606 }
2109*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2110*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
2111*2d9fd380Sjfb8856606 }
2112*2d9fd380Sjfb8856606
2113*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2114*2d9fd380Sjfb8856606 int32_t
ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action * action_item,struct ulp_rte_parser_params * params)2115*2d9fd380Sjfb8856606 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2116*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
2117*2d9fd380Sjfb8856606 {
2118*2d9fd380Sjfb8856606 const struct rte_flow_action_set_ipv4 *set_ipv4;
2119*2d9fd380Sjfb8856606 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2120*2d9fd380Sjfb8856606
2121*2d9fd380Sjfb8856606 set_ipv4 = action_item->conf;
2122*2d9fd380Sjfb8856606 if (set_ipv4) {
2123*2d9fd380Sjfb8856606 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2124*2d9fd380Sjfb8856606 &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2125*2d9fd380Sjfb8856606 /* Update the hdr_bitmap with set ipv4 dst */
2126*2d9fd380Sjfb8856606 ULP_BITMAP_SET(params->act_bitmap.bits,
2127*2d9fd380Sjfb8856606 BNXT_ULP_ACTION_BIT_SET_IPV4_DST);
2128*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
2129*2d9fd380Sjfb8856606 }
2130*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2131*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
2132*2d9fd380Sjfb8856606 }
2133*2d9fd380Sjfb8856606
2134*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow action set tp src.*/
2135*2d9fd380Sjfb8856606 int32_t
ulp_rte_set_tp_src_act_handler(const struct rte_flow_action * action_item,struct ulp_rte_parser_params * params)2136*2d9fd380Sjfb8856606 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2137*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
2138*2d9fd380Sjfb8856606 {
2139*2d9fd380Sjfb8856606 const struct rte_flow_action_set_tp *set_tp;
2140*2d9fd380Sjfb8856606 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2141*2d9fd380Sjfb8856606
2142*2d9fd380Sjfb8856606 set_tp = action_item->conf;
2143*2d9fd380Sjfb8856606 if (set_tp) {
2144*2d9fd380Sjfb8856606 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2145*2d9fd380Sjfb8856606 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2146*2d9fd380Sjfb8856606 /* Update the hdr_bitmap with set tp src */
2147*2d9fd380Sjfb8856606 ULP_BITMAP_SET(params->act_bitmap.bits,
2148*2d9fd380Sjfb8856606 BNXT_ULP_ACTION_BIT_SET_TP_SRC);
2149*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
2150*2d9fd380Sjfb8856606 }
2151*2d9fd380Sjfb8856606
2152*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2153*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
2154*2d9fd380Sjfb8856606 }
2155*2d9fd380Sjfb8856606
2156*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2157*2d9fd380Sjfb8856606 int32_t
ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action * action_item,struct ulp_rte_parser_params * params)2158*2d9fd380Sjfb8856606 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2159*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
2160*2d9fd380Sjfb8856606 {
2161*2d9fd380Sjfb8856606 const struct rte_flow_action_set_tp *set_tp;
2162*2d9fd380Sjfb8856606 struct ulp_rte_act_prop *act = ¶ms->act_prop;
2163*2d9fd380Sjfb8856606
2164*2d9fd380Sjfb8856606 set_tp = action_item->conf;
2165*2d9fd380Sjfb8856606 if (set_tp) {
2166*2d9fd380Sjfb8856606 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2167*2d9fd380Sjfb8856606 &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2168*2d9fd380Sjfb8856606 /* Update the hdr_bitmap with set tp dst */
2169*2d9fd380Sjfb8856606 ULP_BITMAP_SET(params->act_bitmap.bits,
2170*2d9fd380Sjfb8856606 BNXT_ULP_ACTION_BIT_SET_TP_DST);
2171*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
2172*2d9fd380Sjfb8856606 }
2173*2d9fd380Sjfb8856606
2174*2d9fd380Sjfb8856606 BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2175*2d9fd380Sjfb8856606 return BNXT_TF_RC_ERROR;
2176*2d9fd380Sjfb8856606 }
2177*2d9fd380Sjfb8856606
2178*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2179*2d9fd380Sjfb8856606 int32_t
ulp_rte_dec_ttl_act_handler(const struct rte_flow_action * act __rte_unused,struct ulp_rte_parser_params * params)2180*2d9fd380Sjfb8856606 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2181*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
2182*2d9fd380Sjfb8856606 {
2183*2d9fd380Sjfb8856606 /* Update the act_bitmap with dec ttl */
2184*2d9fd380Sjfb8856606 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DEC_TTL);
2185*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
2186*2d9fd380Sjfb8856606 }
2187*2d9fd380Sjfb8856606
2188*2d9fd380Sjfb8856606 /* Function to handle the parsing of RTE Flow action JUMP */
2189*2d9fd380Sjfb8856606 int32_t
ulp_rte_jump_act_handler(const struct rte_flow_action * action_item __rte_unused,struct ulp_rte_parser_params * params)2190*2d9fd380Sjfb8856606 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2191*2d9fd380Sjfb8856606 struct ulp_rte_parser_params *params)
2192*2d9fd380Sjfb8856606 {
2193*2d9fd380Sjfb8856606 /* Update the act_bitmap with dec ttl */
2194*2d9fd380Sjfb8856606 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_JUMP);
2195*2d9fd380Sjfb8856606 return BNXT_TF_RC_SUCCESS;
2196*2d9fd380Sjfb8856606 }
2197