xref: /f-stack/dpdk/drivers/net/mlx5/mlx5_flow_dv.c (revision 2d9fd380)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4 
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10 
11 #include <rte_common.h>
12 #include <rte_ether.h>
13 #include <rte_ethdev_driver.h>
14 #include <rte_flow.h>
15 #include <rte_flow_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_cycles.h>
18 #include <rte_ip.h>
19 #include <rte_gre.h>
20 #include <rte_vxlan.h>
21 #include <rte_gtp.h>
22 #include <rte_eal_paging.h>
23 #include <rte_mpls.h>
24 
25 #include <mlx5_glue.h>
26 #include <mlx5_devx_cmds.h>
27 #include <mlx5_prm.h>
28 #include <mlx5_malloc.h>
29 
30 #include "mlx5_defs.h"
31 #include "mlx5.h"
32 #include "mlx5_common_os.h"
33 #include "mlx5_flow.h"
34 #include "mlx5_flow_os.h"
35 #include "mlx5_rxtx.h"
36 #include "rte_pmd_mlx5.h"
37 
38 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
39 
40 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
41 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
42 #endif
43 
44 #ifndef HAVE_MLX5DV_DR_ESWITCH
45 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
46 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
47 #endif
48 #endif
49 
50 #ifndef HAVE_MLX5DV_DR
51 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
52 #endif
53 
54 /* VLAN header definitions */
55 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
56 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
57 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
58 #define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
59 #define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
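/*
 * Illustrative note (editorial, not from the original source): for a
 * host-order TCI of 0xa005, PCP = (tci & MLX5DV_FLOW_VLAN_PCP_MASK) >>
 * MLX5DV_FLOW_VLAN_PCP_SHIFT = 5 and VID = tci & MLX5DV_FLOW_VLAN_VID_MASK
 * = 0x005; the _BE variants are the same masks in network byte order.
 */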
60 
61 union flow_dv_attr {
62 	struct {
63 		uint32_t valid:1;
64 		uint32_t ipv4:1;
65 		uint32_t ipv6:1;
66 		uint32_t tcp:1;
67 		uint32_t udp:1;
68 		uint32_t reserved:27;
69 	};
70 	uint32_t attr;
71 };
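/*
 * Editorial note: overlaying the bit-field struct with the plain uint32_t
 * 'attr' lets the code below clear every flag in a single assignment
 * (attr->attr = 0), e.g. when a tunnel item invalidates the outer-layer
 * attributes collected so far.
 */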
72 
73 static int
74 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
75 			     struct mlx5_flow_tbl_resource *tbl);
76 
77 static int
78 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
79 				      uint32_t encap_decap_idx);
80 
81 static int
82 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
83 					uint32_t port_id);
84 static void
85 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);
86 
87 /**
88  * Initialize flow attributes structure according to flow items' types.
89  *
90  * flow_dv_validate() avoids multiple L3/L4 layer cases other than in tunnel
91  * mode. In tunnel mode, the items to be modified are the outermost ones.
92  *
93  * @param[in] item
94  *   Pointer to item specification.
95  * @param[out] attr
96  *   Pointer to flow attributes structure.
97  * @param[in] dev_flow
98  *   Pointer to the sub flow.
99  * @param[in] tunnel_decap
100  *   Whether action is after tunnel decapsulation.
101  */
102 static void
103 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
104 		  struct mlx5_flow *dev_flow, bool tunnel_decap)
105 {
106 	uint64_t layers = dev_flow->handle->layers;
107 
108 	/*
109 	 * If layers is already initialized, this dev_flow is the suffix
110 	 * flow and the layer flags were set by the prefix flow. Use the
111 	 * prefix flow's layer flags, since the suffix flow may not carry
112 	 * the user-defined items after the flow is split.
113 	 */
114 	if (layers) {
115 		if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
116 			attr->ipv4 = 1;
117 		else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
118 			attr->ipv6 = 1;
119 		if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
120 			attr->tcp = 1;
121 		else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
122 			attr->udp = 1;
123 		attr->valid = 1;
124 		return;
125 	}
126 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
127 		uint8_t next_protocol = 0xff;
128 		switch (item->type) {
129 		case RTE_FLOW_ITEM_TYPE_GRE:
130 		case RTE_FLOW_ITEM_TYPE_NVGRE:
131 		case RTE_FLOW_ITEM_TYPE_VXLAN:
132 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
133 		case RTE_FLOW_ITEM_TYPE_GENEVE:
134 		case RTE_FLOW_ITEM_TYPE_MPLS:
135 			if (tunnel_decap)
136 				attr->attr = 0;
137 			break;
138 		case RTE_FLOW_ITEM_TYPE_IPV4:
139 			if (!attr->ipv6)
140 				attr->ipv4 = 1;
141 			if (item->mask != NULL &&
142 			    ((const struct rte_flow_item_ipv4 *)
143 			    item->mask)->hdr.next_proto_id)
144 				next_protocol =
145 				    ((const struct rte_flow_item_ipv4 *)
146 				      (item->spec))->hdr.next_proto_id &
147 				    ((const struct rte_flow_item_ipv4 *)
148 				      (item->mask))->hdr.next_proto_id;
149 			if ((next_protocol == IPPROTO_IPIP ||
150 			    next_protocol == IPPROTO_IPV6) && tunnel_decap)
151 				attr->attr = 0;
152 			break;
153 		case RTE_FLOW_ITEM_TYPE_IPV6:
154 			if (!attr->ipv4)
155 				attr->ipv6 = 1;
156 			if (item->mask != NULL &&
157 			    ((const struct rte_flow_item_ipv6 *)
158 			    item->mask)->hdr.proto)
159 				next_protocol =
160 				    ((const struct rte_flow_item_ipv6 *)
161 				      (item->spec))->hdr.proto &
162 				    ((const struct rte_flow_item_ipv6 *)
163 				      (item->mask))->hdr.proto;
164 			if ((next_protocol == IPPROTO_IPIP ||
165 			    next_protocol == IPPROTO_IPV6) && tunnel_decap)
166 				attr->attr = 0;
167 			break;
168 		case RTE_FLOW_ITEM_TYPE_UDP:
169 			if (!attr->tcp)
170 				attr->udp = 1;
171 			break;
172 		case RTE_FLOW_ITEM_TYPE_TCP:
173 			if (!attr->udp)
174 				attr->tcp = 1;
175 			break;
176 		default:
177 			break;
178 		}
179 	}
180 	attr->valid = 1;
181 }
182 
183 /**
184  * Convert rte_mtr_color to mlx5 color.
185  *
186  * @param[in] rcol
187  *   rte_mtr_color.
188  *
189  * @return
190  *   mlx5 color.
191  */
192 static int
193 rte_col_2_mlx5_col(enum rte_color rcol)
194 {
195 	switch (rcol) {
196 	case RTE_COLOR_GREEN:
197 		return MLX5_FLOW_COLOR_GREEN;
198 	case RTE_COLOR_YELLOW:
199 		return MLX5_FLOW_COLOR_YELLOW;
200 	case RTE_COLOR_RED:
201 		return MLX5_FLOW_COLOR_RED;
202 	default:
203 		break;
204 	}
205 	return MLX5_FLOW_COLOR_UNDEFINED;
206 }
207 
208 struct field_modify_info {
209 	uint32_t size; /* Size of field in protocol header, in bytes. */
210 	uint32_t offset; /* Offset of field in protocol header, in bytes. */
211 	enum mlx5_modification_field id;
212 };
213 
214 struct field_modify_info modify_eth[] = {
215 	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
216 	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
217 	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
218 	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
219 	{0, 0, 0},
220 };
221 
222 struct field_modify_info modify_vlan_out_first_vid[] = {
223 	/* Size in bits !!! */
224 	{12, 0, MLX5_MODI_OUT_FIRST_VID},
225 	{0, 0, 0},
226 };
227 
228 struct field_modify_info modify_ipv4[] = {
229 	{1,  1, MLX5_MODI_OUT_IP_DSCP},
230 	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
231 	{4, 12, MLX5_MODI_OUT_SIPV4},
232 	{4, 16, MLX5_MODI_OUT_DIPV4},
233 	{0, 0, 0},
234 };
235 
236 struct field_modify_info modify_ipv6[] = {
237 	{1,  0, MLX5_MODI_OUT_IP_DSCP},
238 	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
239 	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
240 	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
241 	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
242 	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
243 	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
244 	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
245 	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
246 	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
247 	{0, 0, 0},
248 };
249 
250 struct field_modify_info modify_udp[] = {
251 	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
252 	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
253 	{0, 0, 0},
254 };
255 
256 struct field_modify_info modify_tcp[] = {
257 	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
258 	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
259 	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
260 	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
261 	{0, 0, 0},
262 };
263 
264 static void
265 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
266 			  uint8_t next_protocol, uint64_t *item_flags,
267 			  int *tunnel)
268 {
269 	MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
270 		    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
271 	if (next_protocol == IPPROTO_IPIP) {
272 		*item_flags |= MLX5_FLOW_LAYER_IPIP;
273 		*tunnel = 1;
274 	}
275 	if (next_protocol == IPPROTO_IPV6) {
276 		*item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
277 		*tunnel = 1;
278 	}
279 }
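/*
 * Editorial note: next_protocol is the IP protocol number from the outer
 * header, so IPPROTO_IPIP (4) flags IPv4-in-IP and IPPROTO_IPV6 (41) flags
 * IPv6-in-IP encapsulation; both mark the flow as tunneled.
 */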
280 
281 /* Update VLAN's VID/PCP based on input rte_flow_action.
282  *
283  * @param[in] action
284  *   Pointer to struct rte_flow_action.
285  * @param[out] vlan
286  *   Pointer to struct rte_vlan_hdr.
287  */
288 static void
289 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
290 			 struct rte_vlan_hdr *vlan)
291 {
292 	uint16_t vlan_tci;
293 	if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
294 		vlan_tci =
295 		    ((const struct rte_flow_action_of_set_vlan_pcp *)
296 					       action->conf)->vlan_pcp;
297 		vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
298 		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
299 		vlan->vlan_tci |= vlan_tci;
300 	} else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
301 		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
302 		vlan->vlan_tci |= rte_be_to_cpu_16
303 		    (((const struct rte_flow_action_of_set_vlan_vid *)
304 					     action->conf)->vlan_vid);
305 	}
306 }
307 
308 /**
309  * Fetch a 1, 2, 3 or 4 byte field from the byte array
310  * and return it as an unsigned integer in host-endian format.
311  *
312  * @param[in] data
313  *   Pointer to data array.
314  * @param[in] size
315  *   Size of field to extract.
316  *
317  * @return
318  *   Converted field in host-endian format.
319  */
320 static inline uint32_t
321 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
322 {
323 	uint32_t ret;
324 
325 	switch (size) {
326 	case 1:
327 		ret = *data;
328 		break;
329 	case 2:
330 		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
331 		break;
332 	case 3:
333 		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
334 		ret = (ret << 8) | *(data + sizeof(uint16_t));
335 		break;
336 	case 4:
337 		ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
338 		break;
339 	default:
340 		MLX5_ASSERT(false);
341 		ret = 0;
342 		break;
343 	}
344 	return ret;
345 }
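/*
 * Worked example (editorial): for data = {0x12, 0x34, 0x56} and size = 3,
 * the unaligned 16-bit load yields 0x1234 after rte_be_to_cpu_16(), and the
 * result is (0x1234 << 8) | 0x56 == 0x123456 on any host byte order.
 */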
346 
347 /**
348  * Convert modify-header action to DV specification.
349  *
350  * The data length of each action is determined by the provided field
351  * description and the item mask. The data bit offset and width of each
352  * action are determined by the provided item mask.
353  *
354  * @param[in] item
355  *   Pointer to item specification.
356  * @param[in] field
357  *   Pointer to field modification information.
358  *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
359  *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
360  *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
361  * @param[in] dcopy
362  *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
363  *   A negative offset value sets the same offset as the source offset.
364  *   The size field is ignored; the value is taken from the source field.
365  * @param[in,out] resource
366  *   Pointer to the modify-header resource.
367  * @param[in] type
368  *   Type of modification.
369  * @param[out] error
370  *   Pointer to the error structure.
371  *
372  * @return
373  *   0 on success, a negative errno value otherwise and rte_errno is set.
374  */
375 static int
376 flow_dv_convert_modify_action(struct rte_flow_item *item,
377 			      struct field_modify_info *field,
378 			      struct field_modify_info *dcopy,
379 			      struct mlx5_flow_dv_modify_hdr_resource *resource,
380 			      uint32_t type, struct rte_flow_error *error)
381 {
382 	uint32_t i = resource->actions_num;
383 	struct mlx5_modification_cmd *actions = resource->actions;
384 
385 	/*
386 	 * The item and mask are provided in big-endian format.
387 	 * The fields should be presented in big-endian format as well.
388 	 * The mask must always be present; it defines the actual field width.
389 	 */
390 	MLX5_ASSERT(item->mask);
391 	MLX5_ASSERT(field->size);
392 	do {
393 		unsigned int size_b;
394 		unsigned int off_b;
395 		uint32_t mask;
396 		uint32_t data;
397 
398 		if (i >= MLX5_MAX_MODIFY_NUM)
399 			return rte_flow_error_set(error, EINVAL,
400 				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
401 				 "too many items to modify");
402 		/* Fetch variable byte size mask from the array. */
403 		mask = flow_dv_fetch_field((const uint8_t *)item->mask +
404 					   field->offset, field->size);
405 		if (!mask) {
406 			++field;
407 			continue;
408 		}
409 		/* Deduce actual data width in bits from mask value. */
410 		off_b = rte_bsf32(mask);
411 		size_b = sizeof(uint32_t) * CHAR_BIT -
412 			 off_b - __builtin_clz(mask);
413 		MLX5_ASSERT(size_b);
414 		size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
415 		actions[i] = (struct mlx5_modification_cmd) {
416 			.action_type = type,
417 			.field = field->id,
418 			.offset = off_b,
419 			.length = size_b,
420 		};
421 		/* Convert entire record to expected big-endian format. */
422 		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
423 		if (type == MLX5_MODIFICATION_TYPE_COPY) {
424 			MLX5_ASSERT(dcopy);
425 			actions[i].dst_field = dcopy->id;
426 			actions[i].dst_offset =
427 				(int)dcopy->offset < 0 ? off_b : dcopy->offset;
428 			/* Convert entire record to big-endian format. */
429 			actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
430 		} else {
431 			MLX5_ASSERT(item->spec);
432 			data = flow_dv_fetch_field((const uint8_t *)item->spec +
433 						   field->offset, field->size);
434 			/* Shift out the trailing masked bits from data. */
435 			data = (data & mask) >> off_b;
436 			actions[i].data1 = rte_cpu_to_be_32(data);
437 		}
438 		++i;
439 		++field;
440 	} while (field->size);
441 	if (resource->actions_num == i)
442 		return rte_flow_error_set(error, EINVAL,
443 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
444 					  "invalid modification flow item");
445 	resource->actions_num = i;
446 	return 0;
447 }
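/*
 * Worked example (editorial): a 32-bit field whose fetched mask is
 * 0x00ffff00 gives off_b = rte_bsf32(0x00ffff00) = 8 and size_b = 32 - 8 -
 * __builtin_clz(0x00ffff00) = 16, i.e. one SET/ADD/COPY command rewriting
 * 16 bits starting at bit offset 8 of the hardware field.
 */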
448 
449 /**
450  * Convert modify-header set IPv4 address action to DV specification.
451  *
452  * @param[in,out] resource
453  *   Pointer to the modify-header resource.
454  * @param[in] action
455  *   Pointer to action specification.
456  * @param[out] error
457  *   Pointer to the error structure.
458  *
459  * @return
460  *   0 on success, a negative errno value otherwise and rte_errno is set.
461  */
462 static int
463 flow_dv_convert_action_modify_ipv4
464 			(struct mlx5_flow_dv_modify_hdr_resource *resource,
465 			 const struct rte_flow_action *action,
466 			 struct rte_flow_error *error)
467 {
468 	const struct rte_flow_action_set_ipv4 *conf =
469 		(const struct rte_flow_action_set_ipv4 *)(action->conf);
470 	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
471 	struct rte_flow_item_ipv4 ipv4;
472 	struct rte_flow_item_ipv4 ipv4_mask;
473 
474 	memset(&ipv4, 0, sizeof(ipv4));
475 	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
476 	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
477 		ipv4.hdr.src_addr = conf->ipv4_addr;
478 		ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
479 	} else {
480 		ipv4.hdr.dst_addr = conf->ipv4_addr;
481 		ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
482 	}
483 	item.spec = &ipv4;
484 	item.mask = &ipv4_mask;
485 	return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
486 					     MLX5_MODIFICATION_TYPE_SET, error);
487 }
488 
489 /**
490  * Convert modify-header set IPv6 address action to DV specification.
491  *
492  * @param[in,out] resource
493  *   Pointer to the modify-header resource.
494  * @param[in] action
495  *   Pointer to action specification.
496  * @param[out] error
497  *   Pointer to the error structure.
498  *
499  * @return
500  *   0 on success, a negative errno value otherwise and rte_errno is set.
501  */
502 static int
503 flow_dv_convert_action_modify_ipv6
504 			(struct mlx5_flow_dv_modify_hdr_resource *resource,
505 			 const struct rte_flow_action *action,
506 			 struct rte_flow_error *error)
507 {
508 	const struct rte_flow_action_set_ipv6 *conf =
509 		(const struct rte_flow_action_set_ipv6 *)(action->conf);
510 	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
511 	struct rte_flow_item_ipv6 ipv6;
512 	struct rte_flow_item_ipv6 ipv6_mask;
513 
514 	memset(&ipv6, 0, sizeof(ipv6));
515 	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
516 	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
517 		memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
518 		       sizeof(ipv6.hdr.src_addr));
519 		memcpy(&ipv6_mask.hdr.src_addr,
520 		       &rte_flow_item_ipv6_mask.hdr.src_addr,
521 		       sizeof(ipv6.hdr.src_addr));
522 	} else {
523 		memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
524 		       sizeof(ipv6.hdr.dst_addr));
525 		memcpy(&ipv6_mask.hdr.dst_addr,
526 		       &rte_flow_item_ipv6_mask.hdr.dst_addr,
527 		       sizeof(ipv6.hdr.dst_addr));
528 	}
529 	item.spec = &ipv6;
530 	item.mask = &ipv6_mask;
531 	return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
532 					     MLX5_MODIFICATION_TYPE_SET, error);
533 }
534 
535 /**
536  * Convert modify-header set MAC address action to DV specification.
537  *
538  * @param[in,out] resource
539  *   Pointer to the modify-header resource.
540  * @param[in] action
541  *   Pointer to action specification.
542  * @param[out] error
543  *   Pointer to the error structure.
544  *
545  * @return
546  *   0 on success, a negative errno value otherwise and rte_errno is set.
547  */
548 static int
549 flow_dv_convert_action_modify_mac
550 			(struct mlx5_flow_dv_modify_hdr_resource *resource,
551 			 const struct rte_flow_action *action,
552 			 struct rte_flow_error *error)
553 {
554 	const struct rte_flow_action_set_mac *conf =
555 		(const struct rte_flow_action_set_mac *)(action->conf);
556 	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
557 	struct rte_flow_item_eth eth;
558 	struct rte_flow_item_eth eth_mask;
559 
560 	memset(&eth, 0, sizeof(eth));
561 	memset(&eth_mask, 0, sizeof(eth_mask));
562 	if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
563 		memcpy(&eth.src.addr_bytes, &conf->mac_addr,
564 		       sizeof(eth.src.addr_bytes));
565 		memcpy(&eth_mask.src.addr_bytes,
566 		       &rte_flow_item_eth_mask.src.addr_bytes,
567 		       sizeof(eth_mask.src.addr_bytes));
568 	} else {
569 		memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
570 		       sizeof(eth.dst.addr_bytes));
571 		memcpy(&eth_mask.dst.addr_bytes,
572 		       &rte_flow_item_eth_mask.dst.addr_bytes,
573 		       sizeof(eth_mask.dst.addr_bytes));
574 	}
575 	item.spec = &eth;
576 	item.mask = &eth_mask;
577 	return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
578 					     MLX5_MODIFICATION_TYPE_SET, error);
579 }
580 
581 /**
582  * Convert modify-header set VLAN VID action to DV specification.
583  *
584  * @param[in,out] resource
585  *   Pointer to the modify-header resource.
586  * @param[in] action
587  *   Pointer to action specification.
588  * @param[out] error
589  *   Pointer to the error structure.
590  *
591  * @return
592  *   0 on success, a negative errno value otherwise and rte_errno is set.
593  */
594 static int
595 flow_dv_convert_action_modify_vlan_vid
596 			(struct mlx5_flow_dv_modify_hdr_resource *resource,
597 			 const struct rte_flow_action *action,
598 			 struct rte_flow_error *error)
599 {
600 	const struct rte_flow_action_of_set_vlan_vid *conf =
601 		(const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
602 	int i = resource->actions_num;
603 	struct mlx5_modification_cmd *actions = resource->actions;
604 	struct field_modify_info *field = modify_vlan_out_first_vid;
605 
606 	if (i >= MLX5_MAX_MODIFY_NUM)
607 		return rte_flow_error_set(error, EINVAL,
608 			 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
609 			 "too many items to modify");
610 	actions[i] = (struct mlx5_modification_cmd) {
611 		.action_type = MLX5_MODIFICATION_TYPE_SET,
612 		.field = field->id,
613 		.length = field->size,
614 		.offset = field->offset,
615 	};
616 	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
617 	actions[i].data1 = conf->vlan_vid;
618 	actions[i].data1 = actions[i].data1 << 16;
619 	resource->actions_num = ++i;
620 	return 0;
621 }
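/*
 * Editorial note: conf->vlan_vid is an rte_be16_t, so data1 is filled with
 * the big-endian VID shifted into the upper 16 bits of the data word; unlike
 * the generic path above, no extra rte_cpu_to_be_32() is applied to data1.
 */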
622 
623 /**
624  * Convert modify-header set TP action to DV specification.
625  *
626  * @param[in,out] resource
627  *   Pointer to the modify-header resource.
628  * @param[in] action
629  *   Pointer to action specification.
630  * @param[in] items
631  *   Pointer to rte_flow_item objects list.
632  * @param[in] attr
633  *   Pointer to flow attributes structure.
634  * @param[in] dev_flow
635  *   Pointer to the sub flow.
636  * @param[in] tunnel_decap
637  *   Whether action is after tunnel decapsulation.
638  * @param[out] error
639  *   Pointer to the error structure.
640  *
641  * @return
642  *   0 on success, a negative errno value otherwise and rte_errno is set.
643  */
644 static int
645 flow_dv_convert_action_modify_tp
646 			(struct mlx5_flow_dv_modify_hdr_resource *resource,
647 			 const struct rte_flow_action *action,
648 			 const struct rte_flow_item *items,
649 			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
650 			 bool tunnel_decap, struct rte_flow_error *error)
651 {
652 	const struct rte_flow_action_set_tp *conf =
653 		(const struct rte_flow_action_set_tp *)(action->conf);
654 	struct rte_flow_item item;
655 	struct rte_flow_item_udp udp;
656 	struct rte_flow_item_udp udp_mask;
657 	struct rte_flow_item_tcp tcp;
658 	struct rte_flow_item_tcp tcp_mask;
659 	struct field_modify_info *field;
660 
661 	if (!attr->valid)
662 		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
663 	if (attr->udp) {
664 		memset(&udp, 0, sizeof(udp));
665 		memset(&udp_mask, 0, sizeof(udp_mask));
666 		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
667 			udp.hdr.src_port = conf->port;
668 			udp_mask.hdr.src_port =
669 					rte_flow_item_udp_mask.hdr.src_port;
670 		} else {
671 			udp.hdr.dst_port = conf->port;
672 			udp_mask.hdr.dst_port =
673 					rte_flow_item_udp_mask.hdr.dst_port;
674 		}
675 		item.type = RTE_FLOW_ITEM_TYPE_UDP;
676 		item.spec = &udp;
677 		item.mask = &udp_mask;
678 		field = modify_udp;
679 	} else {
680 		MLX5_ASSERT(attr->tcp);
681 		memset(&tcp, 0, sizeof(tcp));
682 		memset(&tcp_mask, 0, sizeof(tcp_mask));
683 		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
684 			tcp.hdr.src_port = conf->port;
685 			tcp_mask.hdr.src_port =
686 					rte_flow_item_tcp_mask.hdr.src_port;
687 		} else {
688 			tcp.hdr.dst_port = conf->port;
689 			tcp_mask.hdr.dst_port =
690 					rte_flow_item_tcp_mask.hdr.dst_port;
691 		}
692 		item.type = RTE_FLOW_ITEM_TYPE_TCP;
693 		item.spec = &tcp;
694 		item.mask = &tcp_mask;
695 		field = modify_tcp;
696 	}
697 	return flow_dv_convert_modify_action(&item, field, NULL, resource,
698 					     MLX5_MODIFICATION_TYPE_SET, error);
699 }
700 
701 /**
702  * Convert modify-header set TTL action to DV specification.
703  *
704  * @param[in,out] resource
705  *   Pointer to the modify-header resource.
706  * @param[in] action
707  *   Pointer to action specification.
708  * @param[in] items
709  *   Pointer to rte_flow_item objects list.
710  * @param[in] attr
711  *   Pointer to flow attributes structure.
712  * @param[in] dev_flow
713  *   Pointer to the sub flow.
714  * @param[in] tunnel_decap
715  *   Whether action is after tunnel decapsulation.
716  * @param[out] error
717  *   Pointer to the error structure.
718  *
719  * @return
720  *   0 on success, a negative errno value otherwise and rte_errno is set.
721  */
722 static int
723 flow_dv_convert_action_modify_ttl
724 			(struct mlx5_flow_dv_modify_hdr_resource *resource,
725 			 const struct rte_flow_action *action,
726 			 const struct rte_flow_item *items,
727 			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
728 			 bool tunnel_decap, struct rte_flow_error *error)
729 {
730 	const struct rte_flow_action_set_ttl *conf =
731 		(const struct rte_flow_action_set_ttl *)(action->conf);
732 	struct rte_flow_item item;
733 	struct rte_flow_item_ipv4 ipv4;
734 	struct rte_flow_item_ipv4 ipv4_mask;
735 	struct rte_flow_item_ipv6 ipv6;
736 	struct rte_flow_item_ipv6 ipv6_mask;
737 	struct field_modify_info *field;
738 
739 	if (!attr->valid)
740 		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
741 	if (attr->ipv4) {
742 		memset(&ipv4, 0, sizeof(ipv4));
743 		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
744 		ipv4.hdr.time_to_live = conf->ttl_value;
745 		ipv4_mask.hdr.time_to_live = 0xFF;
746 		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
747 		item.spec = &ipv4;
748 		item.mask = &ipv4_mask;
749 		field = modify_ipv4;
750 	} else {
751 		MLX5_ASSERT(attr->ipv6);
752 		memset(&ipv6, 0, sizeof(ipv6));
753 		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
754 		ipv6.hdr.hop_limits = conf->ttl_value;
755 		ipv6_mask.hdr.hop_limits = 0xFF;
756 		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
757 		item.spec = &ipv6;
758 		item.mask = &ipv6_mask;
759 		field = modify_ipv6;
760 	}
761 	return flow_dv_convert_modify_action(&item, field, NULL, resource,
762 					     MLX5_MODIFICATION_TYPE_SET, error);
763 }
764 
765 /**
766  * Convert modify-header decrement TTL action to DV specification.
767  *
768  * @param[in,out] resource
769  *   Pointer to the modify-header resource.
770  * @param[in] action
771  *   Pointer to action specification.
772  * @param[in] items
773  *   Pointer to rte_flow_item objects list.
774  * @param[in] attr
775  *   Pointer to flow attributes structure.
776  * @param[in] dev_flow
777  *   Pointer to the sub flow.
778  * @param[in] tunnel_decap
779  *   Whether action is after tunnel decapsulation.
780  * @param[out] error
781  *   Pointer to the error structure.
782  *
783  * @return
784  *   0 on success, a negative errno value otherwise and rte_errno is set.
785  */
786 static int
787 flow_dv_convert_action_modify_dec_ttl
788 			(struct mlx5_flow_dv_modify_hdr_resource *resource,
789 			 const struct rte_flow_item *items,
790 			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
791 			 bool tunnel_decap, struct rte_flow_error *error)
792 {
793 	struct rte_flow_item item;
794 	struct rte_flow_item_ipv4 ipv4;
795 	struct rte_flow_item_ipv4 ipv4_mask;
796 	struct rte_flow_item_ipv6 ipv6;
797 	struct rte_flow_item_ipv6 ipv6_mask;
798 	struct field_modify_info *field;
799 
800 	if (!attr->valid)
801 		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
802 	if (attr->ipv4) {
803 		memset(&ipv4, 0, sizeof(ipv4));
804 		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
805 		ipv4.hdr.time_to_live = 0xFF;
806 		ipv4_mask.hdr.time_to_live = 0xFF;
807 		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
808 		item.spec = &ipv4;
809 		item.mask = &ipv4_mask;
810 		field = modify_ipv4;
811 	} else {
812 		MLX5_ASSERT(attr->ipv6);
813 		memset(&ipv6, 0, sizeof(ipv6));
814 		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
815 		ipv6.hdr.hop_limits = 0xFF;
816 		ipv6_mask.hdr.hop_limits = 0xFF;
817 		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
818 		item.spec = &ipv6;
819 		item.mask = &ipv6_mask;
820 		field = modify_ipv6;
821 	}
822 	return flow_dv_convert_modify_action(&item, field, NULL, resource,
823 					     MLX5_MODIFICATION_TYPE_ADD, error);
824 }
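/*
 * Editorial note: decrement is emulated with MLX5_MODIFICATION_TYPE_ADD:
 * adding 0xFF to the 8-bit TTL/hop-limit field is, modulo 256, the same as
 * subtracting 1.
 */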
825 
826 /**
827  * Convert modify-header increment/decrement TCP Sequence number
828  * to DV specification.
829  *
830  * @param[in,out] resource
831  *   Pointer to the modify-header resource.
832  * @param[in] action
833  *   Pointer to action specification.
834  * @param[out] error
835  *   Pointer to the error structure.
836  *
837  * @return
838  *   0 on success, a negative errno value otherwise and rte_errno is set.
839  */
840 static int
841 flow_dv_convert_action_modify_tcp_seq
842 			(struct mlx5_flow_dv_modify_hdr_resource *resource,
843 			 const struct rte_flow_action *action,
844 			 struct rte_flow_error *error)
845 {
846 	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
847 	uint64_t value = rte_be_to_cpu_32(*conf);
848 	struct rte_flow_item item;
849 	struct rte_flow_item_tcp tcp;
850 	struct rte_flow_item_tcp tcp_mask;
851 
852 	memset(&tcp, 0, sizeof(tcp));
853 	memset(&tcp_mask, 0, sizeof(tcp_mask));
854 	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
855 		/*
856 		 * The HW has no decrement operation, only increment.
857 		 * To simulate subtracting X from Y using increments,
858 		 * add UINT32_MAX X times to Y: modulo 2^32, each
859 		 * addition of UINT32_MAX decrements Y by 1.
860 		 */
861 		value *= UINT32_MAX;
862 	tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
863 	tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
864 	item.type = RTE_FLOW_ITEM_TYPE_TCP;
865 	item.spec = &tcp;
866 	item.mask = &tcp_mask;
867 	return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
868 					     MLX5_MODIFICATION_TYPE_ADD, error);
869 }
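/*
 * Worked example (editorial): to decrement the sequence number by 3, value
 * becomes 3 * UINT32_MAX == 0x2fffffffd, which truncates to 0xfffffffd in
 * the 32-bit field, and Y + 0xfffffffd == Y - 3 (mod 2^32).
 */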
870 
871 /**
872  * Convert modify-header increment/decrement TCP Acknowledgment number
873  * to DV specification.
874  *
875  * @param[in,out] resource
876  *   Pointer to the modify-header resource.
877  * @param[in] action
878  *   Pointer to action specification.
879  * @param[out] error
880  *   Pointer to the error structure.
881  *
882  * @return
883  *   0 on success, a negative errno value otherwise and rte_errno is set.
884  */
885 static int
886 flow_dv_convert_action_modify_tcp_ack
887 			(struct mlx5_flow_dv_modify_hdr_resource *resource,
888 			 const struct rte_flow_action *action,
889 			 struct rte_flow_error *error)
890 {
891 	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
892 	uint64_t value = rte_be_to_cpu_32(*conf);
893 	struct rte_flow_item item;
894 	struct rte_flow_item_tcp tcp;
895 	struct rte_flow_item_tcp tcp_mask;
896 
897 	memset(&tcp, 0, sizeof(tcp));
898 	memset(&tcp_mask, 0, sizeof(tcp_mask));
899 	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
900 		/*
901 		 * The HW has no decrement operation, only increment.
902 		 * To simulate subtracting X from Y using increments,
903 		 * add UINT32_MAX X times to Y: modulo 2^32, each
904 		 * addition of UINT32_MAX decrements Y by 1.
905 		 */
906 		value *= UINT32_MAX;
907 	tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
908 	tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
909 	item.type = RTE_FLOW_ITEM_TYPE_TCP;
910 	item.spec = &tcp;
911 	item.mask = &tcp_mask;
912 	return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
913 					     MLX5_MODIFICATION_TYPE_ADD, error);
914 }
915 
916 static enum mlx5_modification_field reg_to_field[] = {
917 	[REG_NON] = MLX5_MODI_OUT_NONE,
918 	[REG_A] = MLX5_MODI_META_DATA_REG_A,
919 	[REG_B] = MLX5_MODI_META_DATA_REG_B,
920 	[REG_C_0] = MLX5_MODI_META_REG_C_0,
921 	[REG_C_1] = MLX5_MODI_META_REG_C_1,
922 	[REG_C_2] = MLX5_MODI_META_REG_C_2,
923 	[REG_C_3] = MLX5_MODI_META_REG_C_3,
924 	[REG_C_4] = MLX5_MODI_META_REG_C_4,
925 	[REG_C_5] = MLX5_MODI_META_REG_C_5,
926 	[REG_C_6] = MLX5_MODI_META_REG_C_6,
927 	[REG_C_7] = MLX5_MODI_META_REG_C_7,
928 };
929 
930 /**
931  * Convert register set to DV specification.
932  *
933  * @param[in,out] resource
934  *   Pointer to the modify-header resource.
935  * @param[in] action
936  *   Pointer to action specification.
937  * @param[out] error
938  *   Pointer to the error structure.
939  *
940  * @return
941  *   0 on success, a negative errno value otherwise and rte_errno is set.
942  */
943 static int
944 flow_dv_convert_action_set_reg
945 			(struct mlx5_flow_dv_modify_hdr_resource *resource,
946 			 const struct rte_flow_action *action,
947 			 struct rte_flow_error *error)
948 {
949 	const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
950 	struct mlx5_modification_cmd *actions = resource->actions;
951 	uint32_t i = resource->actions_num;
952 
953 	if (i >= MLX5_MAX_MODIFY_NUM)
954 		return rte_flow_error_set(error, EINVAL,
955 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
956 					  "too many items to modify");
957 	MLX5_ASSERT(conf->id != REG_NON);
958 	MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
959 	actions[i] = (struct mlx5_modification_cmd) {
960 		.action_type = MLX5_MODIFICATION_TYPE_SET,
961 		.field = reg_to_field[conf->id],
962 	};
963 	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
964 	actions[i].data1 = rte_cpu_to_be_32(conf->data);
965 	++i;
966 	resource->actions_num = i;
967 	return 0;
968 }
969 
970 /**
971  * Convert SET_TAG action to DV specification.
972  *
973  * @param[in] dev
974  *   Pointer to the rte_eth_dev structure.
975  * @param[in,out] resource
976  *   Pointer to the modify-header resource.
977  * @param[in] conf
978  *   Pointer to action specification.
979  * @param[out] error
980  *   Pointer to the error structure.
981  *
982  * @return
983  *   0 on success, a negative errno value otherwise and rte_errno is set.
984  */
985 static int
986 flow_dv_convert_action_set_tag
987 			(struct rte_eth_dev *dev,
988 			 struct mlx5_flow_dv_modify_hdr_resource *resource,
989 			 const struct rte_flow_action_set_tag *conf,
990 			 struct rte_flow_error *error)
991 {
992 	rte_be32_t data = rte_cpu_to_be_32(conf->data);
993 	rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
994 	struct rte_flow_item item = {
995 		.spec = &data,
996 		.mask = &mask,
997 	};
998 	struct field_modify_info reg_c_x[] = {
999 		[1] = {0, 0, 0},
1000 	};
1001 	enum mlx5_modification_field reg_type;
1002 	int ret;
1003 
1004 	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
1005 	if (ret < 0)
1006 		return ret;
1007 	MLX5_ASSERT(ret != REG_NON);
1008 	MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
1009 	reg_type = reg_to_field[ret];
1010 	MLX5_ASSERT(reg_type > 0);
1011 	reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
1012 	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1013 					     MLX5_MODIFICATION_TYPE_SET, error);
1014 }
1015 
1016 /**
1017  * Convert internal COPY_REG action to DV specification.
1018  *
1019  * @param[in] dev
1020  *   Pointer to the rte_eth_dev structure.
1021  * @param[in,out] res
1022  *   Pointer to the modify-header resource.
1023  * @param[in] action
1024  *   Pointer to action specification.
1025  * @param[out] error
1026  *   Pointer to the error structure.
1027  *
1028  * @return
1029  *   0 on success, a negative errno value otherwise and rte_errno is set.
1030  */
1031 static int
1032 flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
1033 				 struct mlx5_flow_dv_modify_hdr_resource *res,
1034 				 const struct rte_flow_action *action,
1035 				 struct rte_flow_error *error)
1036 {
1037 	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
1038 	rte_be32_t mask = RTE_BE32(UINT32_MAX);
1039 	struct rte_flow_item item = {
1040 		.spec = NULL,
1041 		.mask = &mask,
1042 	};
1043 	struct field_modify_info reg_src[] = {
1044 		{4, 0, reg_to_field[conf->src]},
1045 		{0, 0, 0},
1046 	};
1047 	struct field_modify_info reg_dst = {
1048 		.offset = 0,
1049 		.id = reg_to_field[conf->dst],
1050 	};
1051 	/* Adjust reg_c[0] usage according to reported mask. */
1052 	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
1053 		struct mlx5_priv *priv = dev->data->dev_private;
1054 		uint32_t reg_c0 = priv->sh->dv_regc0_mask;
1055 
1056 		MLX5_ASSERT(reg_c0);
1057 		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
1058 		if (conf->dst == REG_C_0) {
1059 			/* Copy to reg_c[0], within mask only. */
1060 			reg_dst.offset = rte_bsf32(reg_c0);
1061 			/*
1062 			 * The mask ignores endianness, because
1063 			 * there is no conversion in the datapath.
1064 			 */
1065 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1066 			/* Copy from destination lower bits to reg_c[0]. */
1067 			mask = reg_c0 >> reg_dst.offset;
1068 #else
1069 			/* Copy from destination upper bits to reg_c[0]. */
1070 			mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
1071 					  rte_fls_u32(reg_c0));
1072 #endif
1073 		} else {
1074 			mask = rte_cpu_to_be_32(reg_c0);
1075 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1076 			/* Copy from reg_c[0] to destination lower bits. */
1077 			reg_dst.offset = 0;
1078 #else
1079 			/* Copy from reg_c[0] to destination upper bits. */
1080 			reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
1081 					 (rte_fls_u32(reg_c0) -
1082 					  rte_bsf32(reg_c0));
1083 #endif
1084 		}
1085 	}
1086 	return flow_dv_convert_modify_action(&item,
1087 					     reg_src, &reg_dst, res,
1088 					     MLX5_MODIFICATION_TYPE_COPY,
1089 					     error);
1090 }
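/*
 * Worked example (editorial, little-endian host): with dv_regc0_mask ==
 * 0x00ff0000, copying into REG_C_0 uses reg_dst.offset = rte_bsf32(mask)
 * = 16 and mask = 0x00ff0000 << (32 - rte_fls_u32(0x00ff0000)) = 0xff000000,
 * so only the 8 writable bits of reg_c[0] are touched.
 */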
1091 
1092 /**
1093  * Convert MARK action to DV specification. This routine is used
1094  * in extensive metadata only and requires metadata register to be
1095  * handled. In legacy mode hardware tag resource is engaged.
1096  *
1097  * @param[in] dev
1098  *   Pointer to the rte_eth_dev structure.
1099  * @param[in] conf
1100  *   Pointer to MARK action specification.
1101  * @param[in,out] resource
1102  *   Pointer to the modify-header resource.
1103  * @param[out] error
1104  *   Pointer to the error structure.
1105  *
1106  * @return
1107  *   0 on success, a negative errno value otherwise and rte_errno is set.
1108  */
1109 static int
1110 flow_dv_convert_action_mark(struct rte_eth_dev *dev,
1111 			    const struct rte_flow_action_mark *conf,
1112 			    struct mlx5_flow_dv_modify_hdr_resource *resource,
1113 			    struct rte_flow_error *error)
1114 {
1115 	struct mlx5_priv *priv = dev->data->dev_private;
1116 	rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
1117 					   priv->sh->dv_mark_mask);
1118 	rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
1119 	struct rte_flow_item item = {
1120 		.spec = &data,
1121 		.mask = &mask,
1122 	};
1123 	struct field_modify_info reg_c_x[] = {
1124 		[1] = {0, 0, 0},
1125 	};
1126 	int reg;
1127 
1128 	if (!mask)
1129 		return rte_flow_error_set(error, EINVAL,
1130 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1131 					  NULL, "zero mark action mask");
1132 	reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1133 	if (reg < 0)
1134 		return reg;
1135 	MLX5_ASSERT(reg > 0);
1136 	if (reg == REG_C_0) {
1137 		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1138 		uint32_t shl_c0 = rte_bsf32(msk_c0);
1139 
1140 		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
1141 		mask = rte_cpu_to_be_32(mask) & msk_c0;
1142 		mask = rte_cpu_to_be_32(mask << shl_c0);
1143 	}
1144 	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
1145 	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1146 					     MLX5_MODIFICATION_TYPE_SET, error);
1147 }
1148 
1149 /**
1150  * Get metadata register index for specified steering domain.
1151  *
1152  * @param[in] dev
1153  *   Pointer to the rte_eth_dev structure.
1154  * @param[in] attr
1155  *   Attributes of flow to determine steering domain.
1156  * @param[out] error
1157  *   Pointer to the error structure.
1158  *
1159  * @return
1160  *   positive index on success, a negative errno value otherwise
1161  *   and rte_errno is set.
1162  */
1163 static enum modify_reg
1164 flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1165 			 const struct rte_flow_attr *attr,
1166 			 struct rte_flow_error *error)
1167 {
1168 	int reg =
1169 		mlx5_flow_get_reg_id(dev, attr->transfer ?
1170 					  MLX5_METADATA_FDB :
1171 					    attr->egress ?
1172 					    MLX5_METADATA_TX :
1173 					    MLX5_METADATA_RX, 0, error);
1174 	if (reg < 0)
1175 		return rte_flow_error_set(error,
1176 					  ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1177 					  NULL, "unavailable "
1178 					  "metadata register");
1179 	return reg;
1180 }
1181 
1182 /**
1183  * Convert SET_META action to DV specification.
1184  *
1185  * @param[in] dev
1186  *   Pointer to the rte_eth_dev structure.
1187  * @param[in,out] resource
1188  *   Pointer to the modify-header resource.
1189  * @param[in] attr
1190  *   Attributes of flow that includes this item.
1191  * @param[in] conf
1192  *   Pointer to action specification.
1193  * @param[out] error
1194  *   Pointer to the error structure.
1195  *
1196  * @return
1197  *   0 on success, a negative errno value otherwise and rte_errno is set.
1198  */
1199 static int
1200 flow_dv_convert_action_set_meta
1201 			(struct rte_eth_dev *dev,
1202 			 struct mlx5_flow_dv_modify_hdr_resource *resource,
1203 			 const struct rte_flow_attr *attr,
1204 			 const struct rte_flow_action_set_meta *conf,
1205 			 struct rte_flow_error *error)
1206 {
1207 	uint32_t data = conf->data;
1208 	uint32_t mask = conf->mask;
1209 	struct rte_flow_item item = {
1210 		.spec = &data,
1211 		.mask = &mask,
1212 	};
1213 	struct field_modify_info reg_c_x[] = {
1214 		[1] = {0, 0, 0},
1215 	};
1216 	int reg = flow_dv_get_metadata_reg(dev, attr, error);
1217 
1218 	if (reg < 0)
1219 		return reg;
1220 	MLX5_ASSERT(reg != REG_NON);
1221 	/*
1222 	 * In the datapath code there are no endianness
1223 	 * conversions, for performance reasons; all
1224 	 * pattern conversions are done in rte_flow.
1225 	 */
1226 	if (reg == REG_C_0) {
1227 		struct mlx5_priv *priv = dev->data->dev_private;
1228 		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1229 		uint32_t shl_c0;
1230 
1231 		MLX5_ASSERT(msk_c0);
1232 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1233 		shl_c0 = rte_bsf32(msk_c0);
1234 #else
1235 		shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
1236 #endif
1237 		mask <<= shl_c0;
1238 		data <<= shl_c0;
1239 		MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
1240 	}
1241 	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
1242 	/* The routine expects parameters in memory as big-endian ones. */
1243 	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1244 					     MLX5_MODIFICATION_TYPE_SET, error);
1245 }
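/*
 * Worked example (editorial, little-endian host): with dv_regc0_mask ==
 * 0x0000ffff, shl_c0 = 32 - rte_fls_u32(0x0000ffff) = 16, so data and mask
 * are shifted into the upper 16 bits before the big-endian conversion done
 * inside flow_dv_convert_modify_action().
 */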
1246 
1247 /**
1248  * Convert modify-header set IPv4 DSCP action to DV specification.
1249  *
1250  * @param[in,out] resource
1251  *   Pointer to the modify-header resource.
1252  * @param[in] action
1253  *   Pointer to action specification.
1254  * @param[out] error
1255  *   Pointer to the error structure.
1256  *
1257  * @return
1258  *   0 on success, a negative errno value otherwise and rte_errno is set.
1259  */
1260 static int
1261 flow_dv_convert_action_modify_ipv4_dscp
1262 			(struct mlx5_flow_dv_modify_hdr_resource *resource,
1263 			 const struct rte_flow_action *action,
1264 			 struct rte_flow_error *error)
1265 {
1266 	const struct rte_flow_action_set_dscp *conf =
1267 		(const struct rte_flow_action_set_dscp *)(action->conf);
1268 	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1269 	struct rte_flow_item_ipv4 ipv4;
1270 	struct rte_flow_item_ipv4 ipv4_mask;
1271 
1272 	memset(&ipv4, 0, sizeof(ipv4));
1273 	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1274 	ipv4.hdr.type_of_service = conf->dscp;
1275 	ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1276 	item.spec = &ipv4;
1277 	item.mask = &ipv4_mask;
1278 	return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1279 					     MLX5_MODIFICATION_TYPE_SET, error);
1280 }
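/*
 * Editorial note: assuming RTE_IPV4_HDR_DSCP_MASK == 0xfc, the mask above is
 * 0xfc >> 2 == 0x3f, matching the 6-bit DSCP value carried in conf->dscp
 * (DSCP occupies the upper six bits of the IPv4 TOS byte).
 */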
1281 
1282 /**
1283  * Convert modify-header set IPv6 DSCP action to DV specification.
1284  *
1285  * @param[in,out] resource
1286  *   Pointer to the modify-header resource.
1287  * @param[in] action
1288  *   Pointer to action specification.
1289  * @param[out] error
1290  *   Pointer to the error structure.
1291  *
1292  * @return
1293  *   0 on success, a negative errno value otherwise and rte_errno is set.
1294  */
1295 static int
1296 flow_dv_convert_action_modify_ipv6_dscp
1297 			(struct mlx5_flow_dv_modify_hdr_resource *resource,
1298 			 const struct rte_flow_action *action,
1299 			 struct rte_flow_error *error)
1300 {
1301 	const struct rte_flow_action_set_dscp *conf =
1302 		(const struct rte_flow_action_set_dscp *)(action->conf);
1303 	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1304 	struct rte_flow_item_ipv6 ipv6;
1305 	struct rte_flow_item_ipv6 ipv6_mask;
1306 
1307 	memset(&ipv6, 0, sizeof(ipv6));
1308 	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1309 	/*
1310 	 * Even though the DSCP bit offset in IPv6 is not byte aligned,
1311 	 * rdma-core only accepts the DSCP bits byte aligned, in bits 0 to 5,
1312 	 * to be compatible with IPv4. No shifting of the bits is needed in
1313 	 * the IPv6 case, as rdma-core requires a byte-aligned value.
1314 	 */
1315 	ipv6.hdr.vtc_flow = conf->dscp;
1316 	ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1317 	item.spec = &ipv6;
1318 	item.mask = &ipv6_mask;
1319 	return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1320 					     MLX5_MODIFICATION_TYPE_SET, error);
1321 }
1322 
1323 /**
1324  * Validate MARK item.
1325  *
1326  * @param[in] dev
1327  *   Pointer to the rte_eth_dev structure.
1328  * @param[in] item
1329  *   Item specification.
1330  * @param[in] attr
1331  *   Attributes of flow that includes this item.
1332  * @param[out] error
1333  *   Pointer to error structure.
1334  *
1335  * @return
1336  *   0 on success, a negative errno value otherwise and rte_errno is set.
1337  */
1338 static int
1339 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1340 			   const struct rte_flow_item *item,
1341 			   const struct rte_flow_attr *attr __rte_unused,
1342 			   struct rte_flow_error *error)
1343 {
1344 	struct mlx5_priv *priv = dev->data->dev_private;
1345 	struct mlx5_dev_config *config = &priv->config;
1346 	const struct rte_flow_item_mark *spec = item->spec;
1347 	const struct rte_flow_item_mark *mask = item->mask;
1348 	const struct rte_flow_item_mark nic_mask = {
1349 		.id = priv->sh->dv_mark_mask,
1350 	};
1351 	int ret;
1352 
1353 	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1354 		return rte_flow_error_set(error, ENOTSUP,
1355 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1356 					  "extended metadata feature"
1357 					  " isn't enabled");
1358 	if (!mlx5_flow_ext_mreg_supported(dev))
1359 		return rte_flow_error_set(error, ENOTSUP,
1360 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1361 					  "extended metadata register"
1362 					  " isn't supported");
1363 	if (!nic_mask.id)
1364 		return rte_flow_error_set(error, ENOTSUP,
1365 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1366 					  "extended metadata register"
1367 					  " isn't available");
1368 	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1369 	if (ret < 0)
1370 		return ret;
1371 	if (!spec)
1372 		return rte_flow_error_set(error, EINVAL,
1373 					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1374 					  item->spec,
1375 					  "data cannot be empty");
1376 	if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1377 		return rte_flow_error_set(error, EINVAL,
1378 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1379 					  &spec->id,
1380 					  "mark id exceeds the limit");
1381 	if (!mask)
1382 		mask = &nic_mask;
1383 	if (!mask->id)
1384 		return rte_flow_error_set(error, EINVAL,
1385 					RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1386 					"mask cannot be zero");
1387 
1388 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1389 					(const uint8_t *)&nic_mask,
1390 					sizeof(struct rte_flow_item_mark),
1391 					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1392 	if (ret < 0)
1393 		return ret;
1394 	return 0;
1395 }
1396 
1397 /**
1398  * Validate META item.
1399  *
1400  * @param[in] dev
1401  *   Pointer to the rte_eth_dev structure.
1402  * @param[in] item
1403  *   Item specification.
1404  * @param[in] attr
1405  *   Attributes of flow that includes this item.
1406  * @param[out] error
1407  *   Pointer to error structure.
1408  *
1409  * @return
1410  *   0 on success, a negative errno value otherwise and rte_errno is set.
1411  */
1412 static int
1413 flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
1414 			   const struct rte_flow_item *item,
1415 			   const struct rte_flow_attr *attr,
1416 			   struct rte_flow_error *error)
1417 {
1418 	struct mlx5_priv *priv = dev->data->dev_private;
1419 	struct mlx5_dev_config *config = &priv->config;
1420 	const struct rte_flow_item_meta *spec = item->spec;
1421 	const struct rte_flow_item_meta *mask = item->mask;
1422 	struct rte_flow_item_meta nic_mask = {
1423 		.data = UINT32_MAX
1424 	};
1425 	int reg;
1426 	int ret;
1427 
1428 	if (!spec)
1429 		return rte_flow_error_set(error, EINVAL,
1430 					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1431 					  item->spec,
1432 					  "data cannot be empty");
1433 	if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1434 		if (!mlx5_flow_ext_mreg_supported(dev))
1435 			return rte_flow_error_set(error, ENOTSUP,
1436 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1437 					  "extended metadata register"
1438 					  " isn't supported");
1439 		reg = flow_dv_get_metadata_reg(dev, attr, error);
1440 		if (reg < 0)
1441 			return reg;
1442 		if (reg == REG_NON)
1443 			return rte_flow_error_set(error, ENOTSUP,
1444 					RTE_FLOW_ERROR_TYPE_ITEM, item,
1445 					"unavailable extended metadata register");
1446 		if (reg == REG_B)
1447 			return rte_flow_error_set(error, ENOTSUP,
1448 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1449 					  "match on reg_b "
1450 					  "isn't supported");
1451 		if (reg != REG_A)
1452 			nic_mask.data = priv->sh->dv_meta_mask;
1453 	} else if (attr->transfer) {
1454 		return rte_flow_error_set(error, ENOTSUP,
1455 					RTE_FLOW_ERROR_TYPE_ITEM, item,
1456 					"extended metadata feature "
1457 					"should be enabled when "
1458 					"meta item is requested "
1459 					"with e-switch mode ");
1460 	}
1461 	if (!mask)
1462 		mask = &rte_flow_item_meta_mask;
1463 	if (!mask->data)
1464 		return rte_flow_error_set(error, EINVAL,
1465 					RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1466 					"mask cannot be zero");
1467 
1468 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1469 					(const uint8_t *)&nic_mask,
1470 					sizeof(struct rte_flow_item_meta),
1471 					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1472 	return ret;
1473 }
1474 
1475 /**
1476  * Validate TAG item.
1477  *
1478  * @param[in] dev
1479  *   Pointer to the rte_eth_dev structure.
1480  * @param[in] item
1481  *   Item specification.
1482  * @param[in] attr
1483  *   Attributes of flow that includes this item.
1484  * @param[out] error
1485  *   Pointer to error structure.
1486  *
1487  * @return
1488  *   0 on success, a negative errno value otherwise and rte_errno is set.
1489  */
1490 static int
1491 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
1492 			  const struct rte_flow_item *item,
1493 			  const struct rte_flow_attr *attr __rte_unused,
1494 			  struct rte_flow_error *error)
1495 {
1496 	const struct rte_flow_item_tag *spec = item->spec;
1497 	const struct rte_flow_item_tag *mask = item->mask;
1498 	const struct rte_flow_item_tag nic_mask = {
1499 		.data = RTE_BE32(UINT32_MAX),
1500 		.index = 0xff,
1501 	};
1502 	int ret;
1503 
1504 	if (!mlx5_flow_ext_mreg_supported(dev))
1505 		return rte_flow_error_set(error, ENOTSUP,
1506 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1507 					  "extensive metadata register"
1508 					  " isn't supported");
1509 	if (!spec)
1510 		return rte_flow_error_set(error, EINVAL,
1511 					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1512 					  item->spec,
1513 					  "data cannot be empty");
1514 	if (!mask)
1515 		mask = &rte_flow_item_tag_mask;
1516 	if (!mask->data)
1517 		return rte_flow_error_set(error, EINVAL,
1518 					RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1519 					"mask cannot be zero");
1520 
1521 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1522 					(const uint8_t *)&nic_mask,
1523 					sizeof(struct rte_flow_item_tag),
1524 					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1525 	if (ret < 0)
1526 		return ret;
1527 	if (mask->index != 0xff)
1528 		return rte_flow_error_set(error, EINVAL,
1529 					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1530 					  "partial mask for tag index"
1531 					  " is not supported");
1532 	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
1533 	if (ret < 0)
1534 		return ret;
1535 	MLX5_ASSERT(ret != REG_NON);
1536 	return 0;
1537 }
1538 
1539 /**
1540  * Validate vport item.
1541  *
1542  * @param[in] dev
1543  *   Pointer to the rte_eth_dev structure.
1544  * @param[in] item
1545  *   Item specification.
1546  * @param[in] attr
1547  *   Attributes of flow that includes this item.
1548  * @param[in] item_flags
1549  *   Bit-fields that holds the items detected until now.
1550  * @param[out] error
1551  *   Pointer to error structure.
1552  *
1553  * @return
1554  *   0 on success, a negative errno value otherwise and rte_errno is set.
1555  */
1556 static int
1557 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
1558 			      const struct rte_flow_item *item,
1559 			      const struct rte_flow_attr *attr,
1560 			      uint64_t item_flags,
1561 			      struct rte_flow_error *error)
1562 {
1563 	const struct rte_flow_item_port_id *spec = item->spec;
1564 	const struct rte_flow_item_port_id *mask = item->mask;
1565 	const struct rte_flow_item_port_id switch_mask = {
1566 			.id = 0xffffffff,
1567 	};
1568 	struct mlx5_priv *esw_priv;
1569 	struct mlx5_priv *dev_priv;
1570 	int ret;
1571 
1572 	if (!attr->transfer)
1573 		return rte_flow_error_set(error, EINVAL,
1574 					  RTE_FLOW_ERROR_TYPE_ITEM,
1575 					  NULL,
1576 					  "match on port id is valid only"
1577 					  " when transfer flag is enabled");
1578 	if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
1579 		return rte_flow_error_set(error, ENOTSUP,
1580 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1581 					  "multiple source ports are not"
1582 					  " supported");
1583 	if (!mask)
1584 		mask = &switch_mask;
1585 	if (mask->id != 0xffffffff)
1586 		return rte_flow_error_set(error, ENOTSUP,
1587 					   RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1588 					   mask,
1589 					   "no support for partial mask on"
1590 					   " \"id\" field");
1591 	ret = mlx5_flow_item_acceptable
1592 				(item, (const uint8_t *)mask,
1593 				 (const uint8_t *)&rte_flow_item_port_id_mask,
1594 				 sizeof(struct rte_flow_item_port_id),
1595 				 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1596 	if (ret)
1597 		return ret;
1598 	if (!spec)
1599 		return 0;
1600 	esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
1601 	if (!esw_priv)
1602 		return rte_flow_error_set(error, rte_errno,
1603 					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1604 					  "failed to obtain E-Switch info for"
1605 					  " port");
1606 	dev_priv = mlx5_dev_to_eswitch_info(dev);
1607 	if (!dev_priv)
1608 		return rte_flow_error_set(error, rte_errno,
1609 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1610 					  NULL,
1611 					  "failed to obtain E-Switch info");
1612 	if (esw_priv->domain_id != dev_priv->domain_id)
1613 		return rte_flow_error_set(error, EINVAL,
1614 					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1615 					  "cannot match on a port from a"
1616 					  " different E-Switch");
1617 	return 0;
1618 }
1619 
1620 /**
1621  * Validate VLAN item.
1622  *
1623  * @param[in] item
1624  *   Item specification.
1625  * @param[in] item_flags
1626  *   Bit-fields that holds the items detected until now.
1627  * @param[in] dev
1628  *   Ethernet device flow is being created on.
1629  * @param[out] error
1630  *   Pointer to error structure.
1631  *
1632  * @return
1633  *   0 on success, a negative errno value otherwise and rte_errno is set.
1634  */
1635 static int
1636 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
1637 			   uint64_t item_flags,
1638 			   struct rte_eth_dev *dev,
1639 			   struct rte_flow_error *error)
1640 {
1641 	const struct rte_flow_item_vlan *mask = item->mask;
1642 	const struct rte_flow_item_vlan nic_mask = {
1643 		.tci = RTE_BE16(UINT16_MAX),
1644 		.inner_type = RTE_BE16(UINT16_MAX),
1645 		.has_more_vlan = 1,
1646 	};
1647 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1648 	int ret;
1649 	const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1650 					MLX5_FLOW_LAYER_INNER_L4) :
1651 				       (MLX5_FLOW_LAYER_OUTER_L3 |
1652 					MLX5_FLOW_LAYER_OUTER_L4);
1653 	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1654 					MLX5_FLOW_LAYER_OUTER_VLAN;
1655 
1656 	if (item_flags & vlanm)
1657 		return rte_flow_error_set(error, EINVAL,
1658 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1659 					  "multiple VLAN layers not supported");
1660 	else if ((item_flags & l34m) != 0)
1661 		return rte_flow_error_set(error, EINVAL,
1662 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1663 					  "VLAN cannot follow L3/L4 layer");
1664 	if (!mask)
1665 		mask = &rte_flow_item_vlan_mask;
1666 	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1667 					(const uint8_t *)&nic_mask,
1668 					sizeof(struct rte_flow_item_vlan),
1669 					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1670 	if (ret)
1671 		return ret;
1672 	if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
1673 		struct mlx5_priv *priv = dev->data->dev_private;
1674 
1675 		if (priv->vmwa_context) {
1676 			/*
1677 			 * Non-NULL context means we have a virtual machine
1678 			 * and SR-IOV enabled, we have to create VLAN interface
1679 			 * to make hypervisor to setup E-Switch vport
1680 			 * context correctly. We avoid creating the multiple
1681 			 * VLAN interfaces, so we cannot support VLAN tag mask.
1682 			 */
1683 			return rte_flow_error_set(error, EINVAL,
1684 						  RTE_FLOW_ERROR_TYPE_ITEM,
1685 						  item,
1686 						  "VLAN tag mask is not"
1687 						  " supported in virtual"
1688 						  " environment");
1689 		}
1690 	}
1691 	return 0;
1692 }
1693 
1694 /*
1695  * GTP flags are contained in 1 byte, with the following format:
1696  * -------------------------------------------
1697  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
1698  * |-----------------------------------------|
1699  * | value | Version | PT | Res | E | S | PN |
1700  * -------------------------------------------
1701  *
1702  * Matching is supported only for GTP flags E, S, PN.
1703  */
1704 #define MLX5_GTP_FLAGS_MASK	0x07
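
/*
 * Editor's sketch (guarded out, illustrative only): given the layout
 * above, a spec may only match the E, S and PN bits; the validation in
 * flow_dv_validate_item_gtp() below rejects anything else. The guard
 * macro and helper name are hypothetical, not part of the driver.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static inline bool
mlx5_example_gtp_flags_matchable(uint8_t v_pt_rsv_flags)
{
	/* 0x06 (E|S) -> true; 0x32 (version=1, PT=1, S=1) -> false. */
	return !(v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK);
}
#endif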
1705 
1706 /**
1707  * Validate GTP item.
1708  *
1709  * @param[in] dev
1710  *   Pointer to the rte_eth_dev structure.
1711  * @param[in] item
1712  *   Item specification.
1713  * @param[in] item_flags
1714  *   Bit-fields that holds the items detected until now.
1715  * @param[out] error
1716  *   Pointer to error structure.
1717  *
1718  * @return
1719  *   0 on success, a negative errno value otherwise and rte_errno is set.
1720  */
1721 static int
1722 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
1723 			  const struct rte_flow_item *item,
1724 			  uint64_t item_flags,
1725 			  struct rte_flow_error *error)
1726 {
1727 	struct mlx5_priv *priv = dev->data->dev_private;
1728 	const struct rte_flow_item_gtp *spec = item->spec;
1729 	const struct rte_flow_item_gtp *mask = item->mask;
1730 	const struct rte_flow_item_gtp nic_mask = {
1731 		.v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
1732 		.msg_type = 0xff,
1733 		.teid = RTE_BE32(0xffffffff),
1734 	};
1735 
1736 	if (!priv->config.hca_attr.tunnel_stateless_gtp)
1737 		return rte_flow_error_set(error, ENOTSUP,
1738 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1739 					  "GTP support is not enabled");
1740 	if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1741 		return rte_flow_error_set(error, ENOTSUP,
1742 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1743 					  "multiple tunnel layers not"
1744 					  " supported");
1745 	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1746 		return rte_flow_error_set(error, EINVAL,
1747 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1748 					  "no outer UDP layer found");
1749 	if (!mask)
1750 		mask = &rte_flow_item_gtp_mask;
1751 	if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
1752 		return rte_flow_error_set(error, ENOTSUP,
1753 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1754 					  "Match is supported for GTP"
1755 					  " flags only");
1756 	return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1757 					 (const uint8_t *)&nic_mask,
1758 					 sizeof(struct rte_flow_item_gtp),
1759 					 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1760 }
1761 
1762 /**
1763  * Validate IPV4 item.
1764  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
1765  * add specific validation of fragment_offset field,
1766  *
1767  * @param[in] item
1768  *   Item specification.
1769  * @param[in] item_flags
1770  *   Bit-fields that holds the items detected until now.
1771  * @param[out] error
1772  *   Pointer to error structure.
1773  *
1774  * @return
1775  *   0 on success, a negative errno value otherwise and rte_errno is set.
1776  */
1777 static int
1778 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
1779 			   uint64_t item_flags,
1780 			   uint64_t last_item,
1781 			   uint16_t ether_type,
1782 			   struct rte_flow_error *error)
1783 {
1784 	int ret;
1785 	const struct rte_flow_item_ipv4 *spec = item->spec;
1786 	const struct rte_flow_item_ipv4 *last = item->last;
1787 	const struct rte_flow_item_ipv4 *mask = item->mask;
1788 	rte_be16_t fragment_offset_spec = 0;
1789 	rte_be16_t fragment_offset_last = 0;
1790 	const struct rte_flow_item_ipv4 nic_ipv4_mask = {
1791 		.hdr = {
1792 			.src_addr = RTE_BE32(0xffffffff),
1793 			.dst_addr = RTE_BE32(0xffffffff),
1794 			.type_of_service = 0xff,
1795 			.fragment_offset = RTE_BE16(0xffff),
1796 			.next_proto_id = 0xff,
1797 			.time_to_live = 0xff,
1798 		},
1799 	};
1800 
1801 	ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
1802 					   ether_type, &nic_ipv4_mask,
1803 					   MLX5_ITEM_RANGE_ACCEPTED, error);
1804 	if (ret < 0)
1805 		return ret;
1806 	if (spec && mask)
1807 		fragment_offset_spec = spec->hdr.fragment_offset &
1808 				       mask->hdr.fragment_offset;
1809 	if (!fragment_offset_spec)
1810 		return 0;
1811 	/*
1812 	 * spec and mask are valid, enforce using full mask to make sure the
1813 	 * complete value is used correctly.
1814 	 */
1815 	if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1816 			!= RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1817 		return rte_flow_error_set(error, EINVAL,
1818 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1819 					  item, "must use full mask for"
1820 					  " fragment_offset");
1821 	/*
1822 	 * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
1823 	 * indicating this is the first fragment of a fragmented packet.
1824 	 * This is not yet supported in MLX5, return appropriate error message.
1825 	 */
1826 	if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
1827 		return rte_flow_error_set(error, ENOTSUP,
1828 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1829 					  "match on first fragment not "
1830 					  "supported");
1831 	if (fragment_offset_spec && !last)
1832 		return rte_flow_error_set(error, ENOTSUP,
1833 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1834 					  "specified value not supported");
1835 	/* spec and last are valid, validate the specified range. */
1836 	fragment_offset_last = last->hdr.fragment_offset &
1837 			       mask->hdr.fragment_offset;
1838 	/*
1839 	 * Match on fragment_offset spec 0x2001 and last 0x3fff
1840 	 * means MF is 1 and frag-offset is > 0.
1841 	 * The packet is a 2nd or later fragment, excluding the last one.
1842 	 * This is not yet supported in MLX5, return appropriate
1843 	 * error message.
1844 	 */
1845 	if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
1846 	    fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1847 		return rte_flow_error_set(error, ENOTSUP,
1848 					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1849 					  last, "match on following "
1850 					  "fragments not supported");
1851 	/*
1852 	 * Match on fragment_offset spec 0x0001 and last 0x1fff
1853 	 * means MF is 0 and frag-offset is > 0.
1854 	 * The packet is the last fragment of a fragmented packet.
1855 	 * This is not yet supported in MLX5, return appropriate
1856 	 * error message.
1857 	 */
1858 	if (fragment_offset_spec == RTE_BE16(1) &&
1859 	    fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
1860 		return rte_flow_error_set(error, ENOTSUP,
1861 					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1862 					  last, "match on last "
1863 					  "fragment not supported");
1864 	/*
1865 	 * Match on fragment_offset spec 0x0001 and last 0x3fff
1866 	 * means MF and/or frag-offset is not 0.
1867 	 * This is a fragmented packet.
1868 	 * Other range values are invalid and rejected.
1869 	 */
1870 	if (!(fragment_offset_spec == RTE_BE16(1) &&
1871 	      fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
1872 		return rte_flow_error_set(error, ENOTSUP,
1873 					  RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
1874 					  "specified range not supported");
1875 	return 0;
1876 }
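
/*
 * Editor's sketch (guarded out, illustrative only): the spec/last
 * fragment_offset combinations handled above, in host byte order.
 * Only the "any fragment" range (spec 0x0001, last 0x3fff) is accepted;
 * the other classes map to the error paths. Names under the guard are
 * hypothetical, not part of the driver.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
enum mlx5_example_ipv4_frag_match {
	MLX5_EXAMPLE_FRAG_NONE,      /* spec == 0: no fragment match. */
	MLX5_EXAMPLE_FRAG_FIRST,     /* spec 0x2000: MF = 1, offset = 0. */
	MLX5_EXAMPLE_FRAG_FOLLOWING, /* spec 0x2001, last 0x3fff. */
	MLX5_EXAMPLE_FRAG_LAST,      /* spec 0x0001, last 0x1fff. */
	MLX5_EXAMPLE_FRAG_ANY,       /* spec 0x0001, last 0x3fff. */
};

static inline enum mlx5_example_ipv4_frag_match
mlx5_example_classify_ipv4_frag(uint16_t spec, uint16_t last)
{
	if (!spec)
		return MLX5_EXAMPLE_FRAG_NONE;
	if (spec == RTE_IPV4_HDR_MF_FLAG && !last)
		return MLX5_EXAMPLE_FRAG_FIRST;
	if (spec == RTE_IPV4_HDR_MF_FLAG + 1 &&
	    last == MLX5_IPV4_FRAG_OFFSET_MASK)
		return MLX5_EXAMPLE_FRAG_FOLLOWING;
	if (spec == 1 && last == RTE_IPV4_HDR_OFFSET_MASK)
		return MLX5_EXAMPLE_FRAG_LAST;
	/* Treat the remaining accepted range as "any fragment". */
	return MLX5_EXAMPLE_FRAG_ANY;
}
#endif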
1877 
1878 /**
1879  * Validate IPV6 fragment extension item.
1880  *
1881  * @param[in] item
1882  *   Item specification.
1883  * @param[in] item_flags
1884  *   Bit-fields that holds the items detected until now.
1885  * @param[out] error
1886  *   Pointer to error structure.
1887  *
1888  * @return
1889  *   0 on success, a negative errno value otherwise and rte_errno is set.
1890  */
1891 static int
1892 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
1893 				    uint64_t item_flags,
1894 				    struct rte_flow_error *error)
1895 {
1896 	const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
1897 	const struct rte_flow_item_ipv6_frag_ext *last = item->last;
1898 	const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
1899 	rte_be16_t frag_data_spec = 0;
1900 	rte_be16_t frag_data_last = 0;
1901 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1902 	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1903 				      MLX5_FLOW_LAYER_OUTER_L4;
1904 	int ret = 0;
1905 	struct rte_flow_item_ipv6_frag_ext nic_mask = {
1906 		.hdr = {
1907 			.next_header = 0xff,
1908 			.frag_data = RTE_BE16(0xffff),
1909 		},
1910 	};
1911 
1912 	if (item_flags & l4m)
1913 		return rte_flow_error_set(error, EINVAL,
1914 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1915 					  "ipv6 fragment extension item cannot "
1916 					  "follow L4 item.");
1917 	if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
1918 	    (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
1919 		return rte_flow_error_set(error, EINVAL,
1920 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1921 					  "ipv6 fragment extension item must "
1922 					  "follow ipv6 item");
1923 	if (spec && mask)
1924 		frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
1925 	if (!frag_data_spec)
1926 		return 0;
1927 	/*
1928 	 * spec and mask are valid, enforce using full mask to make sure the
1929 	 * complete value is used correctly.
1930 	 */
1931 	if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
1932 				RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
1933 		return rte_flow_error_set(error, EINVAL,
1934 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1935 					  item, "must use full mask for"
1936 					  " frag_data");
1937 	/*
1938 	 * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
1939 	 * This is the first fragment of a fragmented packet.
1940 	 */
1941 	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
1942 		return rte_flow_error_set(error, ENOTSUP,
1943 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1944 					  "match on first fragment not "
1945 					  "supported");
1946 	if (frag_data_spec && !last)
1947 		return rte_flow_error_set(error, EINVAL,
1948 					  RTE_FLOW_ERROR_TYPE_ITEM, item,
1949 					  "specified value not supported");
1950 	ret = mlx5_flow_item_acceptable
1951 				(item, (const uint8_t *)mask,
1952 				 (const uint8_t *)&nic_mask,
1953 				 sizeof(struct rte_flow_item_ipv6_frag_ext),
1954 				 MLX5_ITEM_RANGE_ACCEPTED, error);
1955 	if (ret)
1956 		return ret;
1957 	/* spec and last are valid, validate the specified range. */
1958 	frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
1959 	/*
1960 	 * Match on frag_data spec 0x0009 and last 0xfff9
1961 	 * means M is 1 and frag-offset is > 0.
1962 	 * The packet is a 2nd or later fragment, excluding the last one.
1963 	 * This is not yet supported in MLX5, return appropriate
1964 	 * error message.
1965 	 */
1966 	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
1967 				       RTE_IPV6_EHDR_MF_MASK) &&
1968 	    frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
1969 		return rte_flow_error_set(error, ENOTSUP,
1970 					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1971 					  last, "match on following "
1972 					  "fragments not supported");
1973 	/*
1974 	 * Match on frag_data spec 0x0008 and last 0xfff8
1975 	 * means M is 0 and frag-offset is > 0.
1976 	 * The packet is the last fragment of a fragmented packet.
1977 	 * This is not yet supported in MLX5, return appropriate
1978 	 * error message.
1979 	 */
1980 	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
1981 	    frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
1982 		return rte_flow_error_set(error, ENOTSUP,
1983 					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1984 					  last, "match on last "
1985 					  "fragment not supported");
1986 	/* Other range values are invalid and rejected. */
1987 	return rte_flow_error_set(error, EINVAL,
1988 				  RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
1989 				  "specified range not supported");
1990 }
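
/*
 * Editor's sketch (guarded out, illustrative only): in the IPv6
 * fragment header the M flag is bit 0 of frag_data and the fragment
 * offset occupies the upper 13 bits in 8-byte units, which is where the
 * constants above come from: first fragment 0x0001 (M only), following
 * fragments from 0x0009 (offset 1, M set), last fragment 0x0008
 * (offset 1, M clear). The helper name is hypothetical.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static inline uint16_t
mlx5_example_ipv6_frag_data(uint16_t offset_units, bool more_fragments)
{
	/* Compose frag_data in host byte order. */
	return (uint16_t)((offset_units << 3) | (more_fragments ? 1 : 0));
}
#endif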
1991 
1992 /**
1993  * Validate the pop VLAN action.
1994  *
1995  * @param[in] dev
1996  *   Pointer to the rte_eth_dev structure.
1997  * @param[in] action_flags
1998  *   Holds the actions detected until now.
1999  * @param[in] action
2000  *   Pointer to the pop vlan action.
2001  * @param[in] item_flags
2002  *   The items found in this flow rule.
2003  * @param[in] attr
2004  *   Pointer to flow attributes.
2005  * @param[out] error
2006  *   Pointer to error structure.
2007  *
2008  * @return
2009  *   0 on success, a negative errno value otherwise and rte_errno is set.
2010  */
2011 static int
2012 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2013 				 uint64_t action_flags,
2014 				 const struct rte_flow_action *action,
2015 				 uint64_t item_flags,
2016 				 const struct rte_flow_attr *attr,
2017 				 struct rte_flow_error *error)
2018 {
2019 	const struct mlx5_priv *priv = dev->data->dev_private;
2020 
2021 	(void)action;
2022 	(void)attr;
2023 	if (!priv->sh->pop_vlan_action)
2024 		return rte_flow_error_set(error, ENOTSUP,
2025 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2026 					  NULL,
2027 					  "pop vlan action is not supported");
2028 	if (attr->egress)
2029 		return rte_flow_error_set(error, ENOTSUP,
2030 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2031 					  NULL,
2032 					  "pop vlan action not supported for "
2033 					  "egress");
2034 	if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2035 		return rte_flow_error_set(error, ENOTSUP,
2036 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2037 					  "no support for multiple VLAN "
2038 					  "actions");
2039 	/* Pop VLAN with preceding Decap requires inner header with VLAN. */
2040 	if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2041 	    !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2042 		return rte_flow_error_set(error, ENOTSUP,
2043 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2044 					  NULL,
2045 					  "cannot pop vlan after decap without "
2046 					  "match on inner vlan in the flow");
2047 	/* Pop VLAN without preceding Decap requires outer header with VLAN. */
2048 	if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2049 	    !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2050 		return rte_flow_error_set(error, ENOTSUP,
2051 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2052 					  NULL,
2053 					  "cannot pop vlan without a "
2054 					  "match on (outer) vlan in the flow");
2055 	if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2056 		return rte_flow_error_set(error, EINVAL,
2057 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2058 					  "wrong action order, port_id should "
2059 					  "be after pop VLAN action");
2060 	if (!attr->transfer && priv->representor)
2061 		return rte_flow_error_set(error, ENOTSUP,
2062 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2063 					  "pop vlan action for VF representor "
2064 					  "not supported on NIC table");
2065 	return 0;
2066 }
2067 
2068 /**
2069  * Get VLAN default info from vlan match info.
2070  *
2071  * @param[in] items
2072  *   The list of item specifications.
2073  * @param[out] vlan
2074  *   Pointer to the VLAN info to fill in.
2078  */
2079 static void
2080 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2081 				  struct rte_vlan_hdr *vlan)
2082 {
2083 	const struct rte_flow_item_vlan nic_mask = {
2084 		.tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2085 				MLX5DV_FLOW_VLAN_VID_MASK),
2086 		.inner_type = RTE_BE16(0xffff),
2087 	};
2088 
2089 	if (items == NULL)
2090 		return;
2091 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2092 		int type = items->type;
2093 
2094 		if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2095 		    type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2096 			break;
2097 	}
2098 	if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2099 		const struct rte_flow_item_vlan *vlan_m = items->mask;
2100 		const struct rte_flow_item_vlan *vlan_v = items->spec;
2101 
2102 		/* If VLAN item in pattern doesn't contain data, return here. */
2103 		if (!vlan_v)
2104 			return;
2105 		if (!vlan_m)
2106 			vlan_m = &nic_mask;
2107 		/* Only full match values are accepted */
2108 		if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2109 		     MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2110 			vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2111 			vlan->vlan_tci |=
2112 				rte_be_to_cpu_16(vlan_v->tci &
2113 						 MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2114 		}
2115 		if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2116 		     MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2117 			vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2118 			vlan->vlan_tci |=
2119 				rte_be_to_cpu_16(vlan_v->tci &
2120 						 MLX5DV_FLOW_VLAN_VID_MASK_BE);
2121 		}
2122 		if (vlan_m->inner_type == nic_mask.inner_type)
2123 			vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2124 							   vlan_m->inner_type);
2125 	}
2126 }
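
/*
 * Editor's sketch (guarded out, illustrative only): how a fully-masked
 * TCI decomposes under the PCP/VID masks used above, e.g. TCI 0xa005
 * gives PCP 5 and VID 5 (the DEI bit 12 is not extracted here). The
 * helper name is hypothetical.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static inline void
mlx5_example_split_tci(uint16_t tci, uint8_t *pcp, uint16_t *vid)
{
	*pcp = (uint8_t)((tci & MLX5DV_FLOW_VLAN_PCP_MASK) >>
			 MLX5DV_FLOW_VLAN_PCP_SHIFT);
	*vid = tci & MLX5DV_FLOW_VLAN_VID_MASK;
}
#endif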
2127 
2128 /**
2129  * Validate the push VLAN action.
2130  *
2131  * @param[in] dev
2132  *   Pointer to the rte_eth_dev structure.
2133  * @param[in] action_flags
2134  *   Holds the actions detected until now.
2135  * @param[in] item_flags
2136  *   The items found in this flow rule.
2137  * @param[in] action
2138  *   Pointer to the action structure.
2139  * @param[in] attr
2140  *   Pointer to flow attributes
2141  * @param[out] error
2142  *   Pointer to error structure.
2143  *
2144  * @return
2145  *   0 on success, a negative errno value otherwise and rte_errno is set.
2146  */
2147 static int
2148 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2149 				  uint64_t action_flags,
2150 				  const struct rte_flow_item_vlan *vlan_m,
2151 				  const struct rte_flow_action *action,
2152 				  const struct rte_flow_attr *attr,
2153 				  struct rte_flow_error *error)
2154 {
2155 	const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2156 	const struct mlx5_priv *priv = dev->data->dev_private;
2157 
2158 	if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2159 	    push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2160 		return rte_flow_error_set(error, EINVAL,
2161 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2162 					  "invalid vlan ethertype");
2163 	if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2164 		return rte_flow_error_set(error, EINVAL,
2165 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2166 					  "wrong action order, port_id should "
2167 					  "be after push VLAN");
2168 	if (!attr->transfer && priv->representor)
2169 		return rte_flow_error_set(error, ENOTSUP,
2170 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2171 					  "push vlan action for VF representor "
2172 					  "not supported on NIC table");
2173 	if (vlan_m &&
2174 	    (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2175 	    (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2176 		MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2177 	    !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2178 	    !(mlx5_flow_find_action
2179 		(action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2180 		return rte_flow_error_set(error, EINVAL,
2181 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2182 					  "not full match mask on VLAN PCP and "
2183 					  "there is no of_set_vlan_pcp action, "
2184 					  "push VLAN action cannot figure out "
2185 					  "PCP value");
2186 	if (vlan_m &&
2187 	    (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2188 	    (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2189 		MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2190 	    !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2191 	    !(mlx5_flow_find_action
2192 		(action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2193 		return rte_flow_error_set(error, EINVAL,
2194 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2195 					  "not full match mask on VLAN VID and "
2196 					  "there is no of_set_vlan_vid action, "
2197 					  "push VLAN action cannot figure out "
2198 					  "VID value");
2199 	(void)attr;
2200 	return 0;
2201 }
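
/*
 * Editor's sketch (guarded out, illustrative only): an action sequence
 * that passes the checks above even without a full PCP/VID match in the
 * VLAN item - push VLAN carries the TPID while explicit of_set_vlan_pcp
 * and of_set_vlan_vid actions supply the values, all placed before any
 * port_id action. Variable names are hypothetical.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static const struct rte_flow_action_of_push_vlan mlx5_example_push = {
	.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
};
static const struct rte_flow_action_of_set_vlan_pcp mlx5_example_pcp = {
	.vlan_pcp = 3,
};
static const struct rte_flow_action_of_set_vlan_vid mlx5_example_vid = {
	.vlan_vid = RTE_BE16(100),
};
static const struct rte_flow_action mlx5_example_vlan_actions[] = {
	{
		.type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
		.conf = &mlx5_example_push,
	},
	{
		.type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
		.conf = &mlx5_example_pcp,
	},
	{
		.type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
		.conf = &mlx5_example_vid,
	},
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif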
2202 
2203 /**
2204  * Validate the set VLAN PCP.
2205  *
2206  * @param[in] action_flags
2207  *   Holds the actions detected until now.
2208  * @param[in] actions
2209  *   Pointer to the list of actions remaining in the flow rule.
2210  * @param[out] error
2211  *   Pointer to error structure.
2212  *
2213  * @return
2214  *   0 on success, a negative errno value otherwise and rte_errno is set.
2215  */
2216 static int
2217 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2218 				     const struct rte_flow_action actions[],
2219 				     struct rte_flow_error *error)
2220 {
2221 	const struct rte_flow_action *action = actions;
2222 	const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2223 
2224 	if (conf->vlan_pcp > 7)
2225 		return rte_flow_error_set(error, EINVAL,
2226 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2227 					  "VLAN PCP value is too big");
2228 	if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2229 		return rte_flow_error_set(error, ENOTSUP,
2230 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2231 					  "set VLAN PCP action must follow "
2232 					  "the push VLAN action");
2233 	if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2234 		return rte_flow_error_set(error, ENOTSUP,
2235 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2236 					  "Multiple VLAN PCP modifications are "
2237 					  "not supported");
2238 	if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2239 		return rte_flow_error_set(error, EINVAL,
2240 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2241 					  "wrong action order, port_id should "
2242 					  "be after set VLAN PCP");
2243 	return 0;
2244 }
2245 
2246 /**
2247  * Validate the set VLAN VID.
2248  *
2249  * @param[in] item_flags
2250  *   Holds the items detected in this rule.
2251  * @param[in] action_flags
2252  *   Holds the actions detected until now.
2253  * @param[in] actions
2254  *   Pointer to the list of actions remaining in the flow rule.
2255  * @param[out] error
2256  *   Pointer to error structure.
2257  *
2258  * @return
2259  *   0 on success, a negative errno value otherwise and rte_errno is set.
2260  */
2261 static int
2262 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2263 				     uint64_t action_flags,
2264 				     const struct rte_flow_action actions[],
2265 				     struct rte_flow_error *error)
2266 {
2267 	const struct rte_flow_action *action = actions;
2268 	const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2269 
2270 	if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2271 		return rte_flow_error_set(error, EINVAL,
2272 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2273 					  "VLAN VID value is too big");
2274 	if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2275 	    !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2276 		return rte_flow_error_set(error, ENOTSUP,
2277 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2278 					  "set VLAN VID action must follow push"
2279 					  " VLAN action or match on VLAN item");
2280 	if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2281 		return rte_flow_error_set(error, ENOTSUP,
2282 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2283 					  "Multiple VLAN VID modifications are "
2284 					  "not supported");
2285 	if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2286 		return rte_flow_error_set(error, EINVAL,
2287 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2288 					  "wrong action order, port_id should "
2289 					  "be after set VLAN VID");
2290 	return 0;
2291 }
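
/*
 * Editor's note (guarded out, illustrative only): VID 4095 (0xFFF) is
 * reserved, so the largest value accepted by the check above is 4094
 * (0xFFE), supplied in network byte order. The variable name is
 * hypothetical.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static const struct rte_flow_action_of_set_vlan_vid mlx5_example_max_vid = {
	.vlan_vid = RTE_BE16(0xFFE), /* RTE_BE16(0xFFF) would be rejected. */
};
#endif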
2292 
2293 /*
2294  * Validate the FLAG action.
2295  *
2296  * @param[in] dev
2297  *   Pointer to the rte_eth_dev structure.
2298  * @param[in] action_flags
2299  *   Holds the actions detected until now.
2300  * @param[in] attr
2301  *   Pointer to flow attributes
2302  * @param[out] error
2303  *   Pointer to error structure.
2304  *
2305  * @return
2306  *   0 on success, a negative errno value otherwise and rte_errno is set.
2307  */
2308 static int
2309 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2310 			     uint64_t action_flags,
2311 			     const struct rte_flow_attr *attr,
2312 			     struct rte_flow_error *error)
2313 {
2314 	struct mlx5_priv *priv = dev->data->dev_private;
2315 	struct mlx5_dev_config *config = &priv->config;
2316 	int ret;
2317 
2318 	/* Fall back if no extended metadata register support. */
2319 	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2320 		return mlx5_flow_validate_action_flag(action_flags, attr,
2321 						      error);
2322 	/* Extensive metadata mode requires registers. */
2323 	if (!mlx5_flow_ext_mreg_supported(dev))
2324 		return rte_flow_error_set(error, ENOTSUP,
2325 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2326 					  "no metadata registers "
2327 					  "to support flag action");
2328 	if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2329 		return rte_flow_error_set(error, ENOTSUP,
2330 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2331 					  "extended metadata register"
2332 					  " isn't available");
2333 	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2334 	if (ret < 0)
2335 		return ret;
2336 	MLX5_ASSERT(ret > 0);
2337 	if (action_flags & MLX5_FLOW_ACTION_MARK)
2338 		return rte_flow_error_set(error, EINVAL,
2339 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2340 					  "can't mark and flag in same flow");
2341 	if (action_flags & MLX5_FLOW_ACTION_FLAG)
2342 		return rte_flow_error_set(error, EINVAL,
2343 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2344 					  "can't have 2 flag"
2345 					  " actions in same flow");
2346 	return 0;
2347 }
2348 
2349 /**
2350  * Validate MARK action.
2351  *
2352  * @param[in] dev
2353  *   Pointer to the rte_eth_dev structure.
2354  * @param[in] action
2355  *   Pointer to action.
2356  * @param[in] action_flags
2357  *   Holds the actions detected until now.
2358  * @param[in] attr
2359  *   Pointer to flow attributes
2360  * @param[out] error
2361  *   Pointer to error structure.
2362  *
2363  * @return
2364  *   0 on success, a negative errno value otherwise and rte_errno is set.
2365  */
2366 static int
2367 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2368 			     const struct rte_flow_action *action,
2369 			     uint64_t action_flags,
2370 			     const struct rte_flow_attr *attr,
2371 			     struct rte_flow_error *error)
2372 {
2373 	struct mlx5_priv *priv = dev->data->dev_private;
2374 	struct mlx5_dev_config *config = &priv->config;
2375 	const struct rte_flow_action_mark *mark = action->conf;
2376 	int ret;
2377 
2378 	/* Fall back if no extended metadata register support. */
2379 	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2380 		return mlx5_flow_validate_action_mark(action, action_flags,
2381 						      attr, error);
2382 	/* Extensive metadata mode requires registers. */
2383 	if (!mlx5_flow_ext_mreg_supported(dev))
2384 		return rte_flow_error_set(error, ENOTSUP,
2385 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2386 					  "no metadata registers "
2387 					  "to support mark action");
2388 	if (!priv->sh->dv_mark_mask)
2389 		return rte_flow_error_set(error, ENOTSUP,
2390 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2391 					  "extended metadata register"
2392 					  " isn't available");
2393 	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2394 	if (ret < 0)
2395 		return ret;
2396 	MLX5_ASSERT(ret > 0);
2397 	if (!mark)
2398 		return rte_flow_error_set(error, EINVAL,
2399 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2400 					  "configuration cannot be null");
2401 	if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2402 		return rte_flow_error_set(error, EINVAL,
2403 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2404 					  &mark->id,
2405 					  "mark id exceeds the limit");
2406 	if (action_flags & MLX5_FLOW_ACTION_FLAG)
2407 		return rte_flow_error_set(error, EINVAL,
2408 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2409 					  "can't flag and mark in same flow");
2410 	if (action_flags & MLX5_FLOW_ACTION_MARK)
2411 		return rte_flow_error_set(error, EINVAL,
2412 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2413 					  "can't have 2 mark actions in same"
2414 					  " flow");
2415 	return 0;
2416 }
2417 
2418 /**
2419  * Validate SET_META action.
2420  *
2421  * @param[in] dev
2422  *   Pointer to the rte_eth_dev structure.
2423  * @param[in] action
2424  *   Pointer to the action structure.
2425  * @param[in] action_flags
2426  *   Holds the actions detected until now.
2427  * @param[in] attr
2428  *   Pointer to flow attributes
2429  * @param[out] error
2430  *   Pointer to error structure.
2431  *
2432  * @return
2433  *   0 on success, a negative errno value otherwise and rte_errno is set.
2434  */
2435 static int
2436 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2437 				 const struct rte_flow_action *action,
2438 				 uint64_t action_flags __rte_unused,
2439 				 const struct rte_flow_attr *attr,
2440 				 struct rte_flow_error *error)
2441 {
2442 	const struct rte_flow_action_set_meta *conf;
2443 	uint32_t nic_mask = UINT32_MAX;
2444 	int reg;
2445 
2446 	if (!mlx5_flow_ext_mreg_supported(dev))
2447 		return rte_flow_error_set(error, ENOTSUP,
2448 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2449 					  "extended metadata register"
2450 					  " isn't supported");
2451 	reg = flow_dv_get_metadata_reg(dev, attr, error);
2452 	if (reg < 0)
2453 		return reg;
2454 	if (reg == REG_NON)
2455 		return rte_flow_error_set(error, ENOTSUP,
2456 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2457 					  "unavailable extended metadata register");
2458 	if (reg != REG_A && reg != REG_B) {
2459 		struct mlx5_priv *priv = dev->data->dev_private;
2460 
2461 		nic_mask = priv->sh->dv_meta_mask;
2462 	}
2463 	if (!(action->conf))
2464 		return rte_flow_error_set(error, EINVAL,
2465 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2466 					  "configuration cannot be null");
2467 	conf = (const struct rte_flow_action_set_meta *)action->conf;
2468 	if (!conf->mask)
2469 		return rte_flow_error_set(error, EINVAL,
2470 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2471 					  "zero mask doesn't have any effect");
2472 	if (conf->mask & ~nic_mask)
2473 		return rte_flow_error_set(error, EINVAL,
2474 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2475 					  "meta data must be within reg C0");
2476 	return 0;
2477 }
2478 
2479 /**
2480  * Validate SET_TAG action.
2481  *
2482  * @param[in] dev
2483  *   Pointer to the rte_eth_dev structure.
2484  * @param[in] action
2485  *   Pointer to the action structure.
2486  * @param[in] action_flags
2487  *   Holds the actions detected until now.
2488  * @param[in] attr
2489  *   Pointer to flow attributes
2490  * @param[out] error
2491  *   Pointer to error structure.
2492  *
2493  * @return
2494  *   0 on success, a negative errno value otherwise and rte_errno is set.
2495  */
2496 static int
2497 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2498 				const struct rte_flow_action *action,
2499 				uint64_t action_flags,
2500 				const struct rte_flow_attr *attr,
2501 				struct rte_flow_error *error)
2502 {
2503 	const struct rte_flow_action_set_tag *conf;
2504 	const uint64_t terminal_action_flags =
2505 		MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2506 		MLX5_FLOW_ACTION_RSS;
2507 	int ret;
2508 
2509 	if (!mlx5_flow_ext_mreg_supported(dev))
2510 		return rte_flow_error_set(error, ENOTSUP,
2511 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2512 					  "extensive metadata register"
2513 					  " isn't supported");
2514 	if (!(action->conf))
2515 		return rte_flow_error_set(error, EINVAL,
2516 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2517 					  "configuration cannot be null");
2518 	conf = (const struct rte_flow_action_set_tag *)action->conf;
2519 	if (!conf->mask)
2520 		return rte_flow_error_set(error, EINVAL,
2521 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2522 					  "zero mask doesn't have any effect");
2523 	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2524 	if (ret < 0)
2525 		return ret;
2526 	if (!attr->transfer && attr->ingress &&
2527 	    (action_flags & terminal_action_flags))
2528 		return rte_flow_error_set(error, EINVAL,
2529 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2530 					  "set_tag has no effect"
2531 					  " with terminal actions");
2532 	return 0;
2533 }
2534 
2535 /**
2536  * Validate count action.
2537  *
2538  * @param[in] dev
2539  *   Pointer to rte_eth_dev structure.
2540  * @param[out] error
2541  *   Pointer to error structure.
2542  *
2543  * @return
2544  *   0 on success, a negative errno value otherwise and rte_errno is set.
2545  */
2546 static int
2547 flow_dv_validate_action_count(struct rte_eth_dev *dev,
2548 			      struct rte_flow_error *error)
2549 {
2550 	struct mlx5_priv *priv = dev->data->dev_private;
2551 
2552 	if (!priv->config.devx)
2553 		goto notsup_err;
2554 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
2555 	return 0;
2556 #endif
2557 notsup_err:
2558 	return rte_flow_error_set
2559 		      (error, ENOTSUP,
2560 		       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2561 		       NULL,
2562 		       "count action not supported");
2563 }
2564 
2565 /**
2566  * Validate the L2 encap action.
2567  *
2568  * @param[in] dev
2569  *   Pointer to the rte_eth_dev structure.
2570  * @param[in] action_flags
2571  *   Holds the actions detected until now.
2572  * @param[in] action
2573  *   Pointer to the action structure.
2574  * @param[in] attr
2575  *   Pointer to flow attributes.
2576  * @param[out] error
2577  *   Pointer to error structure.
2578  *
2579  * @return
2580  *   0 on success, a negative errno value otherwise and rte_errno is set.
2581  */
2582 static int
2583 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
2584 				 uint64_t action_flags,
2585 				 const struct rte_flow_action *action,
2586 				 const struct rte_flow_attr *attr,
2587 				 struct rte_flow_error *error)
2588 {
2589 	const struct mlx5_priv *priv = dev->data->dev_private;
2590 
2591 	if (!(action->conf))
2592 		return rte_flow_error_set(error, EINVAL,
2593 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
2594 					  "configuration cannot be null");
2595 	if (action_flags & MLX5_FLOW_ACTION_ENCAP)
2596 		return rte_flow_error_set(error, EINVAL,
2597 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2598 					  "can only have a single encap action "
2599 					  "in a flow");
2600 	if (!attr->transfer && priv->representor)
2601 		return rte_flow_error_set(error, ENOTSUP,
2602 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2603 					  "encap action for VF representor "
2604 					  "not supported on NIC table");
2605 	return 0;
2606 }
2607 
2608 /**
2609  * Validate a decap action.
2610  *
2611  * @param[in] dev
2612  *   Pointer to the rte_eth_dev structure.
2613  * @param[in] action_flags
2614  *   Holds the actions detected until now.
2615  * @param[in] attr
2616  *   Pointer to flow attributes
2617  * @param[out] error
2618  *   Pointer to error structure.
2619  *
2620  * @return
2621  *   0 on success, a negative errno value otherwise and rte_errno is set.
2622  */
2623 static int
2624 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
2625 			      uint64_t action_flags,
2626 			      const struct rte_flow_attr *attr,
2627 			      struct rte_flow_error *error)
2628 {
2629 	const struct mlx5_priv *priv = dev->data->dev_private;
2630 
2631 	if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
2632 	    !priv->config.decap_en)
2633 		return rte_flow_error_set(error, ENOTSUP,
2634 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2635 					  "decap is not enabled");
2636 	if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
2637 		return rte_flow_error_set(error, ENOTSUP,
2638 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2639 					  action_flags &
2640 					  MLX5_FLOW_ACTION_DECAP ? "can only "
2641 					  "have a single decap action" : "decap "
2642 					  "after encap is not supported");
2643 	if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2644 		return rte_flow_error_set(error, EINVAL,
2645 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2646 					  "can't have decap action after"
2647 					  " modify action");
2648 	if (attr->egress)
2649 		return rte_flow_error_set(error, ENOTSUP,
2650 					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2651 					  NULL,
2652 					  "decap action not supported for "
2653 					  "egress");
2654 	if (!attr->transfer && priv->representor)
2655 		return rte_flow_error_set(error, ENOTSUP,
2656 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2657 					  "decap action for VF representor "
2658 					  "not supported on NIC table");
2659 	return 0;
2660 }
2661 
2662 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
2663 
2664 /**
2665  * Validate the raw encap and decap actions.
2666  *
2667  * @param[in] dev
2668  *   Pointer to the rte_eth_dev structure.
2669  * @param[in] decap
2670  *   Pointer to the decap action.
2671  * @param[in] encap
2672  *   Pointer to the encap action.
2673  * @param[in] attr
2674  *   Pointer to flow attributes
2675  * @param[in/out] action_flags
2676  *   Holds the actions detected until now.
2677  * @param[out] actions_n
2678  *   pointer to the number of actions counter.
2679  * @param[out] error
2680  *   Pointer to error structure.
2681  *
2682  * @return
2683  *   0 on success, a negative errno value otherwise and rte_errno is set.
2684  */
2685 static int
2686 flow_dv_validate_action_raw_encap_decap
2687 	(struct rte_eth_dev *dev,
2688 	 const struct rte_flow_action_raw_decap *decap,
2689 	 const struct rte_flow_action_raw_encap *encap,
2690 	 const struct rte_flow_attr *attr, uint64_t *action_flags,
2691 	 int *actions_n, struct rte_flow_error *error)
2692 {
2693 	const struct mlx5_priv *priv = dev->data->dev_private;
2694 	int ret;
2695 
2696 	if (encap && (!encap->size || !encap->data))
2697 		return rte_flow_error_set(error, EINVAL,
2698 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2699 					  "raw encap data cannot be empty");
2700 	if (decap && encap) {
2701 		if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
2702 		    encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
2703 			/* L3 encap. */
2704 			decap = NULL;
2705 		else if (encap->size <=
2706 			   MLX5_ENCAPSULATION_DECISION_SIZE &&
2707 			   decap->size >
2708 			   MLX5_ENCAPSULATION_DECISION_SIZE)
2709 			/* L3 decap. */
2710 			encap = NULL;
2711 		else if (encap->size >
2712 			   MLX5_ENCAPSULATION_DECISION_SIZE &&
2713 			   decap->size >
2714 			   MLX5_ENCAPSULATION_DECISION_SIZE)
2715 			/* 2 L2 actions: encap and decap. */
2716 			;
2717 		else
2718 			return rte_flow_error_set(error,
2719 				ENOTSUP,
2720 				RTE_FLOW_ERROR_TYPE_ACTION,
2721 				NULL, "unsupported too small "
2722 				"raw decap and too small raw "
2723 				"encap combination");
2724 	}
2725 	if (decap) {
2726 		ret = flow_dv_validate_action_decap(dev, *action_flags, attr,
2727 						    error);
2728 		if (ret < 0)
2729 			return ret;
2730 		*action_flags |= MLX5_FLOW_ACTION_DECAP;
2731 		++(*actions_n);
2732 	}
2733 	if (encap) {
2734 		if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
2735 			return rte_flow_error_set(error, ENOTSUP,
2736 						  RTE_FLOW_ERROR_TYPE_ACTION,
2737 						  NULL,
2738 						  "small raw encap size");
2739 		if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
2740 			return rte_flow_error_set(error, EINVAL,
2741 						  RTE_FLOW_ERROR_TYPE_ACTION,
2742 						  NULL,
2743 						  "more than one encap action");
2744 		if (!attr->transfer && priv->representor)
2745 			return rte_flow_error_set
2746 					(error, ENOTSUP,
2747 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2748 					 "encap action for VF representor "
2749 					 "not supported on NIC table");
2750 		*action_flags |= MLX5_FLOW_ACTION_ENCAP;
2751 		++(*actions_n);
2752 	}
2753 	return 0;
2754 }
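
/*
 * Editor's sketch (guarded out, illustrative only): how the raw
 * decap/encap pair above is interpreted by size relative to
 * MLX5_ENCAPSULATION_DECISION_SIZE - small decap with big encap is an
 * L3 encap, big decap with small encap is an L3 decap, and two big
 * buffers are an L2 decap followed by an L2 encap. Names under the
 * guard are hypothetical.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
enum mlx5_example_reformat {
	MLX5_EXAMPLE_REFORMAT_L3_ENCAP,
	MLX5_EXAMPLE_REFORMAT_L3_DECAP,
	MLX5_EXAMPLE_REFORMAT_L2_DECAP_ENCAP,
	MLX5_EXAMPLE_REFORMAT_UNSUPPORTED,
};

static inline enum mlx5_example_reformat
mlx5_example_classify_reformat(size_t decap_size, size_t encap_size)
{
	const size_t lim = MLX5_ENCAPSULATION_DECISION_SIZE;

	if (decap_size <= lim && encap_size > lim)
		return MLX5_EXAMPLE_REFORMAT_L3_ENCAP;
	if (encap_size <= lim && decap_size > lim)
		return MLX5_EXAMPLE_REFORMAT_L3_DECAP;
	if (encap_size > lim && decap_size > lim)
		return MLX5_EXAMPLE_REFORMAT_L2_DECAP_ENCAP;
	return MLX5_EXAMPLE_REFORMAT_UNSUPPORTED;
}
#endif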
2755 
2756 /**
2757  * Match encap_decap resource.
2758  *
2759  * @param list
2760  *   Pointer to the hash list.
2761  * @param entry
2762  *   Pointer to exist resource entry object.
2763  * @param key
2764  *   Key of the new entry.
2765  * @param cb_ctx
2766  *   Pointer to the context with the new encap_decap resource.
2767  *
2768  * @return
2769  *   0 on match, non-zero otherwise.
2770  */
2771 int
2772 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
2773 			     struct mlx5_hlist_entry *entry,
2774 			     uint64_t key __rte_unused, void *cb_ctx)
2775 {
2776 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2777 	struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2778 	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2779 
2780 	cache_resource = container_of(entry,
2781 				      struct mlx5_flow_dv_encap_decap_resource,
2782 				      entry);
2783 	if (resource->entry.key == cache_resource->entry.key &&
2784 	    resource->reformat_type == cache_resource->reformat_type &&
2785 	    resource->ft_type == cache_resource->ft_type &&
2786 	    resource->flags == cache_resource->flags &&
2787 	    resource->size == cache_resource->size &&
2788 	    !memcmp((const void *)resource->buf,
2789 		    (const void *)cache_resource->buf,
2790 		    resource->size))
2791 		return 0;
2792 	return -1;
2793 }
2794 
2795 /**
2796  * Allocate encap_decap resource.
2797  *
2798  * @param list
2799  *   Pointer to the hash list.
2800  * @param key
2801  *   Key of the new entry.
2802  * @param cb_ctx
2803  *   Pointer to the context with the new encap_decap resource.
2804  *
2805  * @return
2806  *   Pointer to the entry on success, NULL otherwise and rte_errno is set.
2807  */
2808 struct mlx5_hlist_entry *
2809 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
2810 			      uint64_t key __rte_unused,
2811 			      void *cb_ctx)
2812 {
2813 	struct mlx5_dev_ctx_shared *sh = list->ctx;
2814 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2815 	struct mlx5dv_dr_domain *domain;
2816 	struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2817 	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2818 	uint32_t idx;
2819 	int ret;
2820 
2821 	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2822 		domain = sh->fdb_domain;
2823 	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2824 		domain = sh->rx_domain;
2825 	else
2826 		domain = sh->tx_domain;
2827 	/* Register new encap/decap resource. */
2828 	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
2829 				       &idx);
2830 	if (!cache_resource) {
2831 		rte_flow_error_set(ctx->error, ENOMEM,
2832 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2833 				   "cannot allocate resource memory");
2834 		return NULL;
2835 	}
2836 	*cache_resource = *resource;
2837 	cache_resource->idx = idx;
2838 	ret = mlx5_flow_os_create_flow_action_packet_reformat
2839 					(sh->ctx, domain, cache_resource,
2840 					 &cache_resource->action);
2841 	if (ret) {
2842 		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
2843 		rte_flow_error_set(ctx->error, ENOMEM,
2844 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2845 				   NULL, "cannot create action");
2846 		return NULL;
2847 	}
2848 
2849 	return &cache_resource->entry;
2850 }
2851 
2852 /**
2853  * Find existing encap/decap resource or create and register a new one.
2854  *
2855  * @param[in, out] dev
2856  *   Pointer to rte_eth_dev structure.
2857  * @param[in, out] resource
2858  *   Pointer to encap/decap resource.
2859  * @param[in, out] dev_flow
2860  *   Pointer to the dev_flow.
2861  * @param[out] error
2862  *   Pointer to error structure.
2863  *
2864  * @return
2865  *   0 on success, a negative errno value otherwise and rte_errno is set.
2866  */
2867 static int
2868 flow_dv_encap_decap_resource_register
2869 			(struct rte_eth_dev *dev,
2870 			 struct mlx5_flow_dv_encap_decap_resource *resource,
2871 			 struct mlx5_flow *dev_flow,
2872 			 struct rte_flow_error *error)
2873 {
2874 	struct mlx5_priv *priv = dev->data->dev_private;
2875 	struct mlx5_dev_ctx_shared *sh = priv->sh;
2876 	struct mlx5_hlist_entry *entry;
2877 	union {
2878 		struct {
2879 			uint32_t ft_type:8;
2880 			uint32_t refmt_type:8;
2881 			/*
2882 			 * Header reformat actions can be shared between
2883 			 * non-root tables. One bit to indicate non-root
2884 			 * table or not.
2885 			 */
2886 			uint32_t is_root:1;
2887 			uint32_t reserve:15;
2888 		};
2889 		uint32_t v32;
2890 	} encap_decap_key = {
2891 		{
2892 			.ft_type = resource->ft_type,
2893 			.refmt_type = resource->reformat_type,
2894 			.is_root = !!dev_flow->dv.group,
2895 			.reserve = 0,
2896 		}
2897 	};
2898 	struct mlx5_flow_cb_ctx ctx = {
2899 		.error = error,
2900 		.data = resource,
2901 	};
2902 
2903 	resource->flags = dev_flow->dv.group ? 0 : 1;
2904 	resource->entry.key = __rte_raw_cksum(&encap_decap_key.v32,
2905 					       sizeof(encap_decap_key.v32), 0);
2906 	if (resource->reformat_type !=
2907 	    MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
2908 	    resource->size)
2909 		resource->entry.key = __rte_raw_cksum(resource->buf,
2910 						      resource->size,
2911 						      resource->entry.key);
2912 	entry = mlx5_hlist_register(sh->encaps_decaps, resource->entry.key,
2913 				    &ctx);
2914 	if (!entry)
2915 		return -rte_errno;
2916 	resource = container_of(entry, typeof(*resource), entry);
2917 	dev_flow->dv.encap_decap = resource;
2918 	dev_flow->handle->dvh.rix_encap_decap = resource->idx;
2919 	return 0;
2920 }
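
/*
 * Editor's sketch (guarded out, illustrative only): the hash key built
 * above chains __rte_raw_cksum() over the packed 32-bit
 * ft_type/refmt_type/is_root word and then, for reformat types carrying
 * data, over the reformat buffer, so entries differ whenever any of
 * those inputs differ. The helper name is hypothetical.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static inline uint64_t
mlx5_example_encap_decap_key(uint32_t packed_v32,
			     const uint8_t *buf, size_t size)
{
	uint64_t key = __rte_raw_cksum(&packed_v32, sizeof(packed_v32), 0);

	if (buf && size)
		key = __rte_raw_cksum(buf, size, key);
	return key;
}
#endif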
2921 
2922 /**
2923  * Find existing table jump resource or create and register a new one.
2924  *
2925  * @param[in, out] dev
2926  *   Pointer to rte_eth_dev structure.
2927  * @param[in, out] tbl
2928  *   Pointer to flow table resource.
2929  * @param[in, out] dev_flow
2930  *   Pointer to the dev_flow.
2931  * @param[out] error
2932  *   Pointer to error structure.
2933  *
2934  * @return
2935  *   0 on success, a negative errno value otherwise and rte_errno is set.
2936  */
2937 static int
2938 flow_dv_jump_tbl_resource_register
2939 			(struct rte_eth_dev *dev __rte_unused,
2940 			 struct mlx5_flow_tbl_resource *tbl,
2941 			 struct mlx5_flow *dev_flow,
2942 			 struct rte_flow_error *error __rte_unused)
2943 {
2944 	struct mlx5_flow_tbl_data_entry *tbl_data =
2945 		container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
2946 
2947 	MLX5_ASSERT(tbl);
2948 	MLX5_ASSERT(tbl_data->jump.action);
2949 	dev_flow->handle->rix_jump = tbl_data->idx;
2950 	dev_flow->dv.jump = &tbl_data->jump;
2951 	return 0;
2952 }
2953 
2954 int
2955 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
2956 			 struct mlx5_cache_entry *entry, void *cb_ctx)
2957 {
2958 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2959 	struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
2960 	struct mlx5_flow_dv_port_id_action_resource *res =
2961 			container_of(entry, typeof(*res), entry);
2962 
2963 	return ref->port_id != res->port_id;
2964 }
2965 
2966 struct mlx5_cache_entry *
2967 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
2968 			  struct mlx5_cache_entry *entry __rte_unused,
2969 			  void *cb_ctx)
2970 {
2971 	struct mlx5_dev_ctx_shared *sh = list->ctx;
2972 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2973 	struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
2974 	struct mlx5_flow_dv_port_id_action_resource *cache;
2975 	uint32_t idx;
2976 	int ret;
2977 
2978 	/* Register new port id action resource. */
2979 	cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
2980 	if (!cache) {
2981 		rte_flow_error_set(ctx->error, ENOMEM,
2982 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2983 				   "cannot allocate port_id action cache memory");
2984 		return NULL;
2985 	}
2986 	*cache = *ref;
2987 	ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
2988 							ref->port_id,
2989 							&cache->action);
2990 	if (ret) {
2991 		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
2992 		rte_flow_error_set(ctx->error, ENOMEM,
2993 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2994 				   "cannot create action");
2995 		return NULL;
2996 	}
2997 	return &cache->entry;
2998 }
2999 
3000 /**
3001  * Find existing table port ID resource or create and register a new one.
3002  *
3003  * @param[in, out] dev
3004  *   Pointer to rte_eth_dev structure.
3005  * @param[in, out] resource
3006  *   Pointer to port ID action resource.
3007  * @param[in, out] dev_flow
3008  *   Pointer to the dev_flow.
3009  * @param[out] error
3010  *   Pointer to error structure.
3011  *
3012  * @return
3013  *   0 on success, a negative errno value otherwise and rte_errno is set.
3014  */
3015 static int
3016 flow_dv_port_id_action_resource_register
3017 			(struct rte_eth_dev *dev,
3018 			 struct mlx5_flow_dv_port_id_action_resource *resource,
3019 			 struct mlx5_flow *dev_flow,
3020 			 struct rte_flow_error *error)
3021 {
3022 	struct mlx5_priv *priv = dev->data->dev_private;
3023 	struct mlx5_cache_entry *entry;
3024 	struct mlx5_flow_dv_port_id_action_resource *cache;
3025 	struct mlx5_flow_cb_ctx ctx = {
3026 		.error = error,
3027 		.data = resource,
3028 	};
3029 
3030 	entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3031 	if (!entry)
3032 		return -rte_errno;
3033 	cache = container_of(entry, typeof(*cache), entry);
3034 	dev_flow->dv.port_id_action = cache;
3035 	dev_flow->handle->rix_port_id_action = cache->idx;
3036 	return 0;
3037 }
3038 
3039 int
3040 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3041 			 struct mlx5_cache_entry *entry, void *cb_ctx)
3042 {
3043 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3044 	struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3045 	struct mlx5_flow_dv_push_vlan_action_resource *res =
3046 			container_of(entry, typeof(*res), entry);
3047 
3048 	return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3049 }
3050 
3051 struct mlx5_cache_entry *
3052 flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
3053 			  struct mlx5_cache_entry *entry __rte_unused,
3054 			  void *cb_ctx)
3055 {
3056 	struct mlx5_dev_ctx_shared *sh = list->ctx;
3057 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3058 	struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3059 	struct mlx5_flow_dv_push_vlan_action_resource *cache;
3060 	struct mlx5dv_dr_domain *domain;
3061 	uint32_t idx;
3062 	int ret;
3063 
3064 	/* Register new push VLAN action resource. */
3065 	cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3066 	if (!cache) {
3067 		rte_flow_error_set(ctx->error, ENOMEM,
3068 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3069 				   "cannot allocate push_vlan action cache memory");
3070 		return NULL;
3071 	}
3072 	*cache = *ref;
3073 	if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3074 		domain = sh->fdb_domain;
3075 	else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3076 		domain = sh->rx_domain;
3077 	else
3078 		domain = sh->tx_domain;
3079 	ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3080 							&cache->action);
3081 	if (ret) {
3082 		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3083 		rte_flow_error_set(ctx->error, ENOMEM,
3084 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3085 				   "cannot create push vlan action");
3086 		return NULL;
3087 	}
3088 	return &cache->entry;
3089 }
3090 
3091 /**
3092  * Find an existing push VLAN action resource or create and register a new one.
3093  *
3094  * @param[in, out] dev
3095  *   Pointer to rte_eth_dev structure.
3096  * @param[in, out] resource
3097  *   Pointer to push VLAN action resource.
3098  * @param[in, out] dev_flow
3099  *   Pointer to the dev_flow.
3100  * @param[out] error
3101  *   Pointer to error structure.
3102  *
3103  * @return
3104  *   0 on success, otherwise -errno and rte_errno is set.
3105  */
3106 static int
3107 flow_dv_push_vlan_action_resource_register
3108 		       (struct rte_eth_dev *dev,
3109 			struct mlx5_flow_dv_push_vlan_action_resource *resource,
3110 			struct mlx5_flow *dev_flow,
3111 			struct rte_flow_error *error)
3112 {
3113 	struct mlx5_priv *priv = dev->data->dev_private;
3114 	struct mlx5_flow_dv_push_vlan_action_resource *cache;
3115 	struct mlx5_cache_entry *entry;
3116 	struct mlx5_flow_cb_ctx ctx = {
3117 		.error = error,
3118 		.data = resource,
3119 	};
3120 
3121 	entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3122 	if (!entry)
3123 		return -rte_errno;
3124 	cache = container_of(entry, typeof(*cache), entry);
3125 
3126 	dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3127 	dev_flow->dv.push_vlan_res = cache;
3128 	return 0;
3129 }
3130 
3131 /**
3132  * Get the header size of the specified rte_flow_item_type.
3133  *
3134  * @param[in] item_type
3135  *   Tested rte_flow_item_type.
3136  *
3137  * @return
3138  *   Size of the matching header structure, 0 if void or irrelevant.
3139  */
3140 static size_t
3141 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3142 {
3143 	size_t retval;
3144 
3145 	switch (item_type) {
3146 	case RTE_FLOW_ITEM_TYPE_ETH:
3147 		retval = sizeof(struct rte_ether_hdr);
3148 		break;
3149 	case RTE_FLOW_ITEM_TYPE_VLAN:
3150 		retval = sizeof(struct rte_vlan_hdr);
3151 		break;
3152 	case RTE_FLOW_ITEM_TYPE_IPV4:
3153 		retval = sizeof(struct rte_ipv4_hdr);
3154 		break;
3155 	case RTE_FLOW_ITEM_TYPE_IPV6:
3156 		retval = sizeof(struct rte_ipv6_hdr);
3157 		break;
3158 	case RTE_FLOW_ITEM_TYPE_UDP:
3159 		retval = sizeof(struct rte_udp_hdr);
3160 		break;
3161 	case RTE_FLOW_ITEM_TYPE_TCP:
3162 		retval = sizeof(struct rte_tcp_hdr);
3163 		break;
3164 	case RTE_FLOW_ITEM_TYPE_VXLAN:
3165 	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3166 		retval = sizeof(struct rte_vxlan_hdr);
3167 		break;
3168 	case RTE_FLOW_ITEM_TYPE_GRE:
3169 	case RTE_FLOW_ITEM_TYPE_NVGRE:
3170 		retval = sizeof(struct rte_gre_hdr);
3171 		break;
3172 	case RTE_FLOW_ITEM_TYPE_MPLS:
3173 		retval = sizeof(struct rte_mpls_hdr);
3174 		break;
3175 	case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
3176 	default:
3177 		retval = 0;
3178 		break;
3179 	}
3180 	return retval;
3181 }
3182 
3183 #define MLX5_ENCAP_IPV4_VERSION		0x40
3184 #define MLX5_ENCAP_IPV4_IHL_MIN		0x05
3185 #define MLX5_ENCAP_IPV4_TTL_DEF		0x40
3186 #define MLX5_ENCAP_IPV6_VTC_FLOW	0x60000000
3187 #define MLX5_ENCAP_IPV6_HOP_LIMIT	0xff
3188 #define MLX5_ENCAP_VXLAN_FLAGS		0x08000000
3189 #define MLX5_ENCAP_VXLAN_GPE_FLAGS	0x04
3190 
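/*
 * Editor's note, worked example of the defaults above:
 * MLX5_ENCAP_IPV4_VERSION | MLX5_ENCAP_IPV4_IHL_MIN == 0x45, the usual
 * IPv4 version/IHL byte (version 4, IHL 5, i.e. a 20-byte header);
 * MLX5_ENCAP_IPV4_TTL_DEF is a default TTL of 64; and
 * MLX5_ENCAP_IPV6_VTC_FLOW sets only the IPv6 version nibble (6) with a
 * zero traffic class and flow label.
 */
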
3191 /**
3192  * Convert the encap action data from a list of rte_flow_item objects to a raw buffer.
3193  *
3194  * @param[in] items
3195  *   Pointer to rte_flow_item objects list.
3196  * @param[out] buf
3197  *   Pointer to the output buffer.
3198  * @param[out] size
3199  *   Pointer to the output buffer size.
3200  * @param[out] error
3201  *   Pointer to the error structure.
3202  *
3203  * @return
3204  *   0 on success, a negative errno value otherwise and rte_errno is set.
3205  */
3206 static int
3207 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
3208 			   size_t *size, struct rte_flow_error *error)
3209 {
3210 	struct rte_ether_hdr *eth = NULL;
3211 	struct rte_vlan_hdr *vlan = NULL;
3212 	struct rte_ipv4_hdr *ipv4 = NULL;
3213 	struct rte_ipv6_hdr *ipv6 = NULL;
3214 	struct rte_udp_hdr *udp = NULL;
3215 	struct rte_vxlan_hdr *vxlan = NULL;
3216 	struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
3217 	struct rte_gre_hdr *gre = NULL;
3218 	size_t len;
3219 	size_t temp_size = 0;
3220 
3221 	if (!items)
3222 		return rte_flow_error_set(error, EINVAL,
3223 					  RTE_FLOW_ERROR_TYPE_ACTION,
3224 					  NULL, "invalid empty data");
3225 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3226 		len = flow_dv_get_item_hdr_len(items->type);
3227 		if (len + temp_size > MLX5_ENCAP_MAX_LEN)
3228 			return rte_flow_error_set(error, EINVAL,
3229 						  RTE_FLOW_ERROR_TYPE_ACTION,
3230 						  (void *)items->type,
3231 						  "items total size is too big"
3232 						  " for encap action");
3233 		rte_memcpy((void *)&buf[temp_size], items->spec, len);
3234 		switch (items->type) {
3235 		case RTE_FLOW_ITEM_TYPE_ETH:
3236 			eth = (struct rte_ether_hdr *)&buf[temp_size];
3237 			break;
3238 		case RTE_FLOW_ITEM_TYPE_VLAN:
3239 			vlan = (struct rte_vlan_hdr *)&buf[temp_size];
3240 			if (!eth)
3241 				return rte_flow_error_set(error, EINVAL,
3242 						RTE_FLOW_ERROR_TYPE_ACTION,
3243 						(void *)items->type,
3244 						"eth header not found");
3245 			if (!eth->ether_type)
3246 				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
3247 			break;
3248 		case RTE_FLOW_ITEM_TYPE_IPV4:
3249 			ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
3250 			if (!vlan && !eth)
3251 				return rte_flow_error_set(error, EINVAL,
3252 						RTE_FLOW_ERROR_TYPE_ACTION,
3253 						(void *)items->type,
3254 						"neither eth nor vlan"
3255 						" header found");
3256 			if (vlan && !vlan->eth_proto)
3257 				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3258 			else if (eth && !eth->ether_type)
3259 				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3260 			if (!ipv4->version_ihl)
3261 				ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
3262 						    MLX5_ENCAP_IPV4_IHL_MIN;
3263 			if (!ipv4->time_to_live)
3264 				ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
3265 			break;
3266 		case RTE_FLOW_ITEM_TYPE_IPV6:
3267 			ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
3268 			if (!vlan && !eth)
3269 				return rte_flow_error_set(error, EINVAL,
3270 						RTE_FLOW_ERROR_TYPE_ACTION,
3271 						(void *)items->type,
3272 						"neither eth nor vlan"
3273 						" header found");
3274 			if (vlan && !vlan->eth_proto)
3275 				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3276 			else if (eth && !eth->ether_type)
3277 				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3278 			if (!ipv6->vtc_flow)
3279 				ipv6->vtc_flow =
3280 					RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
3281 			if (!ipv6->hop_limits)
3282 				ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
3283 			break;
3284 		case RTE_FLOW_ITEM_TYPE_UDP:
3285 			udp = (struct rte_udp_hdr *)&buf[temp_size];
3286 			if (!ipv4 && !ipv6)
3287 				return rte_flow_error_set(error, EINVAL,
3288 						RTE_FLOW_ERROR_TYPE_ACTION,
3289 						(void *)items->type,
3290 						"ip header not found");
3291 			if (ipv4 && !ipv4->next_proto_id)
3292 				ipv4->next_proto_id = IPPROTO_UDP;
3293 			else if (ipv6 && !ipv6->proto)
3294 				ipv6->proto = IPPROTO_UDP;
3295 			break;
3296 		case RTE_FLOW_ITEM_TYPE_VXLAN:
3297 			vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
3298 			if (!udp)
3299 				return rte_flow_error_set(error, EINVAL,
3300 						RTE_FLOW_ERROR_TYPE_ACTION,
3301 						(void *)items->type,
3302 						"udp header not found");
3303 			if (!udp->dst_port)
3304 				udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
3305 			if (!vxlan->vx_flags)
3306 				vxlan->vx_flags =
3307 					RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
3308 			break;
3309 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3310 			vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
3311 			if (!udp)
3312 				return rte_flow_error_set(error, EINVAL,
3313 						RTE_FLOW_ERROR_TYPE_ACTION,
3314 						(void *)items->type,
3315 						"udp header not found");
3316 			if (!vxlan_gpe->proto)
3317 				return rte_flow_error_set(error, EINVAL,
3318 						RTE_FLOW_ERROR_TYPE_ACTION,
3319 						(void *)items->type,
3320 						"next protocol not found");
3321 			if (!udp->dst_port)
3322 				udp->dst_port =
3323 					RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
3324 			if (!vxlan_gpe->vx_flags)
3325 				vxlan_gpe->vx_flags =
3326 						MLX5_ENCAP_VXLAN_GPE_FLAGS;
3327 			break;
3328 		case RTE_FLOW_ITEM_TYPE_GRE:
3329 		case RTE_FLOW_ITEM_TYPE_NVGRE:
3330 			gre = (struct rte_gre_hdr *)&buf[temp_size];
3331 			if (!gre->proto)
3332 				return rte_flow_error_set(error, EINVAL,
3333 						RTE_FLOW_ERROR_TYPE_ACTION,
3334 						(void *)items->type,
3335 						"next protocol not found");
3336 			if (!ipv4 && !ipv6)
3337 				return rte_flow_error_set(error, EINVAL,
3338 						RTE_FLOW_ERROR_TYPE_ACTION,
3339 						(void *)items->type,
3340 						"ip header not found");
3341 			if (ipv4 && !ipv4->next_proto_id)
3342 				ipv4->next_proto_id = IPPROTO_GRE;
3343 			else if (ipv6 && !ipv6->proto)
3344 				ipv6->proto = IPPROTO_GRE;
3345 			break;
3346 		case RTE_FLOW_ITEM_TYPE_VOID:
3347 			break;
3348 		default:
3349 			return rte_flow_error_set(error, EINVAL,
3350 						  RTE_FLOW_ERROR_TYPE_ACTION,
3351 						  (void *)items->type,
3352 						  "unsupported item type");
3353 			break;
3354 		}
3355 		temp_size += len;
3356 	}
3357 	*size = temp_size;
3358 	return 0;
3359 }
3360 
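/*
 * Illustrative sketch (not part of the driver): a minimal
 * ETH/IPv4/UDP/VXLAN item chain that flow_dv_convert_encap_data() would
 * serialize into the raw reformat buffer. All addresses and the VNI are
 * hypothetical placeholders; fields left zero (ether_type, next
 * protocol, UDP destination port, VXLAN flags) are filled in with the
 * defaults applied above. The guard macro is never defined.
 */
#ifdef MLX5_FLOW_DV_EDITOR_EXAMPLES
static const struct rte_flow_item_eth ex_eth = {
	.dst.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	.src.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 },
};
static const struct rte_flow_item_ipv4 ex_ipv4 = {
	.hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
	.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 2)),
};
static const struct rte_flow_item_udp ex_udp = { .hdr = { 0 } };
static const struct rte_flow_item_vxlan ex_vxlan = {
	.vni = { 0x00, 0x12, 0x34 }, /* Hypothetical VNI 0x001234. */
};
static const struct rte_flow_item ex_encap_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &ex_eth },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ex_ipv4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &ex_udp },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &ex_vxlan },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif /* MLX5_FLOW_DV_EDITOR_EXAMPLES */
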
3361 static int
3362 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
3363 {
3364 	struct rte_ether_hdr *eth = NULL;
3365 	struct rte_vlan_hdr *vlan = NULL;
3366 	struct rte_ipv6_hdr *ipv6 = NULL;
3367 	struct rte_udp_hdr *udp = NULL;
3368 	char *next_hdr;
3369 	uint16_t proto;
3370 
3371 	eth = (struct rte_ether_hdr *)data;
3372 	next_hdr = (char *)(eth + 1);
3373 	proto = RTE_BE16(eth->ether_type);
3374 
3375 	/* Skip VLAN headers. */
3376 	while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
3377 		vlan = (struct rte_vlan_hdr *)next_hdr;
3378 		proto = RTE_BE16(vlan->eth_proto);
3379 		next_hdr += sizeof(struct rte_vlan_hdr);
3380 	}
3381 
3382 	/* HW calculates the IPv4 checksum; no need to proceed. */
3383 	if (proto == RTE_ETHER_TYPE_IPV4)
3384 		return 0;
3385 
3386 	/* Non-IPv4/IPv6 headers are not supported. */
3387 	if (proto != RTE_ETHER_TYPE_IPV6) {
3388 		return rte_flow_error_set(error, ENOTSUP,
3389 					  RTE_FLOW_ERROR_TYPE_ACTION,
3390 					  NULL, "Cannot offload non IPv4/IPv6");
3391 	}
3392 
3393 	ipv6 = (struct rte_ipv6_hdr *)next_hdr;
3394 
3395 	/* Ignore non-UDP. */
3396 	if (ipv6->proto != IPPROTO_UDP)
3397 		return 0;
3398 
3399 	udp = (struct rte_udp_hdr *)(ipv6 + 1);
3400 	udp->dgram_cksum = 0;
3401 
3402 	return 0;
3403 }
3404 
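/*
 * Editor's note: the function above relies on the NIC computing the
 * IPv4 header checksum, while IPv6 has no header checksum at all; for
 * IPv6 tunnels the outer UDP checksum is zeroed instead, the scheme
 * permitted for tunneled traffic by RFC 6935/6936.
 */
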
3405 /**
3406  * Convert L2 encap action to DV specification.
3407  *
3408  * @param[in] dev
3409  *   Pointer to rte_eth_dev structure.
3410  * @param[in] action
3411  *   Pointer to action structure.
3412  * @param[in, out] dev_flow
3413  *   Pointer to the mlx5_flow.
3414  * @param[in] transfer
3415  *   Mark if the flow is E-Switch flow.
3416  * @param[out] error
3417  *   Pointer to the error structure.
3418  *
3419  * @return
3420  *   0 on success, a negative errno value otherwise and rte_errno is set.
3421  */
3422 static int
3423 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
3424 			       const struct rte_flow_action *action,
3425 			       struct mlx5_flow *dev_flow,
3426 			       uint8_t transfer,
3427 			       struct rte_flow_error *error)
3428 {
3429 	const struct rte_flow_item *encap_data;
3430 	const struct rte_flow_action_raw_encap *raw_encap_data;
3431 	struct mlx5_flow_dv_encap_decap_resource res = {
3432 		.reformat_type =
3433 			MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
3434 		.ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3435 				      MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
3436 	};
3437 
3438 	if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3439 		raw_encap_data =
3440 			(const struct rte_flow_action_raw_encap *)action->conf;
3441 		res.size = raw_encap_data->size;
3442 		memcpy(res.buf, raw_encap_data->data, res.size);
3443 	} else {
3444 		if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
3445 			encap_data =
3446 				((const struct rte_flow_action_vxlan_encap *)
3447 						action->conf)->definition;
3448 		else
3449 			encap_data =
3450 				((const struct rte_flow_action_nvgre_encap *)
3451 						action->conf)->definition;
3452 		if (flow_dv_convert_encap_data(encap_data, res.buf,
3453 					       &res.size, error))
3454 			return -rte_errno;
3455 	}
3456 	if (flow_dv_zero_encap_udp_csum(res.buf, error))
3457 		return -rte_errno;
3458 	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3459 		return rte_flow_error_set(error, EINVAL,
3460 					  RTE_FLOW_ERROR_TYPE_ACTION,
3461 					  NULL, "can't create L2 encap action");
3462 	return 0;
3463 }
3464 
3465 /**
3466  * Convert L2 decap action to DV specification.
3467  *
3468  * @param[in] dev
3469  *   Pointer to rte_eth_dev structure.
3470  * @param[in, out] dev_flow
3471  *   Pointer to the mlx5_flow.
3472  * @param[in] transfer
3473  *   Mark if the flow is E-Switch flow.
3474  * @param[out] error
3475  *   Pointer to the error structure.
3476  *
3477  * @return
3478  *   0 on success, a negative errno value otherwise and rte_errno is set.
3479  */
3480 static int
3481 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
3482 			       struct mlx5_flow *dev_flow,
3483 			       uint8_t transfer,
3484 			       struct rte_flow_error *error)
3485 {
3486 	struct mlx5_flow_dv_encap_decap_resource res = {
3487 		.size = 0,
3488 		.reformat_type =
3489 			MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
3490 		.ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3491 				      MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
3492 	};
3493 
3494 	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3495 		return rte_flow_error_set(error, EINVAL,
3496 					  RTE_FLOW_ERROR_TYPE_ACTION,
3497 					  NULL, "can't create L2 decap action");
3498 	return 0;
3499 }
3500 
3501 /**
3502  * Convert raw decap/encap (L3 tunnel) action to DV specification.
3503  *
3504  * @param[in] dev
3505  *   Pointer to rte_eth_dev structure.
3506  * @param[in] action
3507  *   Pointer to action structure.
3508  * @param[in, out] dev_flow
3509  *   Pointer to the mlx5_flow.
3510  * @param[in] attr
3511  *   Pointer to the flow attributes.
3512  * @param[out] error
3513  *   Pointer to the error structure.
3514  *
3515  * @return
3516  *   0 on success, a negative errno value otherwise and rte_errno is set.
3517  */
3518 static int
3519 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
3520 				const struct rte_flow_action *action,
3521 				struct mlx5_flow *dev_flow,
3522 				const struct rte_flow_attr *attr,
3523 				struct rte_flow_error *error)
3524 {
3525 	const struct rte_flow_action_raw_encap *encap_data;
3526 	struct mlx5_flow_dv_encap_decap_resource res;
3527 
3528 	memset(&res, 0, sizeof(res));
3529 	encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
3530 	res.size = encap_data->size;
3531 	memcpy(res.buf, encap_data->data, res.size);
3532 	res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
3533 		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
3534 		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
3535 	if (attr->transfer)
3536 		res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3537 	else
3538 		res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3539 					     MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3540 	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3541 		return rte_flow_error_set(error, EINVAL,
3542 					  RTE_FLOW_ERROR_TYPE_ACTION,
3543 					  NULL, "can't create encap action");
3544 	return 0;
3545 }
3546 
3547 /**
3548  * Create action push VLAN.
3549  *
3550  * @param[in] dev
3551  *   Pointer to rte_eth_dev structure.
3552  * @param[in] attr
3553  *   Pointer to the flow attributes.
3554  * @param[in] vlan
3555  *   Pointer to the vlan to push to the Ethernet header.
3556  * @param[in, out] dev_flow
3557  *   Pointer to the mlx5_flow.
3558  * @param[out] error
3559  *   Pointer to the error structure.
3560  *
3561  * @return
3562  *   0 on success, a negative errno value otherwise and rte_errno is set.
3563  */
3564 static int
3565 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3566 				const struct rte_flow_attr *attr,
3567 				const struct rte_vlan_hdr *vlan,
3568 				struct mlx5_flow *dev_flow,
3569 				struct rte_flow_error *error)
3570 {
3571 	struct mlx5_flow_dv_push_vlan_action_resource res;
3572 
3573 	memset(&res, 0, sizeof(res));
3574 	res.vlan_tag =
3575 		rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3576 				 vlan->vlan_tci);
3577 	if (attr->transfer)
3578 		res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3579 	else
3580 		res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3581 					     MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3582 	return flow_dv_push_vlan_action_resource_register
3583 					    (dev, &res, dev_flow, error);
3584 }
3585 
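/*
 * Editor's note, worked example of the vlan_tag packing above: for a
 * TPID of 0x8100 and a TCI of 0x0065 (PCP 0, DEI 0, VID 101) the
 * resource holds rte_cpu_to_be_32(0x8100 << 16 | 0x0065), i.e. the
 * bytes 81 00 00 65 in network order, exactly as pushed on the wire.
 */
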
3586 static int fdb_mirror; /* Set by sample validation, read by jump/modify. */
3587 
3588 /**
3589  * Validate the modify-header actions.
3590  *
3591  * @param[in] action_flags
3592  *   Holds the actions detected until now.
3593  * @param[in] action
3594  *   Pointer to the modify action.
3595  * @param[out] error
3596  *   Pointer to error structure.
3597  *
3598  * @return
3599  *   0 on success, a negative errno value otherwise and rte_errno is set.
3600  */
3601 static int
3602 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3603 				   const struct rte_flow_action *action,
3604 				   struct rte_flow_error *error)
3605 {
3606 	if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3607 		return rte_flow_error_set(error, EINVAL,
3608 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3609 					  NULL, "action configuration not set");
3610 	if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3611 		return rte_flow_error_set(error, EINVAL,
3612 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3613 					  "can't have encap action before"
3614 					  " modify action");
3615 	if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
3616 		return rte_flow_error_set(error, EINVAL,
3617 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3618 					  "can't support sample action before"
3619 					  " modify action for E-Switch"
3620 					  " mirroring");
3621 	return 0;
3622 }
3623 
3624 /**
3625  * Validate the modify-header MAC address actions.
3626  *
3627  * @param[in] action_flags
3628  *   Holds the actions detected until now.
3629  * @param[in] action
3630  *   Pointer to the modify action.
3631  * @param[in] item_flags
3632  *   Holds the items detected.
3633  * @param[out] error
3634  *   Pointer to error structure.
3635  *
3636  * @return
3637  *   0 on success, a negative errno value otherwise and rte_errno is set.
3638  */
3639 static int
3640 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3641 				   const struct rte_flow_action *action,
3642 				   const uint64_t item_flags,
3643 				   struct rte_flow_error *error)
3644 {
3645 	int ret = 0;
3646 
3647 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3648 	if (!ret) {
3649 		if (!(item_flags & MLX5_FLOW_LAYER_L2))
3650 			return rte_flow_error_set(error, EINVAL,
3651 						  RTE_FLOW_ERROR_TYPE_ACTION,
3652 						  NULL,
3653 						  "no L2 item in pattern");
3654 	}
3655 	return ret;
3656 }
3657 
3658 /**
3659  * Validate the modify-header IPv4 address actions.
3660  *
3661  * @param[in] action_flags
3662  *   Holds the actions detected until now.
3663  * @param[in] action
3664  *   Pointer to the modify action.
3665  * @param[in] item_flags
3666  *   Holds the items detected.
3667  * @param[out] error
3668  *   Pointer to error structure.
3669  *
3670  * @return
3671  *   0 on success, a negative errno value otherwise and rte_errno is set.
3672  */
3673 static int
3674 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3675 				    const struct rte_flow_action *action,
3676 				    const uint64_t item_flags,
3677 				    struct rte_flow_error *error)
3678 {
3679 	int ret = 0;
3680 	uint64_t layer;
3681 
3682 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3683 	if (!ret) {
3684 		layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3685 				 MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3686 				 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3687 		if (!(item_flags & layer))
3688 			return rte_flow_error_set(error, EINVAL,
3689 						  RTE_FLOW_ERROR_TYPE_ACTION,
3690 						  NULL,
3691 						  "no ipv4 item in pattern");
3692 	}
3693 	return ret;
3694 }
3695 
3696 /**
3697  * Validate the modify-header IPv6 address actions.
3698  *
3699  * @param[in] action_flags
3700  *   Holds the actions detected until now.
3701  * @param[in] action
3702  *   Pointer to the modify action.
3703  * @param[in] item_flags
3704  *   Holds the items detected.
3705  * @param[out] error
3706  *   Pointer to error structure.
3707  *
3708  * @return
3709  *   0 on success, a negative errno value otherwise and rte_errno is set.
3710  */
3711 static int
3712 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3713 				    const struct rte_flow_action *action,
3714 				    const uint64_t item_flags,
3715 				    struct rte_flow_error *error)
3716 {
3717 	int ret = 0;
3718 	uint64_t layer;
3719 
3720 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3721 	if (!ret) {
3722 		layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3723 				 MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3724 				 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3725 		if (!(item_flags & layer))
3726 			return rte_flow_error_set(error, EINVAL,
3727 						  RTE_FLOW_ERROR_TYPE_ACTION,
3728 						  NULL,
3729 						  "no ipv6 item in pattern");
3730 	}
3731 	return ret;
3732 }
3733 
3734 /**
3735  * Validate the modify-header TP actions.
3736  *
3737  * @param[in] action_flags
3738  *   Holds the actions detected until now.
3739  * @param[in] action
3740  *   Pointer to the modify action.
3741  * @param[in] item_flags
3742  *   Holds the items detected.
3743  * @param[out] error
3744  *   Pointer to error structure.
3745  *
3746  * @return
3747  *   0 on success, a negative errno value otherwise and rte_errno is set.
3748  */
3749 static int
3750 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3751 				  const struct rte_flow_action *action,
3752 				  const uint64_t item_flags,
3753 				  struct rte_flow_error *error)
3754 {
3755 	int ret = 0;
3756 	uint64_t layer;
3757 
3758 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3759 	if (!ret) {
3760 		layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3761 				 MLX5_FLOW_LAYER_INNER_L4 :
3762 				 MLX5_FLOW_LAYER_OUTER_L4;
3763 		if (!(item_flags & layer))
3764 			return rte_flow_error_set(error, EINVAL,
3765 						  RTE_FLOW_ERROR_TYPE_ACTION,
3766 						  NULL, "no transport layer "
3767 						  "in pattern");
3768 	}
3769 	return ret;
3770 }
3771 
3772 /**
3773  * Validate the modify-header actions of increment/decrement
3774  * TCP Sequence-number.
3775  *
3776  * @param[in] action_flags
3777  *   Holds the actions detected until now.
3778  * @param[in] action
3779  *   Pointer to the modify action.
3780  * @param[in] item_flags
3781  *   Holds the items detected.
3782  * @param[out] error
3783  *   Pointer to error structure.
3784  *
3785  * @return
3786  *   0 on success, a negative errno value otherwise and rte_errno is set.
3787  */
3788 static int
3789 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3790 				       const struct rte_flow_action *action,
3791 				       const uint64_t item_flags,
3792 				       struct rte_flow_error *error)
3793 {
3794 	int ret = 0;
3795 	uint64_t layer;
3796 
3797 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3798 	if (!ret) {
3799 		layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3800 				 MLX5_FLOW_LAYER_INNER_L4_TCP :
3801 				 MLX5_FLOW_LAYER_OUTER_L4_TCP;
3802 		if (!(item_flags & layer))
3803 			return rte_flow_error_set(error, EINVAL,
3804 						  RTE_FLOW_ERROR_TYPE_ACTION,
3805 						  NULL, "no TCP item in"
3806 						  " pattern");
3807 		if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3808 			(action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3809 		    (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3810 			(action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3811 			return rte_flow_error_set(error, EINVAL,
3812 						  RTE_FLOW_ERROR_TYPE_ACTION,
3813 						  NULL,
3814 						  "cannot decrease and increase"
3815 						  " TCP sequence number"
3816 						  " at the same time");
3817 	}
3818 	return ret;
3819 }
3820 
3821 /**
3822  * Validate the modify-header actions of increment/decrement
3823  * TCP Acknowledgment number.
3824  *
3825  * @param[in] action_flags
3826  *   Holds the actions detected until now.
3827  * @param[in] action
3828  *   Pointer to the modify action.
3829  * @param[in] item_flags
3830  *   Holds the items detected.
3831  * @param[out] error
3832  *   Pointer to error structure.
3833  *
3834  * @return
3835  *   0 on success, a negative errno value otherwise and rte_errno is set.
3836  */
3837 static int
3838 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3839 				       const struct rte_flow_action *action,
3840 				       const uint64_t item_flags,
3841 				       struct rte_flow_error *error)
3842 {
3843 	int ret = 0;
3844 	uint64_t layer;
3845 
3846 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3847 	if (!ret) {
3848 		layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3849 				 MLX5_FLOW_LAYER_INNER_L4_TCP :
3850 				 MLX5_FLOW_LAYER_OUTER_L4_TCP;
3851 		if (!(item_flags & layer))
3852 			return rte_flow_error_set(error, EINVAL,
3853 						  RTE_FLOW_ERROR_TYPE_ACTION,
3854 						  NULL, "no TCP item in"
3855 						  " pattern");
3856 		if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3857 			(action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3858 		    (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3859 			(action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3860 			return rte_flow_error_set(error, EINVAL,
3861 						  RTE_FLOW_ERROR_TYPE_ACTION,
3862 						  NULL,
3863 						  "cannot decrease and increase"
3864 						  " TCP acknowledgment number"
3865 						  " at the same time");
3866 	}
3867 	return ret;
3868 }
3869 
3870 /**
3871  * Validate the modify-header TTL actions.
3872  *
3873  * @param[in] action_flags
3874  *   Holds the actions detected until now.
3875  * @param[in] action
3876  *   Pointer to the modify action.
3877  * @param[in] item_flags
3878  *   Holds the items detected.
3879  * @param[out] error
3880  *   Pointer to error structure.
3881  *
3882  * @return
3883  *   0 on success, a negative errno value otherwise and rte_errno is set.
3884  */
3885 static int
3886 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3887 				   const struct rte_flow_action *action,
3888 				   const uint64_t item_flags,
3889 				   struct rte_flow_error *error)
3890 {
3891 	int ret = 0;
3892 	uint64_t layer;
3893 
3894 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3895 	if (!ret) {
3896 		layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3897 				 MLX5_FLOW_LAYER_INNER_L3 :
3898 				 MLX5_FLOW_LAYER_OUTER_L3;
3899 		if (!(item_flags & layer))
3900 			return rte_flow_error_set(error, EINVAL,
3901 						  RTE_FLOW_ERROR_TYPE_ACTION,
3902 						  NULL,
3903 						  "no IP protocol in pattern");
3904 	}
3905 	return ret;
3906 }
3907 
3908 /**
3909  * Validate jump action.
3910  *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] tunnel
 *   Tunnel offload context of the flow, NULL if none.
3911  * @param[in] action
3912  *   Pointer to the jump action.
3913  * @param[in] action_flags
3914  *   Holds the actions detected until now.
3915  * @param[in] attributes
3916  *   Pointer to flow attributes.
3917  * @param[in] external
3918  *   True if the flow rule was created by a request external to the PMD.
3919  * @param[out] error
3920  *   Pointer to error structure.
3921  *
3922  * @return
3923  *   0 on success, a negative errno value otherwise and rte_errno is set.
3924  */
3925 static int
3926 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
3927 			     const struct mlx5_flow_tunnel *tunnel,
3928 			     const struct rte_flow_action *action,
3929 			     uint64_t action_flags,
3930 			     const struct rte_flow_attr *attributes,
3931 			     bool external, struct rte_flow_error *error)
3932 {
3933 	uint32_t target_group, table;
3934 	int ret = 0;
3935 	struct flow_grp_info grp_info = {
3936 		.external = !!external,
3937 		.transfer = !!attributes->transfer,
3938 		.fdb_def_rule = 1,
3939 		.std_tbl_fix = 0
3940 	};
3941 	if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3942 			    MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3943 		return rte_flow_error_set(error, EINVAL,
3944 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3945 					  "can't have 2 fate actions in"
3946 					  " same flow");
3947 	if (action_flags & MLX5_FLOW_ACTION_METER)
3948 		return rte_flow_error_set(error, ENOTSUP,
3949 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3950 					  "jump with meter not support");
3951 	if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
3952 		return rte_flow_error_set(error, EINVAL,
3953 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3954 					  "E-Switch mirroring can't support"
3955 					  " Sample action and jump action in"
3956 					  " same flow now");
3957 	if (!action->conf)
3958 		return rte_flow_error_set(error, EINVAL,
3959 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3960 					  NULL, "action configuration not set");
3961 	target_group =
3962 		((const struct rte_flow_action_jump *)action->conf)->group;
3963 	ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
3964 				       &grp_info, error);
3965 	if (ret)
3966 		return ret;
3967 	if (attributes->group == target_group &&
3968 	    !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
3969 			      MLX5_FLOW_ACTION_TUNNEL_MATCH)))
3970 		return rte_flow_error_set(error, EINVAL,
3971 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3972 					  "target group must be other than"
3973 					  " the current flow group");
3974 	return 0;
3975 }
3976 
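/*
 * Illustrative sketch (not part of the driver): the public-API shape of
 * a jump action this validator accepts, a single fate action targeting
 * a group other than the flow's own. The group number is a hypothetical
 * placeholder and the guard macro is never defined.
 */
#ifdef MLX5_FLOW_DV_EDITOR_EXAMPLES
static const struct rte_flow_action_jump ex_jump_conf = { .group = 1 };
static const struct rte_flow_action ex_jump_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &ex_jump_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif /* MLX5_FLOW_DV_EDITOR_EXAMPLES */
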
3977 /**
3978  * Validate the port_id action.
3979  *
3980  * @param[in] dev
3981  *   Pointer to rte_eth_dev structure.
3982  * @param[in] action_flags
3983  *   Bit-fields that holds the actions detected until now.
3984  * @param[in] action
3985  *   Port_id RTE action structure.
3986  * @param[in] attr
3987  *   Attributes of flow that includes this action.
3988  * @param[out] error
3989  *   Pointer to error structure.
3990  *
3991  * @return
3992  *   0 on success, a negative errno value otherwise and rte_errno is set.
3993  */
3994 static int
3995 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
3996 				uint64_t action_flags,
3997 				const struct rte_flow_action *action,
3998 				const struct rte_flow_attr *attr,
3999 				struct rte_flow_error *error)
4000 {
4001 	const struct rte_flow_action_port_id *port_id;
4002 	struct mlx5_priv *act_priv;
4003 	struct mlx5_priv *dev_priv;
4004 	uint16_t port;
4005 
4006 	if (!attr->transfer)
4007 		return rte_flow_error_set(error, ENOTSUP,
4008 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4009 					  NULL,
4010 					  "port id action is valid in transfer"
4011 					  " mode only");
4012 	if (!action || !action->conf)
4013 		return rte_flow_error_set(error, ENOTSUP,
4014 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4015 					  NULL,
4016 					  "port id action parameters must be"
4017 					  " specified");
4018 	if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4019 			    MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4020 		return rte_flow_error_set(error, EINVAL,
4021 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4022 					  "can have only one fate actions in"
4023 					  " a flow");
4024 	dev_priv = mlx5_dev_to_eswitch_info(dev);
4025 	if (!dev_priv)
4026 		return rte_flow_error_set(error, rte_errno,
4027 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4028 					  NULL,
4029 					  "failed to obtain E-Switch info");
4030 	port_id = action->conf;
4031 	port = port_id->original ? dev->data->port_id : port_id->id;
4032 	act_priv = mlx5_port_to_eswitch_info(port, false);
4033 	if (!act_priv)
4034 		return rte_flow_error_set
4035 				(error, rte_errno,
4036 				 RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4037 				 "failed to obtain E-Switch port id for port");
4038 	if (act_priv->domain_id != dev_priv->domain_id)
4039 		return rte_flow_error_set
4040 				(error, EINVAL,
4041 				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4042 				 "port does not belong to"
4043 				 " E-Switch being configured");
4044 	return 0;
4045 }
4046 
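/*
 * Illustrative sketch (not part of the driver): a port_id fate action
 * as this validator accepts it, transfer mode only, redirecting to
 * another port of the same E-Switch domain. The port number is a
 * hypothetical placeholder and the guard macro is never defined.
 */
#ifdef MLX5_FLOW_DV_EDITOR_EXAMPLES
static const struct rte_flow_attr ex_transfer_attr = { .transfer = 1 };
static const struct rte_flow_action_port_id ex_port_conf = { .id = 1 };
static const struct rte_flow_action ex_port_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &ex_port_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif /* MLX5_FLOW_DV_EDITOR_EXAMPLES */
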
4047 /**
4048  * Get the maximum number of modify header actions.
4049  *
4050  * @param dev
4051  *   Pointer to rte_eth_dev structure.
4052  * @param flags
4053  *   Flags bits to check if root level.
4054  *
4055  * @return
4056  *   Max number of modify header actions device can support.
4057  */
4058 static inline unsigned int
4059 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4060 			      uint64_t flags)
4061 {
4062 	/*
4063 	 * There's no way to directly query the max capacity from FW.
4064 	 * The maximal value on the root table is assumed to be supported.
4065 	 */
4066 	if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4067 		return MLX5_MAX_MODIFY_NUM;
4068 	else
4069 		return MLX5_ROOT_TBL_MODIFY_NUM;
4070 }
4071 
4072 /**
4073  * Validate the meter action.
4074  *
4075  * @param[in] dev
4076  *   Pointer to rte_eth_dev structure.
4077  * @param[in] action_flags
4078  *   Bit-fields that holds the actions detected until now.
4079  * @param[in] action
4080  *   Pointer to the meter action.
4081  * @param[in] attr
4082  *   Attributes of flow that includes this action.
4083  * @param[out] error
4084  *   Pointer to error structure.
4085  *
4086  * @return
4087  *   0 on success, a negative errno value otherwise and rte_errno is set.
4088  */
4089 static int
4090 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
4091 				uint64_t action_flags,
4092 				const struct rte_flow_action *action,
4093 				const struct rte_flow_attr *attr,
4094 				struct rte_flow_error *error)
4095 {
4096 	struct mlx5_priv *priv = dev->data->dev_private;
4097 	const struct rte_flow_action_meter *am = action->conf;
4098 	struct mlx5_flow_meter *fm;
4099 
4100 	if (!am)
4101 		return rte_flow_error_set(error, EINVAL,
4102 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4103 					  "meter action conf is NULL");
4104 
4105 	if (action_flags & MLX5_FLOW_ACTION_METER)
4106 		return rte_flow_error_set(error, ENOTSUP,
4107 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4108 					  "meter chaining not support");
4109 	if (action_flags & MLX5_FLOW_ACTION_JUMP)
4110 		return rte_flow_error_set(error, ENOTSUP,
4111 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4112 					  "meter with jump not support");
4113 	if (!priv->mtr_en)
4114 		return rte_flow_error_set(error, ENOTSUP,
4115 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4116 					  NULL,
4117 					  "meter action not supported");
4118 	fm = mlx5_flow_meter_find(priv, am->mtr_id);
4119 	if (!fm)
4120 		return rte_flow_error_set(error, EINVAL,
4121 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4122 					  "Meter not found");
4123 	if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
4124 	      (!fm->ingress && !attr->ingress && attr->egress) ||
4125 	      (!fm->egress && !attr->egress && attr->ingress))))
4126 		return rte_flow_error_set(error, EINVAL,
4127 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4128 					  "Flow attributes are either invalid "
4129 					  "or have a conflict with current "
4130 					  "meter attributes");
4131 	return 0;
4132 }
4133 
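/*
 * Illustrative sketch (not part of the driver): a meter action as this
 * validator expects it, one meter per flow and no jump combined with
 * it. The meter with this mtr_id must have been created beforehand via
 * the rte_mtr API; the ID is a hypothetical placeholder and the guard
 * macro is never defined.
 */
#ifdef MLX5_FLOW_DV_EDITOR_EXAMPLES
static const struct rte_flow_action_meter ex_meter_conf = { .mtr_id = 1 };
static const struct rte_flow_action ex_meter_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &ex_meter_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif /* MLX5_FLOW_DV_EDITOR_EXAMPLES */
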
4134 /**
4135  * Validate the age action.
4136  *
4137  * @param[in] action_flags
4138  *   Holds the actions detected until now.
4139  * @param[in] action
4140  *   Pointer to the age action.
4141  * @param[in] dev
4142  *   Pointer to the Ethernet device structure.
4143  * @param[out] error
4144  *   Pointer to error structure.
4145  *
4146  * @return
4147  *   0 on success, a negative errno value otherwise and rte_errno is set.
4148  */
4149 static int
4150 flow_dv_validate_action_age(uint64_t action_flags,
4151 			    const struct rte_flow_action *action,
4152 			    struct rte_eth_dev *dev,
4153 			    struct rte_flow_error *error)
4154 {
4155 	struct mlx5_priv *priv = dev->data->dev_private;
4156 	const struct rte_flow_action_age *age = action->conf;
4157 
4158 	if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
4159 	    !priv->sh->aso_age_mng))
4160 		return rte_flow_error_set(error, ENOTSUP,
4161 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4162 					  NULL,
4163 					  "age action not supported");
4164 	if (!(action->conf))
4165 		return rte_flow_error_set(error, EINVAL,
4166 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
4167 					  "configuration cannot be null");
4168 	if (!(age->timeout))
4169 		return rte_flow_error_set(error, EINVAL,
4170 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
4171 					  "invalid timeout value 0");
4172 	if (action_flags & MLX5_FLOW_ACTION_AGE)
4173 		return rte_flow_error_set(error, EINVAL,
4174 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4175 					  "duplicate age actions set");
4176 	return 0;
4177 }
4178 
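/*
 * Illustrative sketch (not part of the driver): an age action with the
 * non-zero timeout required above (the value is in seconds and is a
 * hypothetical placeholder); context may stay NULL. The guard macro is
 * never defined.
 */
#ifdef MLX5_FLOW_DV_EDITOR_EXAMPLES
static const struct rte_flow_action_age ex_age_conf = { .timeout = 10 };
static const struct rte_flow_action ex_age_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &ex_age_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif /* MLX5_FLOW_DV_EDITOR_EXAMPLES */
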
4179 /**
4180  * Validate the modify-header IPv4 DSCP actions.
4181  *
4182  * @param[in] action_flags
4183  *   Holds the actions detected until now.
4184  * @param[in] action
4185  *   Pointer to the modify action.
4186  * @param[in] item_flags
4187  *   Holds the items detected.
4188  * @param[out] error
4189  *   Pointer to error structure.
4190  *
4191  * @return
4192  *   0 on success, a negative errno value otherwise and rte_errno is set.
4193  */
4194 static int
4195 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
4196 					 const struct rte_flow_action *action,
4197 					 const uint64_t item_flags,
4198 					 struct rte_flow_error *error)
4199 {
4200 	int ret = 0;
4201 
4202 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4203 	if (!ret) {
4204 		if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
4205 			return rte_flow_error_set(error, EINVAL,
4206 						  RTE_FLOW_ERROR_TYPE_ACTION,
4207 						  NULL,
4208 						  "no ipv4 item in pattern");
4209 	}
4210 	return ret;
4211 }
4212 
4213 /**
4214  * Validate the modify-header IPv6 DSCP actions.
4215  *
4216  * @param[in] action_flags
4217  *   Holds the actions detected until now.
4218  * @param[in] action
4219  *   Pointer to the modify action.
4220  * @param[in] item_flags
4221  *   Holds the items detected.
4222  * @param[out] error
4223  *   Pointer to error structure.
4224  *
4225  * @return
4226  *   0 on success, a negative errno value otherwise and rte_errno is set.
4227  */
4228 static int
4229 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
4230 					 const struct rte_flow_action *action,
4231 					 const uint64_t item_flags,
4232 					 struct rte_flow_error *error)
4233 {
4234 	int ret = 0;
4235 
4236 	ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4237 	if (!ret) {
4238 		if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
4239 			return rte_flow_error_set(error, EINVAL,
4240 						  RTE_FLOW_ERROR_TYPE_ACTION,
4241 						  NULL,
4242 						  "no ipv6 item in pattern");
4243 	}
4244 	return ret;
4245 }
4246 
4247 /**
4248  * Match modify-header resource.
4249  *
4250  * @param list
4251  *   Pointer to the hash list.
4252  * @param entry
4253  *   Pointer to exist resource entry object.
4254  * @param key
4255  *   Key of the new entry.
4256  * @param ctx
4257  *   Pointer to new modify-header resource.
4258  *
4259  * @return
4260  *   0 on matching, non-zero otherwise.
4261  */
4262 int
4263 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
4264 			struct mlx5_hlist_entry *entry,
4265 			uint64_t key __rte_unused, void *cb_ctx)
4266 {
4267 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4268 	struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4269 	struct mlx5_flow_dv_modify_hdr_resource *resource =
4270 			container_of(entry, typeof(*resource), entry);
4271 	uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4272 
4273 	key_len += ref->actions_num * sizeof(ref->actions[0]);
4274 	return ref->actions_num != resource->actions_num ||
4275 	       memcmp(&ref->ft_type, &resource->ft_type, key_len);
4276 }
4277 
4278 struct mlx5_hlist_entry *
4279 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
4280 			 void *cb_ctx)
4281 {
4282 	struct mlx5_dev_ctx_shared *sh = list->ctx;
4283 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4284 	struct mlx5dv_dr_domain *ns;
4285 	struct mlx5_flow_dv_modify_hdr_resource *entry;
4286 	struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4287 	int ret;
4288 	uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
4289 	uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4290 
4291 	entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
4292 			    SOCKET_ID_ANY);
4293 	if (!entry) {
4294 		rte_flow_error_set(ctx->error, ENOMEM,
4295 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4296 				   "cannot allocate resource memory");
4297 		return NULL;
4298 	}
4299 	rte_memcpy(&entry->ft_type,
4300 		   RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
4301 		   key_len + data_len);
4302 	if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
4303 		ns = sh->fdb_domain;
4304 	else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
4305 		ns = sh->tx_domain;
4306 	else
4307 		ns = sh->rx_domain;
4308 	ret = mlx5_flow_os_create_flow_action_modify_header
4309 					(sh->ctx, ns, entry,
4310 					 data_len, &entry->action);
4311 	if (ret) {
4312 		mlx5_free(entry);
4313 		rte_flow_error_set(ctx->error, ENOMEM,
4314 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4315 				   NULL, "cannot create modification action");
4316 		return NULL;
4317 	}
4318 	return &entry->entry;
4319 }
4320 
4321 /**
4322  * Validate the sample action.
4323  *
4324  * @param[in] action_flags
4325  *   Holds the actions detected until now.
4326  * @param[in] action
4327  *   Pointer to the sample action.
4328  * @param[in] dev
4329  *   Pointer to the Ethernet device structure.
4330  * @param[in] attr
4331  *   Attributes of flow that includes this action.
4332  * @param[out] error
4333  *   Pointer to error structure.
4334  *
4335  * @return
4336  *   0 on success, a negative errno value otherwise and rte_errno is set.
4337  */
4338 static int
4339 flow_dv_validate_action_sample(uint64_t action_flags,
4340 			       const struct rte_flow_action *action,
4341 			       struct rte_eth_dev *dev,
4342 			       const struct rte_flow_attr *attr,
4343 			       struct rte_flow_error *error)
4344 {
4345 	struct mlx5_priv *priv = dev->data->dev_private;
4346 	struct mlx5_dev_config *dev_conf = &priv->config;
4347 	const struct rte_flow_action_sample *sample = action->conf;
4348 	const struct rte_flow_action *act;
4349 	uint64_t sub_action_flags = 0;
4350 	uint16_t queue_index = 0xFFFF;
4351 	int actions_n = 0;
4352 	int ret;
4353 	fdb_mirror = 0;
4354 
4355 	if (!sample)
4356 		return rte_flow_error_set(error, EINVAL,
4357 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
4358 					  "configuration cannot be NULL");
4359 	if (sample->ratio == 0)
4360 		return rte_flow_error_set(error, EINVAL,
4361 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
4362 					  "ratio value starts from 1");
4363 	if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
4364 		return rte_flow_error_set(error, ENOTSUP,
4365 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4366 					  NULL,
4367 					  "sample action not supported");
4368 	if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
4369 		return rte_flow_error_set(error, EINVAL,
4370 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4371 					  "Multiple sample actions not "
4372 					  "supported");
4373 	if (action_flags & MLX5_FLOW_ACTION_METER)
4374 		return rte_flow_error_set(error, EINVAL,
4375 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
4376 					  "wrong action order, meter should "
4377 					  "be after sample action");
4378 	if (action_flags & MLX5_FLOW_ACTION_JUMP)
4379 		return rte_flow_error_set(error, EINVAL,
4380 					  RTE_FLOW_ERROR_TYPE_ACTION, action,
4381 					  "wrong action order, jump should "
4382 					  "be after sample action");
4383 	act = sample->actions;
4384 	for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
4385 		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
4386 			return rte_flow_error_set(error, ENOTSUP,
4387 						  RTE_FLOW_ERROR_TYPE_ACTION,
4388 						  act, "too many actions");
4389 		switch (act->type) {
4390 		case RTE_FLOW_ACTION_TYPE_QUEUE:
4391 			ret = mlx5_flow_validate_action_queue(act,
4392 							      sub_action_flags,
4393 							      dev,
4394 							      attr, error);
4395 			if (ret < 0)
4396 				return ret;
4397 			queue_index = ((const struct rte_flow_action_queue *)
4398 							(act->conf))->index;
4399 			sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
4400 			++actions_n;
4401 			break;
4402 		case RTE_FLOW_ACTION_TYPE_MARK:
4403 			ret = flow_dv_validate_action_mark(dev, act,
4404 							   sub_action_flags,
4405 							   attr, error);
4406 			if (ret < 0)
4407 				return ret;
4408 			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
4409 				sub_action_flags |= MLX5_FLOW_ACTION_MARK |
4410 						MLX5_FLOW_ACTION_MARK_EXT;
4411 			else
4412 				sub_action_flags |= MLX5_FLOW_ACTION_MARK;
4413 			++actions_n;
4414 			break;
4415 		case RTE_FLOW_ACTION_TYPE_COUNT:
4416 			ret = flow_dv_validate_action_count(dev, error);
4417 			if (ret < 0)
4418 				return ret;
4419 			sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
4420 			++actions_n;
4421 			break;
4422 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
4423 			ret = flow_dv_validate_action_port_id(dev,
4424 							      sub_action_flags,
4425 							      act,
4426 							      attr,
4427 							      error);
4428 			if (ret)
4429 				return ret;
4430 			sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
4431 			++actions_n;
4432 			break;
4433 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4434 			ret = flow_dv_validate_action_raw_encap_decap
4435 				(dev, NULL, act->conf, attr, &sub_action_flags,
4436 				 &actions_n, error);
4437 			if (ret < 0)
4438 				return ret;
4439 			++actions_n;
4440 			break;
4441 		default:
4442 			return rte_flow_error_set(error, ENOTSUP,
4443 						  RTE_FLOW_ERROR_TYPE_ACTION,
4444 						  NULL,
4445 						  "Doesn't support optional "
4446 						  "action");
4447 		}
4448 	}
4449 	if (attr->ingress && !attr->transfer) {
4450 		if (!(sub_action_flags & MLX5_FLOW_ACTION_QUEUE))
4451 			return rte_flow_error_set(error, EINVAL,
4452 						  RTE_FLOW_ERROR_TYPE_ACTION,
4453 						  NULL,
4454 						  "Ingress must has a dest "
4455 						  "QUEUE for Sample");
4456 	} else if (attr->egress && !attr->transfer) {
4457 		return rte_flow_error_set(error, ENOTSUP,
4458 					  RTE_FLOW_ERROR_TYPE_ACTION,
4459 					  NULL,
4460 					  "Sample Only support Ingress "
4461 					  "or E-Switch");
4462 	} else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
4463 		MLX5_ASSERT(attr->transfer);
4464 		if (sample->ratio > 1)
4465 			return rte_flow_error_set(error, ENOTSUP,
4466 						  RTE_FLOW_ERROR_TYPE_ACTION,
4467 						  NULL,
4468 						  "E-Switch doesn't support "
4469 						  "any optional action "
4470 						  "for sampling");
4471 		fdb_mirror = 1;
4472 		if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
4473 			return rte_flow_error_set(error, ENOTSUP,
4474 						  RTE_FLOW_ERROR_TYPE_ACTION,
4475 						  NULL,
4476 						  "unsupported action QUEUE");
4477 		if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
4478 			return rte_flow_error_set(error, EINVAL,
4479 						  RTE_FLOW_ERROR_TYPE_ACTION,
4480 						  NULL,
4481 						  "E-Switch must has a dest "
4482 						  "port for mirroring");
4483 	}
4484 	/* Continue validation for Xcap actions. */
4485 	if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
4486 	    (queue_index == 0xFFFF ||
4487 	     mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
4488 		if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
4489 		     MLX5_FLOW_XCAP_ACTIONS)
4490 			return rte_flow_error_set(error, ENOTSUP,
4491 						  RTE_FLOW_ERROR_TYPE_ACTION,
4492 						  NULL, "encap and decap "
4493 						  "combination aren't "
4494 						  "supported");
4495 		if (!attr->transfer && attr->ingress && (sub_action_flags &
4496 							MLX5_FLOW_ACTION_ENCAP))
4497 			return rte_flow_error_set(error, ENOTSUP,
4498 						  RTE_FLOW_ERROR_TYPE_ACTION,
4499 						  NULL, "encap is not supported"
4500 						  " for ingress traffic");
4501 	}
4502 	return 0;
4503 }
4504 
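/*
 * Illustrative sketch (not part of the driver): an E-Switch mirroring
 * configuration this validator accepts, ratio 1 (every packet is
 * sampled) with a port_id sub-action as the mirror destination. The
 * port number is a hypothetical placeholder and the guard macro is
 * never defined.
 */
#ifdef MLX5_FLOW_DV_EDITOR_EXAMPLES
static const struct rte_flow_action_port_id ex_mirror_port = { .id = 2 };
static const struct rte_flow_action ex_sample_sub_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &ex_mirror_port },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
static const struct rte_flow_action_sample ex_sample_conf = {
	.ratio = 1,
	.actions = ex_sample_sub_actions,
};
#endif /* MLX5_FLOW_DV_EDITOR_EXAMPLES */
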
4505 /**
4506  * Find existing modify-header resource or create and register a new one.
4507  *
4508  * @param[in, out] dev
4509  *   Pointer to rte_eth_dev structure.
4510  * @param[in, out] resource
4511  *   Pointer to modify-header resource.
4512  * @param[in, out] dev_flow
4513  *   Pointer to the dev_flow.
4514  * @param[out] error
4515  *   Pointer to error structure.
4516  *
4517  * @return
4518  *   0 on success, otherwise -errno and rte_errno is set.
4519  */
4520 static int
4521 flow_dv_modify_hdr_resource_register
4522 			(struct rte_eth_dev *dev,
4523 			 struct mlx5_flow_dv_modify_hdr_resource *resource,
4524 			 struct mlx5_flow *dev_flow,
4525 			 struct rte_flow_error *error)
4526 {
4527 	struct mlx5_priv *priv = dev->data->dev_private;
4528 	struct mlx5_dev_ctx_shared *sh = priv->sh;
4529 	uint32_t key_len = sizeof(*resource) -
4530 			   offsetof(typeof(*resource), ft_type) +
4531 			   resource->actions_num * sizeof(resource->actions[0]);
4532 	struct mlx5_hlist_entry *entry;
4533 	struct mlx5_flow_cb_ctx ctx = {
4534 		.error = error,
4535 		.data = resource,
4536 	};
4537 
4538 	resource->flags = dev_flow->dv.group ? 0 :
4539 			  MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
4540 	if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
4541 				    resource->flags))
4542 		return rte_flow_error_set(error, EOVERFLOW,
4543 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4544 					  "too many modify header items");
4545 	resource->entry.key = __rte_raw_cksum(&resource->ft_type, key_len, 0);
4546 	entry = mlx5_hlist_register(sh->modify_cmds, resource->entry.key, &ctx);
4547 	if (!entry)
4548 		return -rte_errno;
4549 	resource = container_of(entry, typeof(*resource), entry);
4550 	dev_flow->handle->dvh.modify_hdr = resource;
4551 	return 0;
4552 }
4553 
4554 /**
4555  * Get DV flow counter by index.
4556  *
4557  * @param[in] dev
4558  *   Pointer to the Ethernet device structure.
4559  * @param[in] idx
4560  *   mlx5 flow counter index in the container.
4561  * @param[out] ppool
4562  *   mlx5 flow counter pool in the container.
4563  *
4564  * @return
4565  *   Pointer to the counter, NULL otherwise.
4566  */
4567 static struct mlx5_flow_counter *
4568 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
4569 			   uint32_t idx,
4570 			   struct mlx5_flow_counter_pool **ppool)
4571 {
4572 	struct mlx5_priv *priv = dev->data->dev_private;
4573 	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4574 	struct mlx5_flow_counter_pool *pool;
4575 
4576 	/* Decrease to original index and clear shared bit. */
4577 	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
4578 	MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
4579 	pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
4580 	MLX5_ASSERT(pool);
4581 	if (ppool)
4582 		*ppool = pool;
4583 	return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
4584 }
4585 
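/*
 * Illustrative sketch (not part of the driver): counter indices are
 * 1-based and may carry a shared flag at MLX5_CNT_SHARED_OFFSET. The
 * EX_* constants below are hypothetical stand-ins for the real macros;
 * the decoding mirrors flow_dv_counter_get_by_idx() above.
 */
#define EX_COUNTERS_PER_POOL 512	/* stand-in for MLX5_COUNTERS_PER_POOL */
#define EX_CNT_SHARED_OFFSET (1u << 29)	/* stand-in for MLX5_CNT_SHARED_OFFSET */

static inline void
example_decode_cnt_idx(uint32_t idx, uint32_t *pool_no, uint32_t *offset)
{
	/* Back to a 0-based index and clear the shared bit. */
	idx = (idx - 1) & (EX_CNT_SHARED_OFFSET - 1);
	*pool_no = idx / EX_COUNTERS_PER_POOL;
	*offset = idx % EX_COUNTERS_PER_POOL;
}
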
4586 /**
4587  * Check whether the devx counter belongs to the pool.
4588  *
4589  * @param[in] pool
4590  *   Pointer to the counter pool.
4591  * @param[in] id
4592  *   The counter devx ID.
4593  *
4594  * @return
4595  *   True if counter belongs to the pool, false otherwise.
4596  */
4597 static bool
4598 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
4599 {
4600 	int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
4601 		   MLX5_COUNTERS_PER_POOL;
4602 
4603 	if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
4604 		return true;
4605 	return false;
4606 }
4607 
4608 /**
4609  * Get a pool by devx counter ID.
4610  *
4611  * @param[in] cmng
4612  *   Pointer to the counter management.
4613  * @param[in] id
4614  *   The counter devx ID.
4615  *
4616  * @return
4617  *   The counter pool pointer if it exists, NULL otherwise.
4618  */
4619 static struct mlx5_flow_counter_pool *
4620 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
4621 {
4622 	uint32_t i;
4623 	struct mlx5_flow_counter_pool *pool = NULL;
4624 
4625 	rte_spinlock_lock(&cmng->pool_update_sl);
4626 	/* Check last used pool. */
4627 	if (cmng->last_pool_idx != POOL_IDX_INVALID &&
4628 	    flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
4629 		pool = cmng->pools[cmng->last_pool_idx];
4630 		goto out;
4631 	}
4632 	/* ID out of range means no suitable pool in the container. */
4633 	if (id > cmng->max_id || id < cmng->min_id)
4634 		goto out;
4635 	/*
4636 	 * Search the container from the end, since counter IDs mostly
4637 	 * increase sequentially, so the last pool is usually the needed
4638 	 * one.
4639 	 */
4640 	i = cmng->n_valid;
4641 	while (i--) {
4642 		struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
4643 
4644 		if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
4645 			pool = pool_tmp;
4646 			break;
4647 		}
4648 	}
4649 out:
4650 	rte_spinlock_unlock(&cmng->pool_update_sl);
4651 	return pool;
4652 }
4653 
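/*
 * Illustrative sketch (not part of the driver): the lookup above first
 * tries the most recently used pool, then scans the container backwards,
 * because devx counter IDs mostly increase, so the newest pool is the
 * likeliest match. A generic backwards scan over hypothetical per-pool
 * base IDs (EX_COUNTERS_PER_POOL from the sketch above):
 */
static inline int
example_find_last_match(const int *base_ids, unsigned int n, int id)
{
	unsigned int i = n;

	while (i--)	/* newest entries are the likeliest match */
		if (id >= base_ids[i] &&
		    id < base_ids[i] + EX_COUNTERS_PER_POOL)
			return (int)i;
	return -1;
}
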
4654 /**
4655  * Resize a counter container.
4656  *
4657  * @param[in] dev
4658  *   Pointer to the Ethernet device structure.
4659  *
4660  * @return
4661  *   0 on success, otherwise negative errno value and rte_errno is set.
4662  */
4663 static int
4664 flow_dv_container_resize(struct rte_eth_dev *dev)
4665 {
4666 	struct mlx5_priv *priv = dev->data->dev_private;
4667 	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4668 	void *old_pools = cmng->pools;
4669 	uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
4670 	uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
4671 	void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
4672 
4673 	if (!pools) {
4674 		rte_errno = ENOMEM;
4675 		return -ENOMEM;
4676 	}
4677 	if (old_pools)
4678 		memcpy(pools, old_pools, cmng->n *
4679 				       sizeof(struct mlx5_flow_counter_pool *));
4680 	cmng->n = resize;
4681 	cmng->pools = pools;
4682 	if (old_pools)
4683 		mlx5_free(old_pools);
4684 	return 0;
4685 }
4686 
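/*
 * Illustrative sketch (not part of the driver): the resize above grows
 * the pool pointer array by a fixed chunk, copies the old slots and then
 * publishes the new array; callers serialize on pool_update_sl. A plain
 * C stand-in, assuming <stdlib.h> for calloc()/free():
 */
static inline int
example_grow_array(void ***arr, uint32_t *cap, uint32_t chunk)
{
	void **grown = calloc(*cap + chunk, sizeof(*grown));

	if (grown == NULL)
		return -ENOMEM;
	if (*arr != NULL)
		memcpy(grown, *arr, *cap * sizeof(*grown));
	free(*arr);
	*arr = grown;
	*cap += chunk;
	return 0;
}
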
4687 /**
4688  * Query a devx flow counter.
4689  *
4690  * @param[in] dev
4691  *   Pointer to the Ethernet device structure.
4692  * @param[in] cnt
4693  *   Index to the flow counter.
4694  * @param[out] pkts
4695  *   The statistics value of packets.
4696  * @param[out] bytes
4697  *   The statistics value of bytes.
4698  *
4699  * @return
4700  *   0 on success, otherwise a negative errno value and rte_errno is set.
4701  */
4702 static inline int
4703 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
4704 		     uint64_t *bytes)
4705 {
4706 	struct mlx5_priv *priv = dev->data->dev_private;
4707 	struct mlx5_flow_counter_pool *pool = NULL;
4708 	struct mlx5_flow_counter *cnt;
4709 	int offset;
4710 
4711 	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4712 	MLX5_ASSERT(pool);
4713 	if (priv->sh->cmng.counter_fallback)
4714 		return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
4715 					0, pkts, bytes, 0, NULL, NULL, 0);
4716 	rte_spinlock_lock(&pool->sl);
4717 	if (!pool->raw) {
4718 		*pkts = 0;
4719 		*bytes = 0;
4720 	} else {
4721 		offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
4722 		*pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
4723 		*bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
4724 	}
4725 	rte_spinlock_unlock(&pool->sl);
4726 	return 0;
4727 }
4728 
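/*
 * Illustrative sketch (not part of the driver): batch-query raw data is
 * big-endian on the wire, so each slot is converted with
 * rte_be_to_cpu_64() before being reported, exactly as done above. The
 * slot layout below is a hypothetical stand-in:
 */
struct example_raw_stat {
	uint64_t hits;	/* big-endian on the wire */
	uint64_t bytes;	/* big-endian on the wire */
};

static inline void
example_read_stat(const struct example_raw_stat *raw,
		  uint64_t *pkts, uint64_t *bytes)
{
	*pkts = rte_be_to_cpu_64(raw->hits);
	*bytes = rte_be_to_cpu_64(raw->bytes);
}
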
4729 /**
4730  * Create and initialize a new counter pool.
4731  *
4732  * @param[in] dev
4733  *   Pointer to the Ethernet device structure.
4734  * @param[out] dcs
4735  *   The devX counter handle.
4736  * @param[in] age
4737  *   Whether the pool is for a counter that was allocated for aging.
4738  * @param[in/out] cont_cur
4739  *   Pointer to the container pointer; it will be updated during pool resize.
4740  *
4741  * @return
4742  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
4743  */
4744 static struct mlx5_flow_counter_pool *
4745 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
4746 		    uint32_t age)
4747 {
4748 	struct mlx5_priv *priv = dev->data->dev_private;
4749 	struct mlx5_flow_counter_pool *pool;
4750 	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4751 	bool fallback = priv->sh->cmng.counter_fallback;
4752 	uint32_t size = sizeof(*pool);
4753 
4754 	size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
4755 	size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
4756 	pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
4757 	if (!pool) {
4758 		rte_errno = ENOMEM;
4759 		return NULL;
4760 	}
4761 	pool->raw = NULL;
4762 	pool->is_aged = !!age;
4763 	pool->query_gen = 0;
4764 	pool->min_dcs = dcs;
4765 	rte_spinlock_init(&pool->sl);
4766 	rte_spinlock_init(&pool->csl);
4767 	TAILQ_INIT(&pool->counters[0]);
4768 	TAILQ_INIT(&pool->counters[1]);
4769 	pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
4770 	rte_spinlock_lock(&cmng->pool_update_sl);
4771 	pool->index = cmng->n_valid;
4772 	if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
4773 		mlx5_free(pool);
4774 		rte_spinlock_unlock(&cmng->pool_update_sl);
4775 		return NULL;
4776 	}
4777 	cmng->pools[pool->index] = pool;
4778 	cmng->n_valid++;
4779 	if (unlikely(fallback)) {
4780 		int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
4781 
4782 		if (base < cmng->min_id)
4783 			cmng->min_id = base;
4784 		if (base > cmng->max_id)
4785 			cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
4786 		cmng->last_pool_idx = pool->index;
4787 	}
4788 	rte_spinlock_unlock(&cmng->pool_update_sl);
4789 	return pool;
4790 }
4791 
4792 /**
4793  * Prepare a new counter and/or a new counter pool.
4794  *
4795  * @param[in] dev
4796  *   Pointer to the Ethernet device structure.
4797  * @param[out] cnt_free
4798  *   Where to put the pointer of a new counter.
4799  * @param[in] age
4800  *   Whether the pool is for a counter that was allocated for aging.
4801  *
4802  * @return
4803  *   The counter pool pointer and @p cnt_free is set on success,
4804  *   NULL otherwise and rte_errno is set.
4805  */
4806 static struct mlx5_flow_counter_pool *
4807 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
4808 			     struct mlx5_flow_counter **cnt_free,
4809 			     uint32_t age)
4810 {
4811 	struct mlx5_priv *priv = dev->data->dev_private;
4812 	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4813 	struct mlx5_flow_counter_pool *pool;
4814 	struct mlx5_counters tmp_tq;
4815 	struct mlx5_devx_obj *dcs = NULL;
4816 	struct mlx5_flow_counter *cnt;
4817 	enum mlx5_counter_type cnt_type =
4818 			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
4819 	bool fallback = priv->sh->cmng.counter_fallback;
4820 	uint32_t i;
4821 
4822 	if (fallback) {
4823 		/* bulk_bitmap must be 0 for single counter allocation. */
4824 		dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
4825 		if (!dcs)
4826 			return NULL;
4827 		pool = flow_dv_find_pool_by_id(cmng, dcs->id);
4828 		if (!pool) {
4829 			pool = flow_dv_pool_create(dev, dcs, age);
4830 			if (!pool) {
4831 				mlx5_devx_cmd_destroy(dcs);
4832 				return NULL;
4833 			}
4834 		}
4835 		i = dcs->id % MLX5_COUNTERS_PER_POOL;
4836 		cnt = MLX5_POOL_GET_CNT(pool, i);
4837 		cnt->pool = pool;
4838 		cnt->dcs_when_free = dcs;
4839 		*cnt_free = cnt;
4840 		return pool;
4841 	}
4842 	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
4843 	if (!dcs) {
4844 		rte_errno = ENODATA;
4845 		return NULL;
4846 	}
4847 	pool = flow_dv_pool_create(dev, dcs, age);
4848 	if (!pool) {
4849 		mlx5_devx_cmd_destroy(dcs);
4850 		return NULL;
4851 	}
4852 	TAILQ_INIT(&tmp_tq);
4853 	for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
4854 		cnt = MLX5_POOL_GET_CNT(pool, i);
4855 		cnt->pool = pool;
4856 		TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
4857 	}
4858 	rte_spinlock_lock(&cmng->csl[cnt_type]);
4859 	TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
4860 	rte_spinlock_unlock(&cmng->csl[cnt_type]);
4861 	*cnt_free = MLX5_POOL_GET_CNT(pool, 0);
4862 	(*cnt_free)->pool = pool;
4863 	return pool;
4864 }
4865 
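/*
 * Illustrative sketch (not part of the driver): the bulk path above
 * links counters 1..N-1 of a new pool on a private list and then splices
 * them into the shared free list under its lock, keeping the critical
 * section O(1); counter 0 is handed back to the caller. The types below
 * are hypothetical (TAILQ_* from <sys/queue.h> and rte_spinlock_t are
 * already available here).
 */
struct example_cnt {
	TAILQ_ENTRY(example_cnt) next;
};
TAILQ_HEAD(example_cnt_list, example_cnt);

static inline void
example_publish_free(struct example_cnt_list *shared, rte_spinlock_t *lock,
		     struct example_cnt *cnts, unsigned int n)
{
	struct example_cnt_list tmp;
	unsigned int i;

	TAILQ_INIT(&tmp);
	for (i = 1; i < n; ++i)	/* counter 0 stays with the caller */
		TAILQ_INSERT_HEAD(&tmp, &cnts[i], next);
	rte_spinlock_lock(lock);
	TAILQ_CONCAT(shared, &tmp, next);	/* O(1) splice */
	rte_spinlock_unlock(lock);
}
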
4866 /**
4867  * Allocate a flow counter.
4868  *
4869  * @param[in] dev
4870  *   Pointer to the Ethernet device structure.
4871  * @param[in] age
4872  *   Whether the counter was allocated for aging.
4873  *
4874  * @return
4875  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4876  */
4877 static uint32_t
4878 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
4879 {
4880 	struct mlx5_priv *priv = dev->data->dev_private;
4881 	struct mlx5_flow_counter_pool *pool = NULL;
4882 	struct mlx5_flow_counter *cnt_free = NULL;
4883 	bool fallback = priv->sh->cmng.counter_fallback;
4884 	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4885 	enum mlx5_counter_type cnt_type =
4886 			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
4887 	uint32_t cnt_idx;
4888 
4889 	if (!priv->config.devx) {
4890 		rte_errno = ENOTSUP;
4891 		return 0;
4892 	}
4893 	/* Get free counters from container. */
4894 	rte_spinlock_lock(&cmng->csl[cnt_type]);
4895 	cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
4896 	if (cnt_free)
4897 		TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
4898 	rte_spinlock_unlock(&cmng->csl[cnt_type]);
4899 	if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
4900 		goto err;
4901 	pool = cnt_free->pool;
4902 	if (fallback)
4903 		cnt_free->dcs_when_active = cnt_free->dcs_when_free;
4904 	/* Create a DV counter action only on first-time usage. */
4905 	if (!cnt_free->action) {
4906 		uint16_t offset;
4907 		struct mlx5_devx_obj *dcs;
4908 		int ret;
4909 
4910 		if (!fallback) {
4911 			offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
4912 			dcs = pool->min_dcs;
4913 		} else {
4914 			offset = 0;
4915 			dcs = cnt_free->dcs_when_free;
4916 		}
4917 		ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
4918 							    &cnt_free->action);
4919 		if (ret) {
4920 			rte_errno = errno;
4921 			goto err;
4922 		}
4923 	}
4924 	cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
4925 				MLX5_CNT_ARRAY_IDX(pool, cnt_free));
4926 	/* Update the counter reset values. */
4927 	if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
4928 				 &cnt_free->bytes))
4929 		goto err;
4930 	if (!fallback && !priv->sh->cmng.query_thread_on)
4931 		/* Start the asynchronous batch query by the host thread. */
4932 		mlx5_set_query_alarm(priv->sh);
4933 	return cnt_idx;
4934 err:
4935 	if (cnt_free) {
4936 		cnt_free->pool = pool;
4937 		if (fallback)
4938 			cnt_free->dcs_when_free = cnt_free->dcs_when_active;
4939 		rte_spinlock_lock(&cmng->csl[cnt_type]);
4940 		TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
4941 		rte_spinlock_unlock(&cmng->csl[cnt_type]);
4942 	}
4943 	return 0;
4944 }
4945 
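/*
 * Illustrative sketch (not part of the driver): the inverse of the
 * decoding shown earlier, packing the pool number and in-pool offset
 * back into the 1-based counter index that flow_dv_counter_alloc()
 * returns (EX_COUNTERS_PER_POOL is the hypothetical stand-in defined
 * above).
 */
static inline uint32_t
example_make_cnt_idx(uint32_t pool_no, uint32_t offset)
{
	return pool_no * EX_COUNTERS_PER_POOL + offset + 1;
}
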
4946 /**
4947  * Allocate a shared flow counter.
4948  *
4949  * @param[in] ctx
4950  *   Pointer to the shared counter configuration.
4951  * @param[in] data
4952  *   Pointer to save the allocated counter index.
4953  *
4954  * @return
4955  *   0 on success; the allocated counter index is stored in @p data.
4956  */
4957 
4958 static int32_t
4959 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
4960 {
4961 	struct mlx5_shared_counter_conf *conf = ctx;
4962 	struct rte_eth_dev *dev = conf->dev;
4963 	struct mlx5_flow_counter *cnt;
4964 
4965 	data->dword = flow_dv_counter_alloc(dev, 0);
4966 	data->dword |= MLX5_CNT_SHARED_OFFSET;
4967 	cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
4968 	cnt->shared_info.id = conf->id;
4969 	return 0;
4970 }
4971 
4972 /**
4973  * Get a shared flow counter.
4974  *
4975  * @param[in] dev
4976  *   Pointer to the Ethernet device structure.
4977  * @param[in] id
4978  *   Counter identifier.
4979  *
4980  * @return
4981  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4982  */
4983 static uint32_t
4984 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
4985 {
4986 	struct mlx5_priv *priv = dev->data->dev_private;
4987 	struct mlx5_shared_counter_conf conf = {
4988 		.dev = dev,
4989 		.id = id,
4990 	};
4991 	union mlx5_l3t_data data = {
4992 		.dword = 0,
4993 	};
4994 
4995 	mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
4996 			       flow_dv_counter_alloc_shared_cb, &conf);
4997 	return data.dword;
4998 }
4999 
5000 /**
5001  * Get age param from counter index.
5002  *
5003  * @param[in] dev
5004  *   Pointer to the Ethernet device structure.
5005  * @param[in] counter
5006  *   Index to the counter handler.
5007  *
5008  * @return
5009  *   The aging parameter specified for the counter index.
5010  */
5011 static struct mlx5_age_param *
5012 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
5013 				uint32_t counter)
5014 {
5015 	struct mlx5_flow_counter *cnt;
5016 	struct mlx5_flow_counter_pool *pool = NULL;
5017 
5018 	flow_dv_counter_get_by_idx(dev, counter, &pool);
5019 	counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
5020 	cnt = MLX5_POOL_GET_CNT(pool, counter);
5021 	return MLX5_CNT_TO_AGE(cnt);
5022 }
5023 
5024 /**
5025  * Remove a flow counter from aged counter list.
5026  *
5027  * @param[in] dev
5028  *   Pointer to the Ethernet device structure.
5029  * @param[in] counter
5030  *   Index to the counter handler.
5031  * @param[in] cnt
5032  *   Pointer to the counter handler.
5033  */
5034 static void
5035 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
5036 				uint32_t counter, struct mlx5_flow_counter *cnt)
5037 {
5038 	struct mlx5_age_info *age_info;
5039 	struct mlx5_age_param *age_param;
5040 	struct mlx5_priv *priv = dev->data->dev_private;
5041 	uint16_t expected = AGE_CANDIDATE;
5042 
5043 	age_info = GET_PORT_AGE_INFO(priv);
5044 	age_param = flow_dv_counter_idx_get_age(dev, counter);
5045 	if (!__atomic_compare_exchange_n(&age_param->state, &expected,
5046 					 AGE_FREE, false, __ATOMIC_RELAXED,
5047 					 __ATOMIC_RELAXED)) {
5048 		/*
5049 		 * We need the lock even on age timeout, since the
5050 		 * counter may still be in processing.
5051 		 */
5052 		rte_spinlock_lock(&age_info->aged_sl);
5053 		TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
5054 		rte_spinlock_unlock(&age_info->aged_sl);
5055 		__atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
5056 	}
5057 }
5058 
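/*
 * Illustrative sketch (not part of the driver): the release path above
 * moves the age state from AGE_CANDIDATE to AGE_FREE with a relaxed CAS.
 * A failed CAS means the aging thread already timed the counter out and
 * queued it on the aged list, so it must first be unlinked under the
 * list lock. The states below are hypothetical stand-ins:
 */
enum example_age_state { EX_AGE_CANDIDATE, EX_AGE_TMOUT, EX_AGE_FREE };

static inline bool
example_try_direct_release(uint16_t *state)
{
	uint16_t expected = EX_AGE_CANDIDATE;

	/* Returns false if the state already left AGE_CANDIDATE. */
	return __atomic_compare_exchange_n(state, &expected, EX_AGE_FREE,
					   false, __ATOMIC_RELAXED,
					   __ATOMIC_RELAXED);
}
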
5059 /**
5060  * Release a flow counter.
5061  *
5062  * @param[in] dev
5063  *   Pointer to the Ethernet device structure.
5064  * @param[in] counter
5065  *   Index to the counter handler.
5066  */
5067 static void
5068 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
5069 {
5070 	struct mlx5_priv *priv = dev->data->dev_private;
5071 	struct mlx5_flow_counter_pool *pool = NULL;
5072 	struct mlx5_flow_counter *cnt;
5073 	enum mlx5_counter_type cnt_type;
5074 
5075 	if (!counter)
5076 		return;
5077 	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5078 	MLX5_ASSERT(pool);
5079 	if (IS_SHARED_CNT(counter) &&
5080 	    mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
5081 		return;
5082 	if (pool->is_aged)
5083 		flow_dv_counter_remove_from_age(dev, counter, cnt);
5084 	cnt->pool = pool;
5085 	/*
5086 	 * Put the counter back on a list to be updated in non-fallback mode.
5087 	 * Two lists are used alternately: while one is being queried, the
5088 	 * freed counter is added to the other list, selected by the pool
5089 	 * query_gen value. After the query finishes, the counters on that
5090 	 * list are moved to the global container counter list. The lists
5091 	 * switch when a query starts; in this case no lock is needed, as
5092 	 * the query callback and this release function operate on
5093 	 * different lists.
5094 	 */
5095 	if (!priv->sh->cmng.counter_fallback) {
5096 		rte_spinlock_lock(&pool->csl);
5097 		TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
5098 		rte_spinlock_unlock(&pool->csl);
5099 	} else {
5100 		cnt->dcs_when_free = cnt->dcs_when_active;
5101 		cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
5102 					   MLX5_COUNTER_TYPE_ORIGIN;
5103 		rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
5104 		TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
5105 				  cnt, next);
5106 		rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
5107 	}
5108 }
5109 
5110 /**
5111  * Verify the @p attributes will be correctly understood by the NIC and store
5112  * them in the @p flow if everything is correct.
5113  *
5114  * @param[in] dev
5115  *   Pointer to dev struct.
5116  * @param[in] attributes
5117  *   Pointer to flow attributes
5118  * @param[in] external
5119  *   This flow rule is created by a request external to the PMD.
5120  * @param[out] error
5121  *   Pointer to error structure.
5122  *
5123  * @return
5124  *   - 0 on success and non-root table.
5125  *   - 1 on success and root table.
5126  *   - a negative errno value otherwise and rte_errno is set.
5127  */
5128 static int
5129 flow_dv_validate_attributes(struct rte_eth_dev *dev,
5130 			    const struct mlx5_flow_tunnel *tunnel,
5131 			    const struct rte_flow_attr *attributes,
5132 			    const struct flow_grp_info *grp_info,
5133 			    struct rte_flow_error *error)
5134 {
5135 	struct mlx5_priv *priv = dev->data->dev_private;
5136 	uint32_t priority_max = priv->config.flow_prio - 1;
5137 	int ret = 0;
5138 
5139 #ifndef HAVE_MLX5DV_DR
5140 	RTE_SET_USED(tunnel);
5141 	RTE_SET_USED(grp_info);
5142 	if (attributes->group)
5143 		return rte_flow_error_set(error, ENOTSUP,
5144 					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5145 					  NULL,
5146 					  "groups are not supported");
5147 #else
5148 	uint32_t table = 0;
5149 
5150 	ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
5151 				       grp_info, error);
5152 	if (ret)
5153 		return ret;
5154 	if (!table)
5155 		ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5156 #endif
5157 	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
5158 	    attributes->priority >= priority_max)
5159 		return rte_flow_error_set(error, ENOTSUP,
5160 					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
5161 					  NULL,
5162 					  "priority out of range");
5163 	if (attributes->transfer) {
5164 		if (!priv->config.dv_esw_en)
5165 			return rte_flow_error_set
5166 				(error, ENOTSUP,
5167 				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5168 				 "E-Switch dr is not supported");
5169 		if (!(priv->representor || priv->master))
5170 			return rte_flow_error_set
5171 				(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5172 				 NULL, "E-Switch configuration can only be"
5173 				 " done by a master or a representor device");
5174 		if (attributes->egress)
5175 			return rte_flow_error_set
5176 				(error, ENOTSUP,
5177 				 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
5178 				 "egress is not supported");
5179 	}
5180 	if (!(attributes->egress ^ attributes->ingress))
5181 		return rte_flow_error_set(error, ENOTSUP,
5182 					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
5183 					  "must specify exactly one of "
5184 					  "ingress or egress");
5185 	return ret;
5186 }
5187 
5188 /**
5189  * Internal validation function. For validating both actions and items.
5190  *
5191  * @param[in] dev
5192  *   Pointer to the rte_eth_dev structure.
5193  * @param[in] attr
5194  *   Pointer to the flow attributes.
5195  * @param[in] items
5196  *   Pointer to the list of items.
5197  * @param[in] actions
5198  *   Pointer to the list of actions.
5199  * @param[in] external
5200  *   This flow rule is created by a request external to the PMD.
5201  * @param[in] hairpin
5202  *   Number of hairpin TX actions, 0 means classic flow.
5203  * @param[out] error
5204  *   Pointer to the error structure.
5205  *
5206  * @return
5207  *   0 on success, a negative errno value otherwise and rte_errno is set.
5208  */
5209 static int
5210 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
5211 		 const struct rte_flow_item items[],
5212 		 const struct rte_flow_action actions[],
5213 		 bool external, int hairpin, struct rte_flow_error *error)
5214 {
5215 	int ret;
5216 	uint64_t action_flags = 0;
5217 	uint64_t item_flags = 0;
5218 	uint64_t last_item = 0;
5219 	uint8_t next_protocol = 0xff;
5220 	uint16_t ether_type = 0;
5221 	int actions_n = 0;
5222 	uint8_t item_ipv6_proto = 0;
5223 	const struct rte_flow_item *gre_item = NULL;
5224 	const struct rte_flow_action_raw_decap *decap;
5225 	const struct rte_flow_action_raw_encap *encap;
5226 	const struct rte_flow_action_rss *rss;
5227 	const struct rte_flow_item_tcp nic_tcp_mask = {
5228 		.hdr = {
5229 			.tcp_flags = 0xFF,
5230 			.src_port = RTE_BE16(UINT16_MAX),
5231 			.dst_port = RTE_BE16(UINT16_MAX),
5232 		}
5233 	};
5234 	const struct rte_flow_item_ipv6 nic_ipv6_mask = {
5235 		.hdr = {
5236 			.src_addr =
5237 			"\xff\xff\xff\xff\xff\xff\xff\xff"
5238 			"\xff\xff\xff\xff\xff\xff\xff\xff",
5239 			.dst_addr =
5240 			"\xff\xff\xff\xff\xff\xff\xff\xff"
5241 			"\xff\xff\xff\xff\xff\xff\xff\xff",
5242 			.vtc_flow = RTE_BE32(0xffffffff),
5243 			.proto = 0xff,
5244 			.hop_limits = 0xff,
5245 		},
5246 		.has_frag_ext = 1,
5247 	};
5248 	const struct rte_flow_item_ecpri nic_ecpri_mask = {
5249 		.hdr = {
5250 			.common = {
5251 				.u32 =
5252 				RTE_BE32(((const struct rte_ecpri_common_hdr) {
5253 					.type = 0xFF,
5254 					}).u32),
5255 			},
5256 			.dummy[0] = 0xffffffff,
5257 		},
5258 	};
5259 	struct mlx5_priv *priv = dev->data->dev_private;
5260 	struct mlx5_dev_config *dev_conf = &priv->config;
5261 	uint16_t queue_index = 0xFFFF;
5262 	const struct rte_flow_item_vlan *vlan_m = NULL;
5263 	int16_t rw_act_num = 0;
5264 	uint64_t is_root;
5265 	const struct mlx5_flow_tunnel *tunnel;
5266 	struct flow_grp_info grp_info = {
5267 		.external = !!external,
5268 		.transfer = !!attr->transfer,
5269 		.fdb_def_rule = !!priv->fdb_def_rule,
5270 	};
5271 	const struct rte_eth_hairpin_conf *conf;
5272 
5273 	if (items == NULL)
5274 		return -1;
5275 	if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
5276 		tunnel = flow_items_to_tunnel(items);
5277 		action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
5278 				MLX5_FLOW_ACTION_DECAP;
5279 	} else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
5280 		tunnel = flow_actions_to_tunnel(actions);
5281 		action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
5282 	} else {
5283 		tunnel = NULL;
5284 	}
5285 	grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
5286 				(dev, tunnel, attr, items, actions);
5287 	ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
5288 	if (ret < 0)
5289 		return ret;
5290 	is_root = (uint64_t)ret;
5291 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
5292 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
5293 		int type = items->type;
5294 
5295 		if (!mlx5_flow_os_item_supported(type))
5296 			return rte_flow_error_set(error, ENOTSUP,
5297 						  RTE_FLOW_ERROR_TYPE_ITEM,
5298 						  NULL, "item not supported");
5299 		switch (type) {
5300 		case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
5301 			if (items[0].type != (typeof(items[0].type))
5302 						MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
5303 				return rte_flow_error_set
5304 						(error, EINVAL,
5305 						RTE_FLOW_ERROR_TYPE_ITEM,
5306 						NULL, "MLX5 private items "
5307 						"must be the first");
5308 			break;
5309 		case RTE_FLOW_ITEM_TYPE_VOID:
5310 			break;
5311 		case RTE_FLOW_ITEM_TYPE_PORT_ID:
5312 			ret = flow_dv_validate_item_port_id
5313 					(dev, items, attr, item_flags, error);
5314 			if (ret < 0)
5315 				return ret;
5316 			last_item = MLX5_FLOW_ITEM_PORT_ID;
5317 			break;
5318 		case RTE_FLOW_ITEM_TYPE_ETH:
5319 			ret = mlx5_flow_validate_item_eth(items, item_flags,
5320 							  true, error);
5321 			if (ret < 0)
5322 				return ret;
5323 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
5324 					     MLX5_FLOW_LAYER_OUTER_L2;
5325 			if (items->mask != NULL && items->spec != NULL) {
5326 				ether_type =
5327 					((const struct rte_flow_item_eth *)
5328 					 items->spec)->type;
5329 				ether_type &=
5330 					((const struct rte_flow_item_eth *)
5331 					 items->mask)->type;
5332 				ether_type = rte_be_to_cpu_16(ether_type);
5333 			} else {
5334 				ether_type = 0;
5335 			}
5336 			break;
5337 		case RTE_FLOW_ITEM_TYPE_VLAN:
5338 			ret = flow_dv_validate_item_vlan(items, item_flags,
5339 							 dev, error);
5340 			if (ret < 0)
5341 				return ret;
5342 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
5343 					     MLX5_FLOW_LAYER_OUTER_VLAN;
5344 			if (items->mask != NULL && items->spec != NULL) {
5345 				ether_type =
5346 					((const struct rte_flow_item_vlan *)
5347 					 items->spec)->inner_type;
5348 				ether_type &=
5349 					((const struct rte_flow_item_vlan *)
5350 					 items->mask)->inner_type;
5351 				ether_type = rte_be_to_cpu_16(ether_type);
5352 			} else {
5353 				ether_type = 0;
5354 			}
5355 			/* Store outer VLAN mask for of_push_vlan action. */
5356 			if (!tunnel)
5357 				vlan_m = items->mask;
5358 			break;
5359 		case RTE_FLOW_ITEM_TYPE_IPV4:
5360 			mlx5_flow_tunnel_ip_check(items, next_protocol,
5361 						  &item_flags, &tunnel);
5362 			ret = flow_dv_validate_item_ipv4(items, item_flags,
5363 							 last_item, ether_type,
5364 							 error);
5365 			if (ret < 0)
5366 				return ret;
5367 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5368 					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5369 			if (items->mask != NULL &&
5370 			    ((const struct rte_flow_item_ipv4 *)
5371 			     items->mask)->hdr.next_proto_id) {
5372 				next_protocol =
5373 					((const struct rte_flow_item_ipv4 *)
5374 					 (items->spec))->hdr.next_proto_id;
5375 				next_protocol &=
5376 					((const struct rte_flow_item_ipv4 *)
5377 					 (items->mask))->hdr.next_proto_id;
5378 			} else {
5379 				/* Reset for inner layer. */
5380 				next_protocol = 0xff;
5381 			}
5382 			break;
5383 		case RTE_FLOW_ITEM_TYPE_IPV6:
5384 			mlx5_flow_tunnel_ip_check(items, next_protocol,
5385 						  &item_flags, &tunnel);
5386 			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
5387 							   last_item,
5388 							   ether_type,
5389 							   &nic_ipv6_mask,
5390 							   error);
5391 			if (ret < 0)
5392 				return ret;
5393 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5394 					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5395 			if (items->mask != NULL &&
5396 			    ((const struct rte_flow_item_ipv6 *)
5397 			     items->mask)->hdr.proto) {
5398 				item_ipv6_proto =
5399 					((const struct rte_flow_item_ipv6 *)
5400 					 items->spec)->hdr.proto;
5401 				next_protocol =
5402 					((const struct rte_flow_item_ipv6 *)
5403 					 items->spec)->hdr.proto;
5404 				next_protocol &=
5405 					((const struct rte_flow_item_ipv6 *)
5406 					 items->mask)->hdr.proto;
5407 			} else {
5408 				/* Reset for inner layer. */
5409 				next_protocol = 0xff;
5410 			}
5411 			break;
5412 		case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
5413 			ret = flow_dv_validate_item_ipv6_frag_ext(items,
5414 								  item_flags,
5415 								  error);
5416 			if (ret < 0)
5417 				return ret;
5418 			last_item = tunnel ?
5419 					MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
5420 					MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
5421 			if (items->mask != NULL &&
5422 			    ((const struct rte_flow_item_ipv6_frag_ext *)
5423 			     items->mask)->hdr.next_header) {
5424 				next_protocol =
5425 				((const struct rte_flow_item_ipv6_frag_ext *)
5426 				 items->spec)->hdr.next_header;
5427 				next_protocol &=
5428 				((const struct rte_flow_item_ipv6_frag_ext *)
5429 				 items->mask)->hdr.next_header;
5430 			} else {
5431 				/* Reset for inner layer. */
5432 				next_protocol = 0xff;
5433 			}
5434 			break;
5435 		case RTE_FLOW_ITEM_TYPE_TCP:
5436 			ret = mlx5_flow_validate_item_tcp
5437 						(items, item_flags,
5438 						 next_protocol,
5439 						 &nic_tcp_mask,
5440 						 error);
5441 			if (ret < 0)
5442 				return ret;
5443 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5444 					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
5445 			break;
5446 		case RTE_FLOW_ITEM_TYPE_UDP:
5447 			ret = mlx5_flow_validate_item_udp(items, item_flags,
5448 							  next_protocol,
5449 							  error);
5450 			if (ret < 0)
5451 				return ret;
5452 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5453 					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
5454 			break;
5455 		case RTE_FLOW_ITEM_TYPE_GRE:
5456 			ret = mlx5_flow_validate_item_gre(items, item_flags,
5457 							  next_protocol, error);
5458 			if (ret < 0)
5459 				return ret;
5460 			gre_item = items;
5461 			last_item = MLX5_FLOW_LAYER_GRE;
5462 			break;
5463 		case RTE_FLOW_ITEM_TYPE_NVGRE:
5464 			ret = mlx5_flow_validate_item_nvgre(items, item_flags,
5465 							    next_protocol,
5466 							    error);
5467 			if (ret < 0)
5468 				return ret;
5469 			last_item = MLX5_FLOW_LAYER_NVGRE;
5470 			break;
5471 		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5472 			ret = mlx5_flow_validate_item_gre_key
5473 				(items, item_flags, gre_item, error);
5474 			if (ret < 0)
5475 				return ret;
5476 			last_item = MLX5_FLOW_LAYER_GRE_KEY;
5477 			break;
5478 		case RTE_FLOW_ITEM_TYPE_VXLAN:
5479 			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
5480 							    error);
5481 			if (ret < 0)
5482 				return ret;
5483 			last_item = MLX5_FLOW_LAYER_VXLAN;
5484 			break;
5485 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5486 			ret = mlx5_flow_validate_item_vxlan_gpe(items,
5487 								item_flags, dev,
5488 								error);
5489 			if (ret < 0)
5490 				return ret;
5491 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5492 			break;
5493 		case RTE_FLOW_ITEM_TYPE_GENEVE:
5494 			ret = mlx5_flow_validate_item_geneve(items,
5495 							     item_flags, dev,
5496 							     error);
5497 			if (ret < 0)
5498 				return ret;
5499 			last_item = MLX5_FLOW_LAYER_GENEVE;
5500 			break;
5501 		case RTE_FLOW_ITEM_TYPE_MPLS:
5502 			ret = mlx5_flow_validate_item_mpls(dev, items,
5503 							   item_flags,
5504 							   last_item, error);
5505 			if (ret < 0)
5506 				return ret;
5507 			last_item = MLX5_FLOW_LAYER_MPLS;
5508 			break;
5509 
5510 		case RTE_FLOW_ITEM_TYPE_MARK:
5511 			ret = flow_dv_validate_item_mark(dev, items, attr,
5512 							 error);
5513 			if (ret < 0)
5514 				return ret;
5515 			last_item = MLX5_FLOW_ITEM_MARK;
5516 			break;
5517 		case RTE_FLOW_ITEM_TYPE_META:
5518 			ret = flow_dv_validate_item_meta(dev, items, attr,
5519 							 error);
5520 			if (ret < 0)
5521 				return ret;
5522 			last_item = MLX5_FLOW_ITEM_METADATA;
5523 			break;
5524 		case RTE_FLOW_ITEM_TYPE_ICMP:
5525 			ret = mlx5_flow_validate_item_icmp(items, item_flags,
5526 							   next_protocol,
5527 							   error);
5528 			if (ret < 0)
5529 				return ret;
5530 			last_item = MLX5_FLOW_LAYER_ICMP;
5531 			break;
5532 		case RTE_FLOW_ITEM_TYPE_ICMP6:
5533 			ret = mlx5_flow_validate_item_icmp6(items, item_flags,
5534 							    next_protocol,
5535 							    error);
5536 			if (ret < 0)
5537 				return ret;
5538 			item_ipv6_proto = IPPROTO_ICMPV6;
5539 			last_item = MLX5_FLOW_LAYER_ICMP6;
5540 			break;
5541 		case RTE_FLOW_ITEM_TYPE_TAG:
5542 			ret = flow_dv_validate_item_tag(dev, items,
5543 							attr, error);
5544 			if (ret < 0)
5545 				return ret;
5546 			last_item = MLX5_FLOW_ITEM_TAG;
5547 			break;
5548 		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
5549 		case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
5550 			break;
5551 		case RTE_FLOW_ITEM_TYPE_GTP:
5552 			ret = flow_dv_validate_item_gtp(dev, items, item_flags,
5553 							error);
5554 			if (ret < 0)
5555 				return ret;
5556 			last_item = MLX5_FLOW_LAYER_GTP;
5557 			break;
5558 		case RTE_FLOW_ITEM_TYPE_ECPRI:
5559 			/* Capacity will be checked in the translate stage. */
5560 			ret = mlx5_flow_validate_item_ecpri(items, item_flags,
5561 							    last_item,
5562 							    ether_type,
5563 							    &nic_ecpri_mask,
5564 							    error);
5565 			if (ret < 0)
5566 				return ret;
5567 			last_item = MLX5_FLOW_LAYER_ECPRI;
5568 			break;
5569 		default:
5570 			return rte_flow_error_set(error, ENOTSUP,
5571 						  RTE_FLOW_ERROR_TYPE_ITEM,
5572 						  NULL, "item not supported");
5573 		}
5574 		item_flags |= last_item;
5575 	}
5576 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5577 		int type = actions->type;
5578 
5579 		if (!mlx5_flow_os_action_supported(type))
5580 			return rte_flow_error_set(error, ENOTSUP,
5581 						  RTE_FLOW_ERROR_TYPE_ACTION,
5582 						  actions,
5583 						  "action not supported");
5584 		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5585 			return rte_flow_error_set(error, ENOTSUP,
5586 						  RTE_FLOW_ERROR_TYPE_ACTION,
5587 						  actions, "too many actions");
5588 		switch (type) {
5589 		case RTE_FLOW_ACTION_TYPE_VOID:
5590 			break;
5591 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
5592 			ret = flow_dv_validate_action_port_id(dev,
5593 							      action_flags,
5594 							      actions,
5595 							      attr,
5596 							      error);
5597 			if (ret)
5598 				return ret;
5599 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5600 			++actions_n;
5601 			break;
5602 		case RTE_FLOW_ACTION_TYPE_FLAG:
5603 			ret = flow_dv_validate_action_flag(dev, action_flags,
5604 							   attr, error);
5605 			if (ret < 0)
5606 				return ret;
5607 			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5608 				/* Count all modify-header actions as one. */
5609 				if (!(action_flags &
5610 				      MLX5_FLOW_MODIFY_HDR_ACTIONS))
5611 					++actions_n;
5612 				action_flags |= MLX5_FLOW_ACTION_FLAG |
5613 						MLX5_FLOW_ACTION_MARK_EXT;
5614 			} else {
5615 				action_flags |= MLX5_FLOW_ACTION_FLAG;
5616 				++actions_n;
5617 			}
5618 			rw_act_num += MLX5_ACT_NUM_SET_MARK;
5619 			break;
5620 		case RTE_FLOW_ACTION_TYPE_MARK:
5621 			ret = flow_dv_validate_action_mark(dev, actions,
5622 							   action_flags,
5623 							   attr, error);
5624 			if (ret < 0)
5625 				return ret;
5626 			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5627 				/* Count all modify-header actions as one. */
5628 				if (!(action_flags &
5629 				      MLX5_FLOW_MODIFY_HDR_ACTIONS))
5630 					++actions_n;
5631 				action_flags |= MLX5_FLOW_ACTION_MARK |
5632 						MLX5_FLOW_ACTION_MARK_EXT;
5633 			} else {
5634 				action_flags |= MLX5_FLOW_ACTION_MARK;
5635 				++actions_n;
5636 			}
5637 			rw_act_num += MLX5_ACT_NUM_SET_MARK;
5638 			break;
5639 		case RTE_FLOW_ACTION_TYPE_SET_META:
5640 			ret = flow_dv_validate_action_set_meta(dev, actions,
5641 							       action_flags,
5642 							       attr, error);
5643 			if (ret < 0)
5644 				return ret;
5645 			/* Count all modify-header actions as one action. */
5646 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5647 				++actions_n;
5648 			action_flags |= MLX5_FLOW_ACTION_SET_META;
5649 			rw_act_num += MLX5_ACT_NUM_SET_META;
5650 			break;
5651 		case RTE_FLOW_ACTION_TYPE_SET_TAG:
5652 			ret = flow_dv_validate_action_set_tag(dev, actions,
5653 							      action_flags,
5654 							      attr, error);
5655 			if (ret < 0)
5656 				return ret;
5657 			/* Count all modify-header actions as one action. */
5658 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5659 				++actions_n;
5660 			action_flags |= MLX5_FLOW_ACTION_SET_TAG;
5661 			rw_act_num += MLX5_ACT_NUM_SET_TAG;
5662 			break;
5663 		case RTE_FLOW_ACTION_TYPE_DROP:
5664 			ret = mlx5_flow_validate_action_drop(action_flags,
5665 							     attr, error);
5666 			if (ret < 0)
5667 				return ret;
5668 			action_flags |= MLX5_FLOW_ACTION_DROP;
5669 			++actions_n;
5670 			break;
5671 		case RTE_FLOW_ACTION_TYPE_QUEUE:
5672 			ret = mlx5_flow_validate_action_queue(actions,
5673 							      action_flags, dev,
5674 							      attr, error);
5675 			if (ret < 0)
5676 				return ret;
5677 			queue_index = ((const struct rte_flow_action_queue *)
5678 							(actions->conf))->index;
5679 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
5680 			++actions_n;
5681 			break;
5682 		case RTE_FLOW_ACTION_TYPE_RSS:
5683 			rss = actions->conf;
5684 			ret = mlx5_flow_validate_action_rss(actions,
5685 							    action_flags, dev,
5686 							    attr, item_flags,
5687 							    error);
5688 			if (ret < 0)
5689 				return ret;
5690 			if (rss != NULL && rss->queue_num)
5691 				queue_index = rss->queue[0];
5692 			action_flags |= MLX5_FLOW_ACTION_RSS;
5693 			++actions_n;
5694 			break;
5695 		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
5696 			ret =
5697 			mlx5_flow_validate_action_default_miss(action_flags,
5698 					attr, error);
5699 			if (ret < 0)
5700 				return ret;
5701 			action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
5702 			++actions_n;
5703 			break;
5704 		case RTE_FLOW_ACTION_TYPE_COUNT:
5705 			ret = flow_dv_validate_action_count(dev, error);
5706 			if (ret < 0)
5707 				return ret;
5708 			action_flags |= MLX5_FLOW_ACTION_COUNT;
5709 			++actions_n;
5710 			break;
5711 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5712 			if (flow_dv_validate_action_pop_vlan(dev,
5713 							     action_flags,
5714 							     actions,
5715 							     item_flags, attr,
5716 							     error))
5717 				return -rte_errno;
5718 			action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5719 			++actions_n;
5720 			break;
5721 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5722 			ret = flow_dv_validate_action_push_vlan(dev,
5723 								action_flags,
5724 								vlan_m,
5725 								actions, attr,
5726 								error);
5727 			if (ret < 0)
5728 				return ret;
5729 			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5730 			++actions_n;
5731 			break;
5732 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5733 			ret = flow_dv_validate_action_set_vlan_pcp
5734 						(action_flags, actions, error);
5735 			if (ret < 0)
5736 				return ret;
5737 			/* Count PCP with push_vlan command. */
5738 			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
5739 			break;
5740 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5741 			ret = flow_dv_validate_action_set_vlan_vid
5742 						(item_flags, action_flags,
5743 						 actions, error);
5744 			if (ret < 0)
5745 				return ret;
5746 			/* Count VID with push_vlan command. */
5747 			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5748 			rw_act_num += MLX5_ACT_NUM_MDF_VID;
5749 			break;
5750 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5751 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5752 			ret = flow_dv_validate_action_l2_encap(dev,
5753 							       action_flags,
5754 							       actions, attr,
5755 							       error);
5756 			if (ret < 0)
5757 				return ret;
5758 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
5759 			++actions_n;
5760 			break;
5761 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5762 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5763 			ret = flow_dv_validate_action_decap(dev, action_flags,
5764 							    attr, error);
5765 			if (ret < 0)
5766 				return ret;
5767 			action_flags |= MLX5_FLOW_ACTION_DECAP;
5768 			++actions_n;
5769 			break;
5770 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5771 			ret = flow_dv_validate_action_raw_encap_decap
5772 				(dev, NULL, actions->conf, attr, &action_flags,
5773 				 &actions_n, error);
5774 			if (ret < 0)
5775 				return ret;
5776 			break;
5777 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5778 			decap = actions->conf;
5779 			while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
5780 				;
5781 			if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5782 				encap = NULL;
5783 				actions--;
5784 			} else {
5785 				encap = actions->conf;
5786 			}
5787 			ret = flow_dv_validate_action_raw_encap_decap
5788 					   (dev,
5789 					    decap ? decap : &empty_decap, encap,
5790 					    attr, &action_flags, &actions_n,
5791 					    error);
5792 			if (ret < 0)
5793 				return ret;
5794 			break;
5795 		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5796 		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5797 			ret = flow_dv_validate_action_modify_mac(action_flags,
5798 								 actions,
5799 								 item_flags,
5800 								 error);
5801 			if (ret < 0)
5802 				return ret;
5803 			/* Count all modify-header actions as one action. */
5804 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5805 				++actions_n;
5806 			action_flags |= actions->type ==
5807 					RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5808 						MLX5_FLOW_ACTION_SET_MAC_SRC :
5809 						MLX5_FLOW_ACTION_SET_MAC_DST;
5810 			/*
5811 			 * Even if the source and destination MAC addresses have
5812 			 * overlap in the header with 4B alignment, the convert
5813 			 * function will handle them separately and 4 SW actions
5814 			 * will be created. And 2 actions will be added each
5815 			 * time no matter how many bytes of address will be set.
5816 			 */
5817 			rw_act_num += MLX5_ACT_NUM_MDF_MAC;
5818 			break;
5819 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5820 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5821 			ret = flow_dv_validate_action_modify_ipv4(action_flags,
5822 								  actions,
5823 								  item_flags,
5824 								  error);
5825 			if (ret < 0)
5826 				return ret;
5827 			/* Count all modify-header actions as one action. */
5828 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5829 				++actions_n;
5830 			action_flags |= actions->type ==
5831 					RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5832 						MLX5_FLOW_ACTION_SET_IPV4_SRC :
5833 						MLX5_FLOW_ACTION_SET_IPV4_DST;
5834 			rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
5835 			break;
5836 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5837 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5838 			ret = flow_dv_validate_action_modify_ipv6(action_flags,
5839 								  actions,
5840 								  item_flags,
5841 								  error);
5842 			if (ret < 0)
5843 				return ret;
5844 			if (item_ipv6_proto == IPPROTO_ICMPV6)
5845 				return rte_flow_error_set(error, ENOTSUP,
5846 					RTE_FLOW_ERROR_TYPE_ACTION,
5847 					actions,
5848 					"Can't change header "
5849 					"with ICMPv6 proto");
5850 			/* Count all modify-header actions as one action. */
5851 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5852 				++actions_n;
5853 			action_flags |= actions->type ==
5854 					RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5855 						MLX5_FLOW_ACTION_SET_IPV6_SRC :
5856 						MLX5_FLOW_ACTION_SET_IPV6_DST;
5857 			rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
5858 			break;
5859 		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5860 		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5861 			ret = flow_dv_validate_action_modify_tp(action_flags,
5862 								actions,
5863 								item_flags,
5864 								error);
5865 			if (ret < 0)
5866 				return ret;
5867 			/* Count all modify-header actions as one action. */
5868 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5869 				++actions_n;
5870 			action_flags |= actions->type ==
5871 					RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5872 						MLX5_FLOW_ACTION_SET_TP_SRC :
5873 						MLX5_FLOW_ACTION_SET_TP_DST;
5874 			rw_act_num += MLX5_ACT_NUM_MDF_PORT;
5875 			break;
5876 		case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5877 		case RTE_FLOW_ACTION_TYPE_SET_TTL:
5878 			ret = flow_dv_validate_action_modify_ttl(action_flags,
5879 								 actions,
5880 								 item_flags,
5881 								 error);
5882 			if (ret < 0)
5883 				return ret;
5884 			/* Count all modify-header actions as one action. */
5885 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5886 				++actions_n;
5887 			action_flags |= actions->type ==
5888 					RTE_FLOW_ACTION_TYPE_SET_TTL ?
5889 						MLX5_FLOW_ACTION_SET_TTL :
5890 						MLX5_FLOW_ACTION_DEC_TTL;
5891 			rw_act_num += MLX5_ACT_NUM_MDF_TTL;
5892 			break;
5893 		case RTE_FLOW_ACTION_TYPE_JUMP:
5894 			ret = flow_dv_validate_action_jump(dev, tunnel, actions,
5895 							   action_flags,
5896 							   attr, external,
5897 							   error);
5898 			if (ret)
5899 				return ret;
5900 			++actions_n;
5901 			action_flags |= MLX5_FLOW_ACTION_JUMP;
5902 			break;
5903 		case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5904 		case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5905 			ret = flow_dv_validate_action_modify_tcp_seq
5906 								(action_flags,
5907 								 actions,
5908 								 item_flags,
5909 								 error);
5910 			if (ret < 0)
5911 				return ret;
5912 			/* Count all modify-header actions as one action. */
5913 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5914 				++actions_n;
5915 			action_flags |= actions->type ==
5916 					RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5917 						MLX5_FLOW_ACTION_INC_TCP_SEQ :
5918 						MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5919 			rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
5920 			break;
5921 		case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5922 		case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5923 			ret = flow_dv_validate_action_modify_tcp_ack
5924 								(action_flags,
5925 								 actions,
5926 								 item_flags,
5927 								 error);
5928 			if (ret < 0)
5929 				return ret;
5930 			/* Count all modify-header actions as one action. */
5931 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5932 				++actions_n;
5933 			action_flags |= actions->type ==
5934 					RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5935 						MLX5_FLOW_ACTION_INC_TCP_ACK :
5936 						MLX5_FLOW_ACTION_DEC_TCP_ACK;
5937 			rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
5938 			break;
5939 		case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
5940 			break;
5941 		case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
5942 		case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
5943 			rw_act_num += MLX5_ACT_NUM_SET_TAG;
5944 			break;
5945 		case RTE_FLOW_ACTION_TYPE_METER:
5946 			ret = mlx5_flow_validate_action_meter(dev,
5947 							      action_flags,
5948 							      actions, attr,
5949 							      error);
5950 			if (ret < 0)
5951 				return ret;
5952 			action_flags |= MLX5_FLOW_ACTION_METER;
5953 			++actions_n;
5954 			/* Meter action will add one more TAG action. */
5955 			rw_act_num += MLX5_ACT_NUM_SET_TAG;
5956 			break;
5957 		case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
5958 			if (!attr->group)
5959 				return rte_flow_error_set(error, ENOTSUP,
5960 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5961 									   NULL,
5962 			  "Shared ASO age action is not supported for group 0");
5963 			action_flags |= MLX5_FLOW_ACTION_AGE;
5964 			++actions_n;
5965 			break;
5966 		case RTE_FLOW_ACTION_TYPE_AGE:
5967 			ret = flow_dv_validate_action_age(action_flags,
5968 							  actions, dev,
5969 							  error);
5970 			if (ret < 0)
5971 				return ret;
5972 			action_flags |= MLX5_FLOW_ACTION_AGE;
5973 			++actions_n;
5974 			break;
5975 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
5976 			ret = flow_dv_validate_action_modify_ipv4_dscp
5977 							 (action_flags,
5978 							  actions,
5979 							  item_flags,
5980 							  error);
5981 			if (ret < 0)
5982 				return ret;
5983 			/* Count all modify-header actions as one action. */
5984 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5985 				++actions_n;
5986 			action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
5987 			rw_act_num += MLX5_ACT_NUM_SET_DSCP;
5988 			break;
5989 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
5990 			ret = flow_dv_validate_action_modify_ipv6_dscp
5991 								(action_flags,
5992 								 actions,
5993 								 item_flags,
5994 								 error);
5995 			if (ret < 0)
5996 				return ret;
5997 			/* Count all modify-header actions as one action. */
5998 			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5999 				++actions_n;
6000 			action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
6001 			rw_act_num += MLX5_ACT_NUM_SET_DSCP;
6002 			break;
6003 		case RTE_FLOW_ACTION_TYPE_SAMPLE:
6004 			ret = flow_dv_validate_action_sample(action_flags,
6005 							     actions, dev,
6006 							     attr, error);
6007 			if (ret < 0)
6008 				return ret;
6009 			action_flags |= MLX5_FLOW_ACTION_SAMPLE;
6010 			++actions_n;
6011 			break;
6012 		case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
6013 			if (actions[0].type != (typeof(actions[0].type))
6014 				MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
6015 				return rte_flow_error_set
6016 						(error, EINVAL,
6017 						RTE_FLOW_ERROR_TYPE_ACTION,
6018 						NULL, "MLX5 private action "
6019 						"must be the first");
6020 
6021 			action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6022 			break;
6023 		default:
6024 			return rte_flow_error_set(error, ENOTSUP,
6025 						  RTE_FLOW_ERROR_TYPE_ACTION,
6026 						  actions,
6027 						  "action not supported");
6028 		}
6029 	}
6030 	/*
6031 	 * Validate actions in flow rules:
6032 	 * - Explicit decap action is prohibited by the tunnel offload API.
6033 	 * - Drop action in tunnel steer rule is prohibited by the API.
6034 	 * - Application cannot use MARK action because its value can mask
6035 	 *   the tunnel default miss notification.
6036 	 * - JUMP in tunnel match rule is not supported by the current PMD
6037 	 *   implementation.
6038 	 * - TAG & META are reserved for future uses.
6039 	 */
6040 	if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
6041 		uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
6042 					    MLX5_FLOW_ACTION_MARK     |
6043 					    MLX5_FLOW_ACTION_SET_TAG  |
6044 					    MLX5_FLOW_ACTION_SET_META |
6045 					    MLX5_FLOW_ACTION_DROP;
6046 
6047 		if (action_flags & bad_actions_mask)
6048 			return rte_flow_error_set
6049 					(error, EINVAL,
6050 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6051 					"Invalid RTE action in tunnel "
6052 					"set decap rule");
6053 		if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
6054 			return rte_flow_error_set
6055 					(error, EINVAL,
6056 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6057 					"tunnel set decap rule must terminate "
6058 					"with JUMP");
6059 		if (!attr->ingress)
6060 			return rte_flow_error_set
6061 					(error, EINVAL,
6062 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6063 					"tunnel flows for ingress traffic only");
6064 	}
6065 	if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
6066 		uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
6067 					    MLX5_FLOW_ACTION_MARK    |
6068 					    MLX5_FLOW_ACTION_SET_TAG |
6069 					    MLX5_FLOW_ACTION_SET_META;
6070 
6071 		if (action_flags & bad_actions_mask)
6072 			return rte_flow_error_set
6073 					(error, EINVAL,
6074 					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6075 					"Invalid RTE action in tunnel "
6076 					"set match rule");
6077 	}
6078 	/*
6079 	 * Validate the drop action mutual exclusion with other actions.
6080 	 * Drop action is mutually-exclusive with any other action, except for
6081 	 * Count action.
6082 	 */
6083 	if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
6084 	    (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
6085 		return rte_flow_error_set(error, EINVAL,
6086 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6087 					  "Drop action is mutually-exclusive "
6088 					  "with any other action, except for "
6089 					  "Count action");
6090 	/* E-Switch has a few restrictions on using items and actions. */
6091 	if (attr->transfer) {
6092 		if (!mlx5_flow_ext_mreg_supported(dev) &&
6093 		    action_flags & MLX5_FLOW_ACTION_FLAG)
6094 			return rte_flow_error_set(error, ENOTSUP,
6095 						  RTE_FLOW_ERROR_TYPE_ACTION,
6096 						  NULL,
6097 						  "unsupported action FLAG");
6098 		if (!mlx5_flow_ext_mreg_supported(dev) &&
6099 		    action_flags & MLX5_FLOW_ACTION_MARK)
6100 			return rte_flow_error_set(error, ENOTSUP,
6101 						  RTE_FLOW_ERROR_TYPE_ACTION,
6102 						  NULL,
6103 						  "unsupported action MARK");
6104 		if (action_flags & MLX5_FLOW_ACTION_QUEUE)
6105 			return rte_flow_error_set(error, ENOTSUP,
6106 						  RTE_FLOW_ERROR_TYPE_ACTION,
6107 						  NULL,
6108 						  "unsupported action QUEUE");
6109 		if (action_flags & MLX5_FLOW_ACTION_RSS)
6110 			return rte_flow_error_set(error, ENOTSUP,
6111 						  RTE_FLOW_ERROR_TYPE_ACTION,
6112 						  NULL,
6113 						  "unsupported action RSS");
6114 		if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
6115 			return rte_flow_error_set(error, EINVAL,
6116 						  RTE_FLOW_ERROR_TYPE_ACTION,
6117 						  actions,
6118 						  "no fate action is found");
6119 	} else {
6120 		if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
6121 			return rte_flow_error_set(error, EINVAL,
6122 						  RTE_FLOW_ERROR_TYPE_ACTION,
6123 						  actions,
6124 						  "no fate action is found");
6125 	}
6126 	/*
6127 	 * Continue validation for Xcap and VLAN actions.
6128 	 * If hairpin is working in explicit TX rule mode, there is no actions
6129 	 * splitting and the validation of hairpin ingress flow should be the
6130 	 * same as other standard flows.
6131 	 */
6132 	if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
6133 			     MLX5_FLOW_VLAN_ACTIONS)) &&
6134 	    (queue_index == 0xFFFF ||
6135 	     mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
6136 	     ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
6137 	     conf->tx_explicit != 0))) {
6138 		if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
6139 		    MLX5_FLOW_XCAP_ACTIONS)
6140 			return rte_flow_error_set(error, ENOTSUP,
6141 						  RTE_FLOW_ERROR_TYPE_ACTION,
6142 						  NULL, "encap and decap "
6143 						  "combination aren't supported");
6144 		if (!attr->transfer && attr->ingress) {
6145 			if (action_flags & MLX5_FLOW_ACTION_ENCAP)
6146 				return rte_flow_error_set
6147 						(error, ENOTSUP,
6148 						 RTE_FLOW_ERROR_TYPE_ACTION,
6149 						 NULL, "encap is not supported"
6150 						 " for ingress traffic");
6151 			else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
6152 				return rte_flow_error_set
6153 						(error, ENOTSUP,
6154 						 RTE_FLOW_ERROR_TYPE_ACTION,
6155 						 NULL, "push VLAN action not "
6156 						 "supported for ingress");
6157 			else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
6158 					MLX5_FLOW_VLAN_ACTIONS)
6159 				return rte_flow_error_set
6160 						(error, ENOTSUP,
6161 						 RTE_FLOW_ERROR_TYPE_ACTION,
6162 						 NULL, "no support for "
6163 						 "multiple VLAN actions");
6164 		}
6165 	}
6166 	/*
6167 	 * Hairpin flow will add one more TAG action in TX implicit mode.
6168 	 * In TX explicit mode, there will be no hairpin flow ID.
6169 	 */
6170 	if (hairpin > 0)
6171 		rw_act_num += MLX5_ACT_NUM_SET_TAG;
6172 	/* Extra metadata enabled: one more TAG action will be added. */
6173 	if (dev_conf->dv_flow_en &&
6174 	    dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
6175 	    mlx5_flow_ext_mreg_supported(dev))
6176 		rw_act_num += MLX5_ACT_NUM_SET_TAG;
6177 	if ((uint32_t)rw_act_num >
6178 			flow_dv_modify_hdr_action_max(dev, is_root)) {
6179 		return rte_flow_error_set(error, ENOTSUP,
6180 					  RTE_FLOW_ERROR_TYPE_ACTION,
6181 					  NULL, "too many header modify"
6182 					  " actions to support");
6183 	}
6184 	return 0;
6185 }
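
/*
 * Illustrative sketch (not part of the driver): the only action that may
 * accompany DROP and still pass the exclusion check above is COUNT:
 *
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * Any other action added to this list makes the DROP check fail with EINVAL.
 */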
6186 
6187 /**
6188  * Internal preparation function. Allocates the DV flow objects;
6189  * their size is constant.
6190  *
6191  * @param[in] dev
6192  *   Pointer to the rte_eth_dev structure.
6193  * @param[in] attr
6194  *   Pointer to the flow attributes.
6195  * @param[in] items
6196  *   Pointer to the list of items.
6197  * @param[in] actions
6198  *   Pointer to the list of actions.
6199  * @param[out] error
6200  *   Pointer to the error structure.
6201  *
6202  * @return
6203  *   Pointer to mlx5_flow object on success,
6204  *   otherwise NULL and rte_errno is set.
6205  */
6206 static struct mlx5_flow *
6207 flow_dv_prepare(struct rte_eth_dev *dev,
6208 		const struct rte_flow_attr *attr __rte_unused,
6209 		const struct rte_flow_item items[] __rte_unused,
6210 		const struct rte_flow_action actions[] __rte_unused,
6211 		struct rte_flow_error *error)
6212 {
6213 	uint32_t handle_idx = 0;
6214 	struct mlx5_flow *dev_flow;
6215 	struct mlx5_flow_handle *dev_handle;
6216 	struct mlx5_priv *priv = dev->data->dev_private;
6217 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
6218 
6219 	MLX5_ASSERT(wks);
6220 	/* Guard against memory corruption. */
6221 	if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
6222 		rte_flow_error_set(error, ENOSPC,
6223 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6224 				   "no free temporary device flow");
6225 		return NULL;
6226 	}
6227 	dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
6228 				   &handle_idx);
6229 	if (!dev_handle) {
6230 		rte_flow_error_set(error, ENOMEM,
6231 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6232 				   "not enough memory to create flow handle");
6233 		return NULL;
6234 	}
6235 	MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
6236 	dev_flow = &wks->flows[wks->flow_idx++];
6237 	dev_flow->handle = dev_handle;
6238 	dev_flow->handle_idx = handle_idx;
6239 	/*
6240 	 * Some old rdma-core releases check the length of the matching
6241 	 * parameter before continuing, and the check must use the length
6242 	 * without the misc4 param. If the flow supports misc4, the length
6243 	 * is adjusted accordingly. Each param member is naturally aligned
6244 	 * to a 64B boundary.
6245 	 */
6246 	dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
6247 				  MLX5_ST_SZ_BYTES(fte_match_set_misc4);
6248 	/*
6249 	 * The matching value must be cleared to 0 before use. In the past
6250 	 * it was cleared automatically by the rte_*alloc API. The time
6251 	 * consumed is almost the same as before.
6252 	 */
6253 	memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
6254 	dev_flow->ingress = attr->ingress;
6255 	dev_flow->dv.transfer = attr->transfer;
6256 	return dev_flow;
6257 }
6258 
6259 #ifdef RTE_LIBRTE_MLX5_DEBUG
6260 /**
6261  * Sanity check for match mask and value. Similar to check_valid_spec() in
6262  * kernel driver. If an unmasked bit is set in the value, it returns failure.
6263  *
6264  * @param match_mask
6265  *   pointer to match mask buffer.
6266  * @param match_value
6267  *   pointer to match value buffer.
6268  *
6269  * @return
6270  *   0 if valid, -EINVAL otherwise.
6271  */
6272 static int
6273 flow_dv_check_valid_spec(void *match_mask, void *match_value)
6274 {
6275 	uint8_t *m = match_mask;
6276 	uint8_t *v = match_value;
6277 	unsigned int i;
6278 
6279 	for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
6280 		if (v[i] & ~m[i]) {
6281 			DRV_LOG(ERR,
6282 				"match_value differs from match_criteria"
6283 				" %p[%u] != %p[%u]",
6284 				match_value, i, match_mask, i);
6285 			return -EINVAL;
6286 		}
6287 	}
6288 	return 0;
6289 }
6290 #endif
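
/*
 * Illustrative sketch of what flow_dv_check_valid_spec() rejects: a value
 * bit set outside its mask. Narrowed to two bytes for brevity:
 *
 *	uint8_t mask[2]  = { 0x0f, 0xff };
 *	uint8_t value[2] = { 0x10, 0x01 };
 *
 * Byte 0 carries value bit 0x10 while the mask covers only 0x0f, so
 * (value[0] & ~mask[0]) != 0 and the check returns -EINVAL.
 */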
6291 
6292 /**
6293  * Add match of ip_version.
6294  *
6295  * @param[in] group
6296  *   Flow group.
6297  * @param[in] headers_v
6298  *   Values header pointer.
6299  * @param[in] headers_m
6300  *   Masks header pointer.
6301  * @param[in] ip_version
6302  *   The IP version to set.
6303  */
6304 static inline void
6305 flow_dv_set_match_ip_version(uint32_t group,
6306 			     void *headers_v,
6307 			     void *headers_m,
6308 			     uint8_t ip_version)
6309 {
6310 	if (group == 0)
6311 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
6312 	else
6313 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
6314 			 ip_version);
6315 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
6316 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
6317 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
6318 }
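
/*
 * Illustrative note: in the root table (group 0) the mask is widened to
 * the full 4-bit field, so an IPv4 rule matches as mask 0xf / value 0x4,
 * while in non-root groups both mask and value are 0x4. In both cases the
 * ethertype match is cleared because ip_version supersedes it.
 */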
6319 
6320 /**
6321  * Add Ethernet item to matcher and to the value.
6322  *
6323  * @param[in, out] matcher
6324  *   Flow matcher.
6325  * @param[in, out] key
6326  *   Flow matcher value.
6327  * @param[in] item
6328  *   Flow pattern to translate.
6329  * @param[in] inner
6330  *   Item is inner pattern.
6331  */
6332 static void
6333 flow_dv_translate_item_eth(void *matcher, void *key,
6334 			   const struct rte_flow_item *item, int inner,
6335 			   uint32_t group)
6336 {
6337 	const struct rte_flow_item_eth *eth_m = item->mask;
6338 	const struct rte_flow_item_eth *eth_v = item->spec;
6339 	const struct rte_flow_item_eth nic_mask = {
6340 		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6341 		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6342 		.type = RTE_BE16(0xffff),
6343 		.has_vlan = 0,
6344 	};
6345 	void *hdrs_m;
6346 	void *hdrs_v;
6347 	char *l24_v;
6348 	unsigned int i;
6349 
6350 	if (!eth_v)
6351 		return;
6352 	if (!eth_m)
6353 		eth_m = &nic_mask;
6354 	if (inner) {
6355 		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6356 					 inner_headers);
6357 		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6358 	} else {
6359 		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6360 					 outer_headers);
6361 		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6362 	}
6363 	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
6364 	       &eth_m->dst, sizeof(eth_m->dst));
6365 	/* The value must be in the range of the mask. */
6366 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
6367 	for (i = 0; i < sizeof(eth_m->dst); ++i)
6368 		l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
6369 	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
6370 	       &eth_m->src, sizeof(eth_m->src));
6371 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
6372 	/* The value must be in the range of the mask. */
6373 	for (i = 0; i < sizeof(eth_m->dst); ++i)
6374 		l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
6375 	/*
6376 	 * HW supports match on one Ethertype, the Ethertype following the last
6377 	 * VLAN tag of the packet (see PRM).
6378 	 * Set match on ethertype only if ETH header is not followed by VLAN.
6379 	 * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6380 	 * ethertype, and use ip_version field instead.
6381 	 * eCPRI over Ether layer will use type value 0xAEFE.
6382 	 */
6383 	if (eth_m->type == 0xFFFF) {
6384 		/* Set cvlan_tag mask for any single/multi/un-tagged case. */
6385 		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6386 		switch (eth_v->type) {
6387 		case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6388 			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6389 			return;
6390 		case RTE_BE16(RTE_ETHER_TYPE_QINQ):
6391 			MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6392 			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6393 			return;
6394 		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6395 			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6396 			return;
6397 		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6398 			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6399 			return;
6400 		default:
6401 			break;
6402 		}
6403 	}
6404 	if (eth_m->has_vlan) {
6405 		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6406 		if (eth_v->has_vlan) {
6407 			/*
6408 			 * When the has_more_vlan field in the VLAN item is
6409 			 * also unset, only single-tagged packets are matched.
6410 			 */
6411 			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6412 			return;
6413 		}
6414 	}
6415 	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6416 		 rte_be_to_cpu_16(eth_m->type));
6417 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
6418 	*(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
6419 }
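
/*
 * Illustrative sketch: an ETH item that fully masks the type field is
 * translated into an ip_version match instead of an ethertype match:
 *
 *	struct rte_flow_item_eth spec = {
 *		.type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *	};
 *	struct rte_flow_item_eth mask = {
 *		.type = RTE_BE16(0xffff),
 *	};
 *
 * With this spec/mask pair the switch above calls
 * flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4) and returns,
 * leaving the ethertype fields untouched.
 */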
6420 
6421 /**
6422  * Add VLAN item to matcher and to the value.
6423  *
6424  * @param[in, out] dev_flow
6425  *   Flow descriptor.
6426  * @param[in, out] matcher
6427  *   Flow matcher.
6428  * @param[in, out] key
6429  *   Flow matcher value.
6430  * @param[in] item
6431  *   Flow pattern to translate.
6432  * @param[in] inner
6433  *   Item is inner pattern.
6434  */
6435 static void
6436 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
6437 			    void *matcher, void *key,
6438 			    const struct rte_flow_item *item,
6439 			    int inner, uint32_t group)
6440 {
6441 	const struct rte_flow_item_vlan *vlan_m = item->mask;
6442 	const struct rte_flow_item_vlan *vlan_v = item->spec;
6443 	void *hdrs_m;
6444 	void *hdrs_v;
6445 	uint16_t tci_m;
6446 	uint16_t tci_v;
6447 
6448 	if (inner) {
6449 		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6450 					 inner_headers);
6451 		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6452 	} else {
6453 		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6454 					 outer_headers);
6455 		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6456 		/*
6457 		 * This is a workaround: masks are not supported
6458 		 * here and have been pre-validated.
6459 		 */
6460 		if (vlan_v)
6461 			dev_flow->handle->vf_vlan.tag =
6462 					rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
6463 	}
6464 	/*
6465 	 * When VLAN item exists in flow, mark packet as tagged,
6466 	 * even if TCI is not specified.
6467 	 */
6468 	if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
6469 		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6470 		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6471 	}
6472 	if (!vlan_v)
6473 		return;
6474 	if (!vlan_m)
6475 		vlan_m = &rte_flow_item_vlan_mask;
6476 	tci_m = rte_be_to_cpu_16(vlan_m->tci);
6477 	tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
6478 	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
6479 	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
6480 	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
6481 	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
6482 	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
6483 	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
6484 	/*
6485 	 * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6486 	 * ethertype, and use ip_version field instead.
6487 	 */
6488 	if (vlan_m->inner_type == 0xFFFF) {
6489 		switch (vlan_v->inner_type) {
6490 		case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6491 			MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6492 			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6493 			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6494 			return;
6495 		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6496 			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6497 			return;
6498 		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6499 			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6500 			return;
6501 		default:
6502 			break;
6503 		}
6504 	}
6505 	if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
6506 		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6507 		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6508 		/* Only one vlan_tag bit can be set. */
6509 		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6510 		return;
6511 	}
6512 	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6513 		 rte_be_to_cpu_16(vlan_m->inner_type));
6514 	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
6515 		 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
6516 }
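
/*
 * Worked example (illustrative) of the TCI decomposition above, for
 * TCI = 0xa005 under a full 0xffff mask (MLX5_SET() truncates each
 * value to its field width):
 *
 *	first_vid  = 0xa005 & 0x0fff    = 0x005 (VLAN ID 5)
 *	first_cfi  = (0xa005 >> 12) & 1 = 0     (DEI/CFI bit)
 *	first_prio = 0xa005 >> 13       = 5     (PCP priority)
 */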
6517 
6518 /**
6519  * Add IPV4 item to matcher and to the value.
6520  *
6521  * @param[in, out] matcher
6522  *   Flow matcher.
6523  * @param[in, out] key
6524  *   Flow matcher value.
6525  * @param[in] item
6526  *   Flow pattern to translate.
6527  * @param[in] inner
6528  *   Item is inner pattern.
6529  * @param[in] group
6530  *   The group to insert the rule.
6531  */
6532 static void
6533 flow_dv_translate_item_ipv4(void *matcher, void *key,
6534 			    const struct rte_flow_item *item,
6535 			    int inner, uint32_t group)
6536 {
6537 	const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
6538 	const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
6539 	const struct rte_flow_item_ipv4 nic_mask = {
6540 		.hdr = {
6541 			.src_addr = RTE_BE32(0xffffffff),
6542 			.dst_addr = RTE_BE32(0xffffffff),
6543 			.type_of_service = 0xff,
6544 			.next_proto_id = 0xff,
6545 			.time_to_live = 0xff,
6546 		},
6547 	};
6548 	void *headers_m;
6549 	void *headers_v;
6550 	char *l24_m;
6551 	char *l24_v;
6552 	uint8_t tos;
6553 
6554 	if (inner) {
6555 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6556 					 inner_headers);
6557 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6558 	} else {
6559 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6560 					 outer_headers);
6561 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6562 	}
6563 	flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6564 	if (!ipv4_v)
6565 		return;
6566 	if (!ipv4_m)
6567 		ipv4_m = &nic_mask;
6568 	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6569 			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6570 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6571 			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6572 	*(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
6573 	*(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
6574 	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6575 			  src_ipv4_src_ipv6.ipv4_layout.ipv4);
6576 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6577 			  src_ipv4_src_ipv6.ipv4_layout.ipv4);
6578 	*(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
6579 	*(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
6580 	tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
6581 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
6582 		 ipv4_m->hdr.type_of_service);
6583 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
6584 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
6585 		 ipv4_m->hdr.type_of_service >> 2);
6586 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
6587 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6588 		 ipv4_m->hdr.next_proto_id);
6589 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6590 		 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
6591 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6592 		 ipv4_m->hdr.time_to_live);
6593 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6594 		 ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
6595 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6596 		 !!(ipv4_m->hdr.fragment_offset));
6597 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6598 		 !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
6599 }
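
/*
 * Worked example (illustrative) of the TOS split above: for
 * type_of_service = 0xb8 (DSCP 46 "EF", ECN 0) under a 0xff mask:
 *
 *	ip_ecn  = 0xb8 & 0x3 = 0  (low two bits, after field truncation)
 *	ip_dscp = 0xb8 >> 2  = 46 (upper six bits)
 */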
6600 
6601 /**
6602  * Add IPV6 item to matcher and to the value.
6603  *
6604  * @param[in, out] matcher
6605  *   Flow matcher.
6606  * @param[in, out] key
6607  *   Flow matcher value.
6608  * @param[in] item
6609  *   Flow pattern to translate.
6610  * @param[in] inner
6611  *   Item is inner pattern.
6612  * @param[in] group
6613  *   The group to insert the rule.
6614  */
6615 static void
6616 flow_dv_translate_item_ipv6(void *matcher, void *key,
6617 			    const struct rte_flow_item *item,
6618 			    int inner, uint32_t group)
6619 {
6620 	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
6621 	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
6622 	const struct rte_flow_item_ipv6 nic_mask = {
6623 		.hdr = {
6624 			.src_addr =
6625 				"\xff\xff\xff\xff\xff\xff\xff\xff"
6626 				"\xff\xff\xff\xff\xff\xff\xff\xff",
6627 			.dst_addr =
6628 				"\xff\xff\xff\xff\xff\xff\xff\xff"
6629 				"\xff\xff\xff\xff\xff\xff\xff\xff",
6630 			.vtc_flow = RTE_BE32(0xffffffff),
6631 			.proto = 0xff,
6632 			.hop_limits = 0xff,
6633 		},
6634 	};
6635 	void *headers_m;
6636 	void *headers_v;
6637 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6638 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6639 	char *l24_m;
6640 	char *l24_v;
6641 	uint32_t vtc_m;
6642 	uint32_t vtc_v;
6643 	int i;
6644 	int size;
6645 
6646 	if (inner) {
6647 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6648 					 inner_headers);
6649 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6650 	} else {
6651 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6652 					 outer_headers);
6653 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6654 	}
6655 	flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6656 	if (!ipv6_v)
6657 		return;
6658 	if (!ipv6_m)
6659 		ipv6_m = &nic_mask;
6660 	size = sizeof(ipv6_m->hdr.dst_addr);
6661 	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6662 			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6663 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6664 			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6665 	memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
6666 	for (i = 0; i < size; ++i)
6667 		l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
6668 	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6669 			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
6670 	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6671 			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
6672 	memcpy(l24_m, ipv6_m->hdr.src_addr, size);
6673 	for (i = 0; i < size; ++i)
6674 		l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
6675 	/* TOS. */
6676 	vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
6677 	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
6678 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
6679 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
6680 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
6681 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
6682 	/* Label. */
6683 	if (inner) {
6684 		MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
6685 			 vtc_m);
6686 		MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
6687 			 vtc_v);
6688 	} else {
6689 		MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
6690 			 vtc_m);
6691 		MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
6692 			 vtc_v);
6693 	}
6694 	/* Protocol. */
6695 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6696 		 ipv6_m->hdr.proto);
6697 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6698 		 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
6699 	/* Hop limit. */
6700 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6701 		 ipv6_m->hdr.hop_limits);
6702 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6703 		 ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
6704 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6705 		 !!(ipv6_m->has_frag_ext));
6706 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6707 		 !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
6708 }
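
/*
 * Worked example (illustrative) of the vtc_flow decomposition above.
 * The IPv6 vtc_flow word is version(4) | traffic class(8) | flow label(20),
 * so for vtc_flow = 0x6b800000 (version 6, traffic class 0xb8, label 0),
 * with MLX5_SET() truncating to field width:
 *
 *	ip_ecn     = (0x6b800000 >> 20) & 0x3  = 0  (low 2 bits of TC)
 *	ip_dscp    = (0x6b800000 >> 22) & 0x3f = 46 (upper 6 bits of TC)
 *	flow label = 0x6b800000 & 0xfffff      = 0
 */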
6709 
6710 /**
6711  * Add IPV6 fragment extension item to matcher and to the value.
6712  *
6713  * @param[in, out] matcher
6714  *   Flow matcher.
6715  * @param[in, out] key
6716  *   Flow matcher value.
6717  * @param[in] item
6718  *   Flow pattern to translate.
6719  * @param[in] inner
6720  *   Item is inner pattern.
6721  */
6722 static void
6723 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
6724 				     const struct rte_flow_item *item,
6725 				     int inner)
6726 {
6727 	const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
6728 	const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
6729 	const struct rte_flow_item_ipv6_frag_ext nic_mask = {
6730 		.hdr = {
6731 			.next_header = 0xff,
6732 			.frag_data = RTE_BE16(0xffff),
6733 		},
6734 	};
6735 	void *headers_m;
6736 	void *headers_v;
6737 
6738 	if (inner) {
6739 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6740 					 inner_headers);
6741 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6742 	} else {
6743 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6744 					 outer_headers);
6745 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6746 	}
6747 	/* IPv6 fragment extension item exists, so packet is IP fragment. */
6748 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
6749 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
6750 	if (!ipv6_frag_ext_v)
6751 		return;
6752 	if (!ipv6_frag_ext_m)
6753 		ipv6_frag_ext_m = &nic_mask;
6754 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6755 		 ipv6_frag_ext_m->hdr.next_header);
6756 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6757 		 ipv6_frag_ext_v->hdr.next_header &
6758 		 ipv6_frag_ext_m->hdr.next_header);
6759 }
6760 
6761 /**
6762  * Add TCP item to matcher and to the value.
6763  *
6764  * @param[in, out] matcher
6765  *   Flow matcher.
6766  * @param[in, out] key
6767  *   Flow matcher value.
6768  * @param[in] item
6769  *   Flow pattern to translate.
6770  * @param[in] inner
6771  *   Item is inner pattern.
6772  */
6773 static void
6774 flow_dv_translate_item_tcp(void *matcher, void *key,
6775 			   const struct rte_flow_item *item,
6776 			   int inner)
6777 {
6778 	const struct rte_flow_item_tcp *tcp_m = item->mask;
6779 	const struct rte_flow_item_tcp *tcp_v = item->spec;
6780 	void *headers_m;
6781 	void *headers_v;
6782 
6783 	if (inner) {
6784 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6785 					 inner_headers);
6786 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6787 	} else {
6788 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6789 					 outer_headers);
6790 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6791 	}
6792 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6793 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
6794 	if (!tcp_v)
6795 		return;
6796 	if (!tcp_m)
6797 		tcp_m = &rte_flow_item_tcp_mask;
6798 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
6799 		 rte_be_to_cpu_16(tcp_m->hdr.src_port));
6800 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
6801 		 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
6802 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
6803 		 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
6804 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
6805 		 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
6806 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
6807 		 tcp_m->hdr.tcp_flags);
6808 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
6809 		 (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
6810 }
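
/*
 * Illustrative sketch: matching TCP destination port 80 with an explicit
 * single-field mask translates into tcp_dport mask 0xffff / value 80,
 * plus the implicit ip_protocol == IPPROTO_TCP match set even for an
 * empty TCP item:
 *
 *	struct rte_flow_item_tcp spec = {
 *		.hdr = { .dst_port = RTE_BE16(80) },
 *	};
 *	struct rte_flow_item_tcp mask = {
 *		.hdr = { .dst_port = RTE_BE16(0xffff) },
 *	};
 */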
6811 
6812 /**
6813  * Add UDP item to matcher and to the value.
6814  *
6815  * @param[in, out] matcher
6816  *   Flow matcher.
6817  * @param[in, out] key
6818  *   Flow matcher value.
6819  * @param[in] item
6820  *   Flow pattern to translate.
6821  * @param[in] inner
6822  *   Item is inner pattern.
6823  */
6824 static void
6825 flow_dv_translate_item_udp(void *matcher, void *key,
6826 			   const struct rte_flow_item *item,
6827 			   int inner)
6828 {
6829 	const struct rte_flow_item_udp *udp_m = item->mask;
6830 	const struct rte_flow_item_udp *udp_v = item->spec;
6831 	void *headers_m;
6832 	void *headers_v;
6833 
6834 	if (inner) {
6835 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6836 					 inner_headers);
6837 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6838 	} else {
6839 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6840 					 outer_headers);
6841 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6842 	}
6843 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6844 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
6845 	if (!udp_v)
6846 		return;
6847 	if (!udp_m)
6848 		udp_m = &rte_flow_item_udp_mask;
6849 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
6850 		 rte_be_to_cpu_16(udp_m->hdr.src_port));
6851 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
6852 		 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
6853 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
6854 		 rte_be_to_cpu_16(udp_m->hdr.dst_port));
6855 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
6856 		 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
6857 }
6858 
6859 /**
6860  * Add GRE optional Key item to matcher and to the value.
6861  *
6862  * @param[in, out] matcher
6863  *   Flow matcher.
6864  * @param[in, out] key
6865  *   Flow matcher value.
6866  * @param[in] item
6867  *   Flow pattern to translate.
6870  */
6871 static void
6872 flow_dv_translate_item_gre_key(void *matcher, void *key,
6873 				   const struct rte_flow_item *item)
6874 {
6875 	const rte_be32_t *key_m = item->mask;
6876 	const rte_be32_t *key_v = item->spec;
6877 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6878 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6879 	rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
6880 
6881 	/* GRE K bit must be on and should already be validated */
6882 	MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
6883 	MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
6884 	if (!key_v)
6885 		return;
6886 	if (!key_m)
6887 		key_m = &gre_key_default_mask;
6888 	MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
6889 		 rte_be_to_cpu_32(*key_m) >> 8);
6890 	MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
6891 		 rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
6892 	MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
6893 		 rte_be_to_cpu_32(*key_m) & 0xFF);
6894 	MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
6895 		 rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
6896 }
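
/*
 * Worked example (illustrative) of the GRE key split above: the PRM
 * exposes the 32-bit key as a 24-bit high part and an 8-bit low part,
 * so for key = 0xaabbccdd under a full mask:
 *
 *	gre_key_h = 0xaabbccdd >> 8   = 0xaabbcc
 *	gre_key_l = 0xaabbccdd & 0xff = 0xdd
 */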
6897 
6898 /**
6899  * Add GRE item to matcher and to the value.
6900  *
6901  * @param[in, out] matcher
6902  *   Flow matcher.
6903  * @param[in, out] key
6904  *   Flow matcher value.
6905  * @param[in] item
6906  *   Flow pattern to translate.
6907  * @param[in] inner
6908  *   Item is inner pattern.
6909  */
6910 static void
6911 flow_dv_translate_item_gre(void *matcher, void *key,
6912 			   const struct rte_flow_item *item,
6913 			   int inner)
6914 {
6915 	const struct rte_flow_item_gre *gre_m = item->mask;
6916 	const struct rte_flow_item_gre *gre_v = item->spec;
6917 	void *headers_m;
6918 	void *headers_v;
6919 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6920 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6921 	struct {
6922 		union {
6923 			__extension__
6924 			struct {
6925 				uint16_t version:3;
6926 				uint16_t rsvd0:9;
6927 				uint16_t s_present:1;
6928 				uint16_t k_present:1;
6929 				uint16_t rsvd_bit1:1;
6930 				uint16_t c_present:1;
6931 			};
6932 			uint16_t value;
6933 		};
6934 	} gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
6935 
6936 	if (inner) {
6937 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6938 					 inner_headers);
6939 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6940 	} else {
6941 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6942 					 outer_headers);
6943 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6944 	}
6945 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6946 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
6947 	if (!gre_v)
6948 		return;
6949 	if (!gre_m)
6950 		gre_m = &rte_flow_item_gre_mask;
6951 	MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
6952 		 rte_be_to_cpu_16(gre_m->protocol));
6953 	MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
6954 		 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
6955 	gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
6956 	gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
6957 	MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
6958 		 gre_crks_rsvd0_ver_m.c_present);
6959 	MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
6960 		 gre_crks_rsvd0_ver_v.c_present &
6961 		 gre_crks_rsvd0_ver_m.c_present);
6962 	MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
6963 		 gre_crks_rsvd0_ver_m.k_present);
6964 	MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
6965 		 gre_crks_rsvd0_ver_v.k_present &
6966 		 gre_crks_rsvd0_ver_m.k_present);
6967 	MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
6968 		 gre_crks_rsvd0_ver_m.s_present);
6969 	MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
6970 		 gre_crks_rsvd0_ver_v.s_present &
6971 		 gre_crks_rsvd0_ver_m.s_present);
6972 }
6973 
6974 /**
6975  * Add NVGRE item to matcher and to the value.
6976  *
6977  * @param[in, out] matcher
6978  *   Flow matcher.
6979  * @param[in, out] key
6980  *   Flow matcher value.
6981  * @param[in] item
6982  *   Flow pattern to translate.
6983  * @param[in] inner
6984  *   Item is inner pattern.
6985  */
6986 static void
6987 flow_dv_translate_item_nvgre(void *matcher, void *key,
6988 			     const struct rte_flow_item *item,
6989 			     int inner)
6990 {
6991 	const struct rte_flow_item_nvgre *nvgre_m = item->mask;
6992 	const struct rte_flow_item_nvgre *nvgre_v = item->spec;
6993 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6994 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6995 	const char *tni_flow_id_m;
6996 	const char *tni_flow_id_v;
6997 	char *gre_key_m;
6998 	char *gre_key_v;
6999 	int size;
7000 	int i;
7001 
7002 	/* For NVGRE, GRE header fields must be set with defined values. */
7003 	const struct rte_flow_item_gre gre_spec = {
7004 		.c_rsvd0_ver = RTE_BE16(0x2000),
7005 		.protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
7006 	};
7007 	const struct rte_flow_item_gre gre_mask = {
7008 		.c_rsvd0_ver = RTE_BE16(0xB000),
7009 		.protocol = RTE_BE16(UINT16_MAX),
7010 	};
7011 	const struct rte_flow_item gre_item = {
7012 		.spec = &gre_spec,
7013 		.mask = &gre_mask,
7014 		.last = NULL,
7015 	};
7016 	flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
7017 	if (!nvgre_v)
7018 		return;
7019 	if (!nvgre_m)
7020 		nvgre_m = &rte_flow_item_nvgre_mask;
7021 	tni_flow_id_m = (const char *)nvgre_m->tni;
7022 	tni_flow_id_v = (const char *)nvgre_v->tni;
7023 	size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
7024 	gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
7025 	gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
7026 	memcpy(gre_key_m, tni_flow_id_m, size);
7027 	for (i = 0; i < size; ++i)
7028 		gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
7029 }
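
/*
 * Illustrative note on the GRE template above: in the little-endian
 * bit-field layout used by flow_dv_translate_item_gre(), spec 0x2000
 * sets only the K (key present) bit, and mask 0xb000 covers the C, S
 * and K bits, i.e. NVGRE requires K=1 with C=S=0. The 24-bit TNI and
 * the 8-bit flow_id are then copied back-to-back into the 32-bit GRE
 * key fields.
 */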
7030 
7031 /**
7032  * Add VXLAN item to matcher and to the value.
7033  *
7034  * @param[in, out] matcher
7035  *   Flow matcher.
7036  * @param[in, out] key
7037  *   Flow matcher value.
7038  * @param[in] item
7039  *   Flow pattern to translate.
7040  * @param[in] inner
7041  *   Item is inner pattern.
7042  */
7043 static void
7044 flow_dv_translate_item_vxlan(void *matcher, void *key,
7045 			     const struct rte_flow_item *item,
7046 			     int inner)
7047 {
7048 	const struct rte_flow_item_vxlan *vxlan_m = item->mask;
7049 	const struct rte_flow_item_vxlan *vxlan_v = item->spec;
7050 	void *headers_m;
7051 	void *headers_v;
7052 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7053 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7054 	char *vni_m;
7055 	char *vni_v;
7056 	uint16_t dport;
7057 	int size;
7058 	int i;
7059 
7060 	if (inner) {
7061 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7062 					 inner_headers);
7063 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7064 	} else {
7065 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7066 					 outer_headers);
7067 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7068 	}
7069 	dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7070 		MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7071 	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7072 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7073 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7074 	}
7075 	if (!vxlan_v)
7076 		return;
7077 	if (!vxlan_m)
7078 		vxlan_m = &rte_flow_item_vxlan_mask;
7079 	size = sizeof(vxlan_m->vni);
7080 	vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
7081 	vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
7082 	memcpy(vni_m, vxlan_m->vni, size);
7083 	for (i = 0; i < size; ++i)
7084 		vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7085 }
7086 
7087 /**
7088  * Add VXLAN-GPE item to matcher and to the value.
7089  *
7090  * @param[in, out] matcher
7091  *   Flow matcher.
7092  * @param[in, out] key
7093  *   Flow matcher value.
7094  * @param[in] item
7095  *   Flow pattern to translate.
7096  * @param[in] inner
7097  *   Item is inner pattern.
7098  */
7099 
7100 static void
7101 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
7102 				 const struct rte_flow_item *item, int inner)
7103 {
7104 	const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
7105 	const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
7106 	void *headers_m;
7107 	void *headers_v;
7108 	void *misc_m =
7109 		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
7110 	void *misc_v =
7111 		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7112 	char *vni_m;
7113 	char *vni_v;
7114 	uint16_t dport;
7115 	int size;
7116 	int i;
7117 	uint8_t flags_m = 0xff;
7118 	uint8_t flags_v = 0xc;
7119 
7120 	if (inner) {
7121 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7122 					 inner_headers);
7123 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7124 	} else {
7125 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7126 					 outer_headers);
7127 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7128 	}
7129 	dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7130 		MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7131 	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7132 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7133 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7134 	}
7135 	if (!vxlan_v)
7136 		return;
7137 	if (!vxlan_m)
7138 		vxlan_m = &rte_flow_item_vxlan_gpe_mask;
7139 	size = sizeof(vxlan_m->vni);
7140 	vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
7141 	vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
7142 	memcpy(vni_m, vxlan_m->vni, size);
7143 	for (i = 0; i < size; ++i)
7144 		vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7145 	if (vxlan_m->flags) {
7146 		flags_m = vxlan_m->flags;
7147 		flags_v = vxlan_v->flags;
7148 	}
7149 	MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
7150 	MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
7151 	MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
7152 		 vxlan_m->protocol);
7153 	MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
7154 		 vxlan_v->protocol);
7155 }
7156 
7157 /**
7158  * Add Geneve item to matcher and to the value.
7159  *
7160  * @param[in, out] matcher
7161  *   Flow matcher.
7162  * @param[in, out] key
7163  *   Flow matcher value.
7164  * @param[in] item
7165  *   Flow pattern to translate.
7166  * @param[in] inner
7167  *   Item is inner pattern.
7168  */
7169 
7170 static void
7171 flow_dv_translate_item_geneve(void *matcher, void *key,
7172 			      const struct rte_flow_item *item, int inner)
7173 {
7174 	const struct rte_flow_item_geneve *geneve_m = item->mask;
7175 	const struct rte_flow_item_geneve *geneve_v = item->spec;
7176 	void *headers_m;
7177 	void *headers_v;
7178 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7179 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7180 	uint16_t dport;
7181 	uint16_t gbhdr_m;
7182 	uint16_t gbhdr_v;
7183 	char *vni_m;
7184 	char *vni_v;
7185 	size_t size, i;
7186 
7187 	if (inner) {
7188 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7189 					 inner_headers);
7190 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7191 	} else {
7192 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7193 					 outer_headers);
7194 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7195 	}
7196 	dport = MLX5_UDP_PORT_GENEVE;
7197 	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7198 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7199 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7200 	}
7201 	if (!geneve_v)
7202 		return;
7203 	if (!geneve_m)
7204 		geneve_m = &rte_flow_item_geneve_mask;
7205 	size = sizeof(geneve_m->vni);
7206 	vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
7207 	vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
7208 	memcpy(vni_m, geneve_m->vni, size);
7209 	for (i = 0; i < size; ++i)
7210 		vni_v[i] = vni_m[i] & geneve_v->vni[i];
7211 	MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
7212 		 rte_be_to_cpu_16(geneve_m->protocol));
7213 	MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
7214 		 rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
7215 	gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
7216 	gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
7217 	MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
7218 		 MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7219 	MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
7220 		 MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7221 	MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
7222 		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7223 	MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
7224 		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
7225 		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7226 }
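
/*
 * Illustrative sketch, assuming the RFC 8926 Geneve header layout
 * Ver(2) | Opt Len(6) | O(1) | C(1) | Rsvd(6) in the first 16 bits:
 * for ver_opt_len_o_c_rsvd0 = RTE_BE16(0x0280), i.e. gbhdr = 0x0280,
 * the helper macros would extract opt len = 2 (options length in
 * 4-byte multiples) and OAM flag O = 1.
 */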
7227 
7228 /**
7229  * Add MPLS item to matcher and to the value.
7230  *
7231  * @param[in, out] matcher
7232  *   Flow matcher.
7233  * @param[in, out] key
7234  *   Flow matcher value.
7235  * @param[in] item
7236  *   Flow pattern to translate.
7237  * @param[in] prev_layer
7238  *   The protocol layer indicated in previous item.
7239  * @param[in] inner
7240  *   Item is inner pattern.
7241  */
7242 static void
7243 flow_dv_translate_item_mpls(void *matcher, void *key,
7244 			    const struct rte_flow_item *item,
7245 			    uint64_t prev_layer,
7246 			    int inner)
7247 {
7248 	const uint32_t *in_mpls_m = item->mask;
7249 	const uint32_t *in_mpls_v = item->spec;
7250 	uint32_t *out_mpls_m = 0;
7251 	uint32_t *out_mpls_v = 0;
7252 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7253 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7254 	void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
7255 				     misc_parameters_2);
7256 	void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7257 	void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
7258 	void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7259 
7260 	switch (prev_layer) {
7261 	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7262 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
7263 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
7264 			 MLX5_UDP_PORT_MPLS);
7265 		break;
7266 	case MLX5_FLOW_LAYER_GRE:
7267 		MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
7268 		MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
7269 			 RTE_ETHER_TYPE_MPLS);
7270 		break;
7271 	default:
7272 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7273 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7274 			 IPPROTO_MPLS);
7275 		break;
7276 	}
7277 	if (!in_mpls_v)
7278 		return;
7279 	if (!in_mpls_m)
7280 		in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
7281 	switch (prev_layer) {
7282 	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7283 		out_mpls_m =
7284 			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7285 						 outer_first_mpls_over_udp);
7286 		out_mpls_v =
7287 			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7288 						 outer_first_mpls_over_udp);
7289 		break;
7290 	case MLX5_FLOW_LAYER_GRE:
7291 		out_mpls_m =
7292 			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7293 						 outer_first_mpls_over_gre);
7294 		out_mpls_v =
7295 			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7296 						 outer_first_mpls_over_gre);
7297 		break;
7298 	default:
7299 		/* Inner MPLS not over GRE is not supported. */
7300 		if (!inner) {
7301 			out_mpls_m =
7302 				(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7303 							 misc2_m,
7304 							 outer_first_mpls);
7305 			out_mpls_v =
7306 				(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7307 							 misc2_v,
7308 							 outer_first_mpls);
7309 		}
7310 		break;
7311 	}
7312 	if (out_mpls_m && out_mpls_v) {
7313 		*out_mpls_m = *in_mpls_m;
7314 		*out_mpls_v = *in_mpls_v & *in_mpls_m;
7315 	}
7316 }
7317 
7318 /**
7319  * Add metadata register item to matcher
7320  *
7321  * @param[in, out] matcher
7322  *   Flow matcher.
7323  * @param[in, out] key
7324  *   Flow matcher value.
7325  * @param[in] reg_type
7326  *   Type of device metadata register.
7327  * @param[in] data
7328  *   Register value to match.
7329  * @param[in] mask
7330  *   Register mask.
7331  */
7332 static void
7333 flow_dv_match_meta_reg(void *matcher, void *key,
7334 		       enum modify_reg reg_type,
7335 		       uint32_t data, uint32_t mask)
7336 {
7337 	void *misc2_m =
7338 		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
7339 	void *misc2_v =
7340 		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7341 	uint32_t temp;
7342 
7343 	data &= mask;
7344 	switch (reg_type) {
7345 	case REG_A:
7346 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
7347 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
7348 		break;
7349 	case REG_B:
7350 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
7351 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
7352 		break;
7353 	case REG_C_0:
7354 		/*
7355 		 * The metadata register C0 field might be divided into
7356 		 * source vport index and META item value, so we should set
7357 		 * this field according to the specified mask, not as a whole.
7358 		 */
7359 		temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
7360 		temp |= mask;
7361 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
7362 		temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
7363 		temp &= ~mask;
7364 		temp |= data;
7365 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
7366 		break;
7367 	case REG_C_1:
7368 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
7369 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
7370 		break;
7371 	case REG_C_2:
7372 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
7373 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
7374 		break;
7375 	case REG_C_3:
7376 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
7377 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
7378 		break;
7379 	case REG_C_4:
7380 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
7381 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
7382 		break;
7383 	case REG_C_5:
7384 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
7385 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
7386 		break;
7387 	case REG_C_6:
7388 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
7389 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
7390 		break;
7391 	case REG_C_7:
7392 		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
7393 		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
7394 		break;
7395 	default:
7396 		MLX5_ASSERT(false);
7397 		break;
7398 	}
7399 }
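
/*
 * Worked example (illustrative) of the REG_C_0 read-modify-write above.
 * Suppose C0 already carries a vport match in its low half and META now
 * claims the high half:
 *
 *	existing: mask = 0x0000ffff, value = 0x00001234
 *	incoming: mask = 0xffff0000, data  = 0x56780000
 *	merged:   mask = 0xffffffff, value = 0x56781234
 *
 * A plain MLX5_SET() of the whole field would have wiped the vport bits.
 */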
7400 
7401 /**
7402  * Add MARK item to matcher
7403  *
7404  * @param[in] dev
7405  *   The device to configure through.
7406  * @param[in, out] matcher
7407  *   Flow matcher.
7408  * @param[in, out] key
7409  *   Flow matcher value.
7410  * @param[in] item
7411  *   Flow pattern to translate.
7412  */
7413 static void
7414 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
7415 			    void *matcher, void *key,
7416 			    const struct rte_flow_item *item)
7417 {
7418 	struct mlx5_priv *priv = dev->data->dev_private;
7419 	const struct rte_flow_item_mark *mark;
7420 	uint32_t value;
7421 	uint32_t mask;
7422 
7423 	mark = item->mask ? (const void *)item->mask :
7424 			    &rte_flow_item_mark_mask;
7425 	mask = mark->id & priv->sh->dv_mark_mask;
7426 	mark = (const void *)item->spec;
7427 	MLX5_ASSERT(mark);
7428 	value = mark->id & priv->sh->dv_mark_mask & mask;
7429 	if (mask) {
7430 		enum modify_reg reg;
7431 
7432 		/* Get the metadata register index for the mark. */
7433 		reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
7434 		MLX5_ASSERT(reg > 0);
7435 		if (reg == REG_C_0) {
7436 			struct mlx5_priv *priv = dev->data->dev_private;
7437 			uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7438 			uint32_t shl_c0 = rte_bsf32(msk_c0);
7439 
7440 			mask &= msk_c0;
7441 			mask <<= shl_c0;
7442 			value <<= shl_c0;
7443 		}
7444 		flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7445 	}
7446 }
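
/*
 * Illustrative note: rte_bsf32() returns the index of the lowest set
 * bit, so with msk_c0 = 0xffff0000 the shift shl_c0 is 16 and a mark
 * id such as 0x5a lands in the upper half of C0 as 0x005a0000, leaving
 * the lower half free for the source vport metadata.
 */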
7447 
7448 /**
7449  * Add META item to matcher
7450  *
7451  * @param[in] dev
7452  *   The device to configure through.
7453  * @param[in, out] matcher
7454  *   Flow matcher.
7455  * @param[in, out] key
7456  *   Flow matcher value.
7457  * @param[in] attr
7458  *   Attributes of flow that includes this item.
7459  * @param[in] item
7460  *   Flow pattern to translate.
7461  */
7462 static void
7463 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
7464 			    void *matcher, void *key,
7465 			    const struct rte_flow_attr *attr,
7466 			    const struct rte_flow_item *item)
7467 {
7468 	const struct rte_flow_item_meta *meta_m;
7469 	const struct rte_flow_item_meta *meta_v;
7470 
7471 	meta_m = (const void *)item->mask;
7472 	if (!meta_m)
7473 		meta_m = &rte_flow_item_meta_mask;
7474 	meta_v = (const void *)item->spec;
7475 	if (meta_v) {
7476 		int reg;
7477 		uint32_t value = meta_v->data;
7478 		uint32_t mask = meta_m->data;
7479 
7480 		reg = flow_dv_get_metadata_reg(dev, attr, NULL);
7481 		if (reg < 0)
7482 			return;
7483 		MLX5_ASSERT(reg != REG_NON);
7484 		/*
7485 		 * In datapath code there are no endianness
7486 		 * conversions, for performance reasons; all
7487 		 * pattern conversions are done in rte_flow.
7488 		 */
7489 		value = rte_cpu_to_be_32(value);
7490 		mask = rte_cpu_to_be_32(mask);
7491 		if (reg == REG_C_0) {
7492 			struct mlx5_priv *priv = dev->data->dev_private;
7493 			uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7494 			uint32_t shl_c0 = rte_bsf32(msk_c0);
7495 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
7496 			uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
7497 
7498 			value >>= shr_c0;
7499 			mask >>= shr_c0;
7500 #endif
7501 			value <<= shl_c0;
7502 			mask <<= shl_c0;
7503 			MLX5_ASSERT(msk_c0);
7504 			MLX5_ASSERT(!(~msk_c0 & mask));
7505 		}
7506 		flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7507 	}
7508 }
7509 
7510 /**
7511  * Add vport metadata Reg C0 item to matcher
7512  *
7513  * @param[in, out] matcher
7514  *   Flow matcher.
7515  * @param[in, out] key
7516  *   Flow matcher value.
7517  * @param[in] value
7518  *   Register value and mask to match against metadata register C0.
7519  */
7520 static void
7521 flow_dv_translate_item_meta_vport(void *matcher, void *key,
7522 				  uint32_t value, uint32_t mask)
7523 {
7524 	flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
7525 }
7526 
7527 /**
7528  * Add tag item to matcher
7529  *
7530  * @param[in] dev
7531  *   The device to configure through.
7532  * @param[in, out] matcher
7533  *   Flow matcher.
7534  * @param[in, out] key
7535  *   Flow matcher value.
7536  * @param[in] item
7537  *   Flow pattern to translate.
7538  */
7539 static void
7540 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
7541 				void *matcher, void *key,
7542 				const struct rte_flow_item *item)
7543 {
7544 	const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
7545 	const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
7546 	uint32_t mask, value;
7547 
7548 	MLX5_ASSERT(tag_v);
7549 	value = tag_v->data;
7550 	mask = tag_m ? tag_m->data : UINT32_MAX;
7551 	if (tag_v->id == REG_C_0) {
7552 		struct mlx5_priv *priv = dev->data->dev_private;
7553 		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7554 		uint32_t shl_c0 = rte_bsf32(msk_c0);
7555 
7556 		mask &= msk_c0;
7557 		mask <<= shl_c0;
7558 		value <<= shl_c0;
7559 	}
7560 	flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
7561 }
7562 
7563 /**
7564  * Add TAG item to matcher
7565  *
7566  * @param[in] dev
7567  *   The device to configure through.
7568  * @param[in, out] matcher
7569  *   Flow matcher.
7570  * @param[in, out] key
7571  *   Flow matcher value.
7572  * @param[in] item
7573  *   Flow pattern to translate.
7574  */
7575 static void
7576 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
7577 			   void *matcher, void *key,
7578 			   const struct rte_flow_item *item)
7579 {
7580 	const struct rte_flow_item_tag *tag_v = item->spec;
7581 	const struct rte_flow_item_tag *tag_m = item->mask;
7582 	enum modify_reg reg;
7583 
7584 	MLX5_ASSERT(tag_v);
7585 	tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
7586 	/* Get the metadata register index for the tag. */
7587 	reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
7588 	MLX5_ASSERT(reg > 0);
7589 	flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
7590 }
7591 
7592 /**
7593  * Add source vport match to the specified matcher.
7594  *
7595  * @param[in, out] matcher
7596  *   Flow matcher.
7597  * @param[in, out] key
7598  *   Flow matcher value.
7599  * @param[in] port
7600  *   Source vport value to match
7601  * @param[in] mask
7602  *   Mask
7603  */
7604 static void
7605 flow_dv_translate_item_source_vport(void *matcher, void *key,
7606 				    int16_t port, uint16_t mask)
7607 {
7608 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7609 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7610 
7611 	MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
7612 	MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
7613 }
7614 
7615 /**
7616  * Translate port-id item to E-Switch match on port-id.
7617  *
7618  * @param[in] dev
7619  *   The device to configure through.
7620  * @param[in, out] matcher
7621  *   Flow matcher.
7622  * @param[in, out] key
7623  *   Flow matcher value.
7624  * @param[in] item
7625  *   Flow pattern to translate.
7626  * @param[in] attr
7627  *   Flow attributes.
7628  *
7629  * @return
7630  *   0 on success, a negative errno value otherwise.
7631  */
7632 static int
7633 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
7634 			       void *key, const struct rte_flow_item *item,
7635 			       const struct rte_flow_attr *attr)
7636 {
7637 	const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
7638 	const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
7639 	struct mlx5_priv *priv;
7640 	uint16_t mask, id;
7641 
7642 	mask = pid_m ? pid_m->id : 0xffff;
7643 	id = pid_v ? pid_v->id : dev->data->port_id;
7644 	priv = mlx5_port_to_eswitch_info(id, item == NULL);
7645 	if (!priv)
7646 		return -rte_errno;
7647 	/*
7648 	 * Translate to vport field or to metadata, depending on mode.
7649 	 * Kernel can use either misc.source_port or half of C0 metadata
7650 	 * register.
7651 	 */
7652 	if (priv->vport_meta_mask) {
7653 		/*
7654 		 * Provide the hint for SW steering library
7655 		 * to insert the flow into ingress domain and
7656 		 * save the extra vport match.
7657 		 */
7658 		if (mask == 0xffff && priv->vport_id == 0xffff &&
7659 		    priv->pf_bond < 0 && attr->transfer)
7660 			flow_dv_translate_item_source_vport
7661 				(matcher, key, priv->vport_id, mask);
7662 		else
7663 			flow_dv_translate_item_meta_vport
7664 				(matcher, key,
7665 				 priv->vport_meta_tag,
7666 				 priv->vport_meta_mask);
7667 	} else {
7668 		flow_dv_translate_item_source_vport(matcher, key,
7669 						    priv->vport_id, mask);
7670 	}
7671 	return 0;
7672 }
7673 
7674 /**
7675  * Add ICMP6 item to matcher and to the value.
7676  *
7677  * @param[in, out] matcher
7678  *   Flow matcher.
7679  * @param[in, out] key
7680  *   Flow matcher value.
7681  * @param[in] item
7682  *   Flow pattern to translate.
7683  * @param[in] inner
7684  *   Item is inner pattern.
7685  */
7686 static void
7687 flow_dv_translate_item_icmp6(void *matcher, void *key,
7688 			      const struct rte_flow_item *item,
7689 			      int inner)
7690 {
7691 	const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
7692 	const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
7693 	void *headers_m;
7694 	void *headers_v;
7695 	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7696 				     misc_parameters_3);
7697 	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7698 	if (inner) {
7699 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7700 					 inner_headers);
7701 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7702 	} else {
7703 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7704 					 outer_headers);
7705 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7706 	}
7707 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7708 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
7709 	if (!icmp6_v)
7710 		return;
7711 	if (!icmp6_m)
7712 		icmp6_m = &rte_flow_item_icmp6_mask;
7713 	MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
7714 	MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
7715 		 icmp6_v->type & icmp6_m->type);
7716 	MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
7717 	MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
7718 		 icmp6_v->code & icmp6_m->code);
7719 }
7720 
7721 /**
7722  * Add ICMP item to matcher and to the value.
7723  *
7724  * @param[in, out] matcher
7725  *   Flow matcher.
7726  * @param[in, out] key
7727  *   Flow matcher value.
7728  * @param[in] item
7729  *   Flow pattern to translate.
7730  * @param[in] inner
7731  *   Item is inner pattern.
7732  */
7733 static void
7734 flow_dv_translate_item_icmp(void *matcher, void *key,
7735 			    const struct rte_flow_item *item,
7736 			    int inner)
7737 {
7738 	const struct rte_flow_item_icmp *icmp_m = item->mask;
7739 	const struct rte_flow_item_icmp *icmp_v = item->spec;
7740 	uint32_t icmp_header_data_m = 0;
7741 	uint32_t icmp_header_data_v = 0;
7742 	void *headers_m;
7743 	void *headers_v;
7744 	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7745 				     misc_parameters_3);
7746 	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7747 	if (inner) {
7748 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7749 					 inner_headers);
7750 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7751 	} else {
7752 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7753 					 outer_headers);
7754 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7755 	}
7756 	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7757 	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
7758 	if (!icmp_v)
7759 		return;
7760 	if (!icmp_m)
7761 		icmp_m = &rte_flow_item_icmp_mask;
7762 	MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
7763 		 icmp_m->hdr.icmp_type);
7764 	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
7765 		 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
7766 	MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
7767 		 icmp_m->hdr.icmp_code);
7768 	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
7769 		 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
7770 	icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
7771 	icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
7772 	if (icmp_header_data_m) {
7773 		icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
7774 		icmp_header_data_v |=
7775 			 rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
7776 		MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
7777 			 icmp_header_data_m);
7778 		MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
7779 			 icmp_header_data_v & icmp_header_data_m);
7780 	}
7781 }
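/*
 * A minimal sketch of the icmp_header_data word built above: the
 * sequence number fills the low 16 bits and the identifier the high
 * 16 bits, both converted from big-endian wire order first.
 */
static inline uint32_t
flow_dv_sketch_icmp_header_data(rte_be16_t ident, rte_be16_t seq_nb)
{
	return (uint32_t)rte_be_to_cpu_16(seq_nb) |
	       ((uint32_t)rte_be_to_cpu_16(ident) << 16);
}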
7782 
7783 /**
7784  * Add GTP item to matcher and to the value.
7785  *
7786  * @param[in, out] matcher
7787  *   Flow matcher.
7788  * @param[in, out] key
7789  *   Flow matcher value.
7790  * @param[in] item
7791  *   Flow pattern to translate.
7792  * @param[in] inner
7793  *   Item is inner pattern.
7794  */
7795 static void
7796 flow_dv_translate_item_gtp(void *matcher, void *key,
7797 			   const struct rte_flow_item *item, int inner)
7798 {
7799 	const struct rte_flow_item_gtp *gtp_m = item->mask;
7800 	const struct rte_flow_item_gtp *gtp_v = item->spec;
7801 	void *headers_m;
7802 	void *headers_v;
7803 	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7804 				     misc_parameters_3);
7805 	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7806 	uint16_t dport = RTE_GTPU_UDP_PORT;
7807 
7808 	if (inner) {
7809 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7810 					 inner_headers);
7811 		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7812 	} else {
7813 		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7814 					 outer_headers);
7815 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7816 	}
7817 	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7818 		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7819 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7820 	}
7821 	if (!gtp_v)
7822 		return;
7823 	if (!gtp_m)
7824 		gtp_m = &rte_flow_item_gtp_mask;
7825 	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
7826 		 gtp_m->v_pt_rsv_flags);
7827 	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
7828 		 gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
7829 	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
7830 	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
7831 		 gtp_v->msg_type & gtp_m->msg_type);
7832 	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
7833 		 rte_be_to_cpu_32(gtp_m->teid));
7834 	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
7835 		 rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
7836 }
7837 
7838 /**
7839  * Add eCPRI item to matcher and to the value.
7840  *
7841  * @param[in] dev
7842  *   The device to configure through.
7843  * @param[in, out] matcher
7844  *   Flow matcher.
7845  * @param[in, out] key
7846  *   Flow matcher value.
7847  * @param[in] item
7848  *   Flow pattern to translate.
7849  * @note
7850  *   Sample IDs for the matching are taken from the device's flex parser.
7851  */
7852 static void
7853 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
7854 			     void *key, const struct rte_flow_item *item)
7855 {
7856 	struct mlx5_priv *priv = dev->data->dev_private;
7857 	const struct rte_flow_item_ecpri *ecpri_m = item->mask;
7858 	const struct rte_flow_item_ecpri *ecpri_v = item->spec;
7859 	struct rte_ecpri_common_hdr common;
7860 	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
7861 				     misc_parameters_4);
7862 	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
7863 	uint32_t *samples;
7864 	void *dw_m;
7865 	void *dw_v;
7866 
7867 	if (!ecpri_v)
7868 		return;
7869 	if (!ecpri_m)
7870 		ecpri_m = &rte_flow_item_ecpri_mask;
7871 	/*
7872 	 * At most four DW samples are supported in a single matching now.
7873 	 * Two are used now for eCPRI matching:
7874 	 * 1. Type: one byte, mask should be 0x00ff0000 in network order
7875 	 * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
7876 	 *    if any.
7877 	 */
7878 	if (!ecpri_m->hdr.common.u32)
7879 		return;
7880 	samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
7881 	/* Need to take the whole DW as the mask to fill the entry. */
7882 	dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7883 			    prog_sample_field_value_0);
7884 	dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7885 			    prog_sample_field_value_0);
7886 	/* Already big endian (network order) in the header. */
7887 	*(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
7888 	*(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
7889 	/* Sample#0, used for matching type, offset 0. */
7890 	MLX5_SET(fte_match_set_misc4, misc4_m,
7891 		 prog_sample_field_id_0, samples[0]);
7892 	/* It makes no sense to set the sample ID in the mask field. */
7893 	MLX5_SET(fte_match_set_misc4, misc4_v,
7894 		 prog_sample_field_id_0, samples[0]);
7895 	/*
7896 	 * Check whether the message body part needs to be matched.
7897 	 * Wildcard rules matching only the type field should be supported.
7898 	 */
7899 	if (ecpri_m->hdr.dummy[0]) {
7900 		common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
7901 		switch (common.type) {
7902 		case RTE_ECPRI_MSG_TYPE_IQ_DATA:
7903 		case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
7904 		case RTE_ECPRI_MSG_TYPE_DLY_MSR:
7905 			dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7906 					    prog_sample_field_value_1);
7907 			dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7908 					    prog_sample_field_value_1);
7909 			*(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
7910 			*(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
7911 					    ecpri_m->hdr.dummy[0];
7912 			/* Sample#1, to match message body, offset 4. */
7913 			MLX5_SET(fte_match_set_misc4, misc4_m,
7914 				 prog_sample_field_id_1, samples[1]);
7915 			MLX5_SET(fte_match_set_misc4, misc4_v,
7916 				 prog_sample_field_id_1, samples[1]);
7917 			break;
7918 		default:
7919 			/* Others, do not match any sample ID. */
7920 			break;
7921 		}
7922 	}
7923 }
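/*
 * A minimal sketch of how the message type byte is recovered from the
 * wire-order eCPRI common header DW, mirroring the switch above:
 * after conversion to host order the type occupies bits 16-23.
 */
static inline uint8_t
flow_dv_sketch_ecpri_type(rte_be32_t common_be)
{
	return (rte_be_to_cpu_32(common_be) >> 16) & 0xff;
}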
7924 
7925 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
7926 
7927 #define HEADER_IS_ZERO(match_criteria, headers)				     \
7928 	!(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
7929 		 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
7930 
7931 /**
7932  * Calculate flow matcher enable bitmap.
7933  *
7934  * @param match_criteria
7935  *   Pointer to flow matcher criteria.
7936  *
7937  * @return
7938  *   Bitmap of enabled fields.
7939  */
7940 static uint8_t
7941 flow_dv_matcher_enable(uint32_t *match_criteria)
7942 {
7943 	uint8_t match_criteria_enable;
7944 
7945 	match_criteria_enable =
7946 		(!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
7947 		MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
7948 	match_criteria_enable |=
7949 		(!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
7950 		MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
7951 	match_criteria_enable |=
7952 		(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
7953 		MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
7954 	match_criteria_enable |=
7955 		(!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
7956 		MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
7957 	match_criteria_enable |=
7958 		(!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
7959 		MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
7960 	match_criteria_enable |=
7961 		(!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
7962 		MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
7963 	return match_criteria_enable;
7964 }
7965 
7966 struct mlx5_hlist_entry *
7967 flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
7968 {
7969 	struct mlx5_dev_ctx_shared *sh = list->ctx;
7970 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
7971 	struct rte_eth_dev *dev = ctx->dev;
7972 	struct mlx5_flow_tbl_data_entry *tbl_data;
7973 	struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
7974 	struct rte_flow_error *error = ctx->error;
7975 	union mlx5_flow_tbl_key key = { .v64 = key64 };
7976 	struct mlx5_flow_tbl_resource *tbl;
7977 	void *domain;
7978 	uint32_t idx = 0;
7979 	int ret;
7980 
7981 	tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
7982 	if (!tbl_data) {
7983 		rte_flow_error_set(error, ENOMEM,
7984 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7985 				   NULL,
7986 				   "cannot allocate flow table data entry");
7987 		return NULL;
7988 	}
7989 	tbl_data->idx = idx;
7990 	tbl_data->tunnel = tt_prm->tunnel;
7991 	tbl_data->group_id = tt_prm->group_id;
7992 	tbl_data->external = tt_prm->external;
7993 	tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
7994 	tbl_data->is_egress = !!key.direction;
7995 	tbl = &tbl_data->tbl;
7996 	if (key.dummy)
7997 		return &tbl_data->entry;
7998 	if (key.domain)
7999 		domain = sh->fdb_domain;
8000 	else if (key.direction)
8001 		domain = sh->tx_domain;
8002 	else
8003 		domain = sh->rx_domain;
8004 	ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj);
8005 	if (ret) {
8006 		rte_flow_error_set(error, ENOMEM,
8007 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8008 				   NULL, "cannot create flow table object");
8009 		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
8010 		return NULL;
8011 	}
8012 	if (key.table_id) {
8013 		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
8014 					(tbl->obj, &tbl_data->jump.action);
8015 		if (ret) {
8016 			rte_flow_error_set(error, ENOMEM,
8017 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8018 					   NULL,
8019 					   "cannot create flow jump action");
8020 			mlx5_flow_os_destroy_flow_tbl(tbl->obj);
8021 			mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
8022 			return NULL;
8023 		}
8024 	}
8025 	MKSTR(matcher_name, "%s_%s_%u_matcher_cache",
8026 	      key.domain ? "FDB" : "NIC", key.direction ? "egress" : "ingress",
8027 	      key.table_id);
8028 	mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
8029 			     flow_dv_matcher_create_cb,
8030 			     flow_dv_matcher_match_cb,
8031 			     flow_dv_matcher_remove_cb);
8032 	return &tbl_data->entry;
8033 }
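/*
 * A minimal sketch of the domain selection encoded in the table key
 * above: transfer (FDB) takes precedence, then egress selects the Tx
 * domain, everything else lands in the Rx domain.
 */
static inline const char *
flow_dv_sketch_tbl_domain_name(union mlx5_flow_tbl_key key)
{
	if (key.domain)
		return "FDB";
	return key.direction ? "NIC-Tx" : "NIC-Rx";
}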
8034 
8035 /**
8036  * Get a flow table.
8037  *
8038  * @param[in, out] dev
8039  *   Pointer to rte_eth_dev structure.
8040  * @param[in] table_id
8041  *   Table id to use.
8042  * @param[in] egress
8043  *   Direction of the table.
8044  * @param[in] transfer
8045  *   E-Switch or NIC flow.
8046  * @param[in] dummy
8047  *   Dummy entry for dv API.
8048  * @param[out] error
8049  *   pointer to error structure.
8050  *
8051  * @return
8052  *   Returns the table resource based on the index, NULL in case of failure.
8053  */
8054 struct mlx5_flow_tbl_resource *
8055 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
8056 			 uint32_t table_id, uint8_t egress,
8057 			 uint8_t transfer,
8058 			 bool external,
8059 			 const struct mlx5_flow_tunnel *tunnel,
8060 			 uint32_t group_id, uint8_t dummy,
8061 			 struct rte_flow_error *error)
8062 {
8063 	struct mlx5_priv *priv = dev->data->dev_private;
8064 	union mlx5_flow_tbl_key table_key = {
8065 		{
8066 			.table_id = table_id,
8067 			.dummy = dummy,
8068 			.domain = !!transfer,
8069 			.direction = !!egress,
8070 		}
8071 	};
8072 	struct mlx5_flow_tbl_tunnel_prm tt_prm = {
8073 		.tunnel = tunnel,
8074 		.group_id = group_id,
8075 		.external = external,
8076 	};
8077 	struct mlx5_flow_cb_ctx ctx = {
8078 		.dev = dev,
8079 		.error = error,
8080 		.data = &tt_prm,
8081 	};
8082 	struct mlx5_hlist_entry *entry;
8083 	struct mlx5_flow_tbl_data_entry *tbl_data;
8084 
8085 	entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
8086 	if (!entry) {
8087 		rte_flow_error_set(error, ENOMEM,
8088 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8089 				   "cannot get table");
8090 		return NULL;
8091 	}
8092 	DRV_LOG(DEBUG, "Table_id %u tunnel %u group %u registered.",
8093 		table_id, tunnel ? tunnel->tunnel_id : 0, group_id);
8094 	tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8095 	return &tbl_data->tbl;
8096 }
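/*
 * A minimal usage sketch (hypothetical table id 1, NIC Rx, no tunnel):
 * every successful flow_dv_tbl_resource_get() takes a reference on the
 * hashed list entry and must be paired with
 * flow_dv_tbl_resource_release().
 */
static int __rte_unused
flow_dv_sketch_tbl_get_put(struct rte_eth_dev *dev,
			   struct rte_flow_error *error)
{
	struct mlx5_flow_tbl_resource *tbl;

	tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL, 1, 0,
				       error);
	if (!tbl)
		return -rte_errno;
	return flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
}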
8097 
8098 void
8099 flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
8100 		      struct mlx5_hlist_entry *entry)
8101 {
8102 	struct mlx5_dev_ctx_shared *sh = list->ctx;
8103 	struct mlx5_flow_tbl_data_entry *tbl_data =
8104 		container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8105 
8106 	MLX5_ASSERT(entry && sh);
8107 	if (tbl_data->jump.action)
8108 		mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
8109 	if (tbl_data->tbl.obj)
8110 		mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
8111 	if (tbl_data->tunnel_offload && tbl_data->external) {
8112 		struct mlx5_hlist_entry *he;
8113 		struct mlx5_hlist *tunnel_grp_hash;
8114 		struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
8115 		union tunnel_tbl_key tunnel_key = {
8116 			.tunnel_id = tbl_data->tunnel ?
8117 					tbl_data->tunnel->tunnel_id : 0,
8118 			.group = tbl_data->group_id
8119 		};
8120 		union mlx5_flow_tbl_key table_key = {
8121 			.v64 = entry->key
8122 		};
8123 		uint32_t table_id = table_key.table_id;
8124 
8125 		tunnel_grp_hash = tbl_data->tunnel ?
8126 					tbl_data->tunnel->groups :
8127 					thub->groups;
8128 		he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
8129 		if (he)
8130 			mlx5_hlist_unregister(tunnel_grp_hash, he);
8131 		DRV_LOG(DEBUG,
8132 			"Table_id %u tunnel %u group %u released.",
8133 			table_id,
8134 			tbl_data->tunnel ?
8135 			tbl_data->tunnel->tunnel_id : 0,
8136 			tbl_data->group_id);
8137 	}
8138 	mlx5_cache_list_destroy(&tbl_data->matchers);
8139 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
8140 }
8141 
8142 /**
8143  * Release a flow table.
8144  *
8145  * @param[in] sh
8146  *   Pointer to device shared structure.
8147  * @param[in] tbl
8148  *   Table resource to be released.
8149  *
8150  * @return
8151  *   Returns 0 if the table was released, otherwise 1.
8152  */
8153 static int
8154 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
8155 			     struct mlx5_flow_tbl_resource *tbl)
8156 {
8157 	struct mlx5_flow_tbl_data_entry *tbl_data =
8158 		container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8159 
8160 	if (!tbl)
8161 		return 0;
8162 	return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
8163 }
8164 
8165 int
8166 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
8167 			 struct mlx5_cache_entry *entry, void *cb_ctx)
8168 {
8169 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8170 	struct mlx5_flow_dv_matcher *ref = ctx->data;
8171 	struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
8172 							entry);
8173 
8174 	return cur->crc != ref->crc ||
8175 	       cur->priority != ref->priority ||
8176 	       memcmp((const void *)cur->mask.buf,
8177 		      (const void *)ref->mask.buf, ref->mask.size);
8178 }
8179 
8180 struct mlx5_cache_entry *
8181 flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
8182 			  struct mlx5_cache_entry *entry __rte_unused,
8183 			  void *cb_ctx)
8184 {
8185 	struct mlx5_dev_ctx_shared *sh = list->ctx;
8186 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8187 	struct mlx5_flow_dv_matcher *ref = ctx->data;
8188 	struct mlx5_flow_dv_matcher *cache;
8189 	struct mlx5dv_flow_matcher_attr dv_attr = {
8190 		.type = IBV_FLOW_ATTR_NORMAL,
8191 		.match_mask = (void *)&ref->mask,
8192 	};
8193 	struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
8194 							    typeof(*tbl), tbl);
8195 	int ret;
8196 
8197 	cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
8198 	if (!cache) {
8199 		rte_flow_error_set(ctx->error, ENOMEM,
8200 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8201 				   "cannot create matcher");
8202 		return NULL;
8203 	}
8204 	*cache = *ref;
8205 	dv_attr.match_criteria_enable =
8206 		flow_dv_matcher_enable(cache->mask.buf);
8207 	dv_attr.priority = ref->priority;
8208 	if (tbl->is_egress)
8209 		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
8210 	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
8211 					       &cache->matcher_object);
8212 	if (ret) {
8213 		mlx5_free(cache);
8214 		rte_flow_error_set(ctx->error, ENOMEM,
8215 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8216 				   "cannot create matcher");
8217 		return NULL;
8218 	}
8219 	return &cache->entry;
8220 }
8221 
8222 /**
8223  * Register the flow matcher.
8224  *
8225  * @param[in, out] dev
8226  *   Pointer to rte_eth_dev structure.
8227  * @param[in, out] matcher
8228  *   Pointer to flow matcher.
8229  * @param[in, out] key
8230  *   Pointer to flow table key.
8231  * @param[in, out] dev_flow
8232  *   Pointer to the dev_flow.
8233  * @param[out] error
8234  *   pointer to error structure.
8235  *
8236  * @return
8237  *   0 on success, otherwise -errno and errno is set.
8238  */
8239 static int
8240 flow_dv_matcher_register(struct rte_eth_dev *dev,
8241 			 struct mlx5_flow_dv_matcher *ref,
8242 			 union mlx5_flow_tbl_key *key,
8243 			 struct mlx5_flow *dev_flow,
8244 			 const struct mlx5_flow_tunnel *tunnel,
8245 			 uint32_t group_id,
8246 			 struct rte_flow_error *error)
8247 {
8248 	struct mlx5_cache_entry *entry;
8249 	struct mlx5_flow_dv_matcher *cache;
8250 	struct mlx5_flow_tbl_resource *tbl;
8251 	struct mlx5_flow_tbl_data_entry *tbl_data;
8252 	struct mlx5_flow_cb_ctx ctx = {
8253 		.error = error,
8254 		.data = ref,
8255 	};
8256 
8257 	/**
8258 	 * tunnel offload API requires this registration for cases when
8259 	 * tunnel match rule was inserted before tunnel set rule.
8260 	 */
8261 	tbl = flow_dv_tbl_resource_get(dev, key->table_id,
8262 				       key->direction, key->domain,
8263 				       dev_flow->external, tunnel,
8264 				       group_id, 0, error);
8265 	if (!tbl)
8266 		return -rte_errno;	/* No need to refill the error info */
8267 	tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8268 	ref->tbl = tbl;
8269 	entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
8270 	if (!entry) {
8271 		flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
8272 		return rte_flow_error_set(error, ENOMEM,
8273 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8274 					  "cannot allocate ref memory");
8275 	}
8276 	cache = container_of(entry, typeof(*cache), entry);
8277 	dev_flow->handle->dvh.matcher = cache;
8278 	return 0;
8279 }
8280 
8281 struct mlx5_hlist_entry *
8282 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
8283 {
8284 	struct mlx5_dev_ctx_shared *sh = list->ctx;
8285 	struct rte_flow_error *error = ctx;
8286 	struct mlx5_flow_dv_tag_resource *entry;
8287 	uint32_t idx = 0;
8288 	int ret;
8289 
8290 	entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
8291 	if (!entry) {
8292 		rte_flow_error_set(error, ENOMEM,
8293 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8294 				   "cannot allocate resource memory");
8295 		return NULL;
8296 	}
8297 	entry->idx = idx;
8298 	ret = mlx5_flow_os_create_flow_action_tag(key,
8299 						  &entry->action);
8300 	if (ret) {
8301 		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
8302 		rte_flow_error_set(error, ENOMEM,
8303 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8304 				   NULL, "cannot create action");
8305 		return NULL;
8306 	}
8307 	return &entry->entry;
8308 }
8309 
8310 /**
8311  * Find existing tag resource or create and register a new one.
8312  *
8313  * @param dev[in, out]
8314  *   Pointer to rte_eth_dev structure.
8315  * @param[in, out] tag_be24
8316  *   Tag value in big endian, right-shifted by 8 bits.
8317  * @param[in, out] dev_flow
8318  *   Pointer to the dev_flow.
8319  * @param[out] error
8320  *   pointer to error structure.
8321  *
8322  * @return
8323  *   0 on success, otherwise -errno and errno is set.
8324  */
8325 static int
8326 flow_dv_tag_resource_register
8327 			(struct rte_eth_dev *dev,
8328 			 uint32_t tag_be24,
8329 			 struct mlx5_flow *dev_flow,
8330 			 struct rte_flow_error *error)
8331 {
8332 	struct mlx5_priv *priv = dev->data->dev_private;
8333 	struct mlx5_flow_dv_tag_resource *cache_resource;
8334 	struct mlx5_hlist_entry *entry;
8335 
8336 	entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
8337 	if (entry) {
8338 		cache_resource = container_of
8339 			(entry, struct mlx5_flow_dv_tag_resource, entry);
8340 		dev_flow->handle->dvh.rix_tag = cache_resource->idx;
8341 		dev_flow->dv.tag_resource = cache_resource;
8342 		return 0;
8343 	}
8344 	return -rte_errno;
8345 }
8346 
8347 void
8348 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
8349 		      struct mlx5_hlist_entry *entry)
8350 {
8351 	struct mlx5_dev_ctx_shared *sh = list->ctx;
8352 	struct mlx5_flow_dv_tag_resource *tag =
8353 		container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
8354 
8355 	MLX5_ASSERT(tag && sh && tag->action);
8356 	claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
8357 	DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
8358 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
8359 }
8360 
8361 /**
8362  * Release the tag.
8363  *
8364  * @param dev
8365  *   Pointer to Ethernet device.
8366  * @param tag_idx
8367  *   Tag index.
8368  *
8369  * @return
8370  *   1 while a reference on it exists, 0 when freed.
8371  */
8372 static int
8373 flow_dv_tag_release(struct rte_eth_dev *dev,
8374 		    uint32_t tag_idx)
8375 {
8376 	struct mlx5_priv *priv = dev->data->dev_private;
8377 	struct mlx5_flow_dv_tag_resource *tag;
8378 
8379 	tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
8380 	if (!tag)
8381 		return 0;
8382 	DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
8383 		dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
8384 	return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
8385 }
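/*
 * A minimal life-cycle sketch, assuming a prepared dev_flow: a
 * successful registration stores the tag ipool index in the flow
 * handle; the matching release drops one reference and frees the DR
 * action on the last one.
 */
static int __rte_unused
flow_dv_sketch_tag_cycle(struct rte_eth_dev *dev, uint32_t tag_be24,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	if (flow_dv_tag_resource_register(dev, tag_be24, dev_flow, error))
		return -rte_errno;
	/* ... a rule would use dev_flow->dv.tag_resource->action here ... */
	return flow_dv_tag_release(dev, dev_flow->handle->dvh.rix_tag);
}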
8386 
8387 /**
8388  * Translate port ID action to vport.
8389  *
8390  * @param[in] dev
8391  *   Pointer to rte_eth_dev structure.
8392  * @param[in] action
8393  *   Pointer to the port ID action.
8394  * @param[out] dst_port_id
8395  *   The target port ID.
8396  * @param[out] error
8397  *   Pointer to the error structure.
8398  *
8399  * @return
8400  *   0 on success, a negative errno value otherwise and rte_errno is set.
8401  */
8402 static int
8403 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
8404 				 const struct rte_flow_action *action,
8405 				 uint32_t *dst_port_id,
8406 				 struct rte_flow_error *error)
8407 {
8408 	uint32_t port;
8409 	struct mlx5_priv *priv;
8410 	const struct rte_flow_action_port_id *conf =
8411 			(const struct rte_flow_action_port_id *)action->conf;
8412 
8413 	port = conf->original ? dev->data->port_id : conf->id;
8414 	priv = mlx5_port_to_eswitch_info(port, false);
8415 	if (!priv)
8416 		return rte_flow_error_set(error, -rte_errno,
8417 					  RTE_FLOW_ERROR_TYPE_ACTION,
8418 					  NULL,
8419 					  "No eswitch info was found for port");
8420 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
8421 	/*
8422 	 * This parameter is transferred to
8423 	 * mlx5dv_dr_action_create_dest_ib_port().
8424 	 */
8425 	*dst_port_id = priv->dev_port;
8426 #else
8427 	/*
8428 	 * Legacy mode, no LAG configuration is supported.
8429 	 * This parameter is transferred to
8430 	 * mlx5dv_dr_action_create_dest_vport().
8431 	 */
8432 	*dst_port_id = priv->vport_id;
8433 #endif
8434 	return 0;
8435 }
8436 
8437 /**
8438  * Create a counter with aging configuration.
8439  *
8440  * @param[in] dev
8441  *   Pointer to rte_eth_dev structure.
8442  * @param[out] count
8443  *   Pointer to the counter action configuration.
8444  * @param[in] age
8445  *   Pointer to the aging action configuration.
8446  *
8447  * @return
8448  *   Index to flow counter on success, 0 otherwise.
8449  */
8450 static uint32_t
8451 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
8452 				struct mlx5_flow *dev_flow,
8453 				const struct rte_flow_action_count *count,
8454 				const struct rte_flow_action_age *age)
8455 {
8456 	uint32_t counter;
8457 	struct mlx5_age_param *age_param;
8458 
8459 	if (count && count->shared)
8460 		counter = flow_dv_counter_get_shared(dev, count->id);
8461 	else
8462 		counter = flow_dv_counter_alloc(dev, !!age);
8463 	if (!counter || age == NULL)
8464 		return counter;
8465 	age_param  = flow_dv_counter_idx_get_age(dev, counter);
8466 	age_param->context = age->context ? age->context :
8467 		(void *)(uintptr_t)(dev_flow->flow_idx);
8468 	age_param->timeout = age->timeout;
8469 	age_param->port_id = dev->data->port_id;
8470 	__atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
8471 	__atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
8472 	return counter;
8473 }
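/*
 * A minimal usage sketch with hypothetical values: a non-shared
 * counter that also ages out after 10 seconds, as consumed by the
 * translation above; a NULL age context falls back to the flow index.
 */
static uint32_t __rte_unused
flow_dv_sketch_aged_counter(struct rte_eth_dev *dev,
			    struct mlx5_flow *dev_flow)
{
	const struct rte_flow_action_count count = { .shared = 0, .id = 0 };
	const struct rte_flow_action_age age = {
		.timeout = 10, /* In seconds. */
		.context = NULL,
	};

	return flow_dv_translate_create_counter(dev, dev_flow, &count, &age);
}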
8474 
8475 /**
8476  * Add Tx queue matcher.
8477  *
8478  * @param[in] dev
8479  *   Pointer to the dev struct.
8480  * @param[in, out] matcher
8481  *   Flow matcher.
8482  * @param[in, out] key
8483  *   Flow matcher value.
8484  * @param[in] item
8485  *   Flow pattern to translate.
8486  * @note
8487  *   The Tx queue index is translated to the SQ number internally.
8488  */
8489 static void
8490 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
8491 				void *matcher, void *key,
8492 				const struct rte_flow_item *item)
8493 {
8494 	const struct mlx5_rte_flow_item_tx_queue *queue_m;
8495 	const struct mlx5_rte_flow_item_tx_queue *queue_v;
8496 	void *misc_m =
8497 		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8498 	void *misc_v =
8499 		MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8500 	struct mlx5_txq_ctrl *txq;
8501 	uint32_t queue;
8502 
8503 
8504 	queue_m = (const void *)item->mask;
8505 	if (!queue_m)
8506 		return;
8507 	queue_v = (const void *)item->spec;
8508 	if (!queue_v)
8509 		return;
8510 	txq = mlx5_txq_get(dev, queue_v->queue);
8511 	if (!txq)
8512 		return;
8513 	queue = txq->obj->sq->id;
8514 	MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
8515 	MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
8516 		 queue & queue_m->queue);
8517 	mlx5_txq_release(dev, queue_v->queue);
8518 }
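/*
 * A minimal sketch (hypothetical queue 0) of the internal Tx queue
 * item consumed above; the PMD builds it for internally created Tx
 * rules and the translation rewrites the queue index into the SQ
 * number.
 */
static const struct mlx5_rte_flow_item_tx_queue sketch_txq_spec __rte_unused = {
	.queue = 0,
};
static const struct mlx5_rte_flow_item_tx_queue sketch_txq_mask __rte_unused = {
	.queue = UINT32_MAX,
};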
8519 
8520 /**
8521  * Set the hash fields according to the @p flow information.
8522  *
8523  * @param[in] dev_flow
8524  *   Pointer to the mlx5_flow.
8525  * @param[in] rss_desc
8526  *   Pointer to the mlx5_flow_rss_desc.
8527  */
8528 static void
8529 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
8530 		       struct mlx5_flow_rss_desc *rss_desc)
8531 {
8532 	uint64_t items = dev_flow->handle->layers;
8533 	int rss_inner = 0;
8534 	uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
8535 
8536 	dev_flow->hash_fields = 0;
8537 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
8538 	if (rss_desc->level >= 2) {
8539 		dev_flow->hash_fields |= IBV_RX_HASH_INNER;
8540 		rss_inner = 1;
8541 	}
8542 #endif
8543 	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
8544 	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
8545 		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
8546 			if (rss_types & ETH_RSS_L3_SRC_ONLY)
8547 				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
8548 			else if (rss_types & ETH_RSS_L3_DST_ONLY)
8549 				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
8550 			else
8551 				dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
8552 		}
8553 	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
8554 		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
8555 		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
8556 			if (rss_types & ETH_RSS_L3_SRC_ONLY)
8557 				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
8558 			else if (rss_types & ETH_RSS_L3_DST_ONLY)
8559 				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
8560 			else
8561 				dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
8562 		}
8563 	}
8564 	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
8565 	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
8566 		if (rss_types & ETH_RSS_UDP) {
8567 			if (rss_types & ETH_RSS_L4_SRC_ONLY)
8568 				dev_flow->hash_fields |=
8569 						IBV_RX_HASH_SRC_PORT_UDP;
8570 			else if (rss_types & ETH_RSS_L4_DST_ONLY)
8571 				dev_flow->hash_fields |=
8572 						IBV_RX_HASH_DST_PORT_UDP;
8573 			else
8574 				dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
8575 		}
8576 	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
8577 		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
8578 		if (rss_types & ETH_RSS_TCP) {
8579 			if (rss_types & ETH_RSS_L4_SRC_ONLY)
8580 				dev_flow->hash_fields |=
8581 						IBV_RX_HASH_SRC_PORT_TCP;
8582 			else if (rss_types & ETH_RSS_L4_DST_ONLY)
8583 				dev_flow->hash_fields |=
8584 						IBV_RX_HASH_DST_PORT_TCP;
8585 			else
8586 				dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
8587 		}
8588 	}
8589 }
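/*
 * A minimal sketch of the field-selection priority applied above,
 * shown for the IPv4 case: L3_SRC_ONLY wins over L3_DST_ONLY, and
 * with neither set both source and destination are hashed.
 */
static inline uint64_t
flow_dv_sketch_ipv4_hash_fields(uint64_t rss_types)
{
	if (rss_types & ETH_RSS_L3_SRC_ONLY)
		return IBV_RX_HASH_SRC_IPV4;
	if (rss_types & ETH_RSS_L3_DST_ONLY)
		return IBV_RX_HASH_DST_IPV4;
	return MLX5_IPV4_IBV_RX_HASH;
}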
8590 
8591 /**
8592  * Prepare an Rx Hash queue.
8593  *
8594  * @param dev
8595  *   Pointer to Ethernet device.
8596  * @param[in] dev_flow
8597  *   Pointer to the mlx5_flow.
8598  * @param[in] rss_desc
8599  *   Pointer to the mlx5_flow_rss_desc.
8600  * @param[out] hrxq_idx
8601  *   Hash Rx queue index.
8602  *
8603  * @return
8604  *   The Verbs/DevX object initialized on success, NULL otherwise and rte_errno is set.
8605  */
8606 static struct mlx5_hrxq *
8607 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
8608 		     struct mlx5_flow *dev_flow,
8609 		     struct mlx5_flow_rss_desc *rss_desc,
8610 		     uint32_t *hrxq_idx)
8611 {
8612 	struct mlx5_priv *priv = dev->data->dev_private;
8613 	struct mlx5_flow_handle *dh = dev_flow->handle;
8614 	struct mlx5_hrxq *hrxq;
8615 
8616 	MLX5_ASSERT(rss_desc->queue_num);
8617 	rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
8618 	rss_desc->hash_fields = dev_flow->hash_fields;
8619 	rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
8620 	rss_desc->shared_rss = 0;
8621 	*hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
8622 	if (!*hrxq_idx)
8623 		return NULL;
8624 	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
8625 			      *hrxq_idx);
8626 	return hrxq;
8627 }
8628 
8629 /**
8630  * Release sample sub action resource.
8631  *
8632  * @param[in, out] dev
8633  *   Pointer to rte_eth_dev structure.
8634  * @param[in] act_res
8635  *   Pointer to sample sub action resource.
8636  */
8637 static void
8638 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
8639 				   struct mlx5_flow_sub_actions_idx *act_res)
8640 {
8641 	if (act_res->rix_hrxq) {
8642 		mlx5_hrxq_release(dev, act_res->rix_hrxq);
8643 		act_res->rix_hrxq = 0;
8644 	}
8645 	if (act_res->rix_encap_decap) {
8646 		flow_dv_encap_decap_resource_release(dev,
8647 						     act_res->rix_encap_decap);
8648 		act_res->rix_encap_decap = 0;
8649 	}
8650 	if (act_res->rix_port_id_action) {
8651 		flow_dv_port_id_action_resource_release(dev,
8652 						act_res->rix_port_id_action);
8653 		act_res->rix_port_id_action = 0;
8654 	}
8655 	if (act_res->rix_tag) {
8656 		flow_dv_tag_release(dev, act_res->rix_tag);
8657 		act_res->rix_tag = 0;
8658 	}
8659 	if (act_res->cnt) {
8660 		flow_dv_counter_free(dev, act_res->cnt);
8661 		act_res->cnt = 0;
8662 	}
8663 }
8664 
8665 int
8666 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
8667 			struct mlx5_cache_entry *entry, void *cb_ctx)
8668 {
8669 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8670 	struct rte_eth_dev *dev = ctx->dev;
8671 	struct mlx5_flow_dv_sample_resource *resource = ctx->data;
8672 	struct mlx5_flow_dv_sample_resource *cache_resource =
8673 			container_of(entry, typeof(*cache_resource), entry);
8674 
8675 	if (resource->ratio == cache_resource->ratio &&
8676 	    resource->ft_type == cache_resource->ft_type &&
8677 	    resource->ft_id == cache_resource->ft_id &&
8678 	    resource->set_action == cache_resource->set_action &&
8679 	    !memcmp((void *)&resource->sample_act,
8680 		    (void *)&cache_resource->sample_act,
8681 		    sizeof(struct mlx5_flow_sub_actions_list))) {
8682 		/*
8683 		 * Existing sample action should release the prepared
8684 		 * sub-actions reference counter.
8685 		 */
8686 		flow_dv_sample_sub_actions_release(dev,
8687 						&resource->sample_idx);
8688 		return 0;
8689 	}
8690 	return 1;
8691 }
8692 
8693 struct mlx5_cache_entry *
8694 flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
8695 			 struct mlx5_cache_entry *entry __rte_unused,
8696 			 void *cb_ctx)
8697 {
8698 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8699 	struct rte_eth_dev *dev = ctx->dev;
8700 	struct mlx5_flow_dv_sample_resource *resource = ctx->data;
8701 	void **sample_dv_actions = resource->sub_actions;
8702 	struct mlx5_flow_dv_sample_resource *cache_resource;
8703 	struct mlx5dv_dr_flow_sampler_attr sampler_attr;
8704 	struct mlx5_priv *priv = dev->data->dev_private;
8705 	struct mlx5_dev_ctx_shared *sh = priv->sh;
8706 	struct mlx5_flow_tbl_resource *tbl;
8707 	uint32_t idx = 0;
8708 	const uint32_t next_ft_step = 1;
8709 	uint32_t next_ft_id = resource->ft_id +	next_ft_step;
8710 	uint8_t is_egress = 0;
8711 	uint8_t is_transfer = 0;
8712 	struct rte_flow_error *error = ctx->error;
8713 
8714 	/* Register new sample resource. */
8715 	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
8716 	if (!cache_resource) {
8717 		rte_flow_error_set(error, ENOMEM,
8718 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8719 					  NULL,
8720 					  "cannot allocate resource memory");
8721 		return NULL;
8722 	}
8723 	*cache_resource = *resource;
8724 	/* Create normal path table level */
8725 	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
8726 		is_transfer = 1;
8727 	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
8728 		is_egress = 1;
8729 	tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
8730 					is_egress, is_transfer,
8731 					true, NULL, 0, 0, error);
8732 	if (!tbl) {
8733 		rte_flow_error_set(error, ENOMEM,
8734 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8735 					  NULL,
8736 					  "fail to create normal path table "
8737 					  "for sample");
8738 		goto error;
8739 	}
8740 	cache_resource->normal_path_tbl = tbl;
8741 	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
8742 		cache_resource->default_miss =
8743 				mlx5_glue->dr_create_flow_action_default_miss();
8744 		if (!cache_resource->default_miss) {
8745 			rte_flow_error_set(error, ENOMEM,
8746 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8747 						NULL,
8748 						"cannot create default miss "
8749 						"action");
8750 			goto error;
8751 		}
8752 		sample_dv_actions[resource->sample_act.actions_num++] =
8753 						cache_resource->default_miss;
8754 	}
8755 	/* Create a DR sample action */
8756 	sampler_attr.sample_ratio = cache_resource->ratio;
8757 	sampler_attr.default_next_table = tbl->obj;
8758 	sampler_attr.num_sample_actions = resource->sample_act.actions_num;
8759 	sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
8760 							&sample_dv_actions[0];
8761 	sampler_attr.action = cache_resource->set_action;
8762 	cache_resource->verbs_action =
8763 		mlx5_glue->dr_create_flow_action_sampler(&sampler_attr);
8764 	if (!cache_resource->verbs_action) {
8765 		rte_flow_error_set(error, ENOMEM,
8766 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8767 					NULL, "cannot create sample action");
8768 		goto error;
8769 	}
8770 	cache_resource->idx = idx;
8771 	cache_resource->dev = dev;
8772 	return &cache_resource->entry;
8773 error:
8774 	if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB &&
8775 	    cache_resource->default_miss)
8776 		claim_zero(mlx5_glue->destroy_flow_action
8777 				(cache_resource->default_miss));
8778 	else
8779 		flow_dv_sample_sub_actions_release(dev,
8780 						   &cache_resource->sample_idx);
8781 	if (cache_resource->normal_path_tbl)
8782 		flow_dv_tbl_resource_release(MLX5_SH(dev),
8783 				cache_resource->normal_path_tbl);
8784 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
8785 	return NULL;
8786 
8787 }
8788 
8789 /**
8790  * Find existing sample resource or create and register a new one.
8791  *
8792  * @param[in, out] dev
8793  *   Pointer to rte_eth_dev structure.
8794  * @param[in] resource
8795  *   Pointer to sample resource.
8796  * @param[in, out] dev_flow
8797  *   Pointer to the dev_flow.
8798  * @param[out] error
8799  *   pointer to error structure.
8800  *
8801  * @return
8802  *   0 on success, otherwise -errno and errno is set.
8803  */
8804 static int
8805 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
8806 			 struct mlx5_flow_dv_sample_resource *resource,
8807 			 struct mlx5_flow *dev_flow,
8808 			 struct rte_flow_error *error)
8809 {
8810 	struct mlx5_flow_dv_sample_resource *cache_resource;
8811 	struct mlx5_cache_entry *entry;
8812 	struct mlx5_priv *priv = dev->data->dev_private;
8813 	struct mlx5_flow_cb_ctx ctx = {
8814 		.dev = dev,
8815 		.error = error,
8816 		.data = resource,
8817 	};
8818 
8819 	entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
8820 	if (!entry)
8821 		return -rte_errno;
8822 	cache_resource = container_of(entry, typeof(*cache_resource), entry);
8823 	dev_flow->handle->dvh.rix_sample = cache_resource->idx;
8824 	dev_flow->dv.sample_res = cache_resource;
8825 	return 0;
8826 }
8827 
8828 int
8829 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
8830 			    struct mlx5_cache_entry *entry, void *cb_ctx)
8831 {
8832 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8833 	struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
8834 	struct rte_eth_dev *dev = ctx->dev;
8835 	struct mlx5_flow_dv_dest_array_resource *cache_resource =
8836 			container_of(entry, typeof(*cache_resource), entry);
8837 	uint32_t idx = 0;
8838 
8839 	if (resource->num_of_dest == cache_resource->num_of_dest &&
8840 	    resource->ft_type == cache_resource->ft_type &&
8841 	    !memcmp((void *)cache_resource->sample_act,
8842 		    (void *)resource->sample_act,
8843 		   (resource->num_of_dest *
8844 		   sizeof(struct mlx5_flow_sub_actions_list)))) {
8845 		/*
8846 		 * Existing sample action should release the prepared
8847 		 * sub-actions reference counter.
8848 		 */
8849 		for (idx = 0; idx < resource->num_of_dest; idx++)
8850 			flow_dv_sample_sub_actions_release(dev,
8851 					&resource->sample_idx[idx]);
8852 		return 0;
8853 	}
8854 	return 1;
8855 }
8856 
8857 struct mlx5_cache_entry *
8858 flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
8859 			 struct mlx5_cache_entry *entry __rte_unused,
8860 			 void *cb_ctx)
8861 {
8862 	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8863 	struct rte_eth_dev *dev = ctx->dev;
8864 	struct mlx5_flow_dv_dest_array_resource *cache_resource;
8865 	struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
8866 	struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
8867 	struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
8868 	struct mlx5_priv *priv = dev->data->dev_private;
8869 	struct mlx5_dev_ctx_shared *sh = priv->sh;
8870 	struct mlx5_flow_sub_actions_list *sample_act;
8871 	struct mlx5dv_dr_domain *domain;
8872 	uint32_t idx = 0, res_idx = 0;
8873 	struct rte_flow_error *error = ctx->error;
8874 
8875 	/* Register new destination array resource. */
8876 	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
8877 					    &res_idx);
8878 	if (!cache_resource) {
8879 		rte_flow_error_set(error, ENOMEM,
8880 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8881 					  NULL,
8882 					  "cannot allocate resource memory");
8883 		return NULL;
8884 	}
8885 	*cache_resource = *resource;
8886 	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
8887 		domain = sh->fdb_domain;
8888 	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
8889 		domain = sh->rx_domain;
8890 	else
8891 		domain = sh->tx_domain;
8892 	for (idx = 0; idx < resource->num_of_dest; idx++) {
8893 		dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
8894 				 mlx5_malloc(MLX5_MEM_ZERO,
8895 				 sizeof(struct mlx5dv_dr_action_dest_attr),
8896 				 0, SOCKET_ID_ANY);
8897 		if (!dest_attr[idx]) {
8898 			rte_flow_error_set(error, ENOMEM,
8899 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8900 					   NULL,
8901 					   "cannot allocate resource memory");
8902 			goto error;
8903 		}
8904 		dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
8905 		sample_act = &resource->sample_act[idx];
8906 		if (sample_act->action_flags == MLX5_FLOW_ACTION_QUEUE) {
8907 			dest_attr[idx]->dest = sample_act->dr_queue_action;
8908 		} else if (sample_act->action_flags ==
8909 			  (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP)) {
8910 			dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
8911 			dest_attr[idx]->dest_reformat = &dest_reformat[idx];
8912 			dest_attr[idx]->dest_reformat->reformat =
8913 					sample_act->dr_encap_action;
8914 			dest_attr[idx]->dest_reformat->dest =
8915 					sample_act->dr_port_id_action;
8916 		} else if (sample_act->action_flags ==
8917 			   MLX5_FLOW_ACTION_PORT_ID) {
8918 			dest_attr[idx]->dest = sample_act->dr_port_id_action;
8919 		}
8920 	}
8921 	/* Create a dest array action. */
8922 	cache_resource->action = mlx5_glue->dr_create_flow_action_dest_array
8923 						(domain,
8924 						 cache_resource->num_of_dest,
8925 						 dest_attr);
8926 	if (!cache_resource->action) {
8927 		rte_flow_error_set(error, ENOMEM,
8928 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8929 				   NULL,
8930 				   "cannot create destination array action");
8931 		goto error;
8932 	}
8933 	cache_resource->idx = res_idx;
8934 	cache_resource->dev = dev;
8935 	for (idx = 0; idx < resource->num_of_dest; idx++)
8936 		mlx5_free(dest_attr[idx]);
8937 	return &cache_resource->entry;
8938 error:
8939 	for (idx = 0; idx < resource->num_of_dest; idx++) {
8940 		struct mlx5_flow_sub_actions_idx *act_res =
8941 					&cache_resource->sample_idx[idx];
8942 		if (act_res->rix_hrxq &&
8943 		    !mlx5_hrxq_release(dev,
8944 				act_res->rix_hrxq))
8945 			act_res->rix_hrxq = 0;
8946 		if (act_res->rix_encap_decap &&
8947 			!flow_dv_encap_decap_resource_release(dev,
8948 				act_res->rix_encap_decap))
8949 			act_res->rix_encap_decap = 0;
8950 		if (act_res->rix_port_id_action &&
8951 			!flow_dv_port_id_action_resource_release(dev,
8952 				act_res->rix_port_id_action))
8953 			act_res->rix_port_id_action = 0;
8954 		if (dest_attr[idx])
8955 			mlx5_free(dest_attr[idx]);
8956 	}
8957 
8958 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
8959 	return NULL;
8960 }
8961 
8962 /**
8963  * Find existing destination array resource or create and register a new one.
8964  *
8965  * @param[in, out] dev
8966  *   Pointer to rte_eth_dev structure.
8967  * @param[in] resource
8968  *   Pointer to destination array resource.
8969  * @param[in, out] dev_flow
8970  *   Pointer to the dev_flow.
8971  * @param[out] error
8972  *   pointer to error structure.
8973  *
8974  * @return
8975  *   0 on success, otherwise -errno and errno is set.
8976  */
8977 static int
8978 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
8979 			 struct mlx5_flow_dv_dest_array_resource *resource,
8980 			 struct mlx5_flow *dev_flow,
8981 			 struct rte_flow_error *error)
8982 {
8983 	struct mlx5_flow_dv_dest_array_resource *cache_resource;
8984 	struct mlx5_priv *priv = dev->data->dev_private;
8985 	struct mlx5_cache_entry *entry;
8986 	struct mlx5_flow_cb_ctx ctx = {
8987 		.dev = dev,
8988 		.error = error,
8989 		.data = resource,
8990 	};
8991 
8992 	entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
8993 	if (!entry)
8994 		return -rte_errno;
8995 	cache_resource = container_of(entry, typeof(*cache_resource), entry);
8996 	dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
8997 	dev_flow->dv.dest_array_res = cache_resource;
8998 	return 0;
8999 }
9000 
9001 /**
9002  * Convert Sample action to DV specification.
9003  *
9004  * @param[in] dev
9005  *   Pointer to rte_eth_dev structure.
9006  * @param[in] action
9007  *   Pointer to action structure.
9008  * @param[in, out] dev_flow
9009  *   Pointer to the mlx5_flow.
9010  * @param[in] attr
9011  *   Pointer to the flow attributes.
9012  * @param[in, out] num_of_dest
9013  *   Pointer to the num of destination.
9014  * @param[in, out] sample_actions
9015  *   Pointer to sample actions list.
9016  * @param[in, out] res
9017  *   Pointer to sample resource.
9018  * @param[out] error
9019  *   Pointer to the error structure.
9020  *
9021  * @return
9022  *   0 on success, a negative errno value otherwise and rte_errno is set.
9023  */
9024 static int
9025 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
9026 				const struct rte_flow_action *action,
9027 				struct mlx5_flow *dev_flow,
9028 				const struct rte_flow_attr *attr,
9029 				uint32_t *num_of_dest,
9030 				void **sample_actions,
9031 				struct mlx5_flow_dv_sample_resource *res,
9032 				struct rte_flow_error *error)
9033 {
9034 	struct mlx5_priv *priv = dev->data->dev_private;
9035 	const struct rte_flow_action_sample *sample_action;
9036 	const struct rte_flow_action *sub_actions;
9037 	const struct rte_flow_action_queue *queue;
9038 	struct mlx5_flow_sub_actions_list *sample_act;
9039 	struct mlx5_flow_sub_actions_idx *sample_idx;
9040 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9041 	struct mlx5_flow_rss_desc *rss_desc;
9042 	uint64_t action_flags = 0;
9043 
9044 	MLX5_ASSERT(wks);
9045 	rss_desc = &wks->rss_desc;
9046 	sample_act = &res->sample_act;
9047 	sample_idx = &res->sample_idx;
9048 	sample_action = (const struct rte_flow_action_sample *)action->conf;
9049 	res->ratio = sample_action->ratio;
9050 	sub_actions = sample_action->actions;
9051 	for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
9052 		int type = sub_actions->type;
9053 		uint32_t pre_rix = 0;
9054 		void *pre_r;
9055 		switch (type) {
9056 		case RTE_FLOW_ACTION_TYPE_QUEUE:
9057 		{
9058 			struct mlx5_hrxq *hrxq;
9059 			uint32_t hrxq_idx;
9060 
9061 			queue = sub_actions->conf;
9062 			rss_desc->queue_num = 1;
9063 			rss_desc->queue[0] = queue->index;
9064 			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
9065 						    rss_desc, &hrxq_idx);
9066 			if (!hrxq)
9067 				return rte_flow_error_set
9068 					(error, rte_errno,
9069 					 RTE_FLOW_ERROR_TYPE_ACTION,
9070 					 NULL,
9071 					 "cannot create fate queue");
9072 			sample_act->dr_queue_action = hrxq->action;
9073 			sample_idx->rix_hrxq = hrxq_idx;
9074 			sample_actions[sample_act->actions_num++] =
9075 						hrxq->action;
9076 			(*num_of_dest)++;
9077 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
9078 			if (action_flags & MLX5_FLOW_ACTION_MARK)
9079 				dev_flow->handle->rix_hrxq = hrxq_idx;
9080 			dev_flow->handle->fate_action =
9081 					MLX5_FLOW_FATE_QUEUE;
9082 			break;
9083 		}
9084 		case RTE_FLOW_ACTION_TYPE_MARK:
9085 		{
9086 			uint32_t tag_be = mlx5_flow_mark_set
9087 				(((const struct rte_flow_action_mark *)
9088 				(sub_actions->conf))->id);
9089 
9090 			dev_flow->handle->mark = 1;
9091 			pre_rix = dev_flow->handle->dvh.rix_tag;
9092 			/* Save the mark resource before sample */
9093 			pre_r = dev_flow->dv.tag_resource;
9094 			if (flow_dv_tag_resource_register(dev, tag_be,
9095 						  dev_flow, error))
9096 				return -rte_errno;
9097 			MLX5_ASSERT(dev_flow->dv.tag_resource);
9098 			sample_act->dr_tag_action =
9099 				dev_flow->dv.tag_resource->action;
9100 			sample_idx->rix_tag =
9101 				dev_flow->handle->dvh.rix_tag;
9102 			sample_actions[sample_act->actions_num++] =
9103 						sample_act->dr_tag_action;
9104 			/* Recover the mark resource after sample */
9105 			dev_flow->dv.tag_resource = pre_r;
9106 			dev_flow->handle->dvh.rix_tag = pre_rix;
9107 			action_flags |= MLX5_FLOW_ACTION_MARK;
9108 			break;
9109 		}
9110 		case RTE_FLOW_ACTION_TYPE_COUNT:
9111 		{
9112 			uint32_t counter;
9113 
9114 			counter = flow_dv_translate_create_counter(dev,
9115 					dev_flow, sub_actions->conf, 0);
9116 			if (!counter)
9117 				return rte_flow_error_set
9118 						(error, rte_errno,
9119 						 RTE_FLOW_ERROR_TYPE_ACTION,
9120 						 NULL,
9121 						 "cannot create counter"
9122 						 " object.");
9123 			sample_idx->cnt = counter;
9124 			sample_act->dr_cnt_action =
9125 				  (flow_dv_counter_get_by_idx(dev,
9126 				  counter, NULL))->action;
9127 			sample_actions[sample_act->actions_num++] =
9128 						sample_act->dr_cnt_action;
9129 			action_flags |= MLX5_FLOW_ACTION_COUNT;
9130 			break;
9131 		}
9132 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
9133 		{
9134 			struct mlx5_flow_dv_port_id_action_resource
9135 					port_id_resource;
9136 			uint32_t port_id = 0;
9137 
9138 			memset(&port_id_resource, 0, sizeof(port_id_resource));
9139 			/* Save the port id resource before sample */
9140 			pre_rix = dev_flow->handle->rix_port_id_action;
9141 			pre_r = dev_flow->dv.port_id_action;
9142 			if (flow_dv_translate_action_port_id(dev, sub_actions,
9143 							     &port_id, error))
9144 				return -rte_errno;
9145 			port_id_resource.port_id = port_id;
9146 			if (flow_dv_port_id_action_resource_register
9147 			    (dev, &port_id_resource, dev_flow, error))
9148 				return -rte_errno;
9149 			sample_act->dr_port_id_action =
9150 				dev_flow->dv.port_id_action->action;
9151 			sample_idx->rix_port_id_action =
9152 				dev_flow->handle->rix_port_id_action;
9153 			sample_actions[sample_act->actions_num++] =
9154 						sample_act->dr_port_id_action;
9155 			/* Recover the port id resource after sample */
9156 			dev_flow->dv.port_id_action = pre_r;
9157 			dev_flow->handle->rix_port_id_action = pre_rix;
9158 			(*num_of_dest)++;
9159 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9160 			break;
9161 		}
9162 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
9163 			/* Save the encap resource before sample */
9164 			pre_rix = dev_flow->handle->dvh.rix_encap_decap;
9165 			pre_r = dev_flow->dv.encap_decap;
9166 			if (flow_dv_create_action_l2_encap(dev, sub_actions,
9167 							   dev_flow,
9168 							   attr->transfer,
9169 							   error))
9170 				return -rte_errno;
9171 			sample_act->dr_encap_action =
9172 				dev_flow->dv.encap_decap->action;
9173 			sample_idx->rix_encap_decap =
9174 				dev_flow->handle->dvh.rix_encap_decap;
9175 			sample_actions[sample_act->actions_num++] =
9176 						sample_act->dr_encap_action;
9177 			/* Recover the encap resource after sample */
9178 			dev_flow->dv.encap_decap = pre_r;
9179 			dev_flow->handle->dvh.rix_encap_decap = pre_rix;
9180 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
9181 			break;
9182 		default:
9183 			return rte_flow_error_set(error, EINVAL,
9184 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9185 				NULL,
9186 				"action not supported in sampler");
9187 		}
9188 	}
9189 	sample_act->action_flags = action_flags;
9190 	res->ft_id = dev_flow->dv.group;
9191 	if (attr->transfer) {
9192 		union {
9193 			uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
9194 			uint64_t set_action;
9195 		} action_ctx = { .set_action = 0 };
9196 
9197 		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
9198 		MLX5_SET(set_action_in, action_ctx.action_in, action_type,
9199 			 MLX5_MODIFICATION_TYPE_SET);
9200 		MLX5_SET(set_action_in, action_ctx.action_in, field,
9201 			 MLX5_MODI_META_REG_C_0);
9202 		MLX5_SET(set_action_in, action_ctx.action_in, data,
9203 			 priv->vport_meta_tag);
9204 		res->set_action = action_ctx.set_action;
9205 	} else if (attr->ingress) {
9206 		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9207 	} else {
9208 		res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
9209 	}
9210 	return 0;
9211 }
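
/*
 * Editorial sketch (not part of the driver): the save/restore pattern
 * used by the sample sub-action translation above. Each sub-action
 * registers its resource through the shared per-flow slot, keeps a
 * private copy for the sample path, then restores the slot:
 *
 *	pre_rix = handle->dvh.rix_tag;             // save the slot
 *	pre_r = dev_flow->dv.tag_resource;
 *	flow_dv_tag_resource_register(...);        // fills the slot
 *	sample_idx->rix_tag = handle->dvh.rix_tag; // sample's own copy
 *	dev_flow->dv.tag_resource = pre_r;         // restore the slot
 *	handle->dvh.rix_tag = pre_rix;
 */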
9212 
9213 /**
9214  * Convert Sample action to DV specification.
9215  *
9216  * @param[in] dev
9217  *   Pointer to rte_eth_dev structure.
9218  * @param[in, out] dev_flow
9219  *   Pointer to the mlx5_flow.
9220  * @param[in] num_of_dest
9221  *   The number of destinations.
9222  * @param[in, out] res
9223  *   Pointer to sample resource.
9224  * @param[in, out] mdest_res
9225  *   Pointer to destination array resource.
9226  * @param[in] sample_actions
9227  *   Pointer to sample path actions list.
9228  * @param[in] action_flags
9229  *   Holds the actions detected until now.
9230  * @param[out] error
9231  *   Pointer to the error structure.
9232  *
9233  * @return
9234  *   0 on success, a negative errno value otherwise and rte_errno is set.
9235  */
9236 static int
9237 flow_dv_create_action_sample(struct rte_eth_dev *dev,
9238 			     struct mlx5_flow *dev_flow,
9239 			     uint32_t num_of_dest,
9240 			     struct mlx5_flow_dv_sample_resource *res,
9241 			     struct mlx5_flow_dv_dest_array_resource *mdest_res,
9242 			     void **sample_actions,
9243 			     uint64_t action_flags,
9244 			     struct rte_flow_error *error)
9245 {
9246 	/* Update the normal path action resource at the last index of the array. */
9247 	uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
9248 	struct mlx5_flow_sub_actions_list *sample_act =
9249 					&mdest_res->sample_act[dest_index];
9250 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9251 	struct mlx5_flow_rss_desc *rss_desc;
9252 	uint32_t normal_idx = 0;
9253 	struct mlx5_hrxq *hrxq;
9254 	uint32_t hrxq_idx;
9255 
9256 	MLX5_ASSERT(wks);
9257 	rss_desc = &wks->rss_desc;
9258 	if (num_of_dest > 1) {
9259 		if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
9260 			/* Handle QP action for mirroring */
9261 			hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
9262 						    rss_desc, &hrxq_idx);
9263 			if (!hrxq)
9264 				return rte_flow_error_set
9265 				     (error, rte_errno,
9266 				      RTE_FLOW_ERROR_TYPE_ACTION,
9267 				      NULL,
9268 				      "cannot create rx queue");
9269 			normal_idx++;
9270 			mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
9271 			sample_act->dr_queue_action = hrxq->action;
9272 			if (action_flags & MLX5_FLOW_ACTION_MARK)
9273 				dev_flow->handle->rix_hrxq = hrxq_idx;
9274 			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9275 		}
9276 		if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
9277 			normal_idx++;
9278 			mdest_res->sample_idx[dest_index].rix_encap_decap =
9279 				dev_flow->handle->dvh.rix_encap_decap;
9280 			sample_act->dr_encap_action =
9281 				dev_flow->dv.encap_decap->action;
9282 		}
9283 		if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
9284 			normal_idx++;
9285 			mdest_res->sample_idx[dest_index].rix_port_id_action =
9286 				dev_flow->handle->rix_port_id_action;
9287 			sample_act->dr_port_id_action =
9288 				dev_flow->dv.port_id_action->action;
9289 		}
9290 		sample_act->actions_num = normal_idx;
9291 		/* Update the sample action resource at the first index of the array. */
9292 		mdest_res->ft_type = res->ft_type;
9293 		memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
9294 				sizeof(struct mlx5_flow_sub_actions_idx));
9295 		memcpy(&mdest_res->sample_act[0], &res->sample_act,
9296 				sizeof(struct mlx5_flow_sub_actions_list));
9297 		mdest_res->num_of_dest = num_of_dest;
9298 		if (flow_dv_dest_array_resource_register(dev, mdest_res,
9299 							 dev_flow, error))
9300 			return rte_flow_error_set(error, EINVAL,
9301 						  RTE_FLOW_ERROR_TYPE_ACTION,
9302 						  NULL, "can't create sample "
9303 						  "action");
9304 	} else {
9305 		res->sub_actions = sample_actions;
9306 		if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
9307 			return rte_flow_error_set(error, EINVAL,
9308 						  RTE_FLOW_ERROR_TYPE_ACTION,
9309 						  NULL,
9310 						  "can't create sample action");
9311 	}
9312 	return 0;
9313 }
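
/*
 * Editorial note: layout of the destination array built above when
 * mirroring (num_of_dest > 1). Assuming MLX5_MAX_DEST_NUM == 2 purely
 * for illustration:
 *
 *	mdest_res->sample_act[0] = sample path actions (copied from *res)
 *	mdest_res->sample_act[1] = normal path actions (queue/encap/
 *				   port_id collected into normal_idx)
 */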
9314 
9315 /**
9316  * Remove an ASO age action from age actions list.
9317  * Remove an ASO age action from the aged actions list.
9318  * @param[in] dev
9319  *   Pointer to the Ethernet device structure.
9320  * @param[in] age
9321  *   Pointer to the aso age action handler.
9322  */
9323 static void
9324 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
9325 				struct mlx5_aso_age_action *age)
9326 {
9327 	struct mlx5_age_info *age_info;
9328 	struct mlx5_age_param *age_param = &age->age_params;
9329 	struct mlx5_priv *priv = dev->data->dev_private;
9330 	uint16_t expected = AGE_CANDIDATE;
9331 
9332 	age_info = GET_PORT_AGE_INFO(priv);
9333 	if (!__atomic_compare_exchange_n(&age_param->state, &expected,
9334 					 AGE_FREE, false, __ATOMIC_RELAXED,
9335 					 __ATOMIC_RELAXED)) {
9336 		/*
9337 		 * The lock is needed even on age timeout,
9338 		 * since the age action may still be in progress.
9339 		 */
9340 		rte_spinlock_lock(&age_info->aged_sl);
9341 		LIST_REMOVE(age, next);
9342 		rte_spinlock_unlock(&age_info->aged_sl);
9343 		__atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
9344 	}
9345 }
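
/*
 * Editorial sketch of the state transition above (state names taken
 * from the code): a candidate action is moved straight to AGE_FREE by
 * the compare-and-swap; an action that already aged out sits on the
 * aged list and must be unlinked under the spinlock first:
 *
 *	AGE_CANDIDATE --CAS-------------------------> AGE_FREE
 *	already aged  --LIST_REMOVE + atomic store--> AGE_FREE
 */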
9346 
9347 /**
9348  * Release an ASO age action.
9349  *
9350  * @param[in] dev
9351  *   Pointer to the Ethernet device structure.
9352  * @param[in] age_idx
9353  *   Index of ASO age action to release.
9354  * @param[in] flow
9355  *   True if the release operation is during flow destroy operation.
9356  *   False if the release operation is during action destroy operation.
9357  *
9358  * @return
9359  *   0 when age action was removed, otherwise the number of references.
9360  */
9361 static int
9362 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
9363 {
9364 	struct mlx5_priv *priv = dev->data->dev_private;
9365 	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9366 	struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
9367 	uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
9368 
9369 	if (!ret) {
9370 		flow_dv_aso_age_remove_from_age(dev, age);
9371 		rte_spinlock_lock(&mng->free_sl);
9372 		LIST_INSERT_HEAD(&mng->free, age, next);
9373 		rte_spinlock_unlock(&mng->free_sl);
9374 	}
9375 	return ret;
9376 }
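
/*
 * Editorial sketch (hypothetical caller): interpreting the return
 * value of flow_dv_aso_age_release():
 *
 *	ret = flow_dv_aso_age_release(dev, age_idx);
 *	if (!ret)
 *		... last reference dropped, action back on mng->free ...
 *	else
 *		... 'ret' references remain, nothing is freed ...
 */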
9377 
9378 /**
9379  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
9380  *
9381  * @param[in] dev
9382  *   Pointer to the Ethernet device structure.
9383  *
9384  * @return
9385  *   0 on success, otherwise negative errno value and rte_errno is set.
9386  */
9387 static int
flow_dv_aso_age_pools_resize(struct rte_eth_dev * dev)9388 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
9389 {
9390 	struct mlx5_priv *priv = dev->data->dev_private;
9391 	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9392 	void *old_pools = mng->pools;
9393 	uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
9394 	uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
9395 	void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
9396 
9397 	if (!pools) {
9398 		rte_errno = ENOMEM;
9399 		return -ENOMEM;
9400 	}
9401 	if (old_pools) {
9402 		memcpy(pools, old_pools,
9403 		       mng->n * sizeof(struct mlx5_aso_age_pool *));
9404 		mlx5_free(old_pools);
9405 	} else {
9406 		/* First ASO flow hit allocation - starting ASO data-path. */
9407 		int ret = mlx5_aso_queue_start(priv->sh);
9408 
9409 		if (ret) {
9410 			mlx5_free(pools);
9411 			return ret;
9412 		}
9413 	}
9414 	mng->n = resize;
9415 	mng->pools = pools;
9416 	return 0;
9417 }
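
/*
 * Editorial example: assuming MLX5_CNT_CONTAINER_RESIZE == 64 (value
 * taken as an assumption for illustration), the pools array grows
 * 0 -> 64 -> 128 ... entries; the old pointer table is copied into the
 * zeroed replacement and freed, and the very first resize also starts
 * the ASO data-path via mlx5_aso_queue_start().
 */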
9418 
9419 /**
9420  * Create and initialize a new ASO aging pool.
9421  *
9422  * @param[in] dev
9423  *   Pointer to the Ethernet device structure.
9424  * @param[out] age_free
9425  *   Where to put the pointer of a new age action.
9426  *
9427  * @return
9428  *   The age actions pool pointer and @p age_free is set on success,
9429  *   NULL otherwise and rte_errno is set.
9430  */
9431 static struct mlx5_aso_age_pool *
9432 flow_dv_age_pool_create(struct rte_eth_dev *dev,
9433 			struct mlx5_aso_age_action **age_free)
9434 {
9435 	struct mlx5_priv *priv = dev->data->dev_private;
9436 	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9437 	struct mlx5_aso_age_pool *pool = NULL;
9438 	struct mlx5_devx_obj *obj = NULL;
9439 	uint32_t i;
9440 
9441 	obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
9442 						    priv->sh->pdn);
9443 	if (!obj) {
9444 		rte_errno = ENODATA;
9445 		DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
9446 		return NULL;
9447 	}
9448 	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
9449 	if (!pool) {
9450 		claim_zero(mlx5_devx_cmd_destroy(obj));
9451 		rte_errno = ENOMEM;
9452 		return NULL;
9453 	}
9454 	pool->flow_hit_aso_obj = obj;
9455 	pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
9456 	rte_spinlock_lock(&mng->resize_sl);
9457 	pool->index = mng->next;
9458 	/* Resize pools array if there is no room for the new pool in it. */
9459 	if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
9460 		claim_zero(mlx5_devx_cmd_destroy(obj));
9461 		mlx5_free(pool);
9462 		rte_spinlock_unlock(&mng->resize_sl);
9463 		return NULL;
9464 	}
9465 	mng->pools[pool->index] = pool;
9466 	mng->next++;
9467 	rte_spinlock_unlock(&mng->resize_sl);
9468 	/* Assign the first action in the new pool; the rest go to the free list. */
9469 	*age_free = &pool->actions[0];
9470 	for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
9471 		pool->actions[i].offset = i;
9472 		LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
9473 	}
9474 	return pool;
9475 }
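
/*
 * Editorial note: the loop above seeds the free list with
 * actions[1..MLX5_ASO_AGE_ACTIONS_PER_POOL - 1] while actions[0] is
 * returned through *age_free, so a successful pool creation always
 * satisfies the caller's allocation immediately.
 */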
9476 
9477 /**
9478  * Allocate an ASO aging bit.
9479  *
9480  * @param[in] dev
9481  *   Pointer to the Ethernet device structure.
9482  * @param[out] error
9483  *   Pointer to the error structure.
9484  *
9485  * @return
9486  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
9487  */
9488 static uint32_t
9489 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
9490 {
9491 	struct mlx5_priv *priv = dev->data->dev_private;
9492 	const struct mlx5_aso_age_pool *pool;
9493 	struct mlx5_aso_age_action *age_free = NULL;
9494 	struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9495 
9496 	MLX5_ASSERT(mng);
9497 	/* Try to get the next free age action bit. */
9498 	rte_spinlock_lock(&mng->free_sl);
9499 	age_free = LIST_FIRST(&mng->free);
9500 	if (age_free) {
9501 		LIST_REMOVE(age_free, next);
9502 	} else if (!flow_dv_age_pool_create(dev, &age_free)) {
9503 		rte_spinlock_unlock(&mng->free_sl);
9504 		rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
9505 				   NULL, "failed to create ASO age pool");
9506 		return 0; /* 0 is an error. */
9507 	}
9508 	rte_spinlock_unlock(&mng->free_sl);
9509 	pool = container_of
9510 	  ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
9511 		  (age_free - age_free->offset), const struct mlx5_aso_age_pool,
9512 								       actions);
9513 	if (!age_free->dr_action) {
9514 		int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
9515 						 error);
9516 
9517 		if (reg_c < 0) {
9518 			rte_flow_error_set(error, rte_errno,
9519 					   RTE_FLOW_ERROR_TYPE_ACTION,
9520 					   NULL, "failed to get reg_c "
9521 					   "for ASO flow hit");
9522 			return 0; /* 0 is an error. */
9523 		}
9524 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
9525 		age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
9526 				(priv->sh->rx_domain,
9527 				 pool->flow_hit_aso_obj->obj, age_free->offset,
9528 				 MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
9529 				 (reg_c - REG_C_0));
9530 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
9531 		if (!age_free->dr_action) {
9532 			rte_errno = errno;
9533 			rte_spinlock_lock(&mng->free_sl);
9534 			LIST_INSERT_HEAD(&mng->free, age_free, next);
9535 			rte_spinlock_unlock(&mng->free_sl);
9536 			rte_flow_error_set(error, rte_errno,
9537 					   RTE_FLOW_ERROR_TYPE_ACTION,
9538 					   NULL, "failed to create ASO "
9539 					   "flow hit action");
9540 			return 0; /* 0 is an error. */
9541 		}
9542 	}
9543 	__atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
9544 	return pool->index | ((age_free->offset + 1) << 16);
9545 }
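
/*
 * Editorial sketch: the index returned above packs the pool index into
 * the low 16 bits and (offset + 1) into the upper bits, so that 0 can
 * signal an error. A matching decode (illustrative only, mirroring the
 * encode; the real lookup is flow_aso_age_get_by_idx()):
 *
 *	uint32_t pool_idx = age_idx & 0xffff;
 *	uint32_t offset   = (age_idx >> 16) - 1;
 */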
9546 
9547 /**
9548  * Create an age action using the ASO mechanism.
9549  *
9550  * @param[in] dev
9551  *   Pointer to rte_eth_dev structure.
9552  * @param[in] age
9553  *   Pointer to the aging action configuration.
9554  * @param[out] error
9555  *   Pointer to the error structure.
9556  *
9557  * @return
9558  *   Index to the ASO age action on success, 0 otherwise.
9559  */
9560 static uint32_t
9561 flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
9562 				 const struct rte_flow_action_age *age,
9563 				 struct rte_flow_error *error)
9564 {
9565 	uint32_t age_idx = 0;
9566 	struct mlx5_aso_age_action *aso_age;
9567 
9568 	age_idx = flow_dv_aso_age_alloc(dev, error);
9569 	if (!age_idx)
9570 		return 0;
9571 	aso_age = flow_aso_age_get_by_idx(dev, age_idx);
9572 	aso_age->age_params.context = age->context;
9573 	aso_age->age_params.timeout = age->timeout;
9574 	aso_age->age_params.port_id = dev->data->port_id;
9575 	__atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
9576 			 __ATOMIC_RELAXED);
9577 	__atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
9578 			 __ATOMIC_RELAXED);
9579 	return age_idx;
9580 }
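
/*
 * Editorial sketch (usage as seen in flow_dv_translate() below): the
 * returned index is stored in the flow and resolved back to the DR
 * action when building the action list:
 *
 *	flow->age = flow_dv_translate_create_aso_age(dev, conf, error);
 *	if (flow->age)
 *		dev_flow->dv.actions[actions_n++] =
 *			flow_aso_age_get_by_idx(dev, flow->age)->dr_action;
 */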
9581 
9582 /**
9583  * Fill the flow with DV spec, lock free
9584  * (mutex should be acquired by caller).
9585  *
9586  * @param[in] dev
9587  *   Pointer to rte_eth_dev structure.
9588  * @param[in, out] dev_flow
9589  *   Pointer to the sub flow.
9590  * @param[in] attr
9591  *   Pointer to the flow attributes.
9592  * @param[in] items
9593  *   Pointer to the list of items.
9594  * @param[in] actions
9595  *   Pointer to the list of actions.
9596  * @param[out] error
9597  *   Pointer to the error structure.
9598  *
9599  * @return
9600  *   0 on success, a negative errno value otherwise and rte_errno is set.
9601  */
9602 static int
9603 flow_dv_translate(struct rte_eth_dev *dev,
9604 		  struct mlx5_flow *dev_flow,
9605 		  const struct rte_flow_attr *attr,
9606 		  const struct rte_flow_item items[],
9607 		  const struct rte_flow_action actions[],
9608 		  struct rte_flow_error *error)
9609 {
9610 	struct mlx5_priv *priv = dev->data->dev_private;
9611 	struct mlx5_dev_config *dev_conf = &priv->config;
9612 	struct rte_flow *flow = dev_flow->flow;
9613 	struct mlx5_flow_handle *handle = dev_flow->handle;
9614 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9615 	struct mlx5_flow_rss_desc *rss_desc;
9616 	uint64_t item_flags = 0;
9617 	uint64_t last_item = 0;
9618 	uint64_t action_flags = 0;
9619 	uint64_t priority = attr->priority;
9620 	struct mlx5_flow_dv_matcher matcher = {
9621 		.mask = {
9622 			.size = sizeof(matcher.mask.buf) -
9623 				MLX5_ST_SZ_BYTES(fte_match_set_misc4),
9624 		},
9625 	};
9626 	int actions_n = 0;
9627 	bool actions_end = false;
9628 	union {
9629 		struct mlx5_flow_dv_modify_hdr_resource res;
9630 		uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
9631 			    sizeof(struct mlx5_modification_cmd) *
9632 			    (MLX5_MAX_MODIFY_NUM + 1)];
9633 	} mhdr_dummy;
9634 	struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
9635 	const struct rte_flow_action_count *count = NULL;
9636 	const struct rte_flow_action_age *age = NULL;
9637 	union flow_dv_attr flow_attr = { .attr = 0 };
9638 	uint32_t tag_be;
9639 	union mlx5_flow_tbl_key tbl_key;
9640 	uint32_t modify_action_position = UINT32_MAX;
9641 	void *match_mask = matcher.mask.buf;
9642 	void *match_value = dev_flow->dv.value.buf;
9643 	uint8_t next_protocol = 0xff;
9644 	struct rte_vlan_hdr vlan = { 0 };
9645 	struct mlx5_flow_dv_dest_array_resource mdest_res;
9646 	struct mlx5_flow_dv_sample_resource sample_res;
9647 	void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
9648 	struct mlx5_flow_sub_actions_list *sample_act;
9649 	uint32_t sample_act_pos = UINT32_MAX;
9650 	uint32_t num_of_dest = 0;
9651 	int tmp_actions_n = 0;
9652 	uint32_t table;
9653 	int ret = 0;
9654 	const struct mlx5_flow_tunnel *tunnel;
9655 	struct flow_grp_info grp_info = {
9656 		.external = !!dev_flow->external,
9657 		.transfer = !!attr->transfer,
9658 		.fdb_def_rule = !!priv->fdb_def_rule,
9659 		.skip_scale = !!dev_flow->skip_scale,
9660 	};
9661 
9662 	if (!wks)
9663 		return rte_flow_error_set(error, ENOMEM,
9664 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9665 					  NULL,
9666 					  "failed to push flow workspace");
9667 	rss_desc = &wks->rss_desc;
9668 	memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
9669 	memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
9670 	mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
9671 					   MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9672 	/* Update the normal path action resource at the last index of the array. */
9673 	sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
9674 	tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
9675 		 flow_items_to_tunnel(items) :
9676 		 is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
9677 		 flow_actions_to_tunnel(actions) :
9678 		 dev_flow->tunnel ? dev_flow->tunnel : NULL;
9679 	mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
9680 					   MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9681 	grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
9682 				(dev, tunnel, attr, items, actions);
9683 	ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
9684 				       &grp_info, error);
9685 	if (ret)
9686 		return ret;
9687 	dev_flow->dv.group = table;
9688 	if (attr->transfer)
9689 		mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
9690 	if (priority == MLX5_FLOW_PRIO_RSVD)
9691 		priority = dev_conf->flow_prio - 1;
9692 	/* The number of actions must be set to 0 in case of a dirty stack. */
9693 	mhdr_res->actions_num = 0;
9694 	if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
9695 		/*
9696 		 * Do not add the decap action if the match rule drops the
9697 		 * packet: HW rejects rules with decap & drop.
9698 		 *
9699 		 * If the tunnel match rule was inserted before the matching
9700 		 * tunnel set rule, the flow table used in the match rule must
9701 		 * be registered. The current implementation handles that in
9702 		 * flow_dv_matcher_register() at the end of the function.
9703 		 */
9704 		bool add_decap = true;
9705 		const struct rte_flow_action *ptr = actions;
9706 
9707 		for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
9708 			if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
9709 				add_decap = false;
9710 				break;
9711 			}
9712 		}
9713 		if (add_decap) {
9714 			if (flow_dv_create_action_l2_decap(dev, dev_flow,
9715 							   attr->transfer,
9716 							   error))
9717 				return -rte_errno;
9718 			dev_flow->dv.actions[actions_n++] =
9719 					dev_flow->dv.encap_decap->action;
9720 			action_flags |= MLX5_FLOW_ACTION_DECAP;
9721 		}
9722 	}
9723 	for (; !actions_end ; actions++) {
9724 		const struct rte_flow_action_queue *queue;
9725 		const struct rte_flow_action_rss *rss;
9726 		const struct rte_flow_action *action = actions;
9727 		const uint8_t *rss_key;
9728 		const struct rte_flow_action_meter *mtr;
9729 		struct mlx5_flow_tbl_resource *tbl;
9730 		struct mlx5_aso_age_action *age_act;
9731 		uint32_t port_id = 0;
9732 		struct mlx5_flow_dv_port_id_action_resource port_id_resource;
9733 		int action_type = actions->type;
9734 		const struct rte_flow_action *found_action = NULL;
9735 		struct mlx5_flow_meter *fm = NULL;
9736 		uint32_t jump_group = 0;
9737 
9738 		if (!mlx5_flow_os_action_supported(action_type))
9739 			return rte_flow_error_set(error, ENOTSUP,
9740 						  RTE_FLOW_ERROR_TYPE_ACTION,
9741 						  actions,
9742 						  "action not supported");
9743 		switch (action_type) {
9744 		case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
9745 			action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
9746 			break;
9747 		case RTE_FLOW_ACTION_TYPE_VOID:
9748 			break;
9749 		case RTE_FLOW_ACTION_TYPE_PORT_ID:
9750 			if (flow_dv_translate_action_port_id(dev, action,
9751 							     &port_id, error))
9752 				return -rte_errno;
9753 			port_id_resource.port_id = port_id;
9754 			MLX5_ASSERT(!handle->rix_port_id_action);
9755 			if (flow_dv_port_id_action_resource_register
9756 			    (dev, &port_id_resource, dev_flow, error))
9757 				return -rte_errno;
9758 			dev_flow->dv.actions[actions_n++] =
9759 					dev_flow->dv.port_id_action->action;
9760 			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9761 			dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
9762 			sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9763 			num_of_dest++;
9764 			break;
9765 		case RTE_FLOW_ACTION_TYPE_FLAG:
9766 			action_flags |= MLX5_FLOW_ACTION_FLAG;
9767 			dev_flow->handle->mark = 1;
9768 			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9769 				struct rte_flow_action_mark mark = {
9770 					.id = MLX5_FLOW_MARK_DEFAULT,
9771 				};
9772 
9773 				if (flow_dv_convert_action_mark(dev, &mark,
9774 								mhdr_res,
9775 								error))
9776 					return -rte_errno;
9777 				action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9778 				break;
9779 			}
9780 			tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
9781 			/*
9782 			 * Only one FLAG or MARK is supported per device flow
9783 			 * right now. So the pointer to the tag resource must be
9784 			 * zero before the register process.
9785 			 */
9786 			MLX5_ASSERT(!handle->dvh.rix_tag);
9787 			if (flow_dv_tag_resource_register(dev, tag_be,
9788 							  dev_flow, error))
9789 				return -rte_errno;
9790 			MLX5_ASSERT(dev_flow->dv.tag_resource);
9791 			dev_flow->dv.actions[actions_n++] =
9792 					dev_flow->dv.tag_resource->action;
9793 			break;
9794 		case RTE_FLOW_ACTION_TYPE_MARK:
9795 			action_flags |= MLX5_FLOW_ACTION_MARK;
9796 			dev_flow->handle->mark = 1;
9797 			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9798 				const struct rte_flow_action_mark *mark =
9799 					(const struct rte_flow_action_mark *)
9800 						actions->conf;
9801 
9802 				if (flow_dv_convert_action_mark(dev, mark,
9803 								mhdr_res,
9804 								error))
9805 					return -rte_errno;
9806 				action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9807 				break;
9808 			}
9809 			/* Fall-through */
9810 		case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
9811 			/* Legacy (non-extensive) MARK action. */
9812 			tag_be = mlx5_flow_mark_set
9813 			      (((const struct rte_flow_action_mark *)
9814 			       (actions->conf))->id);
9815 			MLX5_ASSERT(!handle->dvh.rix_tag);
9816 			if (flow_dv_tag_resource_register(dev, tag_be,
9817 							  dev_flow, error))
9818 				return -rte_errno;
9819 			MLX5_ASSERT(dev_flow->dv.tag_resource);
9820 			dev_flow->dv.actions[actions_n++] =
9821 					dev_flow->dv.tag_resource->action;
9822 			break;
9823 		case RTE_FLOW_ACTION_TYPE_SET_META:
9824 			if (flow_dv_convert_action_set_meta
9825 				(dev, mhdr_res, attr,
9826 				 (const struct rte_flow_action_set_meta *)
9827 				  actions->conf, error))
9828 				return -rte_errno;
9829 			action_flags |= MLX5_FLOW_ACTION_SET_META;
9830 			break;
9831 		case RTE_FLOW_ACTION_TYPE_SET_TAG:
9832 			if (flow_dv_convert_action_set_tag
9833 				(dev, mhdr_res,
9834 				 (const struct rte_flow_action_set_tag *)
9835 				  actions->conf, error))
9836 				return -rte_errno;
9837 			action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9838 			break;
9839 		case RTE_FLOW_ACTION_TYPE_DROP:
9840 			action_flags |= MLX5_FLOW_ACTION_DROP;
9841 			dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
9842 			break;
9843 		case RTE_FLOW_ACTION_TYPE_QUEUE:
9844 			queue = actions->conf;
9845 			rss_desc->queue_num = 1;
9846 			rss_desc->queue[0] = queue->index;
9847 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
9848 			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9849 			sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
9850 			num_of_dest++;
9851 			break;
9852 		case RTE_FLOW_ACTION_TYPE_RSS:
9853 			rss = actions->conf;
9854 			memcpy(rss_desc->queue, rss->queue,
9855 			       rss->queue_num * sizeof(uint16_t));
9856 			rss_desc->queue_num = rss->queue_num;
9857 			/* NULL RSS key indicates default RSS key. */
9858 			rss_key = !rss->key ? rss_hash_default_key : rss->key;
9859 			memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
9860 			/*
9861 			 * rss->level and rss->types should be set in advance
9862 			 * when expanding items for RSS.
9863 			 */
9864 			action_flags |= MLX5_FLOW_ACTION_RSS;
9865 			dev_flow->handle->fate_action = rss_desc->shared_rss ?
9866 				MLX5_FLOW_FATE_SHARED_RSS :
9867 				MLX5_FLOW_FATE_QUEUE;
9868 			break;
9869 		case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
9870 			flow->age = (uint32_t)(uintptr_t)(action->conf);
9871 			age_act = flow_aso_age_get_by_idx(dev, flow->age);
9872 			__atomic_fetch_add(&age_act->refcnt, 1,
9873 					   __ATOMIC_RELAXED);
9874 			dev_flow->dv.actions[actions_n++] = age_act->dr_action;
9875 			action_flags |= MLX5_FLOW_ACTION_AGE;
9876 			break;
9877 		case RTE_FLOW_ACTION_TYPE_AGE:
9878 			if (priv->sh->flow_hit_aso_en && attr->group) {
9879 				flow->age = flow_dv_translate_create_aso_age
9880 						(dev, action->conf, error);
9881 				if (!flow->age)
9882 					return rte_flow_error_set
9883 						(error, rte_errno,
9884 						 RTE_FLOW_ERROR_TYPE_ACTION,
9885 						 NULL,
9886 						 "can't create ASO age action");
9887 				dev_flow->dv.actions[actions_n++] =
9888 					  (flow_aso_age_get_by_idx
9889 						(dev, flow->age))->dr_action;
9890 				action_flags |= MLX5_FLOW_ACTION_AGE;
9891 				break;
9892 			}
9893 			/* Fall-through */
9894 		case RTE_FLOW_ACTION_TYPE_COUNT:
9895 			if (!dev_conf->devx) {
9896 				return rte_flow_error_set
9897 					      (error, ENOTSUP,
9898 					       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9899 					       NULL,
9900 					       "count action not supported");
9901 			}
9902 			/* Save information first, will apply later. */
9903 			if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
9904 				count = action->conf;
9905 			else
9906 				age = action->conf;
9907 			action_flags |= MLX5_FLOW_ACTION_COUNT;
9908 			break;
9909 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
9910 			dev_flow->dv.actions[actions_n++] =
9911 						priv->sh->pop_vlan_action;
9912 			action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
9913 			break;
9914 		case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
9915 			if (!(action_flags &
9916 			      MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
9917 				flow_dev_get_vlan_info_from_items(items, &vlan);
9918 			vlan.eth_proto = rte_be_to_cpu_16
9919 			     ((((const struct rte_flow_action_of_push_vlan *)
9920 						   actions->conf)->ethertype));
9921 			found_action = mlx5_flow_find_action
9922 					(actions + 1,
9923 					 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
9924 			if (found_action)
9925 				mlx5_update_vlan_vid_pcp(found_action, &vlan);
9926 			found_action = mlx5_flow_find_action
9927 					(actions + 1,
9928 					 RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
9929 			if (found_action)
9930 				mlx5_update_vlan_vid_pcp(found_action, &vlan);
9931 			if (flow_dv_create_action_push_vlan
9932 					    (dev, attr, &vlan, dev_flow, error))
9933 				return -rte_errno;
9934 			dev_flow->dv.actions[actions_n++] =
9935 					dev_flow->dv.push_vlan_res->action;
9936 			action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
9937 			break;
9938 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
9939 			/* The OF_PUSH_VLAN action already handled this action. */
9940 			MLX5_ASSERT(action_flags &
9941 				    MLX5_FLOW_ACTION_OF_PUSH_VLAN);
9942 			break;
9943 		case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
9944 			if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
9945 				break;
9946 			flow_dev_get_vlan_info_from_items(items, &vlan);
9947 			mlx5_update_vlan_vid_pcp(actions, &vlan);
9948 			/* With no VLAN push, this is a modify header action. */
9949 			if (flow_dv_convert_action_modify_vlan_vid
9950 						(mhdr_res, actions, error))
9951 				return -rte_errno;
9952 			action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
9953 			break;
9954 		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
9955 		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
9956 			if (flow_dv_create_action_l2_encap(dev, actions,
9957 							   dev_flow,
9958 							   attr->transfer,
9959 							   error))
9960 				return -rte_errno;
9961 			dev_flow->dv.actions[actions_n++] =
9962 					dev_flow->dv.encap_decap->action;
9963 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
9964 			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
9965 				sample_act->action_flags |=
9966 							MLX5_FLOW_ACTION_ENCAP;
9967 			break;
9968 		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
9969 		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
9970 			if (flow_dv_create_action_l2_decap(dev, dev_flow,
9971 							   attr->transfer,
9972 							   error))
9973 				return -rte_errno;
9974 			dev_flow->dv.actions[actions_n++] =
9975 					dev_flow->dv.encap_decap->action;
9976 			action_flags |= MLX5_FLOW_ACTION_DECAP;
9977 			break;
9978 		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
9979 			/* Handle encap with preceding decap. */
9980 			if (action_flags & MLX5_FLOW_ACTION_DECAP) {
9981 				if (flow_dv_create_action_raw_encap
9982 					(dev, actions, dev_flow, attr, error))
9983 					return -rte_errno;
9984 				dev_flow->dv.actions[actions_n++] =
9985 					dev_flow->dv.encap_decap->action;
9986 			} else {
9987 				/* Handle encap without preceding decap. */
9988 				if (flow_dv_create_action_l2_encap
9989 				    (dev, actions, dev_flow, attr->transfer,
9990 				     error))
9991 					return -rte_errno;
9992 				dev_flow->dv.actions[actions_n++] =
9993 					dev_flow->dv.encap_decap->action;
9994 			}
9995 			action_flags |= MLX5_FLOW_ACTION_ENCAP;
9996 			if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
9997 				sample_act->action_flags |=
9998 							MLX5_FLOW_ACTION_ENCAP;
9999 			break;
10000 		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
10001 			while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
10002 				;
10003 			if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
10004 				if (flow_dv_create_action_l2_decap
10005 				    (dev, dev_flow, attr->transfer, error))
10006 					return -rte_errno;
10007 				dev_flow->dv.actions[actions_n++] =
10008 					dev_flow->dv.encap_decap->action;
10009 			}
10010 			/* If decap is followed by encap, handle it at encap. */
10011 			action_flags |= MLX5_FLOW_ACTION_DECAP;
10012 			break;
10013 		case RTE_FLOW_ACTION_TYPE_JUMP:
10014 			jump_group = ((const struct rte_flow_action_jump *)
10015 							action->conf)->group;
10016 			grp_info.std_tbl_fix = 0;
10017 			grp_info.skip_scale = 0;
10018 			ret = mlx5_flow_group_to_table(dev, tunnel,
10019 						       jump_group,
10020 						       &table,
10021 						       &grp_info, error);
10022 			if (ret)
10023 				return ret;
10024 			tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
10025 						       attr->transfer,
10026 						       !!dev_flow->external,
10027 						       tunnel, jump_group, 0,
10028 						       error);
10029 			if (!tbl)
10030 				return rte_flow_error_set
10031 						(error, errno,
10032 						 RTE_FLOW_ERROR_TYPE_ACTION,
10033 						 NULL,
10034 						 "cannot create jump action.");
10035 			if (flow_dv_jump_tbl_resource_register
10036 			    (dev, tbl, dev_flow, error)) {
10037 				flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10038 				return rte_flow_error_set
10039 						(error, errno,
10040 						 RTE_FLOW_ERROR_TYPE_ACTION,
10041 						 NULL,
10042 						 "cannot create jump action.");
10043 			}
10044 			dev_flow->dv.actions[actions_n++] =
10045 					dev_flow->dv.jump->action;
10046 			action_flags |= MLX5_FLOW_ACTION_JUMP;
10047 			dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
10048 			break;
10049 		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
10050 		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
10051 			if (flow_dv_convert_action_modify_mac
10052 					(mhdr_res, actions, error))
10053 				return -rte_errno;
10054 			action_flags |= actions->type ==
10055 					RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
10056 					MLX5_FLOW_ACTION_SET_MAC_SRC :
10057 					MLX5_FLOW_ACTION_SET_MAC_DST;
10058 			break;
10059 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
10060 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
10061 			if (flow_dv_convert_action_modify_ipv4
10062 					(mhdr_res, actions, error))
10063 				return -rte_errno;
10064 			action_flags |= actions->type ==
10065 					RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
10066 					MLX5_FLOW_ACTION_SET_IPV4_SRC :
10067 					MLX5_FLOW_ACTION_SET_IPV4_DST;
10068 			break;
10069 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
10070 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
10071 			if (flow_dv_convert_action_modify_ipv6
10072 					(mhdr_res, actions, error))
10073 				return -rte_errno;
10074 			action_flags |= actions->type ==
10075 					RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
10076 					MLX5_FLOW_ACTION_SET_IPV6_SRC :
10077 					MLX5_FLOW_ACTION_SET_IPV6_DST;
10078 			break;
10079 		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
10080 		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
10081 			if (flow_dv_convert_action_modify_tp
10082 					(mhdr_res, actions, items,
10083 					 &flow_attr, dev_flow, !!(action_flags &
10084 					 MLX5_FLOW_ACTION_DECAP), error))
10085 				return -rte_errno;
10086 			action_flags |= actions->type ==
10087 					RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
10088 					MLX5_FLOW_ACTION_SET_TP_SRC :
10089 					MLX5_FLOW_ACTION_SET_TP_DST;
10090 			break;
10091 		case RTE_FLOW_ACTION_TYPE_DEC_TTL:
10092 			if (flow_dv_convert_action_modify_dec_ttl
10093 					(mhdr_res, items, &flow_attr, dev_flow,
10094 					 !!(action_flags &
10095 					 MLX5_FLOW_ACTION_DECAP), error))
10096 				return -rte_errno;
10097 			action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
10098 			break;
10099 		case RTE_FLOW_ACTION_TYPE_SET_TTL:
10100 			if (flow_dv_convert_action_modify_ttl
10101 					(mhdr_res, actions, items, &flow_attr,
10102 					 dev_flow, !!(action_flags &
10103 					 MLX5_FLOW_ACTION_DECAP), error))
10104 				return -rte_errno;
10105 			action_flags |= MLX5_FLOW_ACTION_SET_TTL;
10106 			break;
10107 		case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
10108 		case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
10109 			if (flow_dv_convert_action_modify_tcp_seq
10110 					(mhdr_res, actions, error))
10111 				return -rte_errno;
10112 			action_flags |= actions->type ==
10113 					RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
10114 					MLX5_FLOW_ACTION_INC_TCP_SEQ :
10115 					MLX5_FLOW_ACTION_DEC_TCP_SEQ;
10116 			break;
10117 
10118 		case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
10119 		case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
10120 			if (flow_dv_convert_action_modify_tcp_ack
10121 					(mhdr_res, actions, error))
10122 				return -rte_errno;
10123 			action_flags |= actions->type ==
10124 					RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
10125 					MLX5_FLOW_ACTION_INC_TCP_ACK :
10126 					MLX5_FLOW_ACTION_DEC_TCP_ACK;
10127 			break;
10128 		case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
10129 			if (flow_dv_convert_action_set_reg
10130 					(mhdr_res, actions, error))
10131 				return -rte_errno;
10132 			action_flags |= MLX5_FLOW_ACTION_SET_TAG;
10133 			break;
10134 		case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
10135 			if (flow_dv_convert_action_copy_mreg
10136 					(dev, mhdr_res, actions, error))
10137 				return -rte_errno;
10138 			action_flags |= MLX5_FLOW_ACTION_SET_TAG;
10139 			break;
10140 		case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
10141 			action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
10142 			dev_flow->handle->fate_action =
10143 					MLX5_FLOW_FATE_DEFAULT_MISS;
10144 			break;
10145 		case RTE_FLOW_ACTION_TYPE_METER:
10146 			mtr = actions->conf;
10147 			if (!flow->meter) {
10148 				fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
10149 							    attr, error);
10150 				if (!fm)
10151 					return rte_flow_error_set(error,
10152 						rte_errno,
10153 						RTE_FLOW_ERROR_TYPE_ACTION,
10154 						NULL,
10155 						"meter not found "
10156 						"or invalid parameters");
10157 				flow->meter = fm->idx;
10158 			}
10159 			/* Set the meter action. */
10160 			if (!fm) {
10161 				fm = mlx5_ipool_get(priv->sh->ipool
10162 						[MLX5_IPOOL_MTR], flow->meter);
10163 				if (!fm)
10164 					return rte_flow_error_set(error,
10165 						rte_errno,
10166 						RTE_FLOW_ERROR_TYPE_ACTION,
10167 						NULL,
10168 						"meter not found "
10169 						"or invalid parameters");
10170 			}
10171 			dev_flow->dv.actions[actions_n++] =
10172 				fm->mfts->meter_action;
10173 			action_flags |= MLX5_FLOW_ACTION_METER;
10174 			break;
10175 		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
10176 			if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
10177 							      actions, error))
10178 				return -rte_errno;
10179 			action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
10180 			break;
10181 		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
10182 			if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
10183 							      actions, error))
10184 				return -rte_errno;
10185 			action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
10186 			break;
10187 		case RTE_FLOW_ACTION_TYPE_SAMPLE:
10188 			sample_act_pos = actions_n;
10189 			ret = flow_dv_translate_action_sample(dev,
10190 							      actions,
10191 							      dev_flow, attr,
10192 							      &num_of_dest,
10193 							      sample_actions,
10194 							      &sample_res,
10195 							      error);
10196 			if (ret < 0)
10197 				return ret;
10198 			actions_n++;
10199 			action_flags |= MLX5_FLOW_ACTION_SAMPLE;
10200 			/* Put the encap action into the group when used with port_id. */
10201 			if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
10202 			    (action_flags & MLX5_FLOW_ACTION_PORT_ID))
10203 				sample_act->action_flags |=
10204 							MLX5_FLOW_ACTION_ENCAP;
10205 			break;
10206 		case RTE_FLOW_ACTION_TYPE_END:
10207 			actions_end = true;
10208 			if (mhdr_res->actions_num) {
10209 				/* Create the modify header action if needed. */
10210 				if (flow_dv_modify_hdr_resource_register
10211 					(dev, mhdr_res, dev_flow, error))
10212 					return -rte_errno;
10213 				dev_flow->dv.actions[modify_action_position] =
10214 					handle->dvh.modify_hdr->action;
10215 			}
10216 			if (action_flags & MLX5_FLOW_ACTION_COUNT) {
10217 				flow->counter =
10218 					flow_dv_translate_create_counter(dev,
10219 						dev_flow, count, age);
10220 
10221 				if (!flow->counter)
10222 					return rte_flow_error_set
10223 						(error, rte_errno,
10224 						RTE_FLOW_ERROR_TYPE_ACTION,
10225 						NULL,
10226 						"cannot create counter"
10227 						" object.");
10228 				dev_flow->dv.actions[actions_n] =
10229 					  (flow_dv_counter_get_by_idx(dev,
10230 					  flow->counter, NULL))->action;
10231 				actions_n++;
10232 			}
10233 			if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
10234 				ret = flow_dv_create_action_sample(dev,
10235 							  dev_flow,
10236 							  num_of_dest,
10237 							  &sample_res,
10238 							  &mdest_res,
10239 							  sample_actions,
10240 							  action_flags,
10241 							  error);
10242 				if (ret < 0)
10243 					return rte_flow_error_set
10244 						(error, rte_errno,
10245 						RTE_FLOW_ERROR_TYPE_ACTION,
10246 						NULL,
10247 						"cannot create sample action");
10248 				if (num_of_dest > 1) {
10249 					dev_flow->dv.actions[sample_act_pos] =
10250 					dev_flow->dv.dest_array_res->action;
10251 				} else {
10252 					dev_flow->dv.actions[sample_act_pos] =
10253 					dev_flow->dv.sample_res->verbs_action;
10254 				}
10255 			}
10256 			break;
10257 		default:
10258 			break;
10259 		}
10260 		if (mhdr_res->actions_num &&
10261 		    modify_action_position == UINT32_MAX)
10262 			modify_action_position = actions_n++;
10263 	}
10264 	/*
10265 	 * For multiple destinations (sample action with ratio=1), the encap
10266 	 * action and the port_id action are combined into a group action,
10267 	 * so the original actions must be removed from the flow and the
10268 	 * sample action used instead.
10269 	 */
10270 	if (num_of_dest > 1 && sample_act->dr_port_id_action) {
10271 		int i;
10272 		void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
10273 
10274 		for (i = 0; i < actions_n; i++) {
10275 			if ((sample_act->dr_encap_action &&
10276 				sample_act->dr_encap_action ==
10277 				dev_flow->dv.actions[i]) ||
10278 				(sample_act->dr_port_id_action &&
10279 				sample_act->dr_port_id_action ==
10280 				dev_flow->dv.actions[i]))
10281 				continue;
10282 			temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
10283 		}
10284 		memcpy((void *)dev_flow->dv.actions,
10285 				(void *)temp_actions,
10286 				tmp_actions_n * sizeof(void *));
10287 		actions_n = tmp_actions_n;
10288 	}
10289 	dev_flow->dv.actions_n = actions_n;
10290 	dev_flow->act_flags = action_flags;
10291 	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
10292 		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
10293 		int item_type = items->type;
10294 
10295 		if (!mlx5_flow_os_item_supported(item_type))
10296 			return rte_flow_error_set(error, ENOTSUP,
10297 						  RTE_FLOW_ERROR_TYPE_ITEM,
10298 						  NULL, "item not supported");
10299 		switch (item_type) {
10300 		case RTE_FLOW_ITEM_TYPE_PORT_ID:
10301 			flow_dv_translate_item_port_id
10302 				(dev, match_mask, match_value, items, attr);
10303 			last_item = MLX5_FLOW_ITEM_PORT_ID;
10304 			break;
10305 		case RTE_FLOW_ITEM_TYPE_ETH:
10306 			flow_dv_translate_item_eth(match_mask, match_value,
10307 						   items, tunnel,
10308 						   dev_flow->dv.group);
10309 			matcher.priority = action_flags &
10310 					MLX5_FLOW_ACTION_DEFAULT_MISS &&
10311 					!dev_flow->external ?
10312 					MLX5_PRIORITY_MAP_L3 :
10313 					MLX5_PRIORITY_MAP_L2;
10314 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
10315 					     MLX5_FLOW_LAYER_OUTER_L2;
10316 			break;
10317 		case RTE_FLOW_ITEM_TYPE_VLAN:
10318 			flow_dv_translate_item_vlan(dev_flow,
10319 						    match_mask, match_value,
10320 						    items, tunnel,
10321 						    dev_flow->dv.group);
10322 			matcher.priority = MLX5_PRIORITY_MAP_L2;
10323 			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
10324 					      MLX5_FLOW_LAYER_INNER_VLAN) :
10325 					     (MLX5_FLOW_LAYER_OUTER_L2 |
10326 					      MLX5_FLOW_LAYER_OUTER_VLAN);
10327 			break;
10328 		case RTE_FLOW_ITEM_TYPE_IPV4:
10329 			mlx5_flow_tunnel_ip_check(items, next_protocol,
10330 						  &item_flags, &tunnel);
10331 			flow_dv_translate_item_ipv4(match_mask, match_value,
10332 						    items, tunnel,
10333 						    dev_flow->dv.group);
10334 			matcher.priority = MLX5_PRIORITY_MAP_L3;
10335 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
10336 					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
10337 			if (items->mask != NULL &&
10338 			    ((const struct rte_flow_item_ipv4 *)
10339 			     items->mask)->hdr.next_proto_id) {
10340 				next_protocol =
10341 					((const struct rte_flow_item_ipv4 *)
10342 					 (items->spec))->hdr.next_proto_id;
10343 				next_protocol &=
10344 					((const struct rte_flow_item_ipv4 *)
10345 					 (items->mask))->hdr.next_proto_id;
10346 			} else {
10347 				/* Reset for inner layer. */
10348 				next_protocol = 0xff;
10349 			}
10350 			break;
10351 		case RTE_FLOW_ITEM_TYPE_IPV6:
10352 			mlx5_flow_tunnel_ip_check(items, next_protocol,
10353 						  &item_flags, &tunnel);
10354 			flow_dv_translate_item_ipv6(match_mask, match_value,
10355 						    items, tunnel,
10356 						    dev_flow->dv.group);
10357 			matcher.priority = MLX5_PRIORITY_MAP_L3;
10358 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
10359 					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
10360 			if (items->mask != NULL &&
10361 			    ((const struct rte_flow_item_ipv6 *)
10362 			     items->mask)->hdr.proto) {
10363 				next_protocol =
10364 					((const struct rte_flow_item_ipv6 *)
10365 					 items->spec)->hdr.proto;
10366 				next_protocol &=
10367 					((const struct rte_flow_item_ipv6 *)
10368 					 items->mask)->hdr.proto;
10369 			} else {
10370 				/* Reset for inner layer. */
10371 				next_protocol = 0xff;
10372 			}
10373 			break;
10374 		case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
10375 			flow_dv_translate_item_ipv6_frag_ext(match_mask,
10376 							     match_value,
10377 							     items, tunnel);
10378 			last_item = tunnel ?
10379 					MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
10380 					MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
10381 			if (items->mask != NULL &&
10382 			    ((const struct rte_flow_item_ipv6_frag_ext *)
10383 			     items->mask)->hdr.next_header) {
10384 				next_protocol =
10385 				((const struct rte_flow_item_ipv6_frag_ext *)
10386 				 items->spec)->hdr.next_header;
10387 				next_protocol &=
10388 				((const struct rte_flow_item_ipv6_frag_ext *)
10389 				 items->mask)->hdr.next_header;
10390 			} else {
10391 				/* Reset for inner layer. */
10392 				next_protocol = 0xff;
10393 			}
10394 			break;
10395 		case RTE_FLOW_ITEM_TYPE_TCP:
10396 			flow_dv_translate_item_tcp(match_mask, match_value,
10397 						   items, tunnel);
10398 			matcher.priority = MLX5_PRIORITY_MAP_L4;
10399 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
10400 					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
10401 			break;
10402 		case RTE_FLOW_ITEM_TYPE_UDP:
10403 			flow_dv_translate_item_udp(match_mask, match_value,
10404 						   items, tunnel);
10405 			matcher.priority = MLX5_PRIORITY_MAP_L4;
10406 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
10407 					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
10408 			break;
10409 		case RTE_FLOW_ITEM_TYPE_GRE:
10410 			flow_dv_translate_item_gre(match_mask, match_value,
10411 						   items, tunnel);
10412 			matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10413 			last_item = MLX5_FLOW_LAYER_GRE;
10414 			break;
10415 		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
10416 			flow_dv_translate_item_gre_key(match_mask,
10417 						       match_value, items);
10418 			last_item = MLX5_FLOW_LAYER_GRE_KEY;
10419 			break;
10420 		case RTE_FLOW_ITEM_TYPE_NVGRE:
10421 			flow_dv_translate_item_nvgre(match_mask, match_value,
10422 						     items, tunnel);
10423 			matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10424 			last_item = MLX5_FLOW_LAYER_GRE;
10425 			break;
10426 		case RTE_FLOW_ITEM_TYPE_VXLAN:
10427 			flow_dv_translate_item_vxlan(match_mask, match_value,
10428 						     items, tunnel);
10429 			matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10430 			last_item = MLX5_FLOW_LAYER_VXLAN;
10431 			break;
10432 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
10433 			flow_dv_translate_item_vxlan_gpe(match_mask,
10434 							 match_value, items,
10435 							 tunnel);
10436 			matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10437 			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
10438 			break;
10439 		case RTE_FLOW_ITEM_TYPE_GENEVE:
10440 			flow_dv_translate_item_geneve(match_mask, match_value,
10441 						      items, tunnel);
10442 			matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10443 			last_item = MLX5_FLOW_LAYER_GENEVE;
10444 			break;
10445 		case RTE_FLOW_ITEM_TYPE_MPLS:
10446 			flow_dv_translate_item_mpls(match_mask, match_value,
10447 						    items, last_item, tunnel);
10448 			matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10449 			last_item = MLX5_FLOW_LAYER_MPLS;
10450 			break;
10451 		case RTE_FLOW_ITEM_TYPE_MARK:
10452 			flow_dv_translate_item_mark(dev, match_mask,
10453 						    match_value, items);
10454 			last_item = MLX5_FLOW_ITEM_MARK;
10455 			break;
10456 		case RTE_FLOW_ITEM_TYPE_META:
10457 			flow_dv_translate_item_meta(dev, match_mask,
10458 						    match_value, attr, items);
10459 			last_item = MLX5_FLOW_ITEM_METADATA;
10460 			break;
10461 		case RTE_FLOW_ITEM_TYPE_ICMP:
10462 			flow_dv_translate_item_icmp(match_mask, match_value,
10463 						    items, tunnel);
10464 			last_item = MLX5_FLOW_LAYER_ICMP;
10465 			break;
10466 		case RTE_FLOW_ITEM_TYPE_ICMP6:
10467 			flow_dv_translate_item_icmp6(match_mask, match_value,
10468 						      items, tunnel);
10469 			last_item = MLX5_FLOW_LAYER_ICMP6;
10470 			break;
10471 		case RTE_FLOW_ITEM_TYPE_TAG:
10472 			flow_dv_translate_item_tag(dev, match_mask,
10473 						   match_value, items);
10474 			last_item = MLX5_FLOW_ITEM_TAG;
10475 			break;
10476 		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
10477 			flow_dv_translate_mlx5_item_tag(dev, match_mask,
10478 							match_value, items);
10479 			last_item = MLX5_FLOW_ITEM_TAG;
10480 			break;
10481 		case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
10482 			flow_dv_translate_item_tx_queue(dev, match_mask,
10483 							match_value,
10484 							items);
10485 			last_item = MLX5_FLOW_ITEM_TX_QUEUE;
10486 			break;
10487 		case RTE_FLOW_ITEM_TYPE_GTP:
10488 			flow_dv_translate_item_gtp(match_mask, match_value,
10489 						   items, tunnel);
10490 			matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10491 			last_item = MLX5_FLOW_LAYER_GTP;
10492 			break;
10493 		case RTE_FLOW_ITEM_TYPE_ECPRI:
10494 			if (!mlx5_flex_parser_ecpri_exist(dev)) {
10495 				/* Create it only the first time it is used. */
10496 				ret = mlx5_flex_parser_ecpri_alloc(dev);
10497 				if (ret)
10498 					return rte_flow_error_set
10499 						(error, -ret,
10500 						RTE_FLOW_ERROR_TYPE_ITEM,
10501 						NULL,
10502 						"cannot create eCPRI parser");
10503 			}
10504 			/* Adjust the matcher mask and device flow value sizes. */
10505 			matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
10506 			dev_flow->dv.value.size =
10507 					MLX5_ST_SZ_BYTES(fte_match_param);
10508 			flow_dv_translate_item_ecpri(dev, match_mask,
10509 						     match_value, items);
10510 			/* No other protocol should follow eCPRI layer. */
10511 			last_item = MLX5_FLOW_LAYER_ECPRI;
10512 			break;
10513 		default:
10514 			break;
10515 		}
10516 		item_flags |= last_item;
10517 	}
10518 	/*
10519 	 * When E-Switch mode is enabled, we have two cases where we need to
10520 	 * set the source port manually.
10521 	 * The first is a NIC steering rule, and the second is an E-Switch
10522 	 * rule where no port_id item was found. In both cases the source
10523 	 * port is set according to the current port in use.
10524 	 */
10525 	if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
10526 	    (priv->representor || priv->master)) {
10527 		if (flow_dv_translate_item_port_id(dev, match_mask,
10528 						   match_value, NULL, attr))
10529 			return -rte_errno;
10530 	}
10531 #ifdef RTE_LIBRTE_MLX5_DEBUG
10532 	MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
10533 					      dev_flow->dv.value.buf));
10534 #endif
10535 	/*
10536 	 * Layers may already be initialized from the prefix flow if this
10537 	 * dev_flow is the suffix flow.
10538 	 */
10539 	handle->layers |= item_flags;
10540 	if (action_flags & MLX5_FLOW_ACTION_RSS)
10541 		flow_dv_hashfields_set(dev_flow, rss_desc);
10542 	/* Register matcher. */
10543 	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
10544 				    matcher.mask.size);
10545 	matcher.priority = mlx5_flow_adjust_priority(dev, priority,
10546 						     matcher.priority);
10547 	/* The reserved field does not need to be set to 0 here. */
10548 	tbl_key.domain = attr->transfer;
10549 	tbl_key.direction = attr->egress;
10550 	tbl_key.table_id = dev_flow->dv.group;
10551 	if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
10552 				     tunnel, attr->group, error))
10553 		return -rte_errno;
10554 	return 0;
10555 }
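
/*
 * Editorial outline of flow_dv_translate() above: (1) resolve the
 * target table/group, (2) walk the actions into dev_flow->dv.actions[]
 * (modify-header commands are accumulated in mhdr_res and emitted once
 * at RTE_FLOW_ACTION_TYPE_END), (3) walk the items into the matcher
 * mask/value buffers, (4) register the matcher against the table key.
 */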
10556 
10557 /**
10558  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10559  * and tunnel.
10560  *
10561  * @param[in, out] action
10562  *   Shared RSS action holding hash RX queue objects.
10563  * @param[in] hash_fields
10564  *   Defines combination of packet fields to participate in RX hash.
10565  * @param[in] tunnel
10566  *   Tunnel type
10567  * @param[in] hrxq_idx
10568  *   Hash RX queue index to set.
10569  *
10570  * @return
10571  *   0 on success, otherwise negative errno value.
10572  */
10573 static int
10574 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
10575 			      const uint64_t hash_fields,
10576 			      const int tunnel,
10577 			      uint32_t hrxq_idx)
10578 {
10579 	uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
10580 
10581 	switch (hash_fields & ~IBV_RX_HASH_INNER) {
10582 	case MLX5_RSS_HASH_IPV4:
10583 		hrxqs[0] = hrxq_idx;
10584 		return 0;
10585 	case MLX5_RSS_HASH_IPV4_TCP:
10586 		hrxqs[1] = hrxq_idx;
10587 		return 0;
10588 	case MLX5_RSS_HASH_IPV4_UDP:
10589 		hrxqs[2] = hrxq_idx;
10590 		return 0;
10591 	case MLX5_RSS_HASH_IPV6:
10592 		hrxqs[3] = hrxq_idx;
10593 		return 0;
10594 	case MLX5_RSS_HASH_IPV6_TCP:
10595 		hrxqs[4] = hrxq_idx;
10596 		return 0;
10597 	case MLX5_RSS_HASH_IPV6_UDP:
10598 		hrxqs[5] = hrxq_idx;
10599 		return 0;
10600 	case MLX5_RSS_HASH_NONE:
10601 		hrxqs[6] = hrxq_idx;
10602 		return 0;
10603 	default:
10604 		return -1;
10605 	}
10606 }
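
/*
 * Editorial note: slot layout shared by the set/lookup pair (derived
 * from the switch cases):
 *
 *	hrxqs[0] IPV4     hrxqs[1] IPV4_TCP  hrxqs[2] IPV4_UDP
 *	hrxqs[3] IPV6     hrxqs[4] IPV6_TCP  hrxqs[5] IPV6_UDP
 *	hrxqs[6] NONE
 */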
10607 
10608 /**
10609  * Look up a hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10610  * and tunnel.
10611  *
10612  * @param[in] dev
10613  *   Pointer to the Ethernet device structure.
10614  * @param[in] idx
10615  *   Shared RSS action ID holding hash RX queue objects.
10616  * @param[in] hash_fields
10617  *   Defines combination of packet fields to participate in RX hash.
10618  * @param[in] tunnel
10619  *   Tunnel type
10620  *
10621  * @return
10622  *   Valid hash RX queue index, otherwise 0.
10623  */
10624 static uint32_t
10625 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
10626 				 const uint64_t hash_fields,
10627 				 const int tunnel)
10628 {
10629 	struct mlx5_priv *priv = dev->data->dev_private;
10630 	struct mlx5_shared_action_rss *shared_rss =
10631 	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
10632 	const uint32_t *hrxqs = tunnel ? shared_rss->hrxq :
10633 							shared_rss->hrxq_tunnel;
10634 
10635 	switch (hash_fields & ~IBV_RX_HASH_INNER) {
10636 	case MLX5_RSS_HASH_IPV4:
10637 		return hrxqs[0];
10638 	case MLX5_RSS_HASH_IPV4_TCP:
10639 		return hrxqs[1];
10640 	case MLX5_RSS_HASH_IPV4_UDP:
10641 		return hrxqs[2];
10642 	case MLX5_RSS_HASH_IPV6:
10643 		return hrxqs[3];
10644 	case MLX5_RSS_HASH_IPV6_TCP:
10645 		return hrxqs[4];
10646 	case MLX5_RSS_HASH_IPV6_UDP:
10647 		return hrxqs[5];
10648 	case MLX5_RSS_HASH_NONE:
10649 		return hrxqs[6];
10650 	default:
10651 		return 0;
10652 	}
10653 }
10654 
10655 /**
10656  * Retrieve the hash RX queue suitable for the *flow*.
10657  * If a shared action is configured for the *flow*, the suitable hash RX
10658  * queue is retrieved from the attached shared action.
10659  *
10660  * @param[in] dev
10661  *   Pointer to the Ethernet device structure.
10662  * @param[in] dev_flow
10663  *   Pointer to the sub flow.
10664  * @param[in] rss_desc
10665  *   Pointer to the RSS descriptor.
10666  * @param[out] hrxq
10667  *   Pointer to retrieved hash RX queue object.
10668  *
10669  * @return
10670  *   Valid hash RX queue index, otherwise 0 and rte_errno is set.
10671  */
10672 static uint32_t
10673 __flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
10674 		       struct mlx5_flow_rss_desc *rss_desc,
10675 		       struct mlx5_hrxq **hrxq)
10676 {
10677 	struct mlx5_priv *priv = dev->data->dev_private;
10678 	uint32_t hrxq_idx;
10679 
10680 	if (rss_desc->shared_rss) {
10681 		hrxq_idx = __flow_dv_action_rss_hrxq_lookup
10682 				(dev, rss_desc->shared_rss,
10683 				 dev_flow->hash_fields,
10684 				 !!(dev_flow->handle->layers &
10685 				    MLX5_FLOW_LAYER_TUNNEL));
10686 		if (hrxq_idx)
10687 			*hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10688 					       hrxq_idx);
10689 	} else {
10690 		*hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
10691 					     &hrxq_idx);
10692 	}
10693 	return hrxq_idx;
10694 }
10695 
10696 /**
10697  * Apply the flow to the NIC, lock free,
10698  * (mutex should be acquired by caller).
10699  *
10700  * @param[in] dev
10701  *   Pointer to the Ethernet device structure.
10702  * @param[in, out] flow
10703  *   Pointer to flow structure.
10704  * @param[out] error
10705  *   Pointer to error structure.
10706  *
10707  * @return
10708  *   0 on success, a negative errno value otherwise and rte_errno is set.
10709  */
10710 static int
10711 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
10712 	      struct rte_flow_error *error)
10713 {
10714 	struct mlx5_flow_dv_workspace *dv;
10715 	struct mlx5_flow_handle *dh;
10716 	struct mlx5_flow_handle_dv *dv_h;
10717 	struct mlx5_flow *dev_flow;
10718 	struct mlx5_priv *priv = dev->data->dev_private;
10719 	uint32_t handle_idx;
10720 	int n;
10721 	int err;
10722 	int idx;
10723 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10724 	struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
10725 
10726 	MLX5_ASSERT(wks);
10727 	if (rss_desc->shared_rss) {
10728 		dh = wks->flows[wks->flow_idx - 1].handle;
10729 		MLX5_ASSERT(dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS);
10730 		dh->rix_srss = rss_desc->shared_rss;
10731 	}
10732 	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
10733 		dev_flow = &wks->flows[idx];
10734 		dv = &dev_flow->dv;
10735 		dh = dev_flow->handle;
10736 		dv_h = &dh->dvh;
10737 		n = dv->actions_n;
10738 		if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
10739 			if (dv->transfer) {
10740 				dv->actions[n++] = priv->sh->esw_drop_action;
10741 			} else {
10742 				MLX5_ASSERT(priv->drop_queue.hrxq);
10743 				dv->actions[n++] =
10744 						priv->drop_queue.hrxq->action;
10745 			}
10746 		} else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
10747 			   !dv_h->rix_sample && !dv_h->rix_dest_array) ||
10748 			    (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS)) {
10749 			struct mlx5_hrxq *hrxq = NULL;
10750 			uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
10751 					(dev, dev_flow, rss_desc, &hrxq);
10752 			if (!hrxq) {
10753 				rte_flow_error_set
10754 					(error, rte_errno,
10755 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10756 					 "cannot get hash queue");
10757 				goto error;
10758 			}
10759 			if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
10760 				dh->rix_hrxq = hrxq_idx;
10761 			dv->actions[n++] = hrxq->action;
10762 		} else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
10763 			if (!priv->sh->default_miss_action) {
10764 				rte_flow_error_set
10765 					(error, rte_errno,
10766 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10767 					 "default miss action cannot be created.");
10768 				goto error;
10769 			}
10770 			dv->actions[n++] = priv->sh->default_miss_action;
10771 		}
10772 		err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
10773 					       (void *)&dv->value, n,
10774 					       dv->actions, &dh->drv_flow);
10775 		if (err) {
10776 			rte_flow_error_set(error, errno,
10777 					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10778 					   NULL,
10779 					   "hardware refuses to create flow");
10780 			goto error;
10781 		}
10782 		if (priv->vmwa_context &&
10783 		    dh->vf_vlan.tag && !dh->vf_vlan.created) {
10784 			/*
10785 			 * The rule contains the VLAN pattern.
10786 			 * For VF we are going to create VLAN
10787 			 * interface to make hypervisor set correct
10788 			 * e-Switch vport context.
10789 			 */
10790 			mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
10791 		}
10792 	}
10793 	return 0;
10794 error:
10795 	err = rte_errno; /* Save rte_errno before cleanup. */
10796 	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
10797 		       handle_idx, dh, next) {
10798 		/* hrxq is union, don't clear it if the flag is not set. */
10799 		if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
10800 			mlx5_hrxq_release(dev, dh->rix_hrxq);
10801 			dh->rix_hrxq = 0;
10802 		}
10803 		if (dh->vf_vlan.tag && dh->vf_vlan.created)
10804 			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
10805 	}
10806 	if (rss_desc->shared_rss)
10807 		wks->flows[wks->flow_idx - 1].handle->rix_srss = 0;
10808 	rte_errno = err; /* Restore rte_errno. */
10809 	return -rte_errno;
10810 }
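/*
 * A minimal sketch (illustrative, not driver code) of the error-unwind
 * pattern flow_dv_apply() uses above: rte_errno is saved before cleanup,
 * because the release helpers may overwrite it, and restored afterwards so
 * the caller still sees the original failure cause.
 */
static inline int
flow_dv_errno_preserving_fail_example(void (*cleanup)(void))
{
	int err = rte_errno; /* Save rte_errno before cleanup. */

	cleanup(); /* Cleanup may clobber rte_errno internally. */
	rte_errno = err; /* Restore the original error code. */
	return -rte_errno;
}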
10811 
10812 void
10813 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
10814 			  struct mlx5_cache_entry *entry)
10815 {
10816 	struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
10817 							  entry);
10818 
10819 	claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
10820 	mlx5_free(cache);
10821 }
10822 
10823 /**
10824  * Release the flow matcher.
10825  *
10826  * @param dev
10827  *   Pointer to Ethernet device.
10828  * @param handle
10829  *   Pointer to mlx5_flow_handle.
10830  *
10831  * @return
10832  *   1 while a reference on it exists, 0 when freed.
10833  */
10834 static int
10835 flow_dv_matcher_release(struct rte_eth_dev *dev,
10836 			struct mlx5_flow_handle *handle)
10837 {
10838 	struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
10839 	struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
10840 							    typeof(*tbl), tbl);
10841 	int ret;
10842 
10843 	MLX5_ASSERT(matcher->matcher_object);
10844 	ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
10845 	flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
10846 	return ret;
10847 }
10848 
10849 /**
10850  * Release encap_decap resource.
10851  *
10852  * @param list
10853  *   Pointer to the hash list.
10854  * @param entry
10855  *   Pointer to exist resource entry object.
10856  */
10857 void
10858 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
10859 			      struct mlx5_hlist_entry *entry)
10860 {
10861 	struct mlx5_dev_ctx_shared *sh = list->ctx;
10862 	struct mlx5_flow_dv_encap_decap_resource *res =
10863 		container_of(entry, typeof(*res), entry);
10864 
10865 	claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
10866 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
10867 }
10868 
10869 /**
10870  * Release an encap/decap resource.
10871  *
10872  * @param dev
10873  *   Pointer to Ethernet device.
10874  * @param encap_decap_idx
10875  *   Index of encap decap resource.
10876  *
10877  * @return
10878  *   1 while a reference on it exists, 0 when freed.
10879  */
10880 static int
10881 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
10882 				     uint32_t encap_decap_idx)
10883 {
10884 	struct mlx5_priv *priv = dev->data->dev_private;
10885 	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
10886 
10887 	cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
10888 					encap_decap_idx);
10889 	if (!cache_resource)
10890 		return 0;
10891 	MLX5_ASSERT(cache_resource->action);
10892 	return mlx5_hlist_unregister(priv->sh->encaps_decaps,
10893 				     &cache_resource->entry);
10894 }
10895 
10896 /**
10897  * Release a jump-to-table action resource.
10898  *
10899  * @param dev
10900  *   Pointer to Ethernet device.
10901  * @param handle
10902  *   Pointer to mlx5_flow_handle.
10903  *
10904  * @return
10905  *   1 while a reference on it exists, 0 when freed.
10906  */
10907 static int
10908 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
10909 				  struct mlx5_flow_handle *handle)
10910 {
10911 	struct mlx5_priv *priv = dev->data->dev_private;
10912 	struct mlx5_flow_tbl_data_entry *tbl_data;
10913 
10914 	tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
10915 			     handle->rix_jump);
10916 	if (!tbl_data)
10917 		return 0;
10918 	return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
10919 }
10920 
10921 void
10922 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
10923 			 struct mlx5_hlist_entry *entry)
10924 {
10925 	struct mlx5_flow_dv_modify_hdr_resource *res =
10926 		container_of(entry, typeof(*res), entry);
10927 
10928 	claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
10929 	mlx5_free(entry);
10930 }
10931 
10932 /**
10933  * Release a modify-header resource.
10934  *
10935  * @param dev
10936  *   Pointer to Ethernet device.
10937  * @param handle
10938  *   Pointer to mlx5_flow_handle.
10939  *
10940  * @return
10941  *   1 while a reference on it exists, 0 when freed.
10942  */
10943 static int
10944 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
10945 				    struct mlx5_flow_handle *handle)
10946 {
10947 	struct mlx5_priv *priv = dev->data->dev_private;
10948 	struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
10949 
10950 	MLX5_ASSERT(entry->action);
10951 	return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
10952 }
10953 
10954 void
10955 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
10956 			  struct mlx5_cache_entry *entry)
10957 {
10958 	struct mlx5_dev_ctx_shared *sh = list->ctx;
10959 	struct mlx5_flow_dv_port_id_action_resource *cache =
10960 			container_of(entry, typeof(*cache), entry);
10961 
10962 	claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
10963 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
10964 }
10965 
10966 /**
10967  * Release port ID action resource.
10968  *
10969  * @param dev
10970  *   Pointer to Ethernet device.
10971  * @param handle
10972  *   Pointer to mlx5_flow_handle.
10973  *
10974  * @return
10975  *   1 while a reference on it exists, 0 when freed.
10976  */
10977 static int
10978 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
10979 					uint32_t port_id)
10980 {
10981 	struct mlx5_priv *priv = dev->data->dev_private;
10982 	struct mlx5_flow_dv_port_id_action_resource *cache;
10983 
10984 	cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
10985 	if (!cache)
10986 		return 0;
10987 	MLX5_ASSERT(cache->action);
10988 	return mlx5_cache_unregister(&priv->sh->port_id_action_list,
10989 				     &cache->entry);
10990 }
10991 
10992 /**
10993  * Release shared RSS action resource.
10994  *
10995  * @param dev
10996  *   Pointer to Ethernet device.
10997  * @param srss
10998  *   Shared RSS action index.
10999  */
11000 static void
11001 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
11002 {
11003 	struct mlx5_priv *priv = dev->data->dev_private;
11004 	struct mlx5_shared_action_rss *shared_rss;
11005 
11006 	shared_rss = mlx5_ipool_get
11007 			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
11008 	__atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
11009 }
11010 
11011 void
11012 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
11013 			    struct mlx5_cache_entry *entry)
11014 {
11015 	struct mlx5_dev_ctx_shared *sh = list->ctx;
11016 	struct mlx5_flow_dv_push_vlan_action_resource *cache =
11017 			container_of(entry, typeof(*cache), entry);
11018 
11019 	claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
11020 	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
11021 }
11022 
11023 /**
11024  * Release the push VLAN action resource.
11025  *
11026  * @param dev
11027  *   Pointer to Ethernet device.
11028  * @param handle
11029  *   Pointer to mlx5_flow_handle.
11030  *
11031  * @return
11032  *   1 while a reference on it exists, 0 when freed.
11033  */
11034 static int
11035 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
11036 					  struct mlx5_flow_handle *handle)
11037 {
11038 	struct mlx5_priv *priv = dev->data->dev_private;
11039 	struct mlx5_flow_dv_push_vlan_action_resource *cache;
11040 	uint32_t idx = handle->dvh.rix_push_vlan;
11041 
11042 	cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
11043 	if (!cache)
11044 		return 0;
11045 	MLX5_ASSERT(cache->action);
11046 	return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
11047 				     &cache->entry);
11048 }
11049 
11050 /**
11051  * Release the fate resource.
11052  *
11053  * @param dev
11054  *   Pointer to Ethernet device.
11055  * @param handle
11056  *   Pointer to mlx5_flow_handle.
11057  */
11058 static void
11059 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
11060 			       struct mlx5_flow_handle *handle)
11061 {
11062 	if (!handle->rix_fate)
11063 		return;
11064 	switch (handle->fate_action) {
11065 	case MLX5_FLOW_FATE_QUEUE:
11066 		mlx5_hrxq_release(dev, handle->rix_hrxq);
11067 		break;
11068 	case MLX5_FLOW_FATE_JUMP:
11069 		flow_dv_jump_tbl_resource_release(dev, handle);
11070 		break;
11071 	case MLX5_FLOW_FATE_PORT_ID:
11072 		flow_dv_port_id_action_resource_release(dev,
11073 				handle->rix_port_id_action);
11074 		break;
11075 	case MLX5_FLOW_FATE_SHARED_RSS:
11076 		flow_dv_shared_rss_action_release(dev, handle->rix_srss);
11077 		break;
11078 	default:
11079 		DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
11080 		break;
11081 	}
11082 	handle->rix_fate = 0;
11083 }
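/*
 * Illustrative note in code form: rix_hrxq, rix_jump, rix_port_id_action
 * and rix_srss all alias the same 32-bit rix_fate union member, which is
 * why the release above dispatches on fate_action before touching it. The
 * hypothetical helper below shows the safe read pattern.
 */
static inline uint32_t
flow_dv_fate_index_example(const struct mlx5_flow_handle *handle,
			   uint32_t expected_fate)
{
	return handle->fate_action == expected_fate ? handle->rix_fate : 0;
}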
11084 
11085 void
11086 flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
11087 			 struct mlx5_cache_entry *entry)
11088 {
11089 	struct mlx5_flow_dv_sample_resource *cache_resource =
11090 			container_of(entry, typeof(*cache_resource), entry);
11091 	struct rte_eth_dev *dev = cache_resource->dev;
11092 	struct mlx5_priv *priv = dev->data->dev_private;
11093 
11094 	if (cache_resource->verbs_action)
11095 		claim_zero(mlx5_glue->destroy_flow_action
11096 				(cache_resource->verbs_action));
11097 	if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
11098 		if (cache_resource->default_miss)
11099 			claim_zero(mlx5_glue->destroy_flow_action
11100 			  (cache_resource->default_miss));
11101 	}
11102 	if (cache_resource->normal_path_tbl)
11103 		flow_dv_tbl_resource_release(MLX5_SH(dev),
11104 			cache_resource->normal_path_tbl);
11105 	flow_dv_sample_sub_actions_release(dev,
11106 				&cache_resource->sample_idx);
11107 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
11108 			cache_resource->idx);
11109 	DRV_LOG(DEBUG, "sample resource %p: removed",
11110 		(void *)cache_resource);
11111 }
11112 
11113 /**
11114  * Release a sample resource.
11115  *
11116  * @param dev
11117  *   Pointer to Ethernet device.
11118  * @param handle
11119  *   Pointer to mlx5_flow_handle.
11120  *
11121  * @return
11122  *   1 while a reference on it exists, 0 when freed.
11123  */
11124 static int
11125 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
11126 				     struct mlx5_flow_handle *handle)
11127 {
11128 	struct mlx5_priv *priv = dev->data->dev_private;
11129 	struct mlx5_flow_dv_sample_resource *cache_resource;
11130 
11131 	cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
11132 			 handle->dvh.rix_sample);
11133 	if (!cache_resource)
11134 		return 0;
11135 	MLX5_ASSERT(cache_resource->verbs_action);
11136 	return mlx5_cache_unregister(&priv->sh->sample_action_list,
11137 				     &cache_resource->entry);
11138 }
11139 
11140 void
11141 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused,
11142 			     struct mlx5_cache_entry *entry)
11143 {
11144 	struct mlx5_flow_dv_dest_array_resource *cache_resource =
11145 			container_of(entry, typeof(*cache_resource), entry);
11146 	struct rte_eth_dev *dev = cache_resource->dev;
11147 	struct mlx5_priv *priv = dev->data->dev_private;
11148 	uint32_t i = 0;
11149 
11150 	MLX5_ASSERT(cache_resource->action);
11151 	if (cache_resource->action)
11152 		claim_zero(mlx5_glue->destroy_flow_action
11153 					(cache_resource->action));
11154 	for (; i < cache_resource->num_of_dest; i++)
11155 		flow_dv_sample_sub_actions_release(dev,
11156 				&cache_resource->sample_idx[i]);
11157 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11158 			cache_resource->idx);
11159 	DRV_LOG(DEBUG, "destination array resource %p: removed",
11160 		(void *)cache_resource);
11161 }
11162 
11163 /**
11164  * Release a destination array resource.
11165  *
11166  * @param dev
11167  *   Pointer to Ethernet device.
11168  * @param handle
11169  *   Pointer to mlx5_flow_handle.
11170  *
11171  * @return
11172  *   1 while a reference on it exists, 0 when freed.
11173  */
11174 static int
11175 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
11176 				    struct mlx5_flow_handle *handle)
11177 {
11178 	struct mlx5_priv *priv = dev->data->dev_private;
11179 	struct mlx5_flow_dv_dest_array_resource *cache;
11180 
11181 	cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11182 			       handle->dvh.rix_dest_array);
11183 	if (!cache)
11184 		return 0;
11185 	MLX5_ASSERT(cache->action);
11186 	return mlx5_cache_unregister(&priv->sh->dest_array_list,
11187 				     &cache->entry);
11188 }
11189 
11190 /**
11191  * Remove the flow from the NIC but keep it in memory.
11192  * Lock free, (mutex should be acquired by caller).
11193  *
11194  * @param[in] dev
11195  *   Pointer to Ethernet device.
11196  * @param[in, out] flow
11197  *   Pointer to flow structure.
11198  */
11199 static void
11200 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
11201 {
11202 	struct mlx5_flow_handle *dh;
11203 	uint32_t handle_idx;
11204 	struct mlx5_priv *priv = dev->data->dev_private;
11205 
11206 	if (!flow)
11207 		return;
11208 	handle_idx = flow->dev_handles;
11209 	while (handle_idx) {
11210 		dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
11211 				    handle_idx);
11212 		if (!dh)
11213 			return;
11214 		if (dh->drv_flow) {
11215 			claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
11216 			dh->drv_flow = NULL;
11217 		}
11218 		if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
11219 			flow_dv_fate_resource_release(dev, dh);
11220 		if (dh->vf_vlan.tag && dh->vf_vlan.created)
11221 			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
11222 		handle_idx = dh->next.next;
11223 	}
11224 }
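/*
 * Illustrative sketch of the index-list walk used by flow_dv_remove()
 * above: device flow handles live in an indexed pool and are chained by
 * 32-bit indices instead of pointers, so traversal resolves every index
 * through mlx5_ipool_get() until it reaches 0, the list terminator. The
 * counting helper is hypothetical.
 */
static inline uint32_t
flow_dv_count_handles_example(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *dh;
	uint32_t handle_idx = flow->dev_handles;
	uint32_t n = 0;

	while (handle_idx) {
		dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
				    handle_idx);
		if (!dh)
			break;
		n++;
		handle_idx = dh->next.next;
	}
	return n;
}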
11225 
11226 /**
11227  * Remove the flow from the NIC and the memory.
11228  * Lock free, (mutex should be acquired by caller).
11229  *
11230  * @param[in] dev
11231  *   Pointer to the Ethernet device structure.
11232  * @param[in, out] flow
11233  *   Pointer to flow structure.
11234  */
11235 static void
11236 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
11237 {
11238 	struct mlx5_flow_handle *dev_handle;
11239 	struct mlx5_priv *priv = dev->data->dev_private;
11240 
11241 	if (!flow)
11242 		return;
11243 	flow_dv_remove(dev, flow);
11244 	if (flow->counter) {
11245 		flow_dv_counter_free(dev, flow->counter);
11246 		flow->counter = 0;
11247 	}
11248 	if (flow->meter) {
11249 		struct mlx5_flow_meter *fm;
11250 
11251 		fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
11252 				    flow->meter);
11253 		if (fm)
11254 			mlx5_flow_meter_detach(fm);
11255 		flow->meter = 0;
11256 	}
11257 	if (flow->age)
11258 		flow_dv_aso_age_release(dev, flow->age);
11259 	while (flow->dev_handles) {
11260 		uint32_t tmp_idx = flow->dev_handles;
11261 
11262 		dev_handle = mlx5_ipool_get(priv->sh->ipool
11263 					    [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
11264 		if (!dev_handle)
11265 			return;
11266 		flow->dev_handles = dev_handle->next.next;
11267 		if (dev_handle->dvh.matcher)
11268 			flow_dv_matcher_release(dev, dev_handle);
11269 		if (dev_handle->dvh.rix_sample)
11270 			flow_dv_sample_resource_release(dev, dev_handle);
11271 		if (dev_handle->dvh.rix_dest_array)
11272 			flow_dv_dest_array_resource_release(dev, dev_handle);
11273 		if (dev_handle->dvh.rix_encap_decap)
11274 			flow_dv_encap_decap_resource_release(dev,
11275 				dev_handle->dvh.rix_encap_decap);
11276 		if (dev_handle->dvh.modify_hdr)
11277 			flow_dv_modify_hdr_resource_release(dev, dev_handle);
11278 		if (dev_handle->dvh.rix_push_vlan)
11279 			flow_dv_push_vlan_action_resource_release(dev,
11280 								  dev_handle);
11281 		if (dev_handle->dvh.rix_tag)
11282 			flow_dv_tag_release(dev,
11283 					    dev_handle->dvh.rix_tag);
11284 		flow_dv_fate_resource_release(dev, dev_handle);
11285 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
11286 			   tmp_idx);
11287 	}
11288 }
11289 
11290 /**
11291  * Release array of hash RX queue objects.
11292  * Helper function.
11293  *
11294  * @param[in] dev
11295  *   Pointer to the Ethernet device structure.
11296  * @param[in, out] hrxqs
11297  *   Array of hash RX queue objects.
11298  *
11299  * @return
11300  *   Total number of references to hash RX queue objects in *hrxqs* array
11301  *   after this operation.
11302  */
11303 static int
11304 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
11305 			uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
11306 {
11307 	size_t i;
11308 	int remaining = 0;
11309 
11310 	for (i = 0; i < RTE_DIM(*hrxqs); i++) {
11311 		int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
11312 
11313 		if (!ret)
11314 			(*hrxqs)[i] = 0;
11315 		remaining += ret;
11316 	}
11317 	return remaining;
11318 }
11319 
11320 /**
11321  * Release all hash RX queue objects representing shared RSS action.
11322  *
11323  * @param[in] dev
11324  *   Pointer to the Ethernet device structure.
11325  * @param[in, out] action
11326  *   Shared RSS action to remove hash RX queue objects from.
11327  *
11328  * @return
11329  *   Total number of references to hash RX queue objects stored in *action*
11330  *   after this operation.
11331  *   Expected to be 0 if no external references held.
11332  */
11333 static int
11334 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
11335 				 struct mlx5_shared_action_rss *action)
11336 {
11337 	return __flow_dv_hrxqs_release(dev, &action->hrxq) +
11338 		__flow_dv_hrxqs_release(dev, &action->hrxq_tunnel);
11339 }
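/*
 * Usage sketch mirroring __flow_dv_action_rss_release() below: tearing a
 * shared RSS action down first drops all hash RX queue references, and any
 * nonzero remainder means the action is still in use. The wrapper name is
 * hypothetical.
 */
static inline int
__flow_dv_action_rss_hrxqs_busy_example(struct rte_eth_dev *dev,
					struct mlx5_shared_action_rss *action)
{
	return __flow_dv_action_rss_hrxqs_release(dev, action) ? -EBUSY : 0;
}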
11340 
11341 /**
11342  * Setup shared RSS action.
11343  * Prepare set of hash RX queue objects sufficient to handle all valid
11344  * hash_fields combinations (see enum ibv_rx_hash_fields).
11345  *
11346  * @param[in] dev
11347  *   Pointer to the Ethernet device structure.
11348  * @param[in] action_idx
11349  *   Shared RSS action ipool index.
11350  * @param[in, out] action
11351  *   Partially initialized shared RSS action.
11352  * @param[out] error
11353  *   Perform verbose error reporting if not NULL. Initialized in case of
11354  *   error only.
11355  *
11356  * @return
11357  *   0 on success, otherwise negative errno value.
11358  */
11359 static int
11360 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
11361 			   uint32_t action_idx,
11362 			   struct mlx5_shared_action_rss *action,
11363 			   struct rte_flow_error *error)
11364 {
11365 	struct mlx5_flow_rss_desc rss_desc = { 0 };
11366 	size_t i;
11367 	int err;
11368 
11369 	if (mlx5_ind_table_obj_setup(dev, action->ind_tbl)) {
11370 		return rte_flow_error_set(error, rte_errno,
11371 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11372 					  "cannot setup indirection table");
11373 	}
11374 	memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN);
11375 	rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
11376 	rss_desc.const_q = action->origin.queue;
11377 	rss_desc.queue_num = action->origin.queue_num;
11378 	/* Set non-zero value to indicate a shared RSS. */
11379 	rss_desc.shared_rss = action_idx;
11380 	rss_desc.ind_tbl = action->ind_tbl;
11381 	for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
11382 		uint32_t hrxq_idx;
11383 		uint64_t hash_fields = mlx5_rss_hash_fields[i];
11384 		int tunnel;
11385 
11386 		for (tunnel = 0; tunnel < 2; tunnel++) {
11387 			rss_desc.tunnel = tunnel;
11388 			rss_desc.hash_fields = hash_fields;
11389 			hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
11390 			if (!hrxq_idx) {
11391 				rte_flow_error_set
11392 					(error, rte_errno,
11393 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11394 					 "cannot get hash queue");
11395 				goto error_hrxq_new;
11396 			}
11397 			err = __flow_dv_action_rss_hrxq_set
11398 				(action, hash_fields, tunnel, hrxq_idx);
11399 			MLX5_ASSERT(!err);
11400 		}
11401 	}
11402 	return 0;
11403 error_hrxq_new:
11404 	err = rte_errno;
11405 	__flow_dv_action_rss_hrxqs_release(dev, action);
11406 	if (!mlx5_ind_table_obj_release(dev, action->ind_tbl, true))
11407 		action->ind_tbl = NULL;
11408 	rte_errno = err;
11409 	return -rte_errno;
11410 }
11411 
11412 /**
11413  * Create shared RSS action.
11414  *
11415  * @param[in] dev
11416  *   Pointer to the Ethernet device structure.
11417  * @param[in] conf
11418  *   Shared action configuration.
11419  * @param[in] rss
11420  *   RSS action specification used to create shared action.
11421  * @param[out] error
11422  *   Perform verbose error reporting if not NULL. Initialized in case of
11423  *   error only.
11424  *
11425  * @return
11426  *   A valid shared action ID in case of success, 0 otherwise and
11427  *   rte_errno is set.
11428  */
11429 static uint32_t
11430 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
11431 			    const struct rte_flow_shared_action_conf *conf,
11432 			    const struct rte_flow_action_rss *rss,
11433 			    struct rte_flow_error *error)
11434 {
11435 	struct mlx5_priv *priv = dev->data->dev_private;
11436 	struct mlx5_shared_action_rss *shared_action = NULL;
11437 	void *queue = NULL;
11438 	struct rte_flow_action_rss *origin;
11439 	const uint8_t *rss_key;
11440 	uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
11441 	uint32_t idx;
11442 
11443 	RTE_SET_USED(conf);
11444 	queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
11445 			    0, SOCKET_ID_ANY);
11446 	shared_action = mlx5_ipool_zmalloc
11447 			 (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
11448 	if (!shared_action || !queue) {
11449 		rte_flow_error_set(error, ENOMEM,
11450 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11451 				   "cannot allocate resource memory");
11452 		goto error_rss_init;
11453 	}
11454 	if (idx >= (1u << MLX5_SHARED_ACTION_TYPE_OFFSET)) {
11455 		rte_flow_error_set(error, E2BIG,
11456 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11457 				   "rss action number out of range");
11458 		goto error_rss_init;
11459 	}
11460 	shared_action->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
11461 					     sizeof(*shared_action->ind_tbl),
11462 					     0, SOCKET_ID_ANY);
11463 	if (!shared_action->ind_tbl) {
11464 		rte_flow_error_set(error, ENOMEM,
11465 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11466 				   "cannot allocate resource memory");
11467 		goto error_rss_init;
11468 	}
11469 	memcpy(queue, rss->queue, queue_size);
11470 	shared_action->ind_tbl->queues = queue;
11471 	shared_action->ind_tbl->queues_n = rss->queue_num;
11472 	origin = &shared_action->origin;
11473 	origin->func = rss->func;
11474 	origin->level = rss->level;
11475 	/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
11476 	origin->types = !rss->types ? ETH_RSS_IP : rss->types;
11477 	/* NULL RSS key indicates default RSS key. */
11478 	rss_key = !rss->key ? rss_hash_default_key : rss->key;
11479 	memcpy(shared_action->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11480 	origin->key = &shared_action->key[0];
11481 	origin->key_len = MLX5_RSS_HASH_KEY_LEN;
11482 	origin->queue = queue;
11483 	origin->queue_num = rss->queue_num;
11484 	if (__flow_dv_action_rss_setup(dev, idx, shared_action, error))
11485 		goto error_rss_init;
11486 	rte_spinlock_init(&shared_action->action_rss_sl);
11487 	__atomic_add_fetch(&shared_action->refcnt, 1, __ATOMIC_RELAXED);
11488 	rte_spinlock_lock(&priv->shared_act_sl);
11489 	ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
11490 		     &priv->rss_shared_actions, idx, shared_action, next);
11491 	rte_spinlock_unlock(&priv->shared_act_sl);
11492 	return idx;
11493 error_rss_init:
11494 	if (shared_action) {
11495 		if (shared_action->ind_tbl)
11496 			mlx5_free(shared_action->ind_tbl);
11497 		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
11498 				idx);
11499 	}
11500 	if (queue)
11501 		mlx5_free(queue);
11502 	return 0;
11503 }
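/*
 * Hypothetical usage sketch through the public API: creating a shared RSS
 * action lands in __flow_dv_action_rss_create() above via
 * flow_dv_action_create(). Queue list and RSS types here are arbitrary
 * example values, not defaults mandated by the driver.
 */
static inline struct rte_flow_shared_action *
mlx5_shared_rss_create_example(uint16_t port_id, struct rte_flow_error *error)
{
	static const uint16_t queues[] = { 0, 1, 2, 3 };
	const struct rte_flow_action_rss rss = {
		.types = ETH_RSS_IP,
		.key = NULL, /* NULL key selects the default RSS key. */
		.key_len = 0,
		.queue = queues,
		.queue_num = RTE_DIM(queues),
	};
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = &rss,
	};
	const struct rte_flow_shared_action_conf conf = {
		.ingress = 1,
	};

	return rte_flow_shared_action_create(port_id, &conf, &action, error);
}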
11504 
11505 /**
11506  * Destroy the shared RSS action.
11507  * Release related hash RX queue objects.
11508  *
11509  * @param[in] dev
11510  *   Pointer to the Ethernet device structure.
11511  * @param[in] idx
11512  *   The shared RSS action object ID to be removed.
11513  * @param[out] error
11514  *   Perform verbose error reporting if not NULL. Initialized in case of
11515  *   error only.
11516  *
11517  * @return
11518  *   0 on success, otherwise negative errno value.
11519  */
11520 static int
11521 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
11522 			     struct rte_flow_error *error)
11523 {
11524 	struct mlx5_priv *priv = dev->data->dev_private;
11525 	struct mlx5_shared_action_rss *shared_rss =
11526 	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
11527 	uint32_t old_refcnt = 1;
11528 	int remaining;
11529 	uint16_t *queue = NULL;
11530 
11531 	if (!shared_rss)
11532 		return rte_flow_error_set(error, EINVAL,
11533 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11534 					  "invalid shared action");
11535 	remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
11536 	if (remaining)
11537 		return rte_flow_error_set(error, EBUSY,
11538 					  RTE_FLOW_ERROR_TYPE_ACTION,
11539 					  NULL,
11540 					  "shared rss hrxq has references");
11541 	queue = shared_rss->ind_tbl->queues;
11542 	remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
11543 	if (remaining)
11544 		return rte_flow_error_set(error, EBUSY,
11545 					  RTE_FLOW_ERROR_TYPE_ACTION,
11546 					  NULL,
11547 					  "shared rss indirection table has"
11548 					  " references");
11549 	if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
11550 					 0, 0, __ATOMIC_ACQUIRE,
11551 					 __ATOMIC_RELAXED))
11552 		return rte_flow_error_set(error, EBUSY,
11553 					  RTE_FLOW_ERROR_TYPE_ACTION,
11554 					  NULL,
11555 					  "shared rss has references");
11556 	mlx5_free(queue);
11557 	rte_spinlock_lock(&priv->shared_act_sl);
11558 	ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
11559 		     &priv->rss_shared_actions, idx, shared_rss, next);
11560 	rte_spinlock_unlock(&priv->shared_act_sl);
11561 	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
11562 			idx);
11563 	return 0;
11564 }
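/*
 * A minimal sketch (illustrative) of the destroy-time refcount check used
 * above: the compare-and-swap succeeds only when exactly one reference is
 * left and atomically claims it; any concurrent holder makes the release
 * fail with EBUSY.
 */
static inline int
mlx5_refcnt_try_claim_last_example(uint32_t *refcnt)
{
	uint32_t expected = 1;

	return __atomic_compare_exchange_n(refcnt, &expected, 0, 0,
					   __ATOMIC_ACQUIRE,
					   __ATOMIC_RELAXED) ? 0 : -EBUSY;
}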
11565 
11566 /**
11567  * Create shared action, lock free,
11568  * (mutex should be acquired by caller).
11569  * Dispatcher for the action-type-specific call.
11570  *
11571  * @param[in] dev
11572  *   Pointer to the Ethernet device structure.
11573  * @param[in] conf
11574  *   Shared action configuration.
11575  * @param[in] action
11576  *   Action specification used to create shared action.
11577  * @param[out] error
11578  *   Perform verbose error reporting if not NULL. Initialized in case of
11579  *   error only.
11580  *
11581  * @return
11582  *   A valid shared action handle in case of success, NULL otherwise and
11583  *   rte_errno is set.
11584  */
11585 static struct rte_flow_shared_action *
11586 flow_dv_action_create(struct rte_eth_dev *dev,
11587 		      const struct rte_flow_shared_action_conf *conf,
11588 		      const struct rte_flow_action *action,
11589 		      struct rte_flow_error *err)
11590 {
11591 	uint32_t idx = 0;
11592 	uint32_t ret = 0;
11593 
11594 	switch (action->type) {
11595 	case RTE_FLOW_ACTION_TYPE_RSS:
11596 		ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
11597 		idx = (MLX5_SHARED_ACTION_TYPE_RSS <<
11598 		       MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
11599 		break;
11600 	case RTE_FLOW_ACTION_TYPE_AGE:
11601 		ret = flow_dv_translate_create_aso_age(dev, action->conf, err);
11602 		idx = (MLX5_SHARED_ACTION_TYPE_AGE <<
11603 		       MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
11604 		if (ret) {
11605 			struct mlx5_aso_age_action *aso_age =
11606 					      flow_aso_age_get_by_idx(dev, ret);
11607 
11608 			if (!aso_age->age_params.context)
11609 				aso_age->age_params.context =
11610 							 (void *)(uintptr_t)idx;
11611 		}
11612 		break;
11613 	default:
11614 		rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
11615 				   NULL, "action type not supported");
11616 		break;
11617 	}
11618 	return ret ? (struct rte_flow_shared_action *)(uintptr_t)idx : NULL;
11619 }
11620 
11621 /**
11622  * Destroy the shared action.
11623  * Release action related resources on the NIC and the memory.
11624  * Lock free, (mutex should be acquired by caller).
11625  * Dispatcher for the action-type-specific call.
11626  *
11627  * @param[in] dev
11628  *   Pointer to the Ethernet device structure.
11629  * @param[in] action
11630  *   The shared action object to be removed.
11631  * @param[out] error
11632  *   Perform verbose error reporting if not NULL. Initialized in case of
11633  *   error only.
11634  *
11635  * @return
11636  *   0 on success, otherwise negative errno value.
11637  */
11638 static int
11639 flow_dv_action_destroy(struct rte_eth_dev *dev,
11640 		       struct rte_flow_shared_action *action,
11641 		       struct rte_flow_error *error)
11642 {
11643 	uint32_t act_idx = (uint32_t)(uintptr_t)action;
11644 	uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
11645 	uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
11646 	int ret;
11647 
11648 	switch (type) {
11649 	case MLX5_SHARED_ACTION_TYPE_RSS:
11650 		return __flow_dv_action_rss_release(dev, idx, error);
11651 	case MLX5_SHARED_ACTION_TYPE_AGE:
11652 		ret = flow_dv_aso_age_release(dev, idx);
11653 		if (ret)
11654 			/*
11655 			 * In this case, the last flow holding a reference will
11656 			 * actually release the age action.
11657 			 */
11658 			DRV_LOG(DEBUG, "Shared age action %" PRIu32 " was"
11659 				" released with references %d.", idx, ret);
11660 		return 0;
11661 	default:
11662 		return rte_flow_error_set(error, ENOTSUP,
11663 					  RTE_FLOW_ERROR_TYPE_ACTION,
11664 					  NULL,
11665 					  "action type not supported");
11666 	}
11667 }
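/*
 * Illustrative sketch, not part of the driver: the opaque shared action
 * handle packs the action type into the upper bits and the ipool index
 * into the lower ones. These hypothetical helpers spell out the
 * encode/decode done inline by flow_dv_action_create() and
 * flow_dv_action_destroy() above.
 */
static inline uint32_t
mlx5_shared_action_pack_example(uint32_t type, uint32_t idx)
{
	return (type << MLX5_SHARED_ACTION_TYPE_OFFSET) | idx;
}

static inline void
mlx5_shared_action_unpack_example(uint32_t act_idx,
				  uint32_t *type, uint32_t *idx)
{
	*type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
	*idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
}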
11668 
11669 /**
11670  * Update the shared RSS action configuration in place.
11671  *
11672  * @param[in] dev
11673  *   Pointer to the Ethernet device structure.
11674  * @param[in] idx
11675  *   The shared RSS action object ID to be updated.
11676  * @param[in] action_conf
11677  *   RSS action specification used to modify *shared_rss*.
11678  * @param[out] error
11679  *   Perform verbose error reporting if not NULL. Initialized in case of
11680  *   error only.
11681  *
11682  * @return
11683  *   0 on success, otherwise negative errno value.
11684  * @note: currently only support update of RSS queues.
11685  */
11686 static int
11687 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
11688 			    const struct rte_flow_action_rss *action_conf,
11689 			    struct rte_flow_error *error)
11690 {
11691 	struct mlx5_priv *priv = dev->data->dev_private;
11692 	struct mlx5_shared_action_rss *shared_rss =
11693 	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
11694 	int ret = 0;
11695 	void *queue = NULL;
11696 	uint16_t *queue_old = NULL;
11697 	uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
11698 
11699 	if (!shared_rss)
11700 		return rte_flow_error_set(error, EINVAL,
11701 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11702 					  "invalid shared action to update");
11703 	queue = mlx5_malloc(MLX5_MEM_ZERO,
11704 			    RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
11705 			    0, SOCKET_ID_ANY);
11706 	if (!queue)
11707 		return rte_flow_error_set(error, ENOMEM,
11708 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11709 					  NULL,
11710 					  "cannot allocate resource memory");
11711 	memcpy(queue, action_conf->queue, queue_size);
11712 	MLX5_ASSERT(shared_rss->ind_tbl);
11713 	rte_spinlock_lock(&shared_rss->action_rss_sl);
11714 	queue_old = shared_rss->ind_tbl->queues;
11715 	ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
11716 					queue, action_conf->queue_num, true);
11717 	if (ret) {
11718 		mlx5_free(queue);
11719 		ret = rte_flow_error_set(error, rte_errno,
11720 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11721 					  "cannot update indirection table");
11722 	} else {
11723 		mlx5_free(queue_old);
11724 		shared_rss->origin.queue = queue;
11725 		shared_rss->origin.queue_num = action_conf->queue_num;
11726 	}
11727 	rte_spinlock_unlock(&shared_rss->action_rss_sl);
11728 	return ret;
11729 }
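/*
 * Hypothetical usage sketch: redirecting an existing shared RSS action to
 * a new queue set through the public API ends up in
 * __flow_dv_action_rss_update() above; per the note there, only the queue
 * list may change. Queue values are arbitrary examples.
 */
static inline int
mlx5_shared_rss_requeue_example(uint16_t port_id,
				struct rte_flow_shared_action *handle,
				struct rte_flow_error *error)
{
	static const uint16_t queues[] = { 4, 5 };
	const struct rte_flow_action_rss rss = {
		.queue = queues,
		.queue_num = RTE_DIM(queues),
	};
	const struct rte_flow_action update = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = &rss,
	};

	return rte_flow_shared_action_update(port_id, handle, &update, error);
}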
11730 
11731 /**
11732  * Update the shared action configuration in place, lock free,
11733  * (mutex should be acquired by caller).
11734  *
11735  * @param[in] dev
11736  *   Pointer to the Ethernet device structure.
11737  * @param[in] action
11738  *   The shared action object to be updated.
11739  * @param[in] action_conf
11740  *   Action specification used to modify *action*.
11741  *   *action_conf* must be of a type matching the type of *action*;
11742  *   otherwise it is considered invalid.
11743  * @param[out] error
11744  *   Perform verbose error reporting if not NULL. Initialized in case of
11745  *   error only.
11746  *
11747  * @return
11748  *   0 on success, otherwise negative errno value.
11749  */
11750 static int
11751 flow_dv_action_update(struct rte_eth_dev *dev,
11752 			struct rte_flow_shared_action *action,
11753 			const void *action_conf,
11754 			struct rte_flow_error *err)
11755 {
11756 	uint32_t act_idx = (uint32_t)(uintptr_t)action;
11757 	uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
11758 	uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
11759 
11760 	switch (type) {
11761 	case MLX5_SHARED_ACTION_TYPE_RSS:
11762 		return __flow_dv_action_rss_update(dev, idx, action_conf, err);
11763 	default:
11764 		return rte_flow_error_set(err, ENOTSUP,
11765 					  RTE_FLOW_ERROR_TYPE_ACTION,
11766 					  NULL,
11767 					  "action type update not supported");
11768 	}
11769 }
11770 
11771 static int
11772 flow_dv_action_query(struct rte_eth_dev *dev,
11773 		     const struct rte_flow_shared_action *action, void *data,
11774 		     struct rte_flow_error *error)
11775 {
11776 	struct mlx5_age_param *age_param;
11777 	struct rte_flow_query_age *resp;
11778 	uint32_t act_idx = (uint32_t)(uintptr_t)action;
11779 	uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
11780 	uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
11781 
11782 	switch (type) {
11783 	case MLX5_SHARED_ACTION_TYPE_AGE:
11784 		age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
11785 		resp = data;
11786 		resp->aged = __atomic_load_n(&age_param->state,
11787 					      __ATOMIC_RELAXED) == AGE_TMOUT ?
11788 									  1 : 0;
11789 		resp->sec_since_last_hit_valid = !resp->aged;
11790 		if (resp->sec_since_last_hit_valid)
11791 			resp->sec_since_last_hit = __atomic_load_n
11792 			     (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
11793 		return 0;
11794 	default:
11795 		return rte_flow_error_set(error, ENOTSUP,
11796 					  RTE_FLOW_ERROR_TYPE_ACTION,
11797 					  NULL,
11798 					  "action type query not supported");
11799 	}
11800 }
11801 
11802 /**
11803  * Query a DV flow rule for its statistics via DevX.
11804  *
11805  * @param[in] dev
11806  *   Pointer to Ethernet device.
11807  * @param[in] flow
11808  *   Pointer to the sub flow.
11809  * @param[out] data
11810  *   Data retrieved by the query.
11811  * @param[out] error
11812  *   Perform verbose error reporting if not NULL.
11813  *
11814  * @return
11815  *   0 on success, a negative errno value otherwise and rte_errno is set.
11816  */
11817 static int
11818 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
11819 		    void *data, struct rte_flow_error *error)
11820 {
11821 	struct mlx5_priv *priv = dev->data->dev_private;
11822 	struct rte_flow_query_count *qc = data;
11823 
11824 	if (!priv->config.devx)
11825 		return rte_flow_error_set(error, ENOTSUP,
11826 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11827 					  NULL,
11828 					  "counters are not supported");
11829 	if (flow->counter) {
11830 		uint64_t pkts, bytes;
11831 		struct mlx5_flow_counter *cnt;
11832 
11833 		cnt = flow_dv_counter_get_by_idx(dev, flow->counter,
11834 						 NULL);
11835 		int err = _flow_dv_query_count(dev, flow->counter, &pkts,
11836 					       &bytes);
11837 
11838 		if (err)
11839 			return rte_flow_error_set(error, -err,
11840 					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11841 					NULL, "cannot read counters");
11842 		qc->hits_set = 1;
11843 		qc->bytes_set = 1;
11844 		qc->hits = pkts - cnt->hits;
11845 		qc->bytes = bytes - cnt->bytes;
11846 		if (qc->reset) {
11847 			cnt->hits = pkts;
11848 			cnt->bytes = bytes;
11849 		}
11850 		return 0;
11851 	}
11852 	return rte_flow_error_set(error, EINVAL,
11853 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11854 				  NULL,
11855 				  "counters are not available");
11856 }
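/*
 * A usage sketch (hypothetical, public rte_flow API only): reading a flow
 * counter through rte_flow_query() returns hits/bytes accumulated since
 * the last query with .reset set, because flow_dv_query_count() above
 * subtracts the saved baseline and optionally re-arms it.
 */
static inline int
mlx5_flow_read_counter_example(uint16_t port_id, struct rte_flow *flow,
			       struct rte_flow_query_count *qc,
			       struct rte_flow_error *error)
{
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	qc->reset = 1; /* Re-arm the baseline after reading. */
	return rte_flow_query(port_id, flow, actions, qc, error);
}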
11857 
11858 /**
11859  * Query a flow rule AGE action for aging information.
11860  *
11861  * @param[in] dev
11862  *   Pointer to Ethernet device.
11863  * @param[in] flow
11864  *   Pointer to the sub flow.
11865  * @param[out] data
11866  *   Data retrieved by the query.
11867  * @param[out] error
11868  *   Perform verbose error reporting if not NULL.
11869  *
11870  * @return
11871  *   0 on success, a negative errno value otherwise and rte_errno is set.
11872  */
11873 static int
11874 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
11875 		  void *data, struct rte_flow_error *error)
11876 {
11877 	struct rte_flow_query_age *resp = data;
11878 	struct mlx5_age_param *age_param;
11879 
11880 	if (flow->age) {
11881 		struct mlx5_aso_age_action *act =
11882 				     flow_aso_age_get_by_idx(dev, flow->age);
11883 
11884 		age_param = &act->age_params;
11885 	} else if (flow->counter) {
11886 		age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
11887 
11888 		if (!age_param || !age_param->timeout)
11889 			return rte_flow_error_set
11890 					(error, EINVAL,
11891 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11892 					 NULL, "cannot read age data");
11893 	} else {
11894 		return rte_flow_error_set(error, EINVAL,
11895 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11896 					  NULL, "age data not available");
11897 	}
11898 	resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
11899 				     AGE_TMOUT ? 1 : 0;
11900 	resp->sec_since_last_hit_valid = !resp->aged;
11901 	if (resp->sec_since_last_hit_valid)
11902 		resp->sec_since_last_hit = __atomic_load_n
11903 			     (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
11904 	return 0;
11905 }
11906 
11907 /**
11908  * Query a flow.
11909  *
11910  * @see rte_flow_query()
11911  * @see rte_flow_ops
11912  */
11913 static int
11914 flow_dv_query(struct rte_eth_dev *dev,
11915 	      struct rte_flow *flow,
11916 	      const struct rte_flow_action *actions,
11917 	      void *data,
11918 	      struct rte_flow_error *error)
11919 {
11920 	int ret = -EINVAL;
11921 
11922 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
11923 		switch (actions->type) {
11924 		case RTE_FLOW_ACTION_TYPE_VOID:
11925 			break;
11926 		case RTE_FLOW_ACTION_TYPE_COUNT:
11927 			ret = flow_dv_query_count(dev, flow, data, error);
11928 			break;
11929 		case RTE_FLOW_ACTION_TYPE_AGE:
11930 			ret = flow_dv_query_age(dev, flow, data, error);
11931 			break;
11932 		default:
11933 			return rte_flow_error_set(error, ENOTSUP,
11934 						  RTE_FLOW_ERROR_TYPE_ACTION,
11935 						  actions,
11936 						  "action not supported");
11937 		}
11938 	}
11939 	return ret;
11940 }
11941 
11942 /**
11943  * Destroy the meter table set.
11944  * Lock free, (mutex should be acquired by caller).
11945  *
11946  * @param[in] dev
11947  *   Pointer to Ethernet device.
11948  * @param[in] tbl
11949  *   Pointer to the meter table set.
11950  *
11951  * @return
11952  *   Always 0.
11953  */
11954 static int
11955 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
11956 			struct mlx5_meter_domains_infos *tbl)
11957 {
11958 	struct mlx5_priv *priv = dev->data->dev_private;
11959 	struct mlx5_meter_domains_infos *mtd =
11960 				(struct mlx5_meter_domains_infos *)tbl;
11961 
11962 	if (!mtd || !priv->config.dv_flow_en)
11963 		return 0;
11964 	if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
11965 		claim_zero(mlx5_flow_os_destroy_flow
11966 			   (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
11967 	if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
11968 		claim_zero(mlx5_flow_os_destroy_flow
11969 			   (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
11970 	if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
11971 		claim_zero(mlx5_flow_os_destroy_flow
11972 			   (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
11973 	if (mtd->egress.color_matcher)
11974 		claim_zero(mlx5_flow_os_destroy_flow_matcher
11975 			   (mtd->egress.color_matcher));
11976 	if (mtd->egress.any_matcher)
11977 		claim_zero(mlx5_flow_os_destroy_flow_matcher
11978 			   (mtd->egress.any_matcher));
11979 	if (mtd->egress.tbl)
11980 		flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.tbl);
11981 	if (mtd->egress.sfx_tbl)
11982 		flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.sfx_tbl);
11983 	if (mtd->ingress.color_matcher)
11984 		claim_zero(mlx5_flow_os_destroy_flow_matcher
11985 			   (mtd->ingress.color_matcher));
11986 	if (mtd->ingress.any_matcher)
11987 		claim_zero(mlx5_flow_os_destroy_flow_matcher
11988 			   (mtd->ingress.any_matcher));
11989 	if (mtd->ingress.tbl)
11990 		flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->ingress.tbl);
11991 	if (mtd->ingress.sfx_tbl)
11992 		flow_dv_tbl_resource_release(MLX5_SH(dev),
11993 					     mtd->ingress.sfx_tbl);
11994 	if (mtd->transfer.color_matcher)
11995 		claim_zero(mlx5_flow_os_destroy_flow_matcher
11996 			   (mtd->transfer.color_matcher));
11997 	if (mtd->transfer.any_matcher)
11998 		claim_zero(mlx5_flow_os_destroy_flow_matcher
11999 			   (mtd->transfer.any_matcher));
12000 	if (mtd->transfer.tbl)
12001 		flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->transfer.tbl);
12002 	if (mtd->transfer.sfx_tbl)
12003 		flow_dv_tbl_resource_release(MLX5_SH(dev),
12004 					     mtd->transfer.sfx_tbl);
12005 	if (mtd->drop_actn)
12006 		claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
12007 	mlx5_free(mtd);
12008 	return 0;
12009 }
12010 
12011 /* Number of meter flow actions, count and jump or count and drop. */
12012 #define METER_ACTIONS 2
12013 
12014 /**
12015  * Create the specified domain meter table and suffix table.
12016  *
12017  * @param[in] dev
12018  *   Pointer to Ethernet device.
12019  * @param[in,out] mtb
12020  *   Pointer to DV meter table set.
12021  * @param[in] egress
12022  *   Nonzero to create the egress domain table.
12023  * @param[in] transfer
12024  *   Nonzero to create the transfer (FDB) domain table.
12025  * @param[in] color_reg_c_idx
12026  *   Reg C index for color match.
12027  *
12028  * @return
12029  *   0 on success, -1 otherwise and rte_errno is set.
12030  */
12031 static int
12032 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
12033 			   struct mlx5_meter_domains_infos *mtb,
12034 			   uint8_t egress, uint8_t transfer,
12035 			   uint32_t color_reg_c_idx)
12036 {
12037 	struct mlx5_priv *priv = dev->data->dev_private;
12038 	struct mlx5_dev_ctx_shared *sh = priv->sh;
12039 	struct mlx5_flow_dv_match_params mask = {
12040 		.size = sizeof(mask.buf),
12041 	};
12042 	struct mlx5_flow_dv_match_params value = {
12043 		.size = sizeof(value.buf),
12044 	};
12045 	struct mlx5dv_flow_matcher_attr dv_attr = {
12046 		.type = IBV_FLOW_ATTR_NORMAL,
12047 		.priority = 0,
12048 		.match_criteria_enable = 0,
12049 		.match_mask = (void *)&mask,
12050 	};
12051 	void *actions[METER_ACTIONS];
12052 	struct mlx5_meter_domain_info *dtb;
12053 	struct rte_flow_error error;
12054 	int i = 0;
12055 	int ret;
12056 
12057 	if (transfer)
12058 		dtb = &mtb->transfer;
12059 	else if (egress)
12060 		dtb = &mtb->egress;
12061 	else
12062 		dtb = &mtb->ingress;
12063 	/* Create the meter table with METER level. */
12064 	dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
12065 					    egress, transfer, false, NULL, 0,
12066 					    0, &error);
12067 	if (!dtb->tbl) {
12068 		DRV_LOG(ERR, "Failed to create meter policer table.");
12069 		return -1;
12070 	}
12071 	/* Create the meter suffix table with SUFFIX level. */
12072 	dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
12073 					    MLX5_FLOW_TABLE_LEVEL_SUFFIX,
12074 					    egress, transfer, false, NULL, 0,
12075 					    0, &error);
12076 	if (!dtb->sfx_tbl) {
12077 		DRV_LOG(ERR, "Failed to create meter suffix table.");
12078 		return -1;
12079 	}
12080 	/* Create matchers, Any and Color. */
12081 	dv_attr.priority = 3;
12082 	dv_attr.match_criteria_enable = 0;
12083 	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
12084 					       &dtb->any_matcher);
12085 	if (ret) {
12086 		DRV_LOG(ERR, "Failed to create meter"
12087 			     " policer default matcher.");
12088 		goto error_exit;
12089 	}
12090 	dv_attr.priority = 0;
12091 	dv_attr.match_criteria_enable =
12092 				1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
12093 	flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
12094 			       rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
12095 	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
12096 					       &dtb->color_matcher);
12097 	if (ret) {
12098 		DRV_LOG(ERR, "Failed to create meter policer color matcher.");
12099 		goto error_exit;
12100 	}
12101 	if (mtb->count_actns[RTE_MTR_DROPPED])
12102 		actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
12103 	actions[i++] = mtb->drop_actn;
12104 	/* Default rule: lowest priority, match any, actions: drop. */
12105 	ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
12106 				       actions,
12107 				       &dtb->policer_rules[RTE_MTR_DROPPED]);
12108 	if (ret) {
12109 		DRV_LOG(ERR, "Failed to create meter policer drop rule.");
12110 		goto error_exit;
12111 	}
12112 	return 0;
12113 error_exit:
12114 	return -1;
12115 }
12116 
12117 /**
12118  * Create the needed meter and suffix tables.
12119  * Lock free, (mutex should be acquired by caller).
12120  *
12121  * @param[in] dev
12122  *   Pointer to Ethernet device.
12123  * @param[in] fm
12124  *   Pointer to the flow meter.
12125  *
12126  * @return
12127  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
12128  */
12129 static struct mlx5_meter_domains_infos *
12130 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
12131 		       const struct mlx5_flow_meter *fm)
12132 {
12133 	struct mlx5_priv *priv = dev->data->dev_private;
12134 	struct mlx5_meter_domains_infos *mtb;
12135 	int ret;
12136 	int i;
12137 
12138 	if (!priv->mtr_en) {
12139 		rte_errno = ENOTSUP;
12140 		return NULL;
12141 	}
12142 	mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
12143 	if (!mtb) {
12144 		DRV_LOG(ERR, "Failed to allocate memory for meter.");
12145 		return NULL;
12146 	}
12147 	/* Create meter count actions. */
12148 	for (i = 0; i <= RTE_MTR_DROPPED; i++) {
12149 		struct mlx5_flow_counter *cnt;
12150 		if (!fm->policer_stats.cnt[i])
12151 			continue;
12152 		cnt = flow_dv_counter_get_by_idx(dev,
12153 		      fm->policer_stats.cnt[i], NULL);
12154 		mtb->count_actns[i] = cnt->action;
12155 	}
12156 	/* Create drop action. */
12157 	ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
12158 	if (ret) {
12159 		DRV_LOG(ERR, "Failed to create drop action.");
12160 		goto error_exit;
12161 	}
12162 	/* Egress meter table. */
12163 	ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
12164 	if (ret) {
12165 		DRV_LOG(ERR, "Failed to prepare egress meter table.");
12166 		goto error_exit;
12167 	}
12168 	/* Ingress meter table. */
12169 	ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
12170 	if (ret) {
12171 		DRV_LOG(ERR, "Failed to prepare ingress meter table.");
12172 		goto error_exit;
12173 	}
12174 	/* FDB meter table. */
12175 	if (priv->config.dv_esw_en) {
12176 		ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
12177 						 priv->mtr_color_reg);
12178 		if (ret) {
12179 			DRV_LOG(ERR, "Failed to prepare fdb meter table.");
12180 			goto error_exit;
12181 		}
12182 	}
12183 	return mtb;
12184 error_exit:
12185 	flow_dv_destroy_mtr_tbl(dev, mtb);
12186 	return NULL;
12187 }
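
/*
 * Usage sketch (illustrative; the call site shown is an assumption): the
 * meter layer reaches this function through the driver ops table below,
 * roughly as:
 *
 *	struct mlx5_meter_domains_infos *mfts;
 *
 *	mfts = flow_dv_create_mtr_tbl(dev, fm);
 *	if (!mfts)
 *		return -rte_errno; // e.g. ENOTSUP when metering is disabled
 *	fm->mfts = mfts;
 */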
12188 
12189 /**
12190  * Destroy domain policer rule.
12191  *
12192  * @param[in] dt
12193  *   Pointer to domain table.
12194  */
12195 static void
12196 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
12197 {
12198 	int i;
12199 
12200 	for (i = 0; i < RTE_MTR_DROPPED; i++) {
12201 		if (dt->policer_rules[i]) {
12202 			claim_zero(mlx5_flow_os_destroy_flow
12203 				   (dt->policer_rules[i]));
12204 			dt->policer_rules[i] = NULL;
12205 		}
12206 	}
12207 	if (dt->jump_actn) {
12208 		claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
12209 		dt->jump_actn = NULL;
12210 	}
12211 }
12212 
12213 /**
12214  * Destroy policer rules.
12215  *
12216  * @param[in] dev
12217  *   Pointer to Ethernet device.
12218  * @param[in] fm
12219  *   Pointer to flow meter structure.
12220  * @param[in] attr
12221  *   Pointer to flow attributes.
12222  *
12223  * @return
12224  *   Always 0.
12225  */
12226 static int
12227 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
12228 			      const struct mlx5_flow_meter *fm,
12229 			      const struct rte_flow_attr *attr)
12230 {
12231 	struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
12232 
12233 	if (!mtb)
12234 		return 0;
12235 	if (attr->egress)
12236 		flow_dv_destroy_domain_policer_rule(&mtb->egress);
12237 	if (attr->ingress)
12238 		flow_dv_destroy_domain_policer_rule(&mtb->ingress);
12239 	if (attr->transfer)
12240 		flow_dv_destroy_domain_policer_rule(&mtb->transfer);
12241 	return 0;
12242 }
12243 
12244 /**
12245  * Create the specific domain meter policer rule.
12246  *
12247  * @param[in] fm
12248  *   Pointer to flow meter structure.
12249  * @param[in] dtb
12250  *   Pointer to the meter domain table info.
12251  * @param[in] mtr_reg_c
12252  *   Color match REG_C.
12253  *
12254  * @return
12255  *   0 on success, -1 otherwise.
12256  */
12257 static int
12258 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
12259 				    struct mlx5_meter_domain_info *dtb,
12260 				    uint8_t mtr_reg_c)
12261 {
12262 	struct mlx5_flow_dv_match_params matcher = {
12263 		.size = sizeof(matcher.buf),
12264 	};
12265 	struct mlx5_flow_dv_match_params value = {
12266 		.size = sizeof(value.buf),
12267 	};
12268 	struct mlx5_meter_domains_infos *mtb = fm->mfts;
12269 	void *actions[METER_ACTIONS];
12270 	int i;
12271 	int ret = 0;
12272 
12273 	/* Create jump action. */
12274 	if (!dtb->jump_actn)
12275 		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
12276 				(dtb->sfx_tbl->obj, &dtb->jump_actn);
12277 	if (ret) {
12278 		DRV_LOG(ERR, "Failed to create policer jump action.");
12279 		goto error;
12280 	}
12281 	for (i = 0; i < RTE_MTR_DROPPED; i++) {
12282 		int j = 0;
12283 
12284 		flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
12285 				       rte_col_2_mlx5_col(i), UINT8_MAX);
12286 		if (mtb->count_actns[i])
12287 			actions[j++] = mtb->count_actns[i];
12288 		if (fm->action[i] == MTR_POLICER_ACTION_DROP)
12289 			actions[j++] = mtb->drop_actn;
12290 		else
12291 			actions[j++] = dtb->jump_actn;
12292 		ret = mlx5_flow_os_create_flow(dtb->color_matcher,
12293 					       (void *)&value, j, actions,
12294 					       &dtb->policer_rules[i]);
12295 		if (ret) {
12296 			DRV_LOG(ERR, "Failed to create policer rule.");
12297 			goto error;
12298 		}
12299 	}
12300 	return 0;
12301 error:
12302 	rte_errno = errno;
12303 	return -1;
12304 }
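
/*
 * Resulting per-domain rule layout (illustrative summary of the code
 * above, not part of the original source):
 *
 *	REG_C == GREEN  -> [counter] + drop or jump, per fm->action[]
 *	REG_C == YELLOW -> [counter] + drop or jump, per fm->action[]
 *	REG_C == RED    -> [counter] + drop or jump, per fm->action[]
 *	match-any, lowest priority -> [counter] + drop
 *	    (the default rule created in flow_dv_prepare_mtr_tables())
 *
 * The jump action forwards matching packets to the domain suffix table.
 */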
12305 
12306 /**
12307  * Create policer rules.
12308  *
12309  * @param[in] dev
12310  *   Pointer to Ethernet device.
12311  * @param[in] fm
12312  *   Pointer to flow meter structure.
12313  * @param[in] attr
12314  *   Pointer to flow attributes.
12315  *
12316  * @return
12317  *   0 on success, -1 otherwise.
12318  */
12319 static int
12320 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
12321 			     struct mlx5_flow_meter *fm,
12322 			     const struct rte_flow_attr *attr)
12323 {
12324 	struct mlx5_priv *priv = dev->data->dev_private;
12325 	struct mlx5_meter_domains_infos *mtb = fm->mfts;
12326 	int ret;
12327 
12328 	if (attr->egress) {
12329 		ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
12330 						priv->mtr_color_reg);
12331 		if (ret) {
12332 			DRV_LOG(ERR, "Failed to create egress policer.");
12333 			goto error;
12334 		}
12335 	}
12336 	if (attr->ingress) {
12337 		ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
12338 						priv->mtr_color_reg);
12339 		if (ret) {
12340 			DRV_LOG(ERR, "Failed to create ingress policer.");
12341 			goto error;
12342 		}
12343 	}
12344 	if (attr->transfer) {
12345 		ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
12346 						priv->mtr_color_reg);
12347 		if (ret) {
12348 			DRV_LOG(ERR, "Failed to create transfer policer.");
12349 			goto error;
12350 		}
12351 	}
12352 	return 0;
12353 error:
12354 	flow_dv_destroy_policer_rules(dev, fm, attr);
12355 	return -1;
12356 }
12357 
12358 /**
12359  * Validate the batch counter support in root table.
12360  *
12361  * Create a simple flow with an invalid counter and a drop action on the root
12362  * table to check whether batch counters with offset are supported there.
12363  *
12364  * @param[in] dev
12365  *   Pointer to rte_eth_dev structure.
12366  *
12367  * @return
12368  *   0 on success, a negative errno value otherwise and rte_errno is set.
12369  */
12370 int
12371 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
12372 {
12373 	struct mlx5_priv *priv = dev->data->dev_private;
12374 	struct mlx5_dev_ctx_shared *sh = priv->sh;
12375 	struct mlx5_flow_dv_match_params mask = {
12376 		.size = sizeof(mask.buf),
12377 	};
12378 	struct mlx5_flow_dv_match_params value = {
12379 		.size = sizeof(value.buf),
12380 	};
12381 	struct mlx5dv_flow_matcher_attr dv_attr = {
12382 		.type = IBV_FLOW_ATTR_NORMAL,
12383 		.priority = 0,
12384 		.match_criteria_enable = 0,
12385 		.match_mask = (void *)&mask,
12386 	};
12387 	void *actions[2] = { 0 };
12388 	struct mlx5_flow_tbl_resource *tbl = NULL;
12389 	struct mlx5_devx_obj *dcs = NULL;
12390 	void *matcher = NULL;
12391 	void *flow = NULL;
12392 	int ret = -1;
12393 
12394 	tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
12395 	if (!tbl)
12396 		goto err;
12397 	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
12398 	if (!dcs)
12399 		goto err;
12400 	ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
12401 						    &actions[0]);
12402 	if (ret)
12403 		goto err;
12404 	actions[1] = priv->drop_queue.hrxq->action;
12405 	dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
12406 	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
12407 					       &matcher);
12408 	if (ret)
12409 		goto err;
12410 	ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
12411 				       actions, &flow);
12412 err:
12413 	/*
12414 	 * If batch counters with offset are not supported, the driver does not
12415 	 * validate the invalid offset value and flow creation succeeds. In that
12416 	 * case batch counters are not supported in the root table.
12417 	 *
12418 	 * Otherwise, if flow creation fails, counter offsets are supported.
12419 	 */
12420 	if (flow) {
12421 		DRV_LOG(INFO, "Batch counter is not supported in root "
12422 			      "table. Switch to fallback mode.");
12423 		rte_errno = ENOTSUP;
12424 		ret = -rte_errno;
12425 		claim_zero(mlx5_flow_os_destroy_flow(flow));
12426 	} else {
12427 		/* Check the matcher to make sure validation failed at flow create. */
12428 		if (!matcher || errno != EINVAL)
12429 			DRV_LOG(ERR, "Unexpected error in counter offset "
12430 				     "support detection");
12431 		ret = 0;
12432 	}
12433 	if (actions[0])
12434 		claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
12435 	if (matcher)
12436 		claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
12437 	if (tbl)
12438 		flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
12439 	if (dcs)
12440 		claim_zero(mlx5_devx_cmd_destroy(dcs));
12441 	return ret;
12442 }
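
/*
 * Usage sketch (illustrative; the call site and field name are
 * assumptions): the probe typically runs once at device start to select
 * the counter allocation mode, e.g.:
 *
 *	if (mlx5_flow_dv_discover_counter_offset_support(dev))
 *		sh->cmng.counter_fallback = 1; // hypothetical fallback flag
 */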
12443 
12444 /**
12445  * Query a devx counter.
12446  *
12447  * @param[in] dev
12448  *   Pointer to the Ethernet device structure.
12449  * @param[in] counter
12450  *   Index to the flow counter.
12451  * @param[in] clear
12452  *   Set to clear the counter statistics.
12453  * @param[out] pkts
12454  *   The statistics value of packets.
12455  * @param[out] bytes
12456  *   The statistics value of bytes.
12457  *
12458  * @return
12459  *   0 on success, -1 otherwise.
12460  */
12461 static int
12462 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
12463 		      uint64_t *pkts, uint64_t *bytes)
12464 {
12465 	struct mlx5_priv *priv = dev->data->dev_private;
12466 	struct mlx5_flow_counter *cnt;
12467 	uint64_t inn_pkts, inn_bytes;
12468 	int ret;
12469 
12470 	if (!priv->config.devx)
12471 		return -1;
12472 
12473 	ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
12474 	if (ret)
12475 		return -1;
12476 	cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
12477 	*pkts = inn_pkts - cnt->hits;
12478 	*bytes = inn_bytes - cnt->bytes;
12479 	if (clear) {
12480 		cnt->hits = inn_pkts;
12481 		cnt->bytes = inn_bytes;
12482 	}
12483 	return 0;
12484 }
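
/*
 * Illustrative example of the delta semantics above: counts are reported
 * relative to the last clearing query, so with clear == true two
 * consecutive calls isolate the traffic seen in between:
 *
 *	uint64_t pkts, bytes;
 *
 *	flow_dv_counter_query(dev, counter, true, &pkts, &bytes);
 *	// ... traffic flows ...
 *	flow_dv_counter_query(dev, counter, true, &pkts, &bytes);
 *	// pkts/bytes now cover only the interval between the two calls.
 */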
12485 
12486 /**
12487  * Get aged-out flows.
12488  *
12489  * @param[in] dev
12490  *   Pointer to the Ethernet device structure.
12491  * @param[in] context
12492  *   The address of an array of pointers to the aged-out flow contexts.
12493  * @param[in] nb_contexts
12494  *   The length of the context array.
12495  * @param[out] error
12496  *   Perform verbose error reporting if not NULL. Initialized in case of
12497  *   error only.
12498  *
12499  * @return
12500  *   The number of aged contexts retrieved on success, otherwise a negative
12501  *   errno value. If nb_contexts is 0, the total number of aged contexts is
12502  *   returned; otherwise, the number of aged flows reported in the context
12503  *   array is returned.
12504  * @note Only a stub for now.
12505  */
12506 static int
12507 flow_get_aged_flows(struct rte_eth_dev *dev,
12508 		    void **context,
12509 		    uint32_t nb_contexts,
12510 		    struct rte_flow_error *error)
12511 {
12512 	struct mlx5_priv *priv = dev->data->dev_private;
12513 	struct mlx5_age_info *age_info;
12514 	struct mlx5_age_param *age_param;
12515 	struct mlx5_flow_counter *counter;
12516 	struct mlx5_aso_age_action *act;
12517 	int nb_flows = 0;
12518 
12519 	if (nb_contexts && !context)
12520 		return rte_flow_error_set(error, EINVAL,
12521 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12522 					  NULL, "empty context");
12523 	age_info = GET_PORT_AGE_INFO(priv);
12524 	rte_spinlock_lock(&age_info->aged_sl);
12525 	LIST_FOREACH(act, &age_info->aged_aso, next) {
12526 		nb_flows++;
12527 		if (nb_contexts) {
12528 			context[nb_flows - 1] =
12529 						act->age_params.context;
12530 			if (!(--nb_contexts))
12531 				break;
12532 		}
12533 	}
12534 	TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
12535 		nb_flows++;
12536 		if (nb_contexts) {
12537 			age_param = MLX5_CNT_TO_AGE(counter);
12538 			context[nb_flows - 1] = age_param->context;
12539 			if (!(--nb_contexts))
12540 				break;
12541 		}
12542 	}
12543 	rte_spinlock_unlock(&age_info->aged_sl);
12544 	MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
12545 	return nb_flows;
12546 }
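
/*
 * Usage sketch (illustrative), mirroring the rte_flow_get_aged_flows()
 * convention: query the total count first, then fetch the contexts:
 *
 *	struct rte_flow_error error;
 *	int n = flow_get_aged_flows(dev, NULL, 0, &error); // count only
 *	void **ctx = malloc(n * sizeof(void *));           // hypothetical
 *
 *	if (ctx)
 *		n = flow_get_aged_flows(dev, ctx, n, &error);
 */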
12547 
12548 /*
12549  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
12550  */
12551 static uint32_t
12552 flow_dv_counter_allocate(struct rte_eth_dev *dev)
12553 {
12554 	return flow_dv_counter_alloc(dev, 0);
12555 }
12556 
12557 /**
12558  * Validate shared action.
12559  * Dispatcher for action type specific validation.
12560  *
12561  * @param[in] dev
12562  *   Pointer to the Ethernet device structure.
12563  * @param[in] conf
12564  *   Shared action configuration.
12565  * @param[in] action
12566  *   The shared action object to validate.
12567  * @param[out] err
12568  *   Perform verbose error reporting if not NULL. Initialized in case of
12569  *   error only.
12570  *
12571  * @return
12572  *   0 on success, otherwise negative errno value.
12573  */
12574 static int
12575 flow_dv_action_validate(struct rte_eth_dev *dev,
12576 			const struct rte_flow_shared_action_conf *conf,
12577 			const struct rte_flow_action *action,
12578 			struct rte_flow_error *err)
12579 {
12580 	struct mlx5_priv *priv = dev->data->dev_private;
12581 
12582 	RTE_SET_USED(conf);
12583 	switch (action->type) {
12584 	case RTE_FLOW_ACTION_TYPE_RSS:
12585 		return mlx5_validate_action_rss(dev, action, err);
12586 	case RTE_FLOW_ACTION_TYPE_AGE:
12587 		if (!priv->sh->aso_age_mng)
12588 			return rte_flow_error_set(err, ENOTSUP,
12589 						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12590 						NULL,
12591 					     "shared age action not supported");
12592 		return flow_dv_validate_action_age(0, action, dev, err);
12593 	default:
12594 		return rte_flow_error_set(err, ENOTSUP,
12595 					  RTE_FLOW_ERROR_TYPE_ACTION,
12596 					  NULL,
12597 					  "action type not supported");
12598 	}
12599 }
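
/*
 * Usage sketch (illustrative): this dispatcher backs the validation step
 * of rte_flow_shared_action_create(); conf and rss_conf below are
 * hypothetical, pre-filled configurations:
 *
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_RSS,
 *		.conf = &rss_conf,
 *	};
 *
 *	shared = rte_flow_shared_action_create(port_id, &conf, &action,
 *					       &error);
 */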
12600 
12601 static int
12602 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
12603 {
12604 	struct mlx5_priv *priv = dev->data->dev_private;
12605 	int ret = 0;
12606 
12607 	if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
12608 		ret = mlx5_glue->dr_sync_domain(priv->sh->rx_domain,
12609 						flags);
12610 		if (ret != 0)
12611 			return ret;
12612 	}
12613 	if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
12614 		ret = mlx5_glue->dr_sync_domain(priv->sh->tx_domain, flags);
12615 		if (ret != 0)
12616 			return ret;
12617 	}
12618 	if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
12619 		ret = mlx5_glue->dr_sync_domain(priv->sh->fdb_domain, flags);
12620 		if (ret != 0)
12621 			return ret;
12622 	}
12623 	return 0;
12624 }
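
/*
 * Usage sketch (illustrative): applications reach this handler through
 * rte_pmd_mlx5_sync_flow() (declared in rte_pmd_mlx5.h), e.g. to flush
 * rules in all domains to the hardware:
 *
 *	rte_pmd_mlx5_sync_flow(port_id,
 *			       MLX5_DOMAIN_BIT_NIC_RX |
 *			       MLX5_DOMAIN_BIT_NIC_TX |
 *			       MLX5_DOMAIN_BIT_FDB);
 */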
12625 
12626 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
12627 	.validate = flow_dv_validate,
12628 	.prepare = flow_dv_prepare,
12629 	.translate = flow_dv_translate,
12630 	.apply = flow_dv_apply,
12631 	.remove = flow_dv_remove,
12632 	.destroy = flow_dv_destroy,
12633 	.query = flow_dv_query,
12634 	.create_mtr_tbls = flow_dv_create_mtr_tbl,
12635 	.destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
12636 	.create_policer_rules = flow_dv_create_policer_rules,
12637 	.destroy_policer_rules = flow_dv_destroy_policer_rules,
12638 	.counter_alloc = flow_dv_counter_allocate,
12639 	.counter_free = flow_dv_counter_free,
12640 	.counter_query = flow_dv_counter_query,
12641 	.get_aged_flows = flow_get_aged_flows,
12642 	.action_validate = flow_dv_action_validate,
12643 	.action_create = flow_dv_action_create,
12644 	.action_destroy = flow_dv_action_destroy,
12645 	.action_update = flow_dv_action_update,
12646 	.action_query = flow_dv_action_query,
12647 	.sync_domain = flow_dv_sync_domain,
12648 };
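
/*
 * Dispatch sketch (illustrative; the argument list is simplified): the
 * generic flow layer selects this ops table by driver type and calls
 * through it, roughly:
 *
 *	const struct mlx5_flow_driver_ops *fops =
 *		flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
 *
 *	ret = fops->validate(dev, attr, items, actions, ...);
 */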
12649 
12650 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
12651 
12652