/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2021 NVIDIA Corporation & Affiliates
 */
#include <rte_malloc.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>
#include "mlx5.h"
#include "mlx5_flow.h"

static_assert(sizeof(uint32_t) * CHAR_BIT >= MLX5_PORT_FLEX_ITEM_NUM,
	      "Flex item maximal number exceeds uint32_t bit width");

/**
 * Routine called once on port initialization to initialize the flex item
 * related infrastructure.
 *
 * @param dev
 *   Ethernet device to perform flex item initialization on.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flex_item_port_init(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	rte_spinlock_init(&priv->flex_item_sl);
	MLX5_ASSERT(!priv->flex_item_map);
	return 0;
}

/**
 * Routine called once on port close to perform flex item
 * related infrastructure cleanup.
 *
 * @param dev
 *   Ethernet device to perform cleanup
 */
void
mlx5_flex_item_port_cleanup(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t i;

	for (i = 0; i < MLX5_PORT_FLEX_ITEM_NUM && priv->flex_item_map; i++) {
		if (priv->flex_item_map & (1 << i)) {
			struct mlx5_flex_item *flex = &priv->flex_item[i];

			claim_zero(mlx5_list_unregister
					(priv->sh->flex_parsers_dv,
					 &flex->devx_fp->entry));
			flex->devx_fp = NULL;
			flex->refcnt = 0;
			priv->flex_item_map &= ~(1 << i);
		}
	}
}

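/*
 * Convert a flex item pointer to its index in the per-port flex item array.
 * Returns -1 if the pointer does not reference a valid, allocated entry.
 */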
static int
mlx5_flex_index(struct mlx5_priv *priv, struct mlx5_flex_item *item)
{
	uintptr_t start = (uintptr_t)&priv->flex_item[0];
	uintptr_t entry = (uintptr_t)item;
	uintptr_t idx = (entry - start) / sizeof(struct mlx5_flex_item);

	if (entry < start ||
	    idx >= MLX5_PORT_FLEX_ITEM_NUM ||
	    (entry - start) % sizeof(struct mlx5_flex_item) ||
	    !(priv->flex_item_map & (1u << idx)))
		return -1;
	return (int)idx;
}

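/* Allocate a free flex item entry on the port and mark it busy in the map. */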
static struct mlx5_flex_item *
mlx5_flex_alloc(struct mlx5_priv *priv)
{
	struct mlx5_flex_item *item = NULL;

	rte_spinlock_lock(&priv->flex_item_sl);
	if (~priv->flex_item_map) {
		uint32_t idx = rte_bsf32(~priv->flex_item_map);

		if (idx < MLX5_PORT_FLEX_ITEM_NUM) {
			item = &priv->flex_item[idx];
			MLX5_ASSERT(!item->refcnt);
			MLX5_ASSERT(!item->devx_fp);
			item->devx_fp = NULL;
			__atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
			priv->flex_item_map |= 1u << idx;
		}
	}
	rte_spinlock_unlock(&priv->flex_item_sl);
	return item;
}

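/* Return a flex item entry to the port pool and clear its bit in the map. */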
static void
mlx5_flex_free(struct mlx5_priv *priv, struct mlx5_flex_item *item)
{
	int idx = mlx5_flex_index(priv, item);

	MLX5_ASSERT(idx >= 0 &&
		    idx < MLX5_PORT_FLEX_ITEM_NUM &&
		    (priv->flex_item_map & (1u << idx)));
	if (idx >= 0) {
		rte_spinlock_lock(&priv->flex_item_sl);
		MLX5_ASSERT(!item->refcnt);
		MLX5_ASSERT(!item->devx_fp);
		item->devx_fp = NULL;
		__atomic_store_n(&item->refcnt, 0, __ATOMIC_RELEASE);
		priv->flex_item_map &= ~(1u << idx);
		rte_spinlock_unlock(&priv->flex_item_sl);
	}
}

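/*
 * Gather up to 32 bits from the flex item pattern/mask buffer starting at
 * bit position "pos", apply the "shift" and return the collected value
 * byte-swapped with rte_bswap32(). Bits beyond the item length read as zero.
 */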
static uint32_t
mlx5_flex_get_bitfield(const struct rte_flow_item_flex *item,
		       uint32_t pos, uint32_t width, uint32_t shift)
{
	const uint8_t *ptr = item->pattern + pos / CHAR_BIT;
	uint32_t val, vbits;

	/* Process the bitfield start byte. */
	MLX5_ASSERT(width <= sizeof(uint32_t) * CHAR_BIT && width);
	MLX5_ASSERT(width + shift <= sizeof(uint32_t) * CHAR_BIT);
	if (item->length <= pos / CHAR_BIT)
		return 0;
	val = *ptr++ >> (pos % CHAR_BIT);
	vbits = CHAR_BIT - pos % CHAR_BIT;
	pos = (pos + vbits) / CHAR_BIT;
	vbits = RTE_MIN(vbits, width);
	val &= RTE_BIT32(vbits) - 1;
	while (vbits < width && pos < item->length) {
		uint32_t part = RTE_MIN(width - vbits, (uint32_t)CHAR_BIT);
		uint32_t tmp = *ptr++;

		pos++;
		tmp &= RTE_BIT32(part) - 1;
		val |= tmp << vbits;
		vbits += part;
	}
	return rte_bswap32(val << shift);
}

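/*
 * Helper macro merging value and mask bits of the given programmable
 * sample field into the misc4 matcher parameters and setting the sample id
 * only when the resulting mask is non-zero.
 */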
#define SET_FP_MATCH_SAMPLE_ID(x, def, msk, val, sid) \
	do { \
		uint32_t tmp, out = (def); \
		tmp = MLX5_GET(fte_match_set_misc4, misc4_v, \
			       prog_sample_field_value_##x); \
		tmp = (tmp & ~out) | (val); \
		MLX5_SET(fte_match_set_misc4, misc4_v, \
			 prog_sample_field_value_##x, tmp); \
		tmp = MLX5_GET(fte_match_set_misc4, misc4_m, \
			       prog_sample_field_value_##x); \
		tmp = (tmp & ~out) | (msk); \
		MLX5_SET(fte_match_set_misc4, misc4_m, \
			 prog_sample_field_value_##x, tmp); \
		tmp = tmp ? (sid) : 0; \
		MLX5_SET(fte_match_set_misc4, misc4_v, \
			 prog_sample_field_id_##x, tmp); \
		MLX5_SET(fte_match_set_misc4, misc4_m, \
			 prog_sample_field_id_##x, tmp); \
	} while (0)

__rte_always_inline static void
mlx5_flex_set_match_sample(void *misc4_m, void *misc4_v,
			   uint32_t def, uint32_t mask, uint32_t value,
			   uint32_t sample_id, uint32_t id)
{
	switch (id) {
	case 0:
		SET_FP_MATCH_SAMPLE_ID(0, def, mask, value, sample_id);
		break;
	case 1:
		SET_FP_MATCH_SAMPLE_ID(1, def, mask, value, sample_id);
		break;
	case 2:
		SET_FP_MATCH_SAMPLE_ID(2, def, mask, value, sample_id);
		break;
	case 3:
		SET_FP_MATCH_SAMPLE_ID(3, def, mask, value, sample_id);
		break;
	case 4:
		SET_FP_MATCH_SAMPLE_ID(4, def, mask, value, sample_id);
		break;
	case 5:
		SET_FP_MATCH_SAMPLE_ID(5, def, mask, value, sample_id);
		break;
	case 6:
		SET_FP_MATCH_SAMPLE_ID(6, def, mask, value, sample_id);
		break;
	case 7:
		SET_FP_MATCH_SAMPLE_ID(7, def, mask, value, sample_id);
		break;
	default:
		MLX5_ASSERT(false);
		break;
	}
#undef SET_FP_MATCH_SAMPLE_ID
}

/**
 * Translate item pattern into matcher fields according to translation
 * array.
 *
 * @param dev
 *   Ethernet device to translate flex item on.
 * @param[in, out] matcher
 *   Flow matcher to configure.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] is_inner
 *   Inner Flex Item (follows after tunnel header).
 */
void
mlx5_flex_flow_translate_item(struct rte_eth_dev *dev,
			      void *matcher, void *key,
			      const struct rte_flow_item *item,
			      bool is_inner)
{
	const struct rte_flow_item_flex *spec, *mask;
	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_4);
	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
	struct mlx5_flex_item *tp;
	uint32_t i, pos = 0;

	RTE_SET_USED(dev);
	MLX5_ASSERT(item->spec && item->mask);
	spec = item->spec;
	mask = item->mask;
	tp = (struct mlx5_flex_item *)spec->handle;
	MLX5_ASSERT(mlx5_flex_index(dev->data->dev_private, tp) >= 0);
	for (i = 0; i < tp->mapnum; i++) {
		struct mlx5_flex_pattern_field *map = tp->map + i;
		uint32_t id = map->reg_id;
		uint32_t def = (RTE_BIT64(map->width) - 1) << map->shift;
		uint32_t val, msk;

		/* Skip placeholders for DUMMY fields. */
		if (id == MLX5_INVALID_SAMPLE_REG_ID) {
			pos += map->width;
			continue;
		}
		val = mlx5_flex_get_bitfield(spec, pos, map->width, map->shift);
		msk = mlx5_flex_get_bitfield(mask, pos, map->width, map->shift);
		MLX5_ASSERT(map->width);
		MLX5_ASSERT(id < tp->devx_fp->num_samples);
		if (tp->tunnel_mode == FLEX_TUNNEL_MODE_MULTI && is_inner) {
			uint32_t num_samples = tp->devx_fp->num_samples / 2;

			MLX5_ASSERT(tp->devx_fp->num_samples % 2 == 0);
			MLX5_ASSERT(id < num_samples);
			id += num_samples;
		}
		mlx5_flex_set_match_sample(misc4_m, misc4_v,
					   def, msk & def, val & msk & def,
					   tp->devx_fp->sample_ids[id], id);
		pos += map->width;
	}
}

/**
 * Convert flex item handle (from the RTE flow) to flex item index on port.
 * Optionally can increment flex item object reference count.
 *
 * @param dev
 *   Ethernet device to acquire flex item on.
 * @param[in] handle
 *   Flow item handle from item spec.
 * @param[in] acquire
 *   If set - increment reference counter.
 *
 * @return
 *   >=0 - index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
int
mlx5_flex_acquire_index(struct rte_eth_dev *dev,
			struct rte_flow_item_flex_handle *handle,
			bool acquire)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flex_item *flex = (struct mlx5_flex_item *)handle;
	int ret = mlx5_flex_index(priv, flex);

	if (ret < 0) {
		errno = EINVAL;
		rte_errno = EINVAL;
		return ret;
	}
	if (acquire)
		__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
	return ret;
}

/**
 * Release flex item index on port - decrements reference counter
 * of the flex item referenced by index.
 *
 * @param dev
 *   Ethernet device to release flex item on.
 * @param[in] index
 *   Flow item index.
 *
 * @return
 *   0 - on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flex_release_index(struct rte_eth_dev *dev,
			int index)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flex_item *flex;

	if (index >= MLX5_PORT_FLEX_ITEM_NUM ||
	    !(priv->flex_item_map & (1u << index))) {
		errno = EINVAL;
		rte_errno = EINVAL;
		return -EINVAL;
	}
	flex = priv->flex_item + index;
	if (flex->refcnt <= 1) {
		MLX5_ASSERT(false);
		errno = EINVAL;
		rte_errno = EINVAL;
		return -EINVAL;
	}
	__atomic_sub_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
	return 0;
}

/*
 * Calculate largest mask value for a given shift.
 *
 *   shift      mask
 * ------- ---------------
 *    0     b111100  0x3C
 *    1     b111110  0x3E
 *    2     b111111  0x3F
 *    3     b011111  0x1F
 *    4     b001111  0x0F
 *    5     b000111  0x07
 */
static uint8_t
mlx5_flex_hdr_len_mask(uint8_t shift,
		       const struct mlx5_hca_flex_attr *attr)
{
	uint32_t base_mask;
	int diff = shift - MLX5_PARSE_GRAPH_NODE_HDR_LEN_SHIFT_DWORD;

	base_mask = mlx5_hca_parse_graph_node_base_hdr_len_mask(attr);
	return diff == 0 ? base_mask :
	       diff < 0 ? (base_mask << -diff) & base_mask : base_mask >> diff;
}

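/*
 * Translate the header length field of the flex item configuration into
 * the DevX parse graph node length attributes, validating the requested
 * mode against the HCA flex capabilities.
 */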
static int
mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr,
			   const struct rte_flow_item_flex_conf *conf,
			   struct mlx5_flex_parser_devx *devx,
			   struct rte_flow_error *error)
{
	const struct rte_flow_item_flex_field *field = &conf->next_header;
	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
	uint32_t len_width, mask;

	if (field->field_base % CHAR_BIT)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "not byte aligned header length field");
	switch (field->field_mode) {
	case FIELD_MODE_DUMMY:
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "invalid header length field mode (DUMMY)");
	case FIELD_MODE_FIXED:
		if (!(attr->header_length_mode &
		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIXED)))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported header length field mode (FIXED)");
		if (field->field_size ||
		    field->offset_mask || field->offset_shift)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "invalid fields for fixed mode");
		if (field->field_base < 0)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "negative header length field base (FIXED)");
		node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIXED;
		break;
	case FIELD_MODE_OFFSET:
		if (!(attr->header_length_mode &
		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_FIELD)))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported header length field mode (OFFSET)");
		node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD;
		if (field->offset_mask == 0 ||
		    !rte_is_power_of_2(field->offset_mask + 1))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "invalid length field offset mask (OFFSET)");
		len_width = rte_fls_u32(field->offset_mask);
		if (len_width > attr->header_length_mask_width)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "length field offset mask too wide (OFFSET)");
		mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr);
		if (mask < field->offset_mask)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "length field shift too big (OFFSET)");
		node->header_length_field_mask = RTE_MIN(mask,
							 field->offset_mask);
		break;
	case FIELD_MODE_BITMASK:
		if (!(attr->header_length_mode &
		    RTE_BIT32(MLX5_GRAPH_NODE_LEN_BITMASK)))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported header length field mode (BITMASK)");
		if (attr->header_length_mask_width < field->field_size)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "header length field width exceeds limit");
		node->header_length_mode = MLX5_GRAPH_NODE_LEN_BITMASK;
		mask = mlx5_flex_hdr_len_mask(field->offset_shift, attr);
		if (mask < field->offset_mask)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "length field shift too big (BITMASK)");
		node->header_length_field_mask = RTE_MIN(mask,
							 field->offset_mask);
		break;
	default:
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unknown header length field mode");
	}
	if (field->field_base / CHAR_BIT >= 0 &&
	    field->field_base / CHAR_BIT > attr->max_base_header_length)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "header length field base exceeds limit");
	node->header_length_base_value = field->field_base / CHAR_BIT;
	if (field->field_mode == FIELD_MODE_OFFSET ||
	    field->field_mode == FIELD_MODE_BITMASK) {
		if (field->offset_shift > 15 || field->offset_shift < 0)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "header length field shift exceeds limit");
		node->header_length_field_shift = field->offset_shift;
		node->header_length_field_offset = field->offset_base;
	}
	return 0;
}

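/*
 * Translate the next protocol field of the flex item configuration into
 * the DevX parse graph node attributes. Only the FIXED field mode is
 * accepted, and the field is mandatory when output links are configured.
 */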
static int
mlx5_flex_translate_next(struct mlx5_hca_flex_attr *attr,
			 const struct rte_flow_item_flex_conf *conf,
			 struct mlx5_flex_parser_devx *devx,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item_flex_field *field = &conf->next_protocol;
	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;

	switch (field->field_mode) {
	case FIELD_MODE_DUMMY:
		if (conf->nb_outputs)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "next protocol field is required (DUMMY)");
		return 0;
	case FIELD_MODE_FIXED:
		break;
	case FIELD_MODE_OFFSET:
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unsupported next protocol field mode (OFFSET)");
	case FIELD_MODE_BITMASK:
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unsupported next protocol field mode (BITMASK)");
	default:
		return rte_flow_error_set
			(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unknown next protocol field mode");
	}
	MLX5_ASSERT(field->field_mode == FIELD_MODE_FIXED);
	if (!conf->nb_outputs)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "output link(s) are required if next field present");
	if (attr->max_next_header_offset < field->field_base)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "next protocol field base exceeds limit");
	if (field->offset_shift)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unsupported next protocol field shift");
	node->next_header_field_offset = field->field_base;
	node->next_header_field_size = field->field_size;
	return 0;
}

/* Helper structure to handle field bit intervals. */
struct mlx5_flex_field_cover {
	uint16_t num;
	int32_t start[MLX5_FLEX_ITEM_MAPPING_NUM];
	int32_t end[MLX5_FLEX_ITEM_MAPPING_NUM];
	uint8_t mapped[MLX5_FLEX_ITEM_MAPPING_NUM / CHAR_BIT + 1];
};

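/*
 * Insert a new bit interval [start, end] at position "num" of the
 * ascending interval array, shifting the following entries up.
 */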
static void
mlx5_flex_insert_field(struct mlx5_flex_field_cover *cover,
		       uint16_t num, int32_t start, int32_t end)
{
	MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
	MLX5_ASSERT(num <= cover->num);
	if (num < cover->num) {
		memmove(&cover->start[num + 1], &cover->start[num],
			(cover->num - num) * sizeof(int32_t));
		memmove(&cover->end[num + 1], &cover->end[num],
			(cover->num - num) * sizeof(int32_t));
	}
	cover->start[num] = start;
	cover->end[num] = end;
	cover->num++;
}

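/*
 * Merge the interval at position "num" with the following intervals it
 * now overlaps after its end was extended, removing the covered entries.
 */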
static void
mlx5_flex_merge_field(struct mlx5_flex_field_cover *cover, uint16_t num)
{
	uint32_t i, del = 0;
	int32_t end;

	MLX5_ASSERT(num < MLX5_FLEX_ITEM_MAPPING_NUM);
	MLX5_ASSERT(num < (cover->num - 1));
	end = cover->end[num];
	for (i = num + 1; i < cover->num; i++) {
		if (end < cover->start[i])
			break;
		del++;
		if (end <= cover->end[i]) {
			cover->end[num] = cover->end[i];
			break;
		}
	}
	if (del) {
		MLX5_ASSERT(del < (cover->num - 1u - num));
		cover->num -= del;
		MLX5_ASSERT(cover->num > num);
		if ((cover->num - num) > 1) {
			memmove(&cover->start[num + 1],
				&cover->start[num + 1 + del],
				(cover->num - num - 1) * sizeof(int32_t));
			memmove(&cover->end[num + 1],
				&cover->end[num + 1 + del],
				(cover->num - num - 1) * sizeof(int32_t));
		}
	}
}

/*
 * Validate the sample field and update interval array
 * if parameters match with the 'match' field.
 * Returns:
 *    < 0  - error
 *   == 0  - no match, interval array not updated
 *    > 0  - match, interval array updated
 */
static int
mlx5_flex_cover_sample(struct mlx5_flex_field_cover *cover,
		       struct rte_flow_item_flex_field *field,
		       struct rte_flow_item_flex_field *match,
		       struct mlx5_hca_flex_attr *attr,
		       struct rte_flow_error *error)
{
	int32_t start, end;
	uint32_t i;

	switch (field->field_mode) {
	case FIELD_MODE_DUMMY:
		return 0;
	case FIELD_MODE_FIXED:
		if (!(attr->sample_offset_mode &
		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIXED)))
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported sample field mode (FIXED)");
		if (field->offset_shift)
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "invalid sample field shift (FIXED)");
		if (field->field_base < 0)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "invalid sample field base (FIXED)");
		if (field->field_base / CHAR_BIT > attr->max_sample_base_offset)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "sample field base exceeds limit (FIXED)");
		break;
	case FIELD_MODE_OFFSET:
		if (!(attr->sample_offset_mode &
		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_FIELD)))
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported sample field mode (OFFSET)");
		if (field->field_base / CHAR_BIT >= 0 &&
		    field->field_base / CHAR_BIT > attr->max_sample_base_offset)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "sample field base exceeds limit");
		break;
	case FIELD_MODE_BITMASK:
		if (!(attr->sample_offset_mode &
		    RTE_BIT32(MLX5_GRAPH_SAMPLE_OFFSET_BITMASK)))
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported sample field mode (BITMASK)");
		if (field->field_base / CHAR_BIT >= 0 &&
		    field->field_base / CHAR_BIT > attr->max_sample_base_offset)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "sample field base exceeds limit");
		break;
	default:
		return rte_flow_error_set
			(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unknown data sample field mode");
	}
	if (!match) {
		if (!field->field_size)
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "zero sample field width");
		if (field->field_id)
			DRV_LOG(DEBUG, "sample field id hint ignored");
	} else {
		if (field->field_mode != match->field_mode ||
		    field->offset_base | match->offset_base ||
		    field->offset_mask | match->offset_mask ||
		    field->offset_shift | match->offset_shift)
			return 0;
	}
	start = field->field_base;
	end = start + field->field_size;
	/* Add the new or similar field to interval array. */
	if (!cover->num) {
		cover->start[cover->num] = start;
		cover->end[cover->num] = end;
		cover->num = 1;
		return 1;
	}
	for (i = 0; i < cover->num; i++) {
		if (start > cover->end[i]) {
			if (i >= (cover->num - 1u)) {
				mlx5_flex_insert_field(cover, cover->num,
						       start, end);
				break;
			}
			continue;
		}
		if (end < cover->start[i]) {
			mlx5_flex_insert_field(cover, i, start, end);
			break;
		}
		if (start < cover->start[i])
			cover->start[i] = start;
		if (end > cover->end[i]) {
			cover->end[i] = end;
			if (i < (cover->num - 1u))
				mlx5_flex_merge_field(cover, i);
		}
		break;
	}
	return 1;
}

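/*
 * Build the DevX sample attributes (offset mode and tunnel mode) for the
 * given sample field of the flex item configuration.
 */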
static void
mlx5_flex_config_sample(struct mlx5_devx_match_sample_attr *na,
			struct rte_flow_item_flex_field *field,
			enum rte_flow_item_flex_tunnel_mode tunnel_mode)
{
	memset(na, 0, sizeof(struct mlx5_devx_match_sample_attr));
	na->flow_match_sample_en = 1;
	switch (field->field_mode) {
	case FIELD_MODE_FIXED:
		na->flow_match_sample_offset_mode =
			MLX5_GRAPH_SAMPLE_OFFSET_FIXED;
		break;
	case FIELD_MODE_OFFSET:
		na->flow_match_sample_offset_mode =
			MLX5_GRAPH_SAMPLE_OFFSET_FIELD;
		na->flow_match_sample_field_offset = field->offset_base;
		na->flow_match_sample_field_offset_mask = field->offset_mask;
		na->flow_match_sample_field_offset_shift = field->offset_shift;
		break;
	case FIELD_MODE_BITMASK:
		na->flow_match_sample_offset_mode =
			MLX5_GRAPH_SAMPLE_OFFSET_BITMASK;
		na->flow_match_sample_field_offset = field->offset_base;
		na->flow_match_sample_field_offset_mask = field->offset_mask;
		na->flow_match_sample_field_offset_shift = field->offset_shift;
		break;
	default:
		MLX5_ASSERT(false);
		break;
	}
	switch (tunnel_mode) {
	case FLEX_TUNNEL_MODE_SINGLE:
		/* Fallthrough */
	case FLEX_TUNNEL_MODE_TUNNEL:
		na->flow_match_sample_tunnel_mode =
			MLX5_GRAPH_SAMPLE_TUNNEL_FIRST;
		break;
	case FLEX_TUNNEL_MODE_MULTI:
		/* Fallthrough */
	case FLEX_TUNNEL_MODE_OUTER:
		na->flow_match_sample_tunnel_mode =
			MLX5_GRAPH_SAMPLE_TUNNEL_OUTER;
		break;
	case FLEX_TUNNEL_MODE_INNER:
		na->flow_match_sample_tunnel_mode =
			MLX5_GRAPH_SAMPLE_TUNNEL_INNER;
		break;
	default:
		MLX5_ASSERT(false);
		break;
	}
}

/* Map specified field to set/subset of allocated sample registers. */
static int
mlx5_flex_map_sample(struct rte_flow_item_flex_field *field,
		     struct mlx5_flex_parser_devx *parser,
		     struct mlx5_flex_item *item,
		     struct rte_flow_error *error)
{
	struct mlx5_devx_match_sample_attr node;
	int32_t start = field->field_base;
	int32_t end = start + field->field_size;
	struct mlx5_flex_pattern_field *trans;
	uint32_t i, done_bits = 0;

	if (field->field_mode == FIELD_MODE_DUMMY) {
		done_bits = field->field_size;
		while (done_bits) {
			uint32_t part = RTE_MIN(done_bits,
						sizeof(uint32_t) * CHAR_BIT);
			if (item->mapnum >= MLX5_FLEX_ITEM_MAPPING_NUM)
				return rte_flow_error_set
					(error,
					 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					 "too many flex item pattern translations");
			trans = &item->map[item->mapnum];
			trans->reg_id = MLX5_INVALID_SAMPLE_REG_ID;
			trans->shift = 0;
			trans->width = part;
			item->mapnum++;
			done_bits -= part;
		}
		return 0;
	}
	mlx5_flex_config_sample(&node, field, item->tunnel_mode);
	for (i = 0; i < parser->num_samples; i++) {
		struct mlx5_devx_match_sample_attr *sample =
			&parser->devx_conf.sample[i];
		int32_t reg_start, reg_end;
		int32_t cov_start, cov_end;

		MLX5_ASSERT(sample->flow_match_sample_en);
		if (!sample->flow_match_sample_en)
			break;
		node.flow_match_sample_field_base_offset =
			sample->flow_match_sample_field_base_offset;
		if (memcmp(&node, sample, sizeof(node)))
			continue;
		reg_start = (int8_t)sample->flow_match_sample_field_base_offset;
		reg_start *= CHAR_BIT;
		reg_end = reg_start + 32;
		if (end <= reg_start || start >= reg_end)
			continue;
		cov_start = RTE_MAX(reg_start, start);
		cov_end = RTE_MIN(reg_end, end);
		MLX5_ASSERT(cov_end > cov_start);
		done_bits += cov_end - cov_start;
		if (item->mapnum >= MLX5_FLEX_ITEM_MAPPING_NUM)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "too many flex item pattern translations");
		trans = &item->map[item->mapnum];
		item->mapnum++;
		trans->reg_id = i;
		trans->shift = cov_start - reg_start;
		trans->width = cov_end - cov_start;
	}
	if (done_bits != field->field_size) {
		MLX5_ASSERT(false);
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "failed to map field to sample register");
	}
	return 0;
}

/* Allocate sample registers for the specified field type and interval array. */
static int
mlx5_flex_alloc_sample(struct mlx5_flex_field_cover *cover,
		       struct mlx5_flex_parser_devx *parser,
		       struct mlx5_flex_item *item,
		       struct rte_flow_item_flex_field *field,
		       struct mlx5_hca_flex_attr *attr,
		       struct rte_flow_error *error)
{
	struct mlx5_devx_match_sample_attr node;
	uint32_t idx = 0;

	mlx5_flex_config_sample(&node, field, item->tunnel_mode);
	while (idx < cover->num) {
		int32_t start, end;

		/*
		 * Sample base offsets are in bytes, should be aligned
		 * to 32-bit as required by firmware for samples.
		 */
		start = RTE_ALIGN_FLOOR(cover->start[idx],
					sizeof(uint32_t) * CHAR_BIT);
		node.flow_match_sample_field_base_offset =
						(start / CHAR_BIT) & 0xFF;
		/* Allocate sample register. */
		if (parser->num_samples >= MLX5_GRAPH_NODE_SAMPLE_NUM ||
		    parser->num_samples >= attr->max_num_sample ||
		    parser->num_samples >= attr->max_num_prog_sample)
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "no sample registers to handle all flex item fields");
		parser->devx_conf.sample[parser->num_samples] = node;
		parser->num_samples++;
		/* Remove or update covered intervals. */
		end = start + 32;
		while (idx < cover->num) {
			if (end >= cover->end[idx]) {
				idx++;
				continue;
			}
			if (end > cover->start[idx])
				cover->start[idx] = end;
			break;
		}
	}
	return 0;
}

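/*
 * Translate the sample field array of the flex item configuration into a
 * minimal set of 32-bit DevX sample registers and build the pattern
 * translation map used on flow creation. For FLEX_TUNNEL_MODE_MULTI the
 * sample set is duplicated for the inner flex item.
 */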
static int
mlx5_flex_translate_sample(struct mlx5_hca_flex_attr *attr,
			   const struct rte_flow_item_flex_conf *conf,
			   struct mlx5_flex_parser_devx *parser,
			   struct mlx5_flex_item *item,
			   struct rte_flow_error *error)
{
	struct mlx5_flex_field_cover cover;
	uint32_t i, j;
	int ret;

	switch (conf->tunnel) {
	case FLEX_TUNNEL_MODE_SINGLE:
		/* Fallthrough */
	case FLEX_TUNNEL_MODE_OUTER:
		/* Fallthrough */
	case FLEX_TUNNEL_MODE_INNER:
		/* Fallthrough */
	case FLEX_TUNNEL_MODE_MULTI:
		/* Fallthrough */
	case FLEX_TUNNEL_MODE_TUNNEL:
		break;
	default:
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "unrecognized tunnel mode");
	}
	item->tunnel_mode = conf->tunnel;
	if (conf->nb_samples > MLX5_FLEX_ITEM_MAPPING_NUM)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "sample field number exceeds limit");
	/*
	 * The application can specify fields smaller or bigger than 32 bits
	 * covered with single sample register and it can specify field
	 * offsets in any order.
	 *
	 * Gather all similar fields together, build array of bit intervals
	 * in ascending order and try to cover with the smallest set of sample
	 * registers.
	 */
	memset(&cover, 0, sizeof(cover));
	for (i = 0; i < conf->nb_samples; i++) {
		struct rte_flow_item_flex_field *fl = conf->sample_data + i;

		/* Check whether field was covered in the previous iteration. */
		if (cover.mapped[i / CHAR_BIT] & (1u << (i % CHAR_BIT)))
			continue;
		if (fl->field_mode == FIELD_MODE_DUMMY)
			continue;
		/* Build an interval array for the field and similar ones */
		cover.num = 0;
		/* Add the first field to array unconditionally. */
		ret = mlx5_flex_cover_sample(&cover, fl, NULL, attr, error);
		if (ret < 0)
			return ret;
		MLX5_ASSERT(ret > 0);
		cover.mapped[i / CHAR_BIT] |= 1u << (i % CHAR_BIT);
		for (j = i + 1; j < conf->nb_samples; j++) {
			struct rte_flow_item_flex_field *ft;

			/* Add field to array if its type matches. */
			ft = conf->sample_data + j;
			ret = mlx5_flex_cover_sample(&cover, ft, fl,
						     attr, error);
			if (ret < 0)
				return ret;
			if (!ret)
				continue;
			cover.mapped[j / CHAR_BIT] |= 1u << (j % CHAR_BIT);
		}
		/* Allocate sample registers to cover array of intervals. */
		ret = mlx5_flex_alloc_sample(&cover, parser, item,
					     fl, attr, error);
		if (ret)
			return ret;
	}
	/* Build the item pattern translating data on flow creation. */
	item->mapnum = 0;
	memset(&item->map, 0, sizeof(item->map));
	for (i = 0; i < conf->nb_samples; i++) {
		struct rte_flow_item_flex_field *fl = conf->sample_data + i;

		ret = mlx5_flex_map_sample(fl, parser, item, error);
		if (ret) {
			MLX5_ASSERT(false);
			return ret;
		}
	}
	if (conf->tunnel == FLEX_TUNNEL_MODE_MULTI) {
		/*
		 * In FLEX_TUNNEL_MODE_MULTI tunnel mode PMD creates 2 sets
		 * of samples. The first set is for outer and the second set
		 * for inner flex flow item. Outer and inner samples differ
		 * only in tunnel_mode.
		 */
		if (parser->num_samples > MLX5_GRAPH_NODE_SAMPLE_NUM / 2)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "no sample registers for inner");
		rte_memcpy(parser->devx_conf.sample + parser->num_samples,
			   parser->devx_conf.sample,
			   parser->num_samples *
			   sizeof(parser->devx_conf.sample[0]));
		for (i = 0; i < parser->num_samples; i++) {
			struct mlx5_devx_match_sample_attr *sm = i +
				parser->devx_conf.sample + parser->num_samples;

			sm->flow_match_sample_tunnel_mode =
				MLX5_GRAPH_SAMPLE_TUNNEL_INNER;
		}
		parser->num_samples *= 2;
	}
	return 0;
}

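/*
 * Map an RTE flow item type to the corresponding parse graph arc node.
 * "in" selects the interpretation for input arcs (generic IP node instead
 * of IPV4/IPV6). Returns -EINVAL for unsupported item types.
 */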
static int
mlx5_flex_arc_type(enum rte_flow_item_type type, int in)
{
	switch (type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		return MLX5_GRAPH_ARC_NODE_MAC;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV4;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		return in ? MLX5_GRAPH_ARC_NODE_IP : MLX5_GRAPH_ARC_NODE_IPV6;
	case RTE_FLOW_ITEM_TYPE_UDP:
		return MLX5_GRAPH_ARC_NODE_UDP;
	case RTE_FLOW_ITEM_TYPE_TCP:
		return MLX5_GRAPH_ARC_NODE_TCP;
	case RTE_FLOW_ITEM_TYPE_MPLS:
		return MLX5_GRAPH_ARC_NODE_MPLS;
	case RTE_FLOW_ITEM_TYPE_GRE:
		return MLX5_GRAPH_ARC_NODE_GRE;
	case RTE_FLOW_ITEM_TYPE_GENEVE:
		return MLX5_GRAPH_ARC_NODE_GENEVE;
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		return MLX5_GRAPH_ARC_NODE_VXLAN_GPE;
	default:
		return -EINVAL;
	}
}

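/*
 * Extract the IN arc compare value from an ETH input link item. The mask
 * must select the full ether_type field and nothing else; the value is
 * returned in CPU byte order.
 */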
static int
mlx5_flex_arc_in_eth(const struct rte_flow_item *item,
		     struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	struct rte_flow_item_eth eth = { .hdr.ether_type = RTE_BE16(0xFFFF) };

	if (memcmp(mask, &eth, sizeof(struct rte_flow_item_eth))) {
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
			 "invalid eth item mask");
	}
	return rte_be_to_cpu_16(spec->hdr.ether_type);
}

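/*
 * Extract the IN arc compare value from a UDP input link item. The mask
 * must select the full destination port and nothing else; the value is
 * returned in CPU byte order.
 */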
static int
mlx5_flex_arc_in_udp(const struct rte_flow_item *item,
		     struct rte_flow_error *error)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	struct rte_flow_item_udp udp = { .hdr.dst_port = RTE_BE16(0xFFFF) };

	if (memcmp(mask, &udp, sizeof(struct rte_flow_item_udp))) {
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
			 "invalid udp item mask");
	}
	return rte_be_to_cpu_16(spec->hdr.dst_port);
}

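/*
 * Translate the input link array of the flex item configuration into the
 * parse graph IN arcs, validating the arc types and compare conditions
 * against the HCA flex capabilities.
 */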
static int
mlx5_flex_translate_arc_in(struct mlx5_hca_flex_attr *attr,
			   const struct rte_flow_item_flex_conf *conf,
			   struct mlx5_flex_parser_devx *devx,
			   struct mlx5_flex_item *item,
			   struct rte_flow_error *error)
{
	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
	uint32_t i;

	RTE_SET_USED(item);
	if (conf->nb_inputs > attr->max_num_arc_in)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "too many input links");
	for (i = 0; i < conf->nb_inputs; i++) {
		struct mlx5_devx_graph_arc_attr *arc = node->in + i;
		struct rte_flow_item_flex_link *link = conf->input_link + i;
		const struct rte_flow_item *rte_item = &link->item;
		int arc_type;
		int ret;

		if (!rte_item->spec || !rte_item->mask || rte_item->last)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "invalid flex item IN arc format");
		arc_type = mlx5_flex_arc_type(rte_item->type, true);
		if (arc_type < 0 || !(attr->node_in & RTE_BIT32(arc_type)))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported flex item IN arc type");
		arc->arc_parse_graph_node = arc_type;
		arc->start_inner_tunnel = 0;
		/*
		 * Configure arc IN condition value. The value location depends
		 * on protocol. Current FW version supports IP & UDP for IN
		 * arcs only, and locations for these protocols are defined.
		 * Add more protocols when available.
		 */
		switch (rte_item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flex_arc_in_eth(rte_item, error);
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flex_arc_in_udp(rte_item, error);
			break;
		default:
			MLX5_ASSERT(false);
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported flex item IN arc type");
		}
		if (ret < 0)
			return ret;
		arc->compare_condition_value = (uint16_t)ret;
	}
	return 0;
}

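/*
 * Translate the output link array of the flex item configuration into the
 * parse graph OUT arcs, propagating the tunnel flag and the next protocol
 * compare values.
 */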
static int
mlx5_flex_translate_arc_out(struct mlx5_hca_flex_attr *attr,
			    const struct rte_flow_item_flex_conf *conf,
			    struct mlx5_flex_parser_devx *devx,
			    struct mlx5_flex_item *item,
			    struct rte_flow_error *error)
{
	struct mlx5_devx_graph_node_attr *node = &devx->devx_conf;
	bool is_tunnel = conf->tunnel == FLEX_TUNNEL_MODE_TUNNEL;
	uint32_t i;

	RTE_SET_USED(item);
	if (conf->nb_outputs > attr->max_num_arc_out)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			 "too many output links");
	for (i = 0; i < conf->nb_outputs; i++) {
		struct mlx5_devx_graph_arc_attr *arc = node->out + i;
		struct rte_flow_item_flex_link *link = conf->output_link + i;
		const struct rte_flow_item *rte_item = &link->item;
		int arc_type;

		if (rte_item->spec || rte_item->mask || rte_item->last)
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "flex node: invalid OUT arc format");
		arc_type = mlx5_flex_arc_type(rte_item->type, false);
		if (arc_type < 0 || !(attr->node_out & RTE_BIT32(arc_type)))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				 "unsupported flex item OUT arc type");
		arc->arc_parse_graph_node = arc_type;
		arc->start_inner_tunnel = !!is_tunnel;
		arc->compare_condition_value = link->next;
	}
	return 0;
}

/* Translate RTE flex item API configuration into flex parser settings. */
static int
mlx5_flex_translate_conf(struct rte_eth_dev *dev,
			 const struct rte_flow_item_flex_conf *conf,
			 struct mlx5_flex_parser_devx *devx,
			 struct mlx5_flex_item *item,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hca_flex_attr *attr = &priv->sh->cdev->config.hca_attr.flex;
	int ret;

	ret = mlx5_flex_translate_length(attr, conf, devx, error);
	if (ret)
		return ret;
	ret = mlx5_flex_translate_next(attr, conf, devx, error);
	if (ret)
		return ret;
	ret = mlx5_flex_translate_sample(attr, conf, devx, item, error);
	if (ret)
		return ret;
	ret = mlx5_flex_translate_arc_in(attr, conf, devx, item, error);
	if (ret)
		return ret;
	ret = mlx5_flex_translate_arc_out(attr, conf, devx, item, error);
	if (ret)
		return ret;
	return 0;
}

/**
 * Create the flex item with specified configuration over the Ethernet device.
 *
 * @param dev
 *   Ethernet device to create flex item on.
 * @param[in] conf
 *   Flex item configuration.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   Non-NULL opaque pointer on success, NULL otherwise and rte_errno is set.
 */
struct rte_flow_item_flex_handle *
flow_dv_item_create(struct rte_eth_dev *dev,
		    const struct rte_flow_item_flex_conf *conf,
		    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flex_parser_devx devx_config = { .devx_obj = NULL };
	struct mlx5_flex_item *flex;
	struct mlx5_list_entry *ent;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	flex = mlx5_flex_alloc(priv);
	if (!flex) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "too many flex items created on the port");
		return NULL;
	}
	if (mlx5_flex_translate_conf(dev, conf, &devx_config, flex, error))
		goto error;
	ent = mlx5_list_register(priv->sh->flex_parsers_dv, &devx_config);
	if (!ent) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "flex item creation failure");
		goto error;
	}
	flex->devx_fp = container_of(ent, struct mlx5_flex_parser_devx, entry);
	/* Mark initialized flex item valid. */
	__atomic_add_fetch(&flex->refcnt, 1, __ATOMIC_RELEASE);
	return (struct rte_flow_item_flex_handle *)flex;

error:
	mlx5_flex_free(priv, flex);
	return NULL;
}

/**
 * Release the flex item on the specified Ethernet device.
 *
 * @param dev
 *   Ethernet device to destroy flex item on.
 * @param[in] handle
 *   Handle of the item existing on the specified device.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. PMDs initialize this
 *   structure in case of error only.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
flow_dv_item_release(struct rte_eth_dev *dev,
		     const struct rte_flow_item_flex_handle *handle,
		     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flex_item *flex =
		(struct mlx5_flex_item *)(uintptr_t)handle;
	uint32_t old_refcnt = 1;
	int rc;

	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	rte_spinlock_lock(&priv->flex_item_sl);
	if (mlx5_flex_index(priv, flex) < 0) {
		rte_spinlock_unlock(&priv->flex_item_sl);
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "invalid flex item handle value");
	}
	if (!__atomic_compare_exchange_n(&flex->refcnt, &old_refcnt, 0, 0,
					 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
		rte_spinlock_unlock(&priv->flex_item_sl);
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "flex item has flow references");
	}
	/* Flex item is marked as invalid, we can leave locked section. */
	rte_spinlock_unlock(&priv->flex_item_sl);
	MLX5_ASSERT(flex->devx_fp);
	rc = mlx5_list_unregister(priv->sh->flex_parsers_dv,
				  &flex->devx_fp->entry);
	flex->devx_fp = NULL;
	mlx5_flex_free(priv, flex);
	if (rc < 0)
		return rte_flow_error_set(error, EBUSY,
					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					  "flex item release failure");
	return 0;
}

/* DevX flex parser list callbacks. */
struct mlx5_list_entry *
mlx5_flex_parser_create_cb(void *list_ctx, void *ctx)
{
	struct mlx5_dev_ctx_shared *sh = list_ctx;
	struct mlx5_flex_parser_devx *fp, *conf = ctx;
	int ret;

	fp = mlx5_malloc(MLX5_MEM_ZERO, sizeof(struct mlx5_flex_parser_devx),
			 0, SOCKET_ID_ANY);
	if (!fp)
		return NULL;
	/* Copy the requested configurations. */
	fp->num_samples = conf->num_samples;
	memcpy(&fp->devx_conf, &conf->devx_conf, sizeof(fp->devx_conf));
	/* Create DevX flex parser. */
	fp->devx_obj = mlx5_devx_cmd_create_flex_parser(sh->cdev->ctx,
							&fp->devx_conf);
	if (!fp->devx_obj)
		goto error;
	/* Query the firmware assigned sample ids. */
	ret = mlx5_devx_cmd_query_parse_samples(fp->devx_obj,
						fp->sample_ids,
						fp->num_samples);
	if (ret)
		goto error;
	DRV_LOG(DEBUG, "DEVx flex parser %p created, samples num: %u",
		(const void *)fp, fp->num_samples);
	return &fp->entry;
error:
	if (fp->devx_obj)
		mlx5_devx_cmd_destroy((void *)(uintptr_t)fp->devx_obj);
	if (fp)
		mlx5_free(fp);
	return NULL;
}

int
mlx5_flex_parser_match_cb(void *list_ctx,
			  struct mlx5_list_entry *iter, void *ctx)
{
	struct mlx5_flex_parser_devx *fp =
		container_of(iter, struct mlx5_flex_parser_devx, entry);
	struct mlx5_flex_parser_devx *org =
		container_of(ctx, struct mlx5_flex_parser_devx, entry);

	RTE_SET_USED(list_ctx);
	return !iter || !ctx || memcmp(&fp->devx_conf,
				       &org->devx_conf,
				       sizeof(fp->devx_conf));
}

void
mlx5_flex_parser_remove_cb(void *list_ctx, struct mlx5_list_entry *entry)
{
	struct mlx5_flex_parser_devx *fp =
		container_of(entry, struct mlx5_flex_parser_devx, entry);

	RTE_SET_USED(list_ctx);
	MLX5_ASSERT(fp->devx_obj);
	claim_zero(mlx5_devx_cmd_destroy(fp->devx_obj));
	DRV_LOG(DEBUG, "DEVx flex parser %p destroyed", (const void *)fp);
	mlx5_free(entry);
}

struct mlx5_list_entry *
mlx5_flex_parser_clone_cb(void *list_ctx,
			  struct mlx5_list_entry *entry, void *ctx)
{
	struct mlx5_flex_parser_devx *fp;

	RTE_SET_USED(list_ctx);
	RTE_SET_USED(entry);
	fp = mlx5_malloc(0, sizeof(struct mlx5_flex_parser_devx),
			 0, SOCKET_ID_ANY);
	if (!fp)
		return NULL;
	memcpy(fp, ctx, sizeof(struct mlx5_flex_parser_devx));
	return &fp->entry;
}

void
mlx5_flex_parser_clone_free_cb(void *list_ctx, struct mlx5_list_entry *entry)
{
	struct mlx5_flex_parser_devx *fp =
		container_of(entry, struct mlx5_flex_parser_devx, entry);

	RTE_SET_USED(list_ctx);
	mlx5_free(fp);
}