/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_rxtx.h"

#define VERBS_SPEC_INNER(item_flags) \
	(!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0)
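
/*
 * Illustration only: once a tunnel item has been parsed and
 * MLX5_FLOW_LAYER_TUNNEL is set in item_flags, the macro above tags every
 * following spec as inner, e.g.:
 *
 *	eth.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags);
 *	              == IBV_FLOW_SPEC_ETH | IBV_FLOW_SPEC_INNER
 */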

/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};

/* Map of Verbs to Flow priority with 16 Verbs priorities. */
static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};
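
/*
 * Illustration only: each row is a rule base priority, each column a
 * layer subpriority.  Assuming the usual MLX5_PRIORITY_MAP_L4/L3/L2
 * values of 0/1/2, a base priority 1 rule matching up to L3 maps to
 * priority_map_5[1][1] == 4 with 16 Verbs priorities, or
 * priority_map_3[1][1] == 3 with only 8.
 */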

/**
 * Discover the maximum number of flow priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct {
		struct ibv_flow_attr attr;
		struct ibv_flow_spec_eth eth;
		struct ibv_flow_spec_action_drop drop;
	} flow_attr = {
		.attr = {
			.num_of_specs = 2,
			.port = (uint8_t)priv->dev_port,
		},
		.eth = {
			.type = IBV_FLOW_SPEC_ETH,
			.size = sizeof(struct ibv_flow_spec_eth),
		},
		.drop = {
			.size = sizeof(struct ibv_flow_spec_action_drop),
			.type = IBV_FLOW_SPEC_ACTION_DROP,
		},
	};
	struct ibv_flow *flow;
	struct mlx5_hrxq *drop = priv->drop_queue.hrxq;
	uint16_t vprio[] = { 8, 16 };
	int i;
	int priority = 0;

	if (!drop->qp) {
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	for (i = 0; i != RTE_DIM(vprio); i++) {
		flow_attr.attr.priority = vprio[i] - 1;
		flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
		if (!flow)
			break;
		claim_zero(mlx5_glue->destroy_flow(flow));
		priority = vprio[i];
	}
	switch (priority) {
	case 8:
		priority = RTE_DIM(priority_map_3);
		break;
	case 16:
		priority = RTE_DIM(priority_map_5);
		break;
	default:
		rte_errno = ENOTSUP;
		DRV_LOG(ERR,
			"port %u verbs maximum priority: %d expected 8/16",
			dev->data->port_id, priority);
		return -rte_errno;
	}
	DRV_LOG(INFO, "port %u flow maximum priority: %d",
		dev->data->port_id, priority);
	return priority;
}
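
/*
 * Illustration only: the probe above creates a throw-away drop flow at
 * the top priority of each candidate range (7, then 15).  If creation
 * at priority 15 succeeds the device exposes 16 Verbs priorities and
 * the function reports RTE_DIM(priority_map_5) == 5 usable base
 * priorities; if only priority 7 works it reports
 * RTE_DIM(priority_map_3) == 3.
 */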

/**
 * Adjust flow priority based on the highest layer and the request priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] priority
 *   The rule base priority.
 * @param[in] subpriority
 *   The priority based on the items.
 *
 * @return
 *   The new priority.
 */
uint32_t
mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
			  uint32_t subpriority)
{
	uint32_t res = 0;
	struct mlx5_priv *priv = dev->data->dev_private;

	switch (priv->config.flow_prio) {
	case RTE_DIM(priority_map_3):
		res = priority_map_3[priority][subpriority];
		break;
	case RTE_DIM(priority_map_5):
		res = priority_map_5[priority][subpriority];
		break;
	}
	return res;
}
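
/*
 * Illustration only: with priv->config.flow_prio == 5 (16 Verbs
 * priorities discovered), a call such as
 *
 *	mlx5_flow_adjust_priority(dev, 2, MLX5_PRIORITY_MAP_L3);
 *
 * returns priority_map_5[2][1] == 7, assuming MLX5_PRIORITY_MAP_L3 is 1.
 */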

/**
 * Get Verbs flow counter by index.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] idx
 *   mlx5 flow counter index in the container.
 * @param[out] ppool
 *   mlx5 flow counter pool in the container.
 *
 * @return
 *   A pointer to the counter, NULL otherwise.
 */
static struct mlx5_flow_counter *
flow_verbs_counter_get_by_idx(struct rte_eth_dev *dev,
			      uint32_t idx,
			      struct mlx5_flow_counter_pool **ppool)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool;

	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
	pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
	MLX5_ASSERT(pool);
	if (ppool)
		*ppool = pool;
	return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
}
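
/*
 * Illustration only: counter indices are 1-based and may carry the
 * MLX5_CNT_SHARED_OFFSET flag in their upper bits.  After masking the
 * flag off and subtracting 1, the pool is idx / MLX5_COUNTERS_PER_POOL
 * and the slot within the pool is idx % MLX5_COUNTERS_PER_POOL -- the
 * inverse of how MLX5_MAKE_CNT_IDX() builds the index in
 * flow_verbs_counter_new() below.
 */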

/**
 * Create Verbs flow counter with Verbs library.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] counter
 *   mlx5 flow counter object, contains the counter id,
 *   handle of created Verbs flow counter is returned
 *   in cs field (if counters are supported).
 *
 * @return
 *   0 on success, otherwise a negative errno value is returned
 *   and rte_errno is set.
 */
static int
flow_verbs_counter_create(struct rte_eth_dev *dev,
			  struct mlx5_flow_counter *counter)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct ibv_counter_set_init_attr init = {
			 .counter_set_id = counter->shared_info.id};

	counter->dcs_when_free = mlx5_glue->create_counter_set(ctx, &init);
	if (!counter->dcs_when_free) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	return 0;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct ibv_counters_init_attr init = {0};
	struct ibv_counter_attach_attr attach;
	int ret;

	memset(&attach, 0, sizeof(attach));
	counter->dcs_when_free = mlx5_glue->create_counters(ctx, &init);
	if (!counter->dcs_when_free) {
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	attach.counter_desc = IBV_COUNTER_PACKETS;
	attach.index = 0;
	ret = mlx5_glue->attach_counters(counter->dcs_when_free, &attach, NULL);
	if (!ret) {
		attach.counter_desc = IBV_COUNTER_BYTES;
		attach.index = 1;
		ret = mlx5_glue->attach_counters
					(counter->dcs_when_free, &attach, NULL);
	}
	if (ret) {
		claim_zero(mlx5_glue->destroy_counters(counter->dcs_when_free));
		counter->dcs_when_free = NULL;
		rte_errno = ret;
		return -ret;
	}
	return 0;
#else
	(void)dev;
	(void)counter;
	rte_errno = ENOTSUP;
	return -ENOTSUP;
#endif
}
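
/*
 * Illustration only: the two build-time paths above correspond to the
 * two counter interfaces rdma-core has shipped.  The V42 path allocates
 * one ibv_counter_set per flow counter, while the V45 path creates an
 * ibv_counters object and attaches two entries to it, so that a later
 * query returns { packets, bytes } at indices 0 and 1, matching how
 * flow_verbs_counter_query() reads counters[0] and counters[1].
 */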

/**
 * Get a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] shared
 *   Indicate if this counter is shared with other flows.
 * @param[in] id
 *   Counter identifier.
 *
 * @return
 *   Index to the counter, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt = NULL;
	union mlx5_l3t_data data;
	uint32_t n_valid = cmng->n_valid;
	uint32_t pool_idx, cnt_idx;
	uint32_t i;
	int ret;

	if (shared && !mlx5_l3t_get_entry(priv->sh->cnt_id_tbl, id, &data) &&
	    data.dword)
		return data.dword;
	for (pool_idx = 0; pool_idx < n_valid; ++pool_idx) {
		pool = cmng->pools[pool_idx];
		if (!pool)
			continue;
		cnt = TAILQ_FIRST(&pool->counters[0]);
		if (cnt)
			break;
	}
	if (!cnt) {
		struct mlx5_flow_counter_pool **pools;
		uint32_t size;

		if (n_valid == cmng->n) {
			/* Resize the container pool array. */
			size = sizeof(struct mlx5_flow_counter_pool *) *
			       (n_valid + MLX5_CNT_CONTAINER_RESIZE);
			pools = mlx5_malloc(MLX5_MEM_ZERO, size, 0,
					    SOCKET_ID_ANY);
			if (!pools)
				return 0;
			if (n_valid) {
				memcpy(pools, cmng->pools,
				       sizeof(struct mlx5_flow_counter_pool *) *
				       n_valid);
				mlx5_free(cmng->pools);
			}
			cmng->pools = pools;
			cmng->n += MLX5_CNT_CONTAINER_RESIZE;
		}
		/* Allocate memory for new pool. */
		size = sizeof(*pool) + sizeof(*cnt) * MLX5_COUNTERS_PER_POOL;
		pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
		if (!pool)
			return 0;
		for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
			cnt = MLX5_POOL_GET_CNT(pool, i);
			TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
		}
		cnt = MLX5_POOL_GET_CNT(pool, 0);
		cmng->pools[n_valid] = pool;
		pool_idx = n_valid;
		cmng->n_valid++;
	}
	TAILQ_REMOVE(&pool->counters[0], cnt, next);
	i = MLX5_CNT_ARRAY_IDX(pool, cnt);
	cnt_idx = MLX5_MAKE_CNT_IDX(pool_idx, i);
	if (shared) {
		data.dword = cnt_idx;
		if (mlx5_l3t_set_entry(priv->sh->cnt_id_tbl, id, &data))
			return 0;
		cnt->shared_info.id = id;
		cnt_idx |= MLX5_CNT_SHARED_OFFSET;
	}
	/* Create counter with Verbs. */
	ret = flow_verbs_counter_create(dev, cnt);
	if (!ret) {
		cnt->dcs_when_active = cnt->dcs_when_free;
		cnt->hits = 0;
		cnt->bytes = 0;
		return cnt_idx;
	}
	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
	/* Some error occurred in Verbs library. */
	rte_errno = -ret;
	return 0;
}
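
/*
 * Illustration only: allocation first walks the existing pools for a
 * free counter; only when all pools are exhausted does it grow the pool
 * array by MLX5_CNT_CONTAINER_RESIZE entries and carve a new pool of
 * MLX5_COUNTERS_PER_POOL counters.  Shared counters are additionally
 * registered in the three-level table (cnt_id_tbl) keyed by the user
 * counter id, so a second flow requesting the same shared id gets the
 * same index back from the early-return lookup at the top.
 */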

/**
 * Release a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handler.
 */
static void
flow_verbs_counter_release(struct rte_eth_dev *dev, uint32_t counter)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt;

	cnt = flow_verbs_counter_get_by_idx(dev, counter, &pool);
	if (IS_SHARED_CNT(counter) &&
	    mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
		return;
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	claim_zero(mlx5_glue->destroy_counter_set
			((struct ibv_counter_set *)cnt->dcs_when_active));
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	claim_zero(mlx5_glue->destroy_counters
			((struct ibv_counters *)cnt->dcs_when_active));
#endif
	TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next);
}

/**
 * Query a flow counter via Verbs library call.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused,
			 struct rte_flow *flow, void *data,
			 struct rte_flow_error *error)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	if (flow->counter) {
		struct mlx5_flow_counter_pool *pool;
		struct mlx5_flow_counter *cnt = flow_verbs_counter_get_by_idx
						(dev, flow->counter, &pool);
		struct rte_flow_query_count *qc = data;
		uint64_t counters[2] = {0, 0};
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
		struct ibv_query_counter_set_attr query_cs_attr = {
			.dcs_when_free = (struct ibv_counter_set *)
						cnt->dcs_when_active,
			.query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
		};
		struct ibv_counter_set_data query_out = {
			.out = counters,
			.outlen = 2 * sizeof(uint64_t),
		};
		int err = mlx5_glue->query_counter_set(&query_cs_attr,
						       &query_out);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
		int err = mlx5_glue->query_counters
			((struct ibv_counters *)cnt->dcs_when_active, counters,
			 RTE_DIM(counters),
			 IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
#endif
		if (err)
			return rte_flow_error_set
				(error, err,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL,
				 "cannot read counter");
		qc->hits_set = 1;
		qc->bytes_set = 1;
		qc->hits = counters[0] - cnt->hits;
		qc->bytes = counters[1] - cnt->bytes;
		if (qc->reset) {
			cnt->hits = counters[0];
			cnt->bytes = counters[1];
		}
		return 0;
	}
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "flow does not have counter");
#else
	(void)flow;
	(void)data;
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "counters are not available");
#endif
}

/**
 * Add a verbs item specification into @p verbs.
 *
 * @param[out] verbs
 *   Pointer to verbs structure.
 * @param[in] src
 *   Create specification.
 * @param[in] size
 *   Size in bytes of the specification to copy.
 */
static void
flow_verbs_spec_add(struct mlx5_flow_verbs_workspace *verbs,
		    void *src, unsigned int size)
{
	void *dst;

	if (!verbs)
		return;
	MLX5_ASSERT(verbs->specs);
	dst = (void *)(verbs->specs + verbs->size);
	memcpy(dst, src, size);
	++verbs->attr.num_of_specs;
	verbs->size += size;
}
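
/*
 * Illustration only: Verbs specifications are laid out back to back in
 * the workspace buffer, each one starting with its own type/size
 * header.  After adding an Ethernet and an IPv4 spec the buffer looks
 * like:
 *
 *	| ibv_flow_spec_eth | ibv_flow_spec_ipv4_ext | ... free ... |
 *
 * with verbs->size pointing just past the last spec and
 * verbs->attr.num_of_specs == 2.
 */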

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	const unsigned int size = sizeof(struct ibv_flow_spec_eth);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_eth_mask;
	if (spec) {
		unsigned int i;

		memcpy(&eth.val.dst_mac, spec->dst.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		memcpy(&eth.val.src_mac, spec->src.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		eth.val.ether_type = spec->type;
		memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		memcpy(&eth.mask.src_mac, mask->src.addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		eth.mask.ether_type = mask->type;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {
			eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
			eth.val.src_mac[i] &= eth.mask.src_mac[i];
		}
		eth.val.ether_type &= eth.mask.ether_type;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
}

/**
 * Update the VLAN tag in the Verbs Ethernet specification.
 * This function assumes that the input is valid and there is space to add
 * the requested item.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] eth
 *   Verbs structure containing the VLAN information to copy.
 */
static void
flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
			    struct ibv_flow_spec_eth *eth)
{
	unsigned int i;
	const enum ibv_flow_spec_type search = eth->type;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			struct ibv_flow_spec_eth *e =
				(struct ibv_flow_spec_eth *)hdr;

			e->val.vlan_tag = eth->val.vlan_tag;
			e->mask.vlan_tag = eth->mask.vlan_tag;
			e->val.ether_type = eth->val.ether_type;
			e->mask.ether_type = eth->mask.ether_type;
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}
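
/*
 * Illustration only: because every Verbs spec begins with an
 * ibv_spec_header, the loop above can hop from spec to spec with
 *
 *	hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
 *
 * until it finds the Ethernet spec with the same inner/outer type bits,
 * then merges the VLAN tag into it instead of appending a second L2
 * spec.
 */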

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_eth);
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	struct ibv_flow_spec_eth eth = {
		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};
	const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
				      MLX5_FLOW_LAYER_OUTER_L2;

	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	if (spec) {
		eth.val.vlan_tag = spec->tci;
		eth.mask.vlan_tag = mask->tci;
		eth.val.vlan_tag &= eth.mask.vlan_tag;
		eth.val.ether_type = spec->inner_type;
		eth.mask.ether_type = mask->inner_type;
		eth.val.ether_type &= eth.mask.ether_type;
	}
	if (!(item_flags & l2m))
		flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
	else
		flow_verbs_item_vlan_update(&dev_flow->verbs.attr, &eth);
	if (!tunnel)
		dev_flow->handle->vf_vlan.tag =
			rte_be_to_cpu_16(spec->tci) & 0x0fff;
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv4(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
	struct ibv_flow_spec_ipv4_ext ipv4 = {
		.type = IBV_FLOW_SPEC_IPV4_EXT | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv4_mask;
	if (spec) {
		ipv4.val = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = spec->hdr.src_addr,
			.dst_ip = spec->hdr.dst_addr,
			.proto = spec->hdr.next_proto_id,
			.tos = spec->hdr.type_of_service,
		};
		ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
			.src_ip = mask->hdr.src_addr,
			.dst_ip = mask->hdr.dst_addr,
			.proto = mask->hdr.next_proto_id,
			.tos = mask->hdr.type_of_service,
		};
		/* Remove unwanted bits from values. */
		ipv4.val.src_ip &= ipv4.mask.src_ip;
		ipv4.val.dst_ip &= ipv4.mask.dst_ip;
		ipv4.val.proto &= ipv4.mask.proto;
		ipv4.val.tos &= ipv4.mask.tos;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv4, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow,
			       const struct rte_flow_item *item,
			       uint64_t item_flags)
{
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
	struct ibv_flow_spec_ipv6 ipv6 = {
		.type = IBV_FLOW_SPEC_IPV6 | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_ipv6_mask;
	if (spec) {
		unsigned int i;
		uint32_t vtc_flow_val;
		uint32_t vtc_flow_mask;

		memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
		       RTE_DIM(ipv6.val.src_ip));
		memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
		       RTE_DIM(ipv6.val.dst_ip));
		memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
		       RTE_DIM(ipv6.mask.src_ip));
		memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
		       RTE_DIM(ipv6.mask.dst_ip));
		vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
		vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
		ipv6.val.flow_label =
			rte_cpu_to_be_32((vtc_flow_val & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >>
					 RTE_IPV6_HDR_TC_SHIFT;
		ipv6.val.next_hdr = spec->hdr.proto;
		ipv6.mask.flow_label =
			rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >>
					 RTE_IPV6_HDR_FL_SHIFT);
		ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
					  RTE_IPV6_HDR_TC_SHIFT;
		ipv6.mask.next_hdr = mask->hdr.proto;
		/* Remove unwanted bits from values. */
		for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
			ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
			ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
		}
		ipv6.val.flow_label &= ipv6.mask.flow_label;
		ipv6.val.traffic_class &= ipv6.mask.traffic_class;
		ipv6.val.next_hdr &= ipv6.mask.next_hdr;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &ipv6, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_tcp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp tcp = {
		.type = IBV_FLOW_SPEC_TCP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_tcp_mask;
	if (spec) {
		tcp.val.dst_port = spec->hdr.dst_port;
		tcp.val.src_port = spec->hdr.src_port;
		tcp.mask.dst_port = mask->hdr.dst_port;
		tcp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		tcp.val.src_port &= tcp.mask.src_port;
		tcp.val.dst_port &= tcp.mask.dst_port;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &tcp, size);
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_udp(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item,
			      uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
	struct ibv_flow_spec_tcp_udp udp = {
		.type = IBV_FLOW_SPEC_UDP | VERBS_SPEC_INNER(item_flags),
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_udp_mask;
	if (spec) {
		udp.val.dst_port = spec->hdr.dst_port;
		udp.val.src_port = spec->hdr.src_port;
		udp.mask.dst_port = mask->hdr.dst_port;
		udp.mask.src_port = mask->hdr.src_port;
		/* Remove unwanted bits from values. */
		udp.val.src_port &= udp.mask.src_port;
		udp.val.dst_port &= udp.mask.dst_port;
	}
	item++;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID)
		item++;
	if (!(udp.val.dst_port & udp.mask.dst_port)) {
		switch ((item)->type) {
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN_GPE);
			udp.mask.dst_port = 0xffff;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			udp.val.dst_port = htons(MLX5_UDP_PORT_MPLS);
			udp.mask.dst_port = 0xffff;
			break;
		default:
			break;
		}
	}
	flow_verbs_spec_add(&dev_flow->verbs, &udp, size);
}
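
/*
 * Illustration only: the look-ahead above lets a rule such as
 * eth / ipv4 / udp / vxlan work without an explicit UDP destination
 * port.  When the next non-VOID item is a VXLAN, VXLAN-GPE or MPLS
 * tunnel and no port was specified, the spec is filled with the
 * well-known port (e.g. 4789 for VXLAN) and a full 0xffff mask.
 */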

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan(struct mlx5_flow *dev_flow,
				const struct rte_flow_item *item,
				uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan *spec = item->spec;
	const struct rte_flow_item_vxlan *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan, size);
}
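
/*
 * Illustration only: the VNI is a 24-bit field carried in the three
 * bytes of rte_flow_item_vxlan.vni.  Copying it into bytes 1..3 of the
 * union leaves byte 0 zero, so id.vlan_id holds the VNI in the byte
 * positions the Verbs tunnel_id field expects.
 */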

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_vxlan_gpe(struct mlx5_flow *dev_flow,
				    const struct rte_flow_item *item,
				    uint64_t item_flags __rte_unused)
{
	const struct rte_flow_item_vxlan_gpe *spec = item->spec;
	const struct rte_flow_item_vxlan_gpe *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel vxlan_gpe = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
	union vni {
		uint32_t vlan_id;
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	if (!mask)
		mask = &rte_flow_item_vxlan_gpe_mask;
	if (spec) {
		memcpy(&id.vni[1], spec->vni, 3);
		vxlan_gpe.val.tunnel_id = id.vlan_id;
		memcpy(&id.vni[1], mask->vni, 3);
		vxlan_gpe.mask.tunnel_id = id.vlan_id;
		/* Remove unwanted bits from values. */
		vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &vxlan_gpe, size);
}

/**
 * Update the protocol in Verbs IPv4/IPv6 spec.
 *
 * @param[in, out] attr
 *   Pointer to Verbs attributes structure.
 * @param[in] search
 *   Specification type to search in order to update the IP protocol.
 * @param[in] protocol
 *   Protocol value to set if none is present in the specification.
 */
static void
flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
				       enum ibv_flow_spec_type search,
				       uint8_t protocol)
{
	unsigned int i;
	struct ibv_spec_header *hdr = (struct ibv_spec_header *)
		((uint8_t *)attr + sizeof(struct ibv_flow_attr));

	if (!attr)
		return;
	for (i = 0; i != attr->num_of_specs; ++i) {
		if (hdr->type == search) {
			union {
				struct ibv_flow_spec_ipv4_ext *ipv4;
				struct ibv_flow_spec_ipv6 *ipv6;
			} ip;

			switch (search) {
			case IBV_FLOW_SPEC_IPV4_EXT:
				ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
				if (!ip.ipv4->val.proto) {
					ip.ipv4->val.proto = protocol;
					ip.ipv4->mask.proto = 0xff;
				}
				break;
			case IBV_FLOW_SPEC_IPV6:
				ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
				if (!ip.ipv6->val.next_hdr) {
					ip.ipv6->val.next_hdr = protocol;
					ip.ipv6->mask.next_hdr = 0xff;
				}
				break;
			default:
				break;
			}
			break;
		}
		hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
	}
}

/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
			      const struct rte_flow_item *item __rte_unused,
			      uint64_t item_flags)
{
	struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs;
#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
	unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
	struct ibv_flow_spec_tunnel tunnel = {
		.type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
		.size = size,
	};
#else
	const struct rte_flow_item_gre *spec = item->spec;
	const struct rte_flow_item_gre *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_gre);
	struct ibv_flow_spec_gre tunnel = {
		.type = IBV_FLOW_SPEC_GRE,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_gre_mask;
	if (spec) {
		tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
		tunnel.val.protocol = spec->protocol;
		tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
		tunnel.mask.protocol = mask->protocol;
		/* Remove unwanted bits from values. */
		tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
		tunnel.val.protocol &= tunnel.mask.protocol;
		tunnel.val.key &= tunnel.mask.key;
	}
#endif
	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV4_EXT,
						       IPPROTO_GRE);
	else
		flow_verbs_item_gre_ip_protocol_update(&verbs->attr,
						       IBV_FLOW_SPEC_IPV6,
						       IPPROTO_GRE);
	flow_verbs_spec_add(verbs, &tunnel, size);
}
/**
 * Convert the @p item into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested item
 * into the flow.
 *
 * @param[in, out] dev_flow
 *   Pointer to dev_flow structure.
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Parsed item flags.
 */
static void
flow_verbs_translate_item_mpls(struct mlx5_flow *dev_flow __rte_unused,
			       const struct rte_flow_item *item __rte_unused,
			       uint64_t item_flags __rte_unused)
{
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
	const struct rte_flow_item_mpls *spec = item->spec;
	const struct rte_flow_item_mpls *mask = item->mask;
	unsigned int size = sizeof(struct ibv_flow_spec_mpls);
	struct ibv_flow_spec_mpls mpls = {
		.type = IBV_FLOW_SPEC_MPLS,
		.size = size,
	};

	if (!mask)
		mask = &rte_flow_item_mpls_mask;
	if (spec) {
		memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
		memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
		/* Remove unwanted bits from values. */
		mpls.val.label &= mpls.mask.label;
	}
	flow_verbs_spec_add(&dev_flow->verbs, &mpls, size);
#endif
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_drop
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
	struct ibv_flow_spec_action_drop drop = {
		.type = IBV_FLOW_SPEC_ACTION_DROP,
		.size = size,
	};

	flow_verbs_spec_add(&dev_flow->verbs, &drop, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_queue(struct mlx5_flow_rss_desc *rss_desc,
				  const struct rte_flow_action *action)
{
	const struct rte_flow_action_queue *queue = action->conf;

	rss_desc->queue[0] = queue->index;
	rss_desc->queue_num = 1;
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] rss_desc
 *   Pointer to mlx5_flow_rss_desc.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_rss(struct mlx5_flow_rss_desc *rss_desc,
				const struct rte_flow_action *action)
{
	const struct rte_flow_action_rss *rss = action->conf;
	const uint8_t *rss_key;

	memcpy(rss_desc->queue, rss->queue, rss->queue_num * sizeof(uint16_t));
	rss_desc->queue_num = rss->queue_num;
	/* NULL RSS key indicates default RSS key. */
	rss_key = !rss->key ? rss_hash_default_key : rss->key;
	memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	/*
	 * rss->level and rss->types should be set in advance when expanding
	 * items for RSS.
	 */
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_flag
	(struct mlx5_flow *dev_flow,
	 const struct rte_flow_action *action __rte_unused)
{
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[in] action
 *   Action configuration.
 */
static void
flow_verbs_translate_action_mark(struct mlx5_flow *dev_flow,
				 const struct rte_flow_action *action)
{
	const struct rte_flow_action_mark *mark = action->conf;
	unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
	struct ibv_flow_spec_action_tag tag = {
		.type = IBV_FLOW_SPEC_ACTION_TAG,
		.size = size,
		.tag_id = mlx5_flow_mark_set(mark->id),
	};

	flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
}

/**
 * Convert the @p action into a Verbs specification. This function assumes that
 * the input is valid and that there is space to insert the requested action
 * into the flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] action
 *   Action configuration.
 * @param[in] dev_flow
 *   Pointer to mlx5_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise a negative errno value and rte_errno is set.
 */
static int
flow_verbs_translate_action_count(struct mlx5_flow *dev_flow,
				  const struct rte_flow_action *action,
				  struct rte_eth_dev *dev,
				  struct rte_flow_error *error)
{
	const struct rte_flow_action_count *count = action->conf;
	struct rte_flow *flow = dev_flow->flow;
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter *cnt = NULL;
	unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
	struct ibv_flow_spec_counter_action counter = {
		.type = IBV_FLOW_SPEC_ACTION_COUNT,
		.size = size,
	};
#endif

	if (!flow->counter) {
		flow->counter = flow_verbs_counter_new(dev, count->shared,
						       count->id);
		if (!flow->counter)
			return rte_flow_error_set(error, rte_errno,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  action,
						  "cannot get counter"
						  " context.");
	}
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	counter.counter_set_handle =
		((struct ibv_counter_set *)cnt->dcs_when_active)->handle;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
	cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool);
	counter.counters = (struct ibv_counters *)cnt->dcs_when_active;
	flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
#endif
	return 0;
}

/**
 * Internal validation function. For validating both actions and items.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[in] external
 *   This flow rule is created by a request external to the PMD.
 * @param[in] hairpin
 *   Number of hairpin TX actions, 0 means classic flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item items[],
		    const struct rte_flow_action actions[],
		    bool external __rte_unused,
		    int hairpin __rte_unused,
		    struct rte_flow_error *error)
{
	int ret;
	uint64_t action_flags = 0;
	uint64_t item_flags = 0;
	uint64_t last_item = 0;
	uint8_t next_protocol = 0xff;
	uint16_t ether_type = 0;

	if (items == NULL)
		return -1;
	ret = mlx5_flow_validate_attributes(dev, attr, error);
	if (ret < 0)
		return ret;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		int ret = 0;

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
							  false, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_eth *)
					 items->spec)->type;
				ether_type &=
					((const struct rte_flow_item_eth *)
					 items->mask)->type;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
							   dev, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
					      MLX5_FLOW_LAYER_INNER_VLAN) :
					     (MLX5_FLOW_LAYER_OUTER_L2 |
					      MLX5_FLOW_LAYER_OUTER_VLAN);
			if (items->mask != NULL && items->spec != NULL) {
				ether_type =
					((const struct rte_flow_item_vlan *)
					 items->spec)->inner_type;
				ether_type &=
					((const struct rte_flow_item_vlan *)
					 items->mask)->inner_type;
				ether_type = rte_be_to_cpu_16(ether_type);
			} else {
				ether_type = 0;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_validate_item_ipv4
						(items, item_flags,
						 last_item, ether_type, NULL,
						 MLX5_ITEM_RANGE_NOT_ACCEPTED,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
				next_protocol &=
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   last_item,
							   ether_type, NULL,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &rte_flow_item_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags,
								dev, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
		case RTE_FLOW_ITEM_TYPE_ICMP6:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "ICMP/ICMP6 "
						  "item not supported");
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		item_flags |= last_item;
	}
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_validate_action_mark(actions,
							     action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr,
							     error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr,
							      error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, item_flags,
							    error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = mlx5_flow_validate_action_count(dev, attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	/*
	 * Validate the drop action mutual exclusion with other actions.
	 * Drop action is mutually-exclusive with any other action, except for
	 * Count action.
	 */
	if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
	    (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Drop action is mutually-exclusive "
					  "with any other action, except for "
					  "Count action");
	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
					  "no fate action is found");
	return 0;
}

/**
 * Calculate the number of bytes needed for the action part of the Verbs
 * flow.
 *
 * @param[in] actions
 *   Pointer to the list of actions.
 *
 * @return
 *   The size of the memory needed for all actions.
 */
static int
flow_verbs_get_actions_size(const struct rte_flow_action actions[])
{
	int size = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			size += sizeof(struct ibv_flow_spec_action_tag);
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			size += sizeof(struct ibv_flow_spec_action_drop);
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
			size += sizeof(struct ibv_flow_spec_counter_action);
#endif
			break;
		default:
			break;
		}
	}
	return size;
}
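
/*
 * Illustration only: QUEUE and RSS contribute no bytes here because
 * they are not encoded as Verbs specs; they are realized through the
 * hash Rx queue the flow is attached to, while FLAG/MARK, DROP and
 * COUNT each append a fixed-size spec counted above.
 */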

/**
 * Calculate the number of bytes needed for the item part of the Verbs
 * flow.
 *
 * @param[in] items
 *   Pointer to the list of items.
 *
 * @return
 *   The size of the memory needed for all items.
 */
static int
flow_verbs_get_items_size(const struct rte_flow_item items[])
{
	int size = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			size += sizeof(struct ibv_flow_spec_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			size += sizeof(struct ibv_flow_spec_ipv4_ext);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			size += sizeof(struct ibv_flow_spec_ipv6);
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			size += sizeof(struct ibv_flow_spec_tcp_udp);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_gre);
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			size += sizeof(struct ibv_flow_spec_mpls);
			break;
#else
		case RTE_FLOW_ITEM_TYPE_GRE:
			size += sizeof(struct ibv_flow_spec_tunnel);
			break;
#endif
		default:
			break;
		}
	}
	return size;
}

/**
 * Internal preparation function. Allocate mlx5_flow with the required size.
 * The required size is calculated based on the actions and items. This
 * function also returns the detected actions and items for later use.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
 *   is set.
 */
static struct mlx5_flow *
flow_verbs_prepare(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr __rte_unused,
		   const struct rte_flow_item items[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	size_t size = 0;
	uint32_t handle_idx = 0;
	struct mlx5_flow *dev_flow;
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	size += flow_verbs_get_actions_size(actions);
	size += flow_verbs_get_items_size(items);
	if (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) {
		rte_flow_error_set(error, E2BIG,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Verbs spec/action size too large");
		return NULL;
	}
	/* Guard against overflowing the temporary device flow array. */
	if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
		rte_flow_error_set(error, ENOSPC,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "no free temporary device flow");
		return NULL;
	}
	dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
					&handle_idx);
	if (!dev_handle) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "not enough memory to create flow handle");
		return NULL;
	}
	MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
	dev_flow = &wks->flows[wks->flow_idx++];
	dev_flow->handle = dev_handle;
	dev_flow->handle_idx = handle_idx;
	/* Memcpy is used, only size needs to be cleared to 0. */
	dev_flow->verbs.size = 0;
	dev_flow->verbs.attr.num_of_specs = 0;
	dev_flow->ingress = attr->ingress;
	dev_flow->hash_fields = 0;
	/* Need to set transfer attribute: not supported in Verbs mode. */
	return dev_flow;
}

/**
 * Fill the flow with Verbs spec.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5 flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1689 static int
flow_verbs_translate(struct rte_eth_dev * dev,struct mlx5_flow * dev_flow,const struct rte_flow_attr * attr,const struct rte_flow_item items[],const struct rte_flow_action actions[],struct rte_flow_error * error)1690 flow_verbs_translate(struct rte_eth_dev *dev,
1691 struct mlx5_flow *dev_flow,
1692 const struct rte_flow_attr *attr,
1693 const struct rte_flow_item items[],
1694 const struct rte_flow_action actions[],
1695 struct rte_flow_error *error)
1696 {
	uint64_t item_flags = 0;
	uint64_t action_flags = 0;
	uint64_t priority = attr->priority;
	uint32_t subpriority = 0;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc;

	MLX5_ASSERT(wks);
	rss_desc = &wks->rss_desc;
	if (priority == MLX5_FLOW_PRIO_RSVD)
		priority = priv->config.flow_prio - 1;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		int ret;

		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			flow_verbs_translate_action_flag(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			dev_flow->handle->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			flow_verbs_translate_action_mark(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_MARK;
			dev_flow->handle->mark = 1;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			flow_verbs_translate_action_drop(dev_flow, actions);
			action_flags |= MLX5_FLOW_ACTION_DROP;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			flow_verbs_translate_action_queue(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			flow_verbs_translate_action_rss(rss_desc, actions);
			action_flags |= MLX5_FLOW_ACTION_RSS;
			dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_translate_action_count(dev_flow,
								actions,
								dev, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	dev_flow->act_flags = action_flags;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);

		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			flow_verbs_translate_item_eth(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					       MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			flow_verbs_translate_item_vlan(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L2;
			item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
						MLX5_FLOW_LAYER_INNER_VLAN) :
					       (MLX5_FLOW_LAYER_OUTER_L2 |
						MLX5_FLOW_LAYER_OUTER_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			flow_verbs_translate_item_ipv4(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV4_LAYER_TYPES,
					 MLX5_IPV4_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			flow_verbs_translate_item_ipv6(dev_flow, items,
						       item_flags);
			subpriority = MLX5_PRIORITY_MAP_L3;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel,
					 MLX5_IPV6_LAYER_TYPES,
					 MLX5_IPV6_IBV_RX_HASH);
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			flow_verbs_translate_item_tcp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel, ETH_RSS_TCP,
					 (IBV_RX_HASH_SRC_PORT_TCP |
					  IBV_RX_HASH_DST_PORT_TCP));
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					       MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			flow_verbs_translate_item_udp(dev_flow, items,
						      item_flags);
			subpriority = MLX5_PRIORITY_MAP_L4;
			dev_flow->hash_fields |=
				mlx5_flow_hashfields_adjust
					(rss_desc, tunnel, ETH_RSS_UDP,
					 (IBV_RX_HASH_SRC_PORT_UDP |
					  IBV_RX_HASH_DST_PORT_UDP));
			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					       MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			flow_verbs_translate_item_vxlan(dev_flow, items,
							item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			flow_verbs_translate_item_vxlan_gpe(dev_flow, items,
							    item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			flow_verbs_translate_item_gre(dev_flow, items,
						      item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			flow_verbs_translate_item_mpls(dev_flow, items,
						       item_flags);
			subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc);
			item_flags |= MLX5_FLOW_LAYER_MPLS;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
	}
	dev_flow->handle->layers = item_flags;
	/* Other members of attr will be ignored. */
	dev_flow->verbs.attr.priority =
		mlx5_flow_adjust_priority(dev, priority, subpriority);
	dev_flow->verbs.attr.port = (uint8_t)priv->dev_port;
	return 0;
}
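
#ifdef MLX5_FLOW_VERBS_USAGE_EXAMPLE
/*
 * Illustrative sketch only: MLX5_FLOW_VERBS_USAGE_EXAMPLE is a hypothetical,
 * never-defined guard, so this is documentation rather than driver code.
 * It shows a minimal application-side rule that the translate loop above
 * handles: the ETH/IPV4/UDP pattern walks subpriority up to
 * MLX5_PRIORITY_MAP_L4 and the QUEUE action selects the
 * MLX5_FLOW_FATE_QUEUE fate.
 */
static struct rte_flow *
flow_verbs_example_rule(uint16_t port_id, struct rte_flow_error *error)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Dispatches through the generic flow layer into this driver. */
	return rte_flow_create(port_id, &attr, pattern, actions, error);
}
#endif /* MLX5_FLOW_VERBS_USAGE_EXAMPLE */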

/**
 * Remove the flow from the NIC but keep it in memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;
	uint32_t handle_idx;

	if (!flow)
		return;
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, handle, next) {
		if (handle->drv_flow) {
			claim_zero(mlx5_glue->destroy_flow(handle->drv_flow));
			handle->drv_flow = NULL;
		}
		/* hrxq is a union; touch it only when the fate is QUEUE. */
		if (handle->rix_hrxq &&
		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
			mlx5_hrxq_release(dev, handle->rix_hrxq);
			handle->rix_hrxq = 0;
		}
		if (handle->vf_vlan.tag && handle->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
	}
}

/**
 * Remove the flow from the NIC and the memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;

	if (!flow)
		return;
	flow_verbs_remove(dev, flow);
	while (flow->dev_handles) {
		uint32_t tmp_idx = flow->dev_handles;

		handle = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
					tmp_idx);
		if (!handle)
			return;
		flow->dev_handles = handle->next.next;
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
				tmp_idx);
	}
	if (flow->counter) {
		flow_verbs_counter_release(dev, flow->counter);
		flow->counter = 0;
	}
}

/**
 * Apply the flow to the NIC.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow *dev_flow;
	struct mlx5_hrxq *hrxq;
	uint32_t dev_handles;
	int err;
	int idx;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
		dev_flow = &wks->flows[idx];
		handle = dev_flow->handle;
		if (handle->fate_action == MLX5_FLOW_FATE_DROP) {
			MLX5_ASSERT(priv->drop_queue.hrxq);
			hrxq = priv->drop_queue.hrxq;
		} else {
			uint32_t hrxq_idx;
			struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;

			MLX5_ASSERT(rss_desc->queue_num);
			rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
			rss_desc->hash_fields = dev_flow->hash_fields;
			rss_desc->tunnel = !!(handle->layers &
					      MLX5_FLOW_LAYER_TUNNEL);
			rss_desc->shared_rss = 0;
			hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
			hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
					      hrxq_idx);
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			handle->rix_hrxq = hrxq_idx;
		}
		MLX5_ASSERT(hrxq);
		handle->drv_flow = mlx5_glue->create_flow
					(hrxq->qp, &dev_flow->verbs.attr);
		if (!handle->drv_flow) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
		if (priv->vmwa_context &&
		    handle->vf_vlan.tag && !handle->vf_vlan.created) {
			/*
			 * The rule contains the VLAN pattern.
			 * For a VF we create a VLAN interface so that the
			 * hypervisor sets the correct e-Switch vport
			 * context.
			 */
			mlx5_vlan_vmwa_acquire(dev, &handle->vf_vlan);
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       dev_handles, handle, next) {
		/* hrxq is a union; touch it only when the fate is QUEUE. */
		if (handle->rix_hrxq &&
		    handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
			mlx5_hrxq_release(dev, handle->rix_hrxq);
			handle->rix_hrxq = 0;
		}
		if (handle->vf_vlan.tag && handle->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &handle->vf_vlan);
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_verbs_query(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 const struct rte_flow_action *actions,
		 void *data,
		 struct rte_flow_error *error)
{
	int ret = -EINVAL;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_verbs_counter_query(dev, flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	return ret;
}
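
#ifdef MLX5_FLOW_VERBS_USAGE_EXAMPLE
/*
 * Illustrative sketch only, guarded by the same hypothetical, never-defined
 * macro as above: how an application-side rte_flow_query() call reaches
 * flow_verbs_query().  Only the COUNT action is accepted; everything else
 * returns ENOTSUP.
 */
static int
flow_verbs_example_query_count(uint16_t port_id, struct rte_flow *flow,
			       struct rte_flow_error *error)
{
	struct rte_flow_query_count count = { .reset = 0 };
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	int ret = rte_flow_query(port_id, flow, &action, &count, error);

	if (!ret && count.hits_set)
		DRV_LOG(DEBUG, "example flow hits: %llu",
			(unsigned long long)count.hits);
	return ret;
}
#endif /* MLX5_FLOW_VERBS_USAGE_EXAMPLE */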

static int
flow_verbs_sync_domain(struct rte_eth_dev *dev, uint32_t domains,
		       uint32_t flags)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(domains);
	RTE_SET_USED(flags);

	return 0;
}

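/*
 * Verbs flow engine entry points. The generic mlx5 flow layer is expected
 * to dispatch to this table for rules handled by the Verbs driver type
 * (when the DV/DR engine is not in use); see mlx5_flow.c.
 */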
const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
	.validate = flow_verbs_validate,
	.prepare = flow_verbs_prepare,
	.translate = flow_verbs_translate,
	.apply = flow_verbs_apply,
	.remove = flow_verbs_remove,
	.destroy = flow_verbs_destroy,
	.query = flow_verbs_query,
	.sync_domain = flow_verbs_sync_domain,
};