/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Marvell International Ltd.
 * Copyright(c) 2018 Semihalf.
 * All rights reserved.
 */

#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include <arpa/inet.h>

#include "mrvl_flow.h"
#include "mrvl_qos.h"

/** Number of rules in the classifier table. */
#define MRVL_CLS_MAX_NUM_RULES 20

/** Size of the classifier key and mask strings. */
#define MRVL_CLS_STR_SIZE_MAX 40

#define MRVL_VLAN_ID_MASK 0x0fff
#define MRVL_VLAN_PRI_MASK 0xe000
#define MRVL_IPV4_DSCP_MASK 0xfc
#define MRVL_IPV4_ADDR_MASK 0xffffffff
#define MRVL_IPV6_FLOW_MASK 0x0fffff
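
/*
 * Field layout reference (informational): the VLAN PCP occupies the top
 * three bits of the TCI (0xe000, extracted below with a 13-bit shift)
 * and the 12-bit VID sits in 0x0fff; DSCP is the upper six bits of the
 * IPv4 ToS byte (0xfc >> 2); the IPv6 flow label is the low 20 bits of
 * vtc_flow (0x0fffff).
 */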

/**
 * Allocate memory for classifier rule key and mask fields.
 *
 * @param field Pointer to the classifier rule.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
mrvl_alloc_key_mask(struct pp2_cls_rule_key_field *field)
{
	unsigned int id = rte_socket_id();

	field->key = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
	if (!field->key)
		goto out;

	field->mask = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
	if (!field->mask)
		goto out_mask;

	return 0;
out_mask:
	rte_free(field->key);
out:
	field->key = NULL;
	field->mask = NULL;
	return -1;
}

/**
 * Free memory allocated for classifier rule key and mask fields.
 *
 * @param field Pointer to the classifier rule.
 */
static void
mrvl_free_key_mask(struct pp2_cls_rule_key_field *field)
{
	rte_free(field->key);
	rte_free(field->mask);
	field->key = NULL;
	field->mask = NULL;
}

/**
 * Free memory allocated for all classifier rule key and mask fields.
 *
 * @param rule Pointer to the classifier table rule.
 */
static void
mrvl_free_all_key_mask(struct pp2_cls_tbl_rule *rule)
{
	int i;

	for (i = 0; i < rule->num_fields; i++)
		mrvl_free_key_mask(&rule->fields[i]);
	rule->num_fields = 0;
}

/**
 * Initialize rte flow item parsing.
 *
 * @param item Pointer to the flow item.
 * @param spec_ptr Pointer to the specific item pointer.
 * @param mask_ptr Pointer to the specific item's mask pointer.
 * @param def_mask Pointer to the default mask.
 * @param size Size of the flow item.
 * @param error Pointer to the rte flow error.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
mrvl_parse_init(const struct rte_flow_item *item,
		const void **spec_ptr,
		const void **mask_ptr,
		const void *def_mask,
		unsigned int size,
		struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t zeros[size];

	memset(zeros, 0, size);

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item\n");
		return -rte_errno;
	}

	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec\n");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, default mask is used,
	 * but if default mask is NULL, "mask" should be set.
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					   "Mask should be specified\n");
			return -rte_errno;
		}

		mask = (const uint8_t *)def_mask;
	} else {
		mask = (const uint8_t *)item->mask;
	}

	spec = (const uint8_t *)item->spec;
	last = (const uint8_t *)item->last;

	if (spec == NULL) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL, "Spec should be specified\n");
		return -rte_errno;
	}

	/*
	 * If field values in "last" are either 0 or equal to the corresponding
	 * values in "spec" then they are ignored; anything else requests a
	 * range match, which the classifier does not support.
	 */
	if (last != NULL &&
	    memcmp(last, zeros, size) != 0 &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "Ranging is not supported\n");
		return -rte_errno;
	}

	*spec_ptr = spec;
	*mask_ptr = mask;

	return 0;
}
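
/*
 * Example (illustrative): for an IPv4 item carrying only
 * spec->hdr.dst_addr with item->mask == NULL, mrvl_parse_init() hands
 * back the spec together with the default rte_flow_item_ipv4_mask, so
 * the full destination address is matched. A "last" value differing
 * from both zero and spec would describe a range and is rejected above.
 */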

/**
 * Parse the eth flow item.
 *
 * This will create a classifier rule that matches either destination or
 * source mac.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param parse_dst Parse either destination or source mac address.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_mac(const struct rte_flow_item_eth *spec,
	       const struct rte_flow_item_eth *mask,
	       int parse_dst, struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	const uint8_t *k, *m;

	if (parse_dst) {
		k = spec->dst.addr_bytes;
		m = mask->dst.addr_bytes;

		flow->table_key.proto_field[flow->rule.num_fields].field.eth =
			MV_NET_ETH_F_DA;
	} else {
		k = spec->src.addr_bytes;
		m = mask->src.addr_bytes;

		flow->table_key.proto_field[flow->rule.num_fields].field.eth =
			MV_NET_ETH_F_SA;
	}

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 6;

	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX,
		 RTE_ETHER_ADDR_PRT_FMT,
		 k[0], k[1], k[2], k[3], k[4], k[5]);

	snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX,
		 RTE_ETHER_ADDR_PRT_FMT,
		 m[0], m[1], m[2], m[3], m[4], m[5]);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_ETH;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}
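
/*
 * Example (illustrative): spec dst 00:11:22:33:44:55 with mask
 * ff:ff:ff:ff:ff:00 is rendered as key "00:11:22:33:44:55" and mask
 * "ff:ff:ff:ff:ff:00"; the classifier is expected to parse these
 * strings back into binary form when the rule is added.
 */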

/**
 * Helper for parsing the eth flow item destination mac address.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_dmac(const struct rte_flow_item_eth *spec,
		const struct rte_flow_item_eth *mask,
		struct rte_flow *flow)
{
	return mrvl_parse_mac(spec, mask, 1, flow);
}

/**
 * Helper for parsing the eth flow item source mac address.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_smac(const struct rte_flow_item_eth *spec,
		const struct rte_flow_item_eth *mask,
		struct rte_flow *flow)
{
	return mrvl_parse_mac(spec, mask, 0, flow);
}

/**
 * Parse the ether type field of the eth flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_type(const struct rte_flow_item_eth *spec,
		const struct rte_flow_item_eth *mask __rte_unused,
		struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	uint16_t k;

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 2;

	k = rte_be_to_cpu_16(spec->type);
	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_ETH;
	flow->table_key.proto_field[flow->rule.num_fields].field.eth =
		MV_NET_ETH_F_TYPE;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}

/**
 * Parse the vid field of the vlan rte flow item.
 *
 * This will create a classifier rule that matches vid.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_vlan_id(const struct rte_flow_item_vlan *spec,
		   const struct rte_flow_item_vlan *mask __rte_unused,
		   struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	uint16_t k;

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 2;

	k = rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_ID_MASK;
	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_VLAN;
	flow->table_key.proto_field[flow->rule.num_fields].field.vlan =
		MV_NET_VLAN_F_ID;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}

/**
 * Parse the pri field of the vlan rte flow item.
 *
 * This will create a classifier rule that matches pri.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_vlan_pri(const struct rte_flow_item_vlan *spec,
		    const struct rte_flow_item_vlan *mask __rte_unused,
		    struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	uint16_t k;

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 1;

	k = (rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_PRI_MASK) >> 13;
	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_VLAN;
	flow->table_key.proto_field[flow->rule.num_fields].field.vlan =
		MV_NET_VLAN_F_PRI;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}
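
/*
 * Worked example (illustrative): tci = 0xa00a yields pri
 * (0xa00a & 0xe000) >> 13 = 5 and vid 0xa00a & 0x0fff = 10, so the two
 * parsers above emit the key strings "5" and "10" respectively.
 */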

/**
 * Parse the dscp field of the ipv4 rte flow item.
 *
 * This will create a classifier rule that matches the dscp field.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip4_dscp(const struct rte_flow_item_ipv4 *spec,
		    const struct rte_flow_item_ipv4 *mask,
		    struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	uint8_t k, m;

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 1;

	k = (spec->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
	m = (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
	snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_IP4;
	flow->table_key.proto_field[flow->rule.num_fields].field.ipv4 =
		MV_NET_IP4_F_DSCP;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}

/**
 * Parse either source or destination ip addresses of the ipv4 flow item.
 *
 * This will create a classifier rule that matches either the destination
 * or the source ip field.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param parse_dst Parse either destination or source ip address.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip4_addr(const struct rte_flow_item_ipv4 *spec,
		    const struct rte_flow_item_ipv4 *mask,
		    int parse_dst, struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	struct in_addr k;
	uint32_t m;

	memset(&k, 0, sizeof(k));
	if (parse_dst) {
		k.s_addr = spec->hdr.dst_addr;
		m = rte_be_to_cpu_32(mask->hdr.dst_addr);

		flow->table_key.proto_field[flow->rule.num_fields].field.ipv4 =
			MV_NET_IP4_F_DA;
	} else {
		k.s_addr = spec->hdr.src_addr;
		m = rte_be_to_cpu_32(mask->hdr.src_addr);

		flow->table_key.proto_field[flow->rule.num_fields].field.ipv4 =
			MV_NET_IP4_F_SA;
	}

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 4;

	inet_ntop(AF_INET, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
	snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "0x%x", m);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_IP4;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}
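
/*
 * Example (illustrative): spec dst 10.0.0.1 with mask 255.255.255.0
 * yields key "10.0.0.1" (via inet_ntop) and mask "0xffffff00". Note the
 * asymmetry: the key is a dotted quad while the mask is a hex string.
 */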

/**
 * Helper for parsing destination ip of the ipv4 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_ip4_dip(const struct rte_flow_item_ipv4 *spec,
		   const struct rte_flow_item_ipv4 *mask,
		   struct rte_flow *flow)
{
	return mrvl_parse_ip4_addr(spec, mask, 1, flow);
}

/**
 * Helper for parsing source ip of the ipv4 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_ip4_sip(const struct rte_flow_item_ipv4 *spec,
		   const struct rte_flow_item_ipv4 *mask,
		   struct rte_flow *flow)
{
	return mrvl_parse_ip4_addr(spec, mask, 0, flow);
}

/**
 * Parse the proto field of the ipv4 rte flow item.
 *
 * This will create a classifier rule that matches the proto field.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip4_proto(const struct rte_flow_item_ipv4 *spec,
		     const struct rte_flow_item_ipv4 *mask __rte_unused,
		     struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	uint8_t k = spec->hdr.next_proto_id;

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 1;

	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_IP4;
	flow->table_key.proto_field[flow->rule.num_fields].field.ipv4 =
		MV_NET_IP4_F_PROTO;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}

/**
 * Parse either source or destination ip addresses of the ipv6 rte flow item.
 *
 * This will create a classifier rule that matches either the destination
 * or the source ip field.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param parse_dst Parse either destination or source ipv6 address.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip6_addr(const struct rte_flow_item_ipv6 *spec,
		    const struct rte_flow_item_ipv6 *mask,
		    int parse_dst, struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	int size = sizeof(spec->hdr.dst_addr);
	struct in6_addr k, m;

	memset(&k, 0, sizeof(k));
	if (parse_dst) {
		memcpy(k.s6_addr, spec->hdr.dst_addr, size);
		memcpy(m.s6_addr, mask->hdr.dst_addr, size);

		flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
			MV_NET_IP6_F_DA;
	} else {
		memcpy(k.s6_addr, spec->hdr.src_addr, size);
		memcpy(m.s6_addr, mask->hdr.src_addr, size);

		flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
			MV_NET_IP6_F_SA;
	}

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 16;

	inet_ntop(AF_INET6, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
	inet_ntop(AF_INET6, &m, (char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_IP6;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}

/**
 * Helper for parsing destination ip of the ipv6 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_ip6_dip(const struct rte_flow_item_ipv6 *spec,
		   const struct rte_flow_item_ipv6 *mask,
		   struct rte_flow *flow)
{
	return mrvl_parse_ip6_addr(spec, mask, 1, flow);
}

/**
 * Helper for parsing source ip of the ipv6 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_ip6_sip(const struct rte_flow_item_ipv6 *spec,
		   const struct rte_flow_item_ipv6 *mask,
		   struct rte_flow *flow)
{
	return mrvl_parse_ip6_addr(spec, mask, 0, flow);
}

/**
 * Parse the flow label of the ipv6 flow item.
 *
 * This will create a classifier rule that matches the flow label field.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip6_flow(const struct rte_flow_item_ipv6 *spec,
		    const struct rte_flow_item_ipv6 *mask,
		    struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	uint32_t k = rte_be_to_cpu_32(spec->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK,
		 m = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 3;

	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
	snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_IP6;
	flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
		MV_NET_IP6_F_FLOW;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}
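
/*
 * Example (illustrative): vtc_flow = 0x600abcde selects flow label
 * 0x600abcde & 0x0fffff = 0xabcde, emitted as the decimal key string
 * "703710" with a 3-byte field size.
 */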

/**
 * Parse the next header of the ipv6 flow item.
 *
 * This will create a classifier rule that matches the next header field.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip6_next_hdr(const struct rte_flow_item_ipv6 *spec,
			const struct rte_flow_item_ipv6 *mask __rte_unused,
			struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	uint8_t k = spec->hdr.proto;

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 1;

	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_IP6;
	flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
		MV_NET_IP6_F_NEXT_HDR;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}

/**
 * Parse destination or source port of the tcp flow item.
 *
 * This will create a classifier rule that matches either the destination
 * or the source tcp port.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param parse_dst Parse either destination or source port.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_tcp_port(const struct rte_flow_item_tcp *spec,
		    const struct rte_flow_item_tcp *mask __rte_unused,
		    int parse_dst, struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	uint16_t k;

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 2;

	if (parse_dst) {
		k = rte_be_to_cpu_16(spec->hdr.dst_port);

		flow->table_key.proto_field[flow->rule.num_fields].field.tcp =
			MV_NET_TCP_F_DP;
	} else {
		k = rte_be_to_cpu_16(spec->hdr.src_port);

		flow->table_key.proto_field[flow->rule.num_fields].field.tcp =
			MV_NET_TCP_F_SP;
	}

	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_TCP;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}
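
/*
 * Example (illustrative): matching TCP dst port 80 emits the key "80"
 * with a 2-byte field size; the mask argument is unused here, so only
 * full-port matches are expressible.
 */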

/**
 * Helper for parsing the tcp source port of the tcp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_tcp_sport(const struct rte_flow_item_tcp *spec,
		     const struct rte_flow_item_tcp *mask,
		     struct rte_flow *flow)
{
	return mrvl_parse_tcp_port(spec, mask, 0, flow);
}

/**
 * Helper for parsing the tcp destination port of the tcp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_tcp_dport(const struct rte_flow_item_tcp *spec,
		     const struct rte_flow_item_tcp *mask,
		     struct rte_flow *flow)
{
	return mrvl_parse_tcp_port(spec, mask, 1, flow);
}

/**
 * Parse destination or source port of the udp flow item.
 *
 * This will create a classifier rule that matches either the destination
 * or the source udp port.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param parse_dst Parse either destination or source port.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_udp_port(const struct rte_flow_item_udp *spec,
		    const struct rte_flow_item_udp *mask __rte_unused,
		    int parse_dst, struct rte_flow *flow)
{
	struct pp2_cls_rule_key_field *key_field;
	uint16_t k;

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 2;

	if (parse_dst) {
		k = rte_be_to_cpu_16(spec->hdr.dst_port);

		flow->table_key.proto_field[flow->rule.num_fields].field.udp =
			MV_NET_UDP_F_DP;
	} else {
		k = rte_be_to_cpu_16(spec->hdr.src_port);

		flow->table_key.proto_field[flow->rule.num_fields].field.udp =
			MV_NET_UDP_F_SP;
	}

	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_PROTO_UDP;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}

/**
 * Helper for parsing the udp source port of the udp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_udp_sport(const struct rte_flow_item_udp *spec,
		     const struct rte_flow_item_udp *mask,
		     struct rte_flow *flow)
{
	return mrvl_parse_udp_port(spec, mask, 0, flow);
}

/**
 * Helper for parsing the udp destination port of the udp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_udp_dport(const struct rte_flow_item_udp *spec,
		     const struct rte_flow_item_udp *mask,
		     struct rte_flow *flow)
{
	return mrvl_parse_udp_port(spec, mask, 1, flow);
}

/**
 * Parse eth flow item.
 *
 * @param item Pointer to the flow item.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_parse_eth(const struct rte_flow_item *item, struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *spec = NULL, *mask = NULL;
	struct rte_ether_addr zero;
	int ret;

	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
			      &rte_flow_item_eth_mask,
			      sizeof(struct rte_flow_item_eth), error);
	if (ret)
		return ret;

	memset(&zero, 0, sizeof(zero));

	if (memcmp(&mask->dst, &zero, sizeof(mask->dst))) {
		ret = mrvl_parse_dmac(spec, mask, flow);
		if (ret)
			goto out;
	}

	if (memcmp(&mask->src, &zero, sizeof(mask->src))) {
		ret = mrvl_parse_smac(spec, mask, flow);
		if (ret)
			goto out;
	}

	if (mask->type) {
		MRVL_LOG(WARNING, "eth type mask is ignored");
		ret = mrvl_parse_type(spec, mask, flow);
		if (ret)
			goto out;
	}

	return 0;
out:
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   "Reached maximum number of fields in cls tbl key\n");
	return -rte_errno;
}

/**
 * Parse vlan flow item.
 *
 * @param item Pointer to the flow item.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_parse_vlan(const struct rte_flow_item *item,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *spec = NULL, *mask = NULL;
	uint16_t m;
	int ret, i;

	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
			      &rte_flow_item_vlan_mask,
			      sizeof(struct rte_flow_item_vlan), error);
	if (ret)
		return ret;

	m = rte_be_to_cpu_16(mask->tci);
	if (m & MRVL_VLAN_ID_MASK) {
		MRVL_LOG(WARNING, "vlan id mask is ignored");
		ret = mrvl_parse_vlan_id(spec, mask, flow);
		if (ret)
			goto out;
	}

	if (m & MRVL_VLAN_PRI_MASK) {
		MRVL_LOG(WARNING, "vlan pri mask is ignored");
		ret = mrvl_parse_vlan_pri(spec, mask, flow);
		if (ret)
			goto out;
	}

	if (mask->inner_type) {
		struct rte_flow_item_eth spec_eth = {
			.type = spec->inner_type,
		};
		struct rte_flow_item_eth mask_eth = {
			.type = mask->inner_type,
		};

		/*
		 * TPID matching is not supported; if ETH_TYPE was already
		 * selected, return an error, otherwise classify the eth-type
		 * field using the TPID value.
		 */
		for (i = 0; i < flow->rule.num_fields; i++)
			if (flow->table_key.proto_field[i].proto ==
			    MV_NET_PROTO_ETH &&
			    flow->table_key.proto_field[i].field.eth ==
			    MV_NET_ETH_F_TYPE) {
				rte_flow_error_set(error, ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN TPID matching is not supported");
				return -rte_errno;
			}

		MRVL_LOG(WARNING, "inner eth type mask is ignored");
		ret = mrvl_parse_type(&spec_eth, &mask_eth, flow);
		if (ret)
			goto out;
	}

	return 0;
out:
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   "Reached maximum number of fields in cls tbl key\n");
	return -rte_errno;
}

/**
 * Parse ipv4 flow item.
 *
 * @param item Pointer to the flow item.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_parse_ip4(const struct rte_flow_item *item,
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *spec = NULL, *mask = NULL;
	int ret;

	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
			      &rte_flow_item_ipv4_mask,
			      sizeof(struct rte_flow_item_ipv4), error);
	if (ret)
		return ret;

	if (mask->hdr.version_ihl ||
	    mask->hdr.total_length ||
	    mask->hdr.packet_id ||
	    mask->hdr.fragment_offset ||
	    mask->hdr.time_to_live ||
	    mask->hdr.hdr_checksum) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL, "Not supported by classifier\n");
		return -rte_errno;
	}

	if (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) {
		ret = mrvl_parse_ip4_dscp(spec, mask, flow);
		if (ret)
			goto out;
	}

	if (mask->hdr.src_addr) {
		ret = mrvl_parse_ip4_sip(spec, mask, flow);
		if (ret)
			goto out;
	}

	if (mask->hdr.dst_addr) {
		ret = mrvl_parse_ip4_dip(spec, mask, flow);
		if (ret)
			goto out;
	}

	if (mask->hdr.next_proto_id) {
		MRVL_LOG(WARNING, "next proto id mask is ignored");
		ret = mrvl_parse_ip4_proto(spec, mask, flow);
		if (ret)
			goto out;
	}

	return 0;
out:
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   "Reached maximum number of fields in cls tbl key\n");
	return -rte_errno;
}

/**
 * Parse ipv6 flow item.
 *
 * @param item Pointer to the flow item.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_parse_ip6(const struct rte_flow_item *item,
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv6 *spec = NULL, *mask = NULL;
	struct rte_ipv6_hdr zero;
	uint32_t flow_mask;
	int ret;

	ret = mrvl_parse_init(item, (const void **)&spec,
			      (const void **)&mask,
			      &rte_flow_item_ipv6_mask,
			      sizeof(struct rte_flow_item_ipv6),
			      error);
	if (ret)
		return ret;

	memset(&zero, 0, sizeof(zero));

	if (mask->hdr.payload_len ||
	    mask->hdr.hop_limits) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL, "Not supported by classifier\n");
		return -rte_errno;
	}

	if (memcmp(mask->hdr.src_addr,
		   zero.src_addr, sizeof(mask->hdr.src_addr))) {
		ret = mrvl_parse_ip6_sip(spec, mask, flow);
		if (ret)
			goto out;
	}

	if (memcmp(mask->hdr.dst_addr,
		   zero.dst_addr, sizeof(mask->hdr.dst_addr))) {
		ret = mrvl_parse_ip6_dip(spec, mask, flow);
		if (ret)
			goto out;
	}

	flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
	if (flow_mask) {
		ret = mrvl_parse_ip6_flow(spec, mask, flow);
		if (ret)
			goto out;
	}

	if (mask->hdr.proto) {
		MRVL_LOG(WARNING, "next header mask is ignored");
		ret = mrvl_parse_ip6_next_hdr(spec, mask, flow);
		if (ret)
			goto out;
	}

	return 0;
out:
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   "Reached maximum number of fields in cls tbl key\n");
	return -rte_errno;
}

/**
 * Parse tcp flow item.
 *
 * @param item Pointer to the flow item.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_parse_tcp(const struct rte_flow_item *item,
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	const struct rte_flow_item_tcp *spec = NULL, *mask = NULL;
	int ret;

	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
			      &rte_flow_item_tcp_mask,
			      sizeof(struct rte_flow_item_tcp), error);
	if (ret)
		return ret;

	if (mask->hdr.sent_seq ||
	    mask->hdr.recv_ack ||
	    mask->hdr.data_off ||
	    mask->hdr.tcp_flags ||
	    mask->hdr.rx_win ||
	    mask->hdr.cksum ||
	    mask->hdr.tcp_urp) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL, "Not supported by classifier\n");
		return -rte_errno;
	}

	if (mask->hdr.src_port) {
		MRVL_LOG(WARNING, "tcp sport mask is ignored");
		ret = mrvl_parse_tcp_sport(spec, mask, flow);
		if (ret)
			goto out;
	}

	if (mask->hdr.dst_port) {
		MRVL_LOG(WARNING, "tcp dport mask is ignored");
		ret = mrvl_parse_tcp_dport(spec, mask, flow);
		if (ret)
			goto out;
	}

	return 0;
out:
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   "Reached maximum number of fields in cls tbl key\n");
	return -rte_errno;
}

/**
 * Parse udp flow item.
 *
 * @param item Pointer to the flow item.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_parse_udp(const struct rte_flow_item *item,
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	const struct rte_flow_item_udp *spec = NULL, *mask = NULL;
	int ret;

	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
			      &rte_flow_item_udp_mask,
			      sizeof(struct rte_flow_item_udp), error);
	if (ret)
		return ret;

	if (mask->hdr.dgram_len ||
	    mask->hdr.dgram_cksum) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL, "Not supported by classifier\n");
		return -rte_errno;
	}

	if (mask->hdr.src_port) {
		MRVL_LOG(WARNING, "udp sport mask is ignored");
		ret = mrvl_parse_udp_sport(spec, mask, flow);
		if (ret)
			goto out;
	}

	if (mask->hdr.dst_port) {
		MRVL_LOG(WARNING, "udp dport mask is ignored");
		ret = mrvl_parse_udp_dport(spec, mask, flow);
		if (ret)
			goto out;
	}

	return 0;
out:
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   "Reached maximum number of fields in cls tbl key\n");
	return -rte_errno;
}

static int
mrvl_string_to_hex_values(const uint8_t *input_string,
			  uint8_t *hex_key,
			  uint8_t *length)
{
	char tmp_arr[3], tmp_string[MRVL_CLS_STR_SIZE_MAX], *string_iter;
	int i;

	strcpy(tmp_string, (const char *)input_string);
	string_iter = tmp_string;

	string_iter += 2; /* skip the '0x' */
	*length = ((*length - 2) + 1) / 2;

	for (i = 0; i < *length; i++) {
		strncpy(tmp_arr, string_iter, 2);
		tmp_arr[2] = '\0';
		if (get_val_securely8(tmp_arr, 16,
				      &hex_key[*length - 1 - i]) < 0)
			return -1;
		string_iter += 2;
	}

	return 0;
}
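
/*
 * Worked example (illustrative): input "0x11ab" with *length = 6 gives
 * *length = (6 - 2 + 1) / 2 = 2 and fills hex_key[] = { 0xab, 0x11 },
 * i.e. least-significant byte first.
 */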

/**
 * Parse raw flow item.
 *
 * @param item Pointer to the flow item.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_parse_raw(const struct rte_flow_item *item,
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	const struct rte_flow_item_raw *spec = NULL, *mask = NULL;
	struct pp2_cls_rule_key_field *key_field;
	struct mv_net_udf *udf_params;
	uint8_t length;
	int ret;

	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
			      &rte_flow_item_raw_mask,
			      sizeof(struct rte_flow_item_raw), error);
	if (ret)
		return ret;

	if (!spec->pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   NULL, "pattern pointer MUST be given\n");
		return -rte_errno;
	}

	/* Only hex strings are supported, so the pattern must start with '0x' */
	if (strncmp((const char *)spec->pattern, "0x", 2) != 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   NULL, "'pattern' string must start with '0x'\n");
		return -rte_errno;
	}

	if (mask->pattern &&
	    strncmp((const char *)mask->pattern, "0x", 2) != 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   NULL, "'mask-pattern' string must start with '0x'\n");
		return -rte_errno;
	}

	if (mask->search && spec->search) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   NULL, "'search' option must be '0'\n");
		return -rte_errno;
	}

	if (mask->offset && spec->offset != 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   NULL, "'offset' option must be '0'\n");
		return -rte_errno;
	}

	if (!mask->relative || !spec->relative) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   NULL, "'relative' option must be given and enabled\n");
		return -rte_errno;
	}

	length = spec->length & mask->length;
	if (!length) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   NULL, "'length' option must be greater than '0'\n");
		return -rte_errno;
	}

	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);

	/*
	 * The pattern and its length describe an ASCII hex string; convert
	 * them to binary values.
	 */
	key_field->size = length;
	ret = mrvl_string_to_hex_values(spec->pattern, key_field->key,
					&key_field->size);
	if (ret) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   NULL,
				   "can't convert pattern from string to hex\n");
		return -rte_errno;
	}
	if (mask->pattern) {
		ret = mrvl_string_to_hex_values(mask->pattern, key_field->mask,
						&length);
		if (ret) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					   NULL,
					   "can't convert mask-pattern from string to hex\n");
			return -rte_errno;
		}
	} else {
		rte_free(key_field->mask);
		key_field->mask = NULL;
	}

	flow->table_key.proto_field[flow->rule.num_fields].proto =
		MV_NET_UDF;
	udf_params =
		&flow->table_key.proto_field[flow->rule.num_fields].field.udf;
	udf_params->id = flow->next_udf_id++;
	udf_params->size = key_field->size;
	flow->table_key.key_size += key_field->size;

	flow->rule.num_fields += 1;

	return 0;
}

/**
 * Structure used to map a specific flow pattern to the pattern parse callback
 * which will iterate over each pattern item and extract relevant data.
 */
static const struct {
	const enum rte_flow_item_type pattern_type;
	int (*parse)(const struct rte_flow_item *pattern,
		struct rte_flow *flow,
		struct rte_flow_error *error);
} mrvl_patterns[] = {
	{ RTE_FLOW_ITEM_TYPE_ETH, mrvl_parse_eth },
	{ RTE_FLOW_ITEM_TYPE_VLAN, mrvl_parse_vlan },
	{ RTE_FLOW_ITEM_TYPE_IPV4, mrvl_parse_ip4 },
	{ RTE_FLOW_ITEM_TYPE_IPV6, mrvl_parse_ip6 },
	{ RTE_FLOW_ITEM_TYPE_TCP, mrvl_parse_tcp },
	{ RTE_FLOW_ITEM_TYPE_UDP, mrvl_parse_udp },
	{ RTE_FLOW_ITEM_TYPE_RAW, mrvl_parse_raw },
	{ RTE_FLOW_ITEM_TYPE_END, NULL }
};

/**
 * Parse flow attribute.
 *
 * This will check whether the provided attribute's flags are supported.
 *
 * @param priv Unused
 * @param attr Pointer to the flow attribute.
 * @param flow Unused
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
mrvl_flow_parse_attr(struct mrvl_priv *priv __rte_unused,
		     const struct rte_flow_attr *attr,
		     struct rte_flow *flow __rte_unused,
		     struct rte_flow_error *error)
{
	if (!attr) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute");
		return -rte_errno;
	}

	if (attr->group) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
				   "Groups are not supported");
		return -rte_errno;
	}
	if (attr->priority) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
				   "Priorities are not supported");
		return -rte_errno;
	}
	if (!attr->ingress) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, NULL,
				   "Only ingress is supported");
		return -rte_errno;
	}
	if (attr->egress) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
				   "Egress is not supported");
		return -rte_errno;
	}
	if (attr->transfer) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
				   "Transfer is not supported");
		return -rte_errno;
	}

	return 0;
}

/**
 * Parse flow pattern.
 *
 * A specific classifier rule will be created as well.
 *
 * @param priv Unused
 * @param pattern Pointer to the flow pattern.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
mrvl_flow_parse_pattern(struct mrvl_priv *priv __rte_unused,
			const struct rte_flow_item pattern[],
			struct rte_flow *flow,
			struct rte_flow_error *error)
{
	unsigned int i, j;
	int ret;

	for (i = 0; pattern[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
		if (pattern[i].type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;
		for (j = 0; mrvl_patterns[j].pattern_type !=
		     RTE_FLOW_ITEM_TYPE_END; j++) {
			if (mrvl_patterns[j].pattern_type != pattern[i].type)
				continue;

			if (flow->rule.num_fields >=
			    PP2_CLS_TBL_MAX_NUM_FIELDS) {
				rte_flow_error_set(error, ENOSPC,
						   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
						   NULL,
						   "too many pattern items");
				return -rte_errno;
			}

			ret = mrvl_patterns[j].parse(&pattern[i], flow, error);
			if (ret) {
				mrvl_free_all_key_mask(&flow->rule);
				return ret;
			}
			break;
		}
		if (mrvl_patterns[j].pattern_type == RTE_FLOW_ITEM_TYPE_END) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					   "Unsupported pattern");
			return -rte_errno;
		}
	}

	flow->table_key.num_fields = flow->rule.num_fields;

	return 0;
}

/**
 * Parse flow actions.
 *
 * @param priv Pointer to the port's private data.
 * @param actions Pointer to the action table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
mrvl_flow_parse_actions(struct mrvl_priv *priv,
			const struct rte_flow_action actions[],
			struct rte_flow *flow,
			struct rte_flow_error *error)
{
	const struct rte_flow_action *action = actions;
	int specified = 0;

	for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
		if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
			continue;

		if (action->type == RTE_FLOW_ACTION_TYPE_DROP) {
			flow->cos.ppio = priv->ppio;
			flow->cos.tc = 0;
			flow->action.type = PP2_CLS_TBL_ACT_DROP;
			flow->action.cos = &flow->cos;
			specified++;
		} else if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
			const struct rte_flow_action_queue *q =
				(const struct rte_flow_action_queue *)
				action->conf;

			if (q->index >= priv->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   NULL,
						   "Queue index out of range");
				return -rte_errno;
			}

			if (priv->rxq_map[q->index].tc == MRVL_UNKNOWN_TC) {
				/*
				 * Unknown TC mapping; the queue cannot be
				 * mapped correctly.
				 */
				MRVL_LOG(ERR,
					 "Unknown TC mapping for queue %hu eth%hhu",
					 q->index, priv->ppio_id);

				rte_flow_error_set(error, EFAULT,
						   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						   NULL, NULL);
				return -rte_errno;
			}

			MRVL_LOG(DEBUG,
				 "Action: Assign packets to queue %d, tc:%d, q:%d",
				 q->index, priv->rxq_map[q->index].tc,
				 priv->rxq_map[q->index].inq);

			flow->cos.ppio = priv->ppio;
			flow->cos.tc = priv->rxq_map[q->index].tc;
			flow->action.type = PP2_CLS_TBL_ACT_DONE;
			flow->action.cos = &flow->cos;
			specified++;
		} else if (action->type == RTE_FLOW_ACTION_TYPE_METER) {
			const struct rte_flow_action_meter *meter;
			struct mrvl_mtr *mtr;

			meter = action->conf;
			if (!meter)
				return -rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						NULL, "Invalid meter\n");

			LIST_FOREACH(mtr, &priv->mtrs, next)
				if (mtr->mtr_id == meter->mtr_id)
					break;

			if (!mtr)
				return -rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						NULL,
						"Meter id does not exist\n");

			if (!mtr->shared && mtr->refcnt)
				return -rte_flow_error_set(error, EPERM,
						RTE_FLOW_ERROR_TYPE_ACTION,
						NULL,
						"Meter cannot be shared\n");

			/*
			 * In case cos has already been set
			 * do not modify it.
			 */
			if (!flow->cos.ppio) {
				flow->cos.ppio = priv->ppio;
				flow->cos.tc = 0;
			}

			flow->action.type = PP2_CLS_TBL_ACT_DONE;
			flow->action.cos = &flow->cos;
			flow->action.plcr = mtr->enabled ? mtr->plcr : NULL;
			flow->mtr = mtr;
			mtr->refcnt++;
			specified++;
		} else {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					   "Action not supported");
			return -rte_errno;
		}
	}

	if (!specified) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Action not specified");
		return -rte_errno;
	}

	return 0;
}

/**
 * Parse flow attribute, pattern and actions.
 *
 * @param priv Pointer to the port's private data.
 * @param attr Pointer to the flow attribute.
 * @param pattern Pointer to the flow pattern.
 * @param actions Pointer to the flow actions.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_flow_parse(struct mrvl_priv *priv, const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int ret;

	ret = mrvl_flow_parse_attr(priv, attr, flow, error);
	if (ret)
		return ret;

	ret = mrvl_flow_parse_pattern(priv, pattern, flow, error);
	if (ret)
		return ret;

	return mrvl_flow_parse_actions(priv, actions, flow, error);
}

/**
 * Get engine type for the given flow.
 *
 * @param flow Pointer to the flow.
 * @returns The type of the engine.
 */
static inline enum pp2_cls_tbl_type
mrvl_engine_type(const struct rte_flow *flow)
{
	int i, size = 0;

	for (i = 0; i < flow->rule.num_fields; i++)
		size += flow->rule.fields[i].size;

	/*
	 * For the maskable engine type the key size must be up to 8 bytes.
	 * For keys bigger than 8 bytes, the engine type must be set to
	 * exact match.
	 */
	if (size > 8)
		return PP2_CLS_TBL_EXACT_MATCH;

	return PP2_CLS_TBL_MASKABLE;
}
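
/*
 * Example (illustrative): a rule on eth dst (6 B) plus eth type (2 B)
 * totals 8 bytes and stays on the maskable engine; adding a 2-byte
 * vlan id pushes the key to 10 bytes and forces exact match.
 */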

/**
 * Create classifier table.
 *
 * @param dev Pointer to the device.
 * @param first_flow Pointer to the very first flow.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
mrvl_create_cls_table(struct rte_eth_dev *dev, struct rte_flow *first_flow)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct pp2_cls_tbl_key *key = &priv->cls_tbl_params.key;
	int ret;

	if (priv->cls_tbl) {
		pp2_cls_tbl_deinit(priv->cls_tbl);
		priv->cls_tbl = NULL;
	}

	memset(&priv->cls_tbl_params, 0, sizeof(priv->cls_tbl_params));

	priv->cls_tbl_params.type = mrvl_engine_type(first_flow);
	MRVL_LOG(INFO, "Setting cls search engine type to %s",
		 priv->cls_tbl_params.type == PP2_CLS_TBL_EXACT_MATCH ?
		 "exact" : "maskable");
	priv->cls_tbl_params.max_num_rules = MRVL_CLS_MAX_NUM_RULES;
	priv->cls_tbl_params.default_act.type = PP2_CLS_TBL_ACT_DONE;
	priv->cls_tbl_params.default_act.cos = &first_flow->cos;
	memcpy(key, &first_flow->table_key, sizeof(struct pp2_cls_tbl_key));

	ret = pp2_cls_tbl_init(&priv->cls_tbl_params, &priv->cls_tbl);

	return ret;
}

/**
 * Check whether a new flow can be added to the table.
 *
 * @param priv Pointer to the port's private data.
 * @param flow Pointer to the new flow.
 * @return 1 in case flow can be added, 0 otherwise.
 */
static inline int
mrvl_flow_can_be_added(struct mrvl_priv *priv, const struct rte_flow *flow)
{
	int same = memcmp(&flow->table_key, &priv->cls_tbl_params.key,
			  sizeof(struct pp2_cls_tbl_key)) == 0;

	return same && mrvl_engine_type(flow) == priv->cls_tbl_params.type;
}

/**
 * DPDK flow create callback called when a flow is to be created.
 *
 * @param dev Pointer to the device.
 * @param attr Pointer to the flow attribute.
 * @param pattern Pointer to the flow pattern.
 * @param actions Pointer to the flow actions.
 * @param error Pointer to the flow error.
 * @returns Pointer to the created flow in case of success, NULL otherwise.
 */
static struct rte_flow *
mrvl_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct rte_flow *flow, *first;
	int ret;

	if (!dev->data->dev_started) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Port must be started first\n");
		return NULL;
	}

	flow = rte_zmalloc_socket(NULL, sizeof(*flow), 0, rte_socket_id());
	if (!flow)
		return NULL;

	ret = mrvl_flow_parse(priv, attr, pattern, actions, flow, error);
	if (ret)
		goto out;

	/*
	 * Four cases here:
	 *
	 * 1. In case table does not exist - create one.
	 * 2. In case table exists, is empty and new flow cannot be added,
	 *    recreate table.
	 * 3. In case table is not empty and new flow matches table format,
	 *    add it.
	 * 4. Otherwise flow cannot be added.
	 */
	first = LIST_FIRST(&priv->flows);
	if (!priv->cls_tbl) {
		ret = mrvl_create_cls_table(dev, flow);
	} else if (!first && !mrvl_flow_can_be_added(priv, flow)) {
		ret = mrvl_create_cls_table(dev, flow);
	} else if (mrvl_flow_can_be_added(priv, flow)) {
		ret = 0;
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Pattern does not match cls table format\n");
		goto out;
	}

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to create cls table\n");
		goto out;
	}

	ret = pp2_cls_tbl_add_rule(priv->cls_tbl, &flow->rule, &flow->action);
	if (ret) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to add rule\n");
		goto out;
	}

	LIST_INSERT_HEAD(&priv->flows, flow, next);

	return flow;
out:
	rte_free(flow);
	return NULL;
}
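
/*
 * Usage sketch (illustrative, testpmd syntax): a command such as
 *   flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / end
 *               actions queue index 1 / end
 * reaches this callback with one eth item and one queue action; the
 * first such flow also creates the classifier table.
 */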

/**
 * Remove classifier rule associated with given flow.
 *
 * @param priv Pointer to the port's private data.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
mrvl_flow_remove(struct mrvl_priv *priv, struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	int ret;

	if (!priv->cls_tbl) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Classifier table not initialized");
		return -rte_errno;
	}

	ret = pp2_cls_tbl_remove_rule(priv->cls_tbl, &flow->rule);
	if (ret) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to remove rule");
		return -rte_errno;
	}

	mrvl_free_all_key_mask(&flow->rule);

	if (flow->mtr) {
		flow->mtr->refcnt--;
		flow->mtr = NULL;
	}

	return 0;
}

/**
 * DPDK flow destroy callback called when a flow is to be removed.
 *
 * @param dev Pointer to the device.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
mrvl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct rte_flow *f;
	int ret;

	LIST_FOREACH(f, &priv->flows, next) {
		if (f == flow)
			break;
	}

	if (!f) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Rule was not found");
		return -rte_errno;
	}

	LIST_REMOVE(f, next);

	ret = mrvl_flow_remove(priv, flow, error);
	if (ret)
		return ret;

	rte_free(flow);

	return 0;
}

/**
 * DPDK flow callback called to verify given attribute, pattern and actions.
 *
 * @param dev Pointer to the device.
 * @param attr Pointer to the flow attribute.
 * @param pattern Pointer to the flow pattern.
 * @param actions Pointer to the flow actions.
 * @param error Pointer to the flow error.
 * @returns 0 on success, negative value otherwise.
 */
static int
mrvl_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct rte_flow *flow;

	flow = mrvl_flow_create(dev, attr, pattern, actions, error);
	if (!flow)
		return -rte_errno;

	mrvl_flow_destroy(dev, flow, error);

	return 0;
}

/**
 * DPDK flow flush callback called when flows are to be flushed.
 *
 * @param dev Pointer to the device.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
mrvl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	while (!LIST_EMPTY(&priv->flows)) {
		struct rte_flow *flow = LIST_FIRST(&priv->flows);
		int ret = mrvl_flow_remove(priv, flow, error);
		if (ret)
			return ret;

		LIST_REMOVE(flow, next);
		rte_free(flow);
	}

	if (priv->cls_tbl) {
		pp2_cls_tbl_deinit(priv->cls_tbl);
		priv->cls_tbl = NULL;
	}

	return 0;
}

/**
 * DPDK flow isolate callback called to isolate port.
 *
 * @param dev Pointer to the device.
 * @param enable Pass 0/1 to disable/enable port isolation.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
mrvl_flow_isolate(struct rte_eth_dev *dev, int enable,
		  struct rte_flow_error *error)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	if (dev->data->dev_started) {
		rte_flow_error_set(error, EBUSY,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "Port must be stopped first\n");
		return -rte_errno;
	}

	priv->isolated = enable;

	return 0;
}

const struct rte_flow_ops mrvl_flow_ops = {
	.validate = mrvl_flow_validate,
	.create = mrvl_flow_create,
	.destroy = mrvl_flow_destroy,
	.flush = mrvl_flow_flush,
	.isolate = mrvl_flow_isolate
};

/**
 * Initialize flow resources.
 *
 * @param dev Pointer to the device.
 */
void
mrvl_flow_init(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	LIST_INIT(&priv->flows);
}

/**
 * Cleanup flow resources.
 *
 * @param dev Pointer to the device.
 */
void
mrvl_flow_deinit(struct rte_eth_dev *dev)
{
	mrvl_flow_flush(dev, NULL);
}