/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_io.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>
#include <rte_bus_ifpga.h>
#include <ifpga_common.h>
#include <ifpga_logs.h>
#include <ifpga_rawdev.h>

#include "ipn3ke_rawdev_api.h"
#include "ipn3ke_flow.h"
#include "ipn3ke_logs.h"
#include "ipn3ke_ethdev.h"

/** Static initializer for a list of flow item types, ended by END. */
#define FLOW_PATTERNS(...) \
	((const enum rte_flow_item_type []) { \
		__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
	})
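
/*
 * For example, FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4, RTE_FLOW_ITEM_TYPE_UDP)
 * expands to the compound literal
 *
 *   (const enum rte_flow_item_type []) {
 *           RTE_FLOW_ITEM_TYPE_IPV4, RTE_FLOW_ITEM_TYPE_UDP,
 *           RTE_FLOW_ITEM_TYPE_END,
 *   }
 */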

enum IPN3KE_HASH_KEY_TYPE {
	IPN3KE_HASH_KEY_VXLAN,
	IPN3KE_HASH_KEY_MAC,
	IPN3KE_HASH_KEY_QINQ,
	IPN3KE_HASH_KEY_MPLS,
	IPN3KE_HASH_KEY_IP_TCP,
	IPN3KE_HASH_KEY_IP_UDP,
	IPN3KE_HASH_KEY_IP_NVGRE,
	IPN3KE_HASH_KEY_VXLAN_IP_UDP,
};
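
/*
 * Each enumerator above doubles as the index of the matching entry in
 * ipn3ke_supported_patterns[] below; that index is also stored in the
 * generated flow key as the key type ID (see ipn3ke_flow_convert_items()).
 */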

struct ipn3ke_flow_parse {
	uint32_t mark:1; /**< Set if the flow is marked. */
	uint32_t drop:1; /**< ACL drop. */
	uint32_t key_type:IPN3KE_FLOW_KEY_ID_BITS;
	uint32_t mark_id:IPN3KE_FLOW_RESULT_UID_BITS; /**< Mark identifier. */
	uint8_t key_len; /**< Key length in bits. */
	uint8_t key[BITS_TO_BYTES(IPN3KE_FLOW_KEY_DATA_BITS)];
	/**< Packed key data (key1, key2). */
};

typedef int (*pattern_filter_t)(const struct rte_flow_item patterns[],
		struct rte_flow_error *error, struct ipn3ke_flow_parse *parser);

struct ipn3ke_flow_pattern {
	const enum rte_flow_item_type *const items;

	pattern_filter_t filter;
};

/*
 * @ RTL definition:
 * typedef struct packed {
 *     logic [47:0] vxlan_inner_mac;
 *     logic [23:0] vxlan_vni;
 * } Hash_Key_Vxlan_t;
 *
 * @ flow items:
 * RTE_FLOW_ITEM_TYPE_VXLAN
 * RTE_FLOW_ITEM_TYPE_ETH
 */
static int
ipn3ke_pattern_vxlan(const struct rte_flow_item patterns[],
	struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
{
	const struct rte_flow_item_vxlan *vxlan = NULL;
	const struct rte_flow_item_eth *eth = NULL;
	const struct rte_flow_item *item;

	for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (/*!item->spec || item->mask || */item->last) {
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Only items with 'spec' are supported");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth = item->spec;

			rte_memcpy(&parser->key[0],
					eth->src.addr_bytes,
					RTE_ETHER_ADDR_LEN);
			break;

		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan = item->spec;

			rte_memcpy(&parser->key[6], vxlan->vni, 3);
			break;

		default:
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Unsupported item type");
			return -rte_errno;
		}
	}

	if (vxlan != NULL && eth != NULL) {
		parser->key_len = 48 + 24;
		return 0;
	}

	rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			patterns,
			"Missing required patterns");
	return -rte_errno;
}
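
/*
 * Resulting key layout (a sketch of the byte mapping above):
 *   key[0..5] = inner Ethernet source MAC,
 *   key[6..8] = 24-bit VXLAN VNI,
 *   key_len   = 48 + 24 = 72 bits.
 */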

/*
 * @ RTL definition:
 * typedef struct packed {
 *     logic [47:0] eth_smac;
 * } Hash_Key_Mac_t;
 *
 * @ flow items:
 * RTE_FLOW_ITEM_TYPE_ETH
 */
static int
ipn3ke_pattern_mac(const struct rte_flow_item patterns[],
	struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
{
	const struct rte_flow_item_eth *eth = NULL;
	const struct rte_flow_item *item;

	for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (!item->spec || item->mask || item->last) {
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Only items with 'spec' are supported");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth = item->spec;

			rte_memcpy(parser->key,
					eth->src.addr_bytes,
					RTE_ETHER_ADDR_LEN);
			break;

		default:
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Unsupported item type");
			return -rte_errno;
		}
	}

	if (eth != NULL) {
		parser->key_len = 48;
		return 0;
	}

	rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			patterns,
			"Missing required patterns");
	return -rte_errno;
}

/*
 * @ RTL definition:
 * typedef struct packed {
 *     logic [11:0] outer_vlan_id;
 *     logic [11:0] inner_vlan_id;
 * } Hash_Key_QinQ_t;
 *
 * @ flow items:
 * RTE_FLOW_ITEM_TYPE_VLAN
 * RTE_FLOW_ITEM_TYPE_VLAN
 */
static int
ipn3ke_pattern_qinq(const struct rte_flow_item patterns[],
	struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
{
	const struct rte_flow_item_vlan *outer_vlan = NULL;
	const struct rte_flow_item_vlan *inner_vlan = NULL;
	const struct rte_flow_item *item;
	uint16_t tci;

	for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (!item->spec || item->mask || item->last) {
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Only items with 'spec' are supported");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_VLAN:
			if (!outer_vlan) {
				outer_vlan = item->spec;

				tci = rte_be_to_cpu_16(outer_vlan->tci);
				parser->key[0] = (tci & 0xff0) >> 4;
				parser->key[1] |= (tci & 0x00f) << 4;
			} else {
				inner_vlan = item->spec;

				tci = rte_be_to_cpu_16(inner_vlan->tci);
				parser->key[1] |= (tci & 0xf00) >> 8;
				parser->key[2] = (tci & 0x0ff);
			}
			break;

		default:
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Unsupported item type");
			return -rte_errno;
		}
	}

	if (outer_vlan != NULL && inner_vlan != NULL) {
		parser->key_len = 12 + 12;
		return 0;
	}

	rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			patterns,
			"Missing required patterns");
	return -rte_errno;
}
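
/*
 * Worked example (illustrative): outer VLAN ID 0xABC and inner VLAN ID
 * 0xDEF pack into 24 bits as
 *   key[0] = 0xAB, key[1] = 0xCD, key[2] = 0xEF
 * i.e. outer[11:4] | outer[3:0]:inner[11:8] | inner[7:0].
 */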

/*
 * @ RTL definition:
 * typedef struct packed {
 *     logic [19:0] mpls_label1;
 *     logic [19:0] mpls_label2;
 * } Hash_Key_Mpls_t;
 *
 * @ flow items:
 * RTE_FLOW_ITEM_TYPE_MPLS
 * RTE_FLOW_ITEM_TYPE_MPLS
 */
static int
ipn3ke_pattern_mpls(const struct rte_flow_item patterns[],
	struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
{
	const struct rte_flow_item_mpls *mpls1 = NULL;
	const struct rte_flow_item_mpls *mpls2 = NULL;
	const struct rte_flow_item *item;

	for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (!item->spec || item->mask || item->last) {
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Only items with 'spec' are supported");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_MPLS:
			if (!mpls1) {
				mpls1 = item->spec;

				parser->key[0] = mpls1->label_tc_s[0];
				parser->key[1] = mpls1->label_tc_s[1];
				parser->key[2] = mpls1->label_tc_s[2] & 0xf0;
			} else {
				mpls2 = item->spec;

				parser->key[2] |=
					((mpls2->label_tc_s[0] & 0xf0) >> 4);
				parser->key[3] =
					((mpls2->label_tc_s[0] & 0xf) << 4) |
					((mpls2->label_tc_s[1] & 0xf0) >> 4);
				parser->key[4] =
					((mpls2->label_tc_s[1] & 0xf) << 4) |
					((mpls2->label_tc_s[2] & 0xf0) >> 4);
			}
			break;

		default:
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Unsupported item type");
			return -rte_errno;
		}
	}

	if (mpls1 != NULL && mpls2 != NULL) {
		parser->key_len = 20 + 20;
		return 0;
	}

	rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			patterns,
			"Missing required patterns");
	return -rte_errno;
}
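
/*
 * Worked example (illustrative): with label1 = 0x12345 and
 * label2 = 0x6789A (each 20 bits, stored left-aligned in label_tc_s[]),
 * the two labels pack back-to-back into 40 bits:
 *   key[0..4] = 0x12 0x34 0x56 0x78 0x9A
 */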

/*
 * @ RTL definition:
 * typedef struct packed {
 *     logic [31:0] ip_sa;
 *     logic [15:0] tcp_sport;
 * } Hash_Key_Ip_Tcp_t;
 *
 * @ flow items:
 * RTE_FLOW_ITEM_TYPE_IPV4
 * RTE_FLOW_ITEM_TYPE_TCP
 */
static int
ipn3ke_pattern_ip_tcp(const struct rte_flow_item patterns[],
	struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
{
	const struct rte_flow_item_ipv4 *ipv4 = NULL;
	const struct rte_flow_item_tcp *tcp = NULL;
	const struct rte_flow_item *item;

	for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (!item->spec || item->mask || item->last) {
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Only items with 'spec' are supported");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = item->spec;

			rte_memcpy(&parser->key[0], &ipv4->hdr.src_addr, 4);
			break;

		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp = item->spec;

			rte_memcpy(&parser->key[4], &tcp->hdr.src_port, 2);
			break;

		default:
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Unsupported item type");
			return -rte_errno;
		}
	}

	if (ipv4 != NULL && tcp != NULL) {
		parser->key_len = 32 + 16;
		return 0;
	}

	rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			patterns,
			"Missing required patterns");
	return -rte_errno;
}

/*
 * @ RTL definition:
 * typedef struct packed {
 *     logic [31:0] ip_sa;
 *     logic [15:0] udp_sport;
 * } Hash_Key_Ip_Udp_t;
 *
 * @ flow items:
 * RTE_FLOW_ITEM_TYPE_IPV4
 * RTE_FLOW_ITEM_TYPE_UDP
 */
static int
ipn3ke_pattern_ip_udp(const struct rte_flow_item patterns[],
	struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
{
	const struct rte_flow_item_ipv4 *ipv4 = NULL;
	const struct rte_flow_item_udp *udp = NULL;
	const struct rte_flow_item *item;

	for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (!item->spec || item->mask || item->last) {
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Only items with 'spec' are supported");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = item->spec;

			rte_memcpy(&parser->key[0], &ipv4->hdr.src_addr, 4);
			break;

		case RTE_FLOW_ITEM_TYPE_UDP:
			udp = item->spec;

			rte_memcpy(&parser->key[4], &udp->hdr.src_port, 2);
			break;

		default:
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Unsupported item type");
			return -rte_errno;
		}
	}

	if (ipv4 != NULL && udp != NULL) {
		parser->key_len = 32 + 16;
		return 0;
	}

	rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			patterns,
			"Missing required patterns");
	return -rte_errno;
}

/*
 * @ RTL definition:
 * typedef struct packed {
 *     logic [31:0] ip_sa;
 *     logic [15:0] udp_sport;
 *     logic [23:0] vsid;
 * } Hash_Key_Ip_Nvgre_t;
 *
 * @ flow items:
 * RTE_FLOW_ITEM_TYPE_IPV4
 * RTE_FLOW_ITEM_TYPE_UDP
 * RTE_FLOW_ITEM_TYPE_NVGRE
 */
static int
ipn3ke_pattern_ip_nvgre(const struct rte_flow_item patterns[],
	struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
{
	const struct rte_flow_item_nvgre *nvgre = NULL;
	const struct rte_flow_item_ipv4 *ipv4 = NULL;
	const struct rte_flow_item_udp *udp = NULL;
	const struct rte_flow_item *item;

	for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (!item->spec || item->mask || item->last) {
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Only items with 'spec' are supported");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = item->spec;

			rte_memcpy(&parser->key[0], &ipv4->hdr.src_addr, 4);
			break;

		case RTE_FLOW_ITEM_TYPE_UDP:
			udp = item->spec;

			rte_memcpy(&parser->key[4], &udp->hdr.src_port, 2);
			break;

		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre = item->spec;

			rte_memcpy(&parser->key[6], nvgre->tni, 3);
			break;

		default:
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Unsupported item type");
			return -rte_errno;
		}
	}

	if (ipv4 != NULL && udp != NULL && nvgre != NULL) {
		parser->key_len = 32 + 16 + 24;
		return 0;
	}

	rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			patterns,
			"Missing required patterns");
	return -rte_errno;
}

/*
 * @ RTL definition:
 * typedef struct packed {
 *     logic [23:0] vxlan_vni;
 *     logic [31:0] ip_sa;
 *     logic [15:0] udp_sport;
 * } Hash_Key_Vxlan_Ip_Udp_t;
 *
 * @ flow items:
 * RTE_FLOW_ITEM_TYPE_VXLAN
 * RTE_FLOW_ITEM_TYPE_IPV4
 * RTE_FLOW_ITEM_TYPE_UDP
 */
static int
ipn3ke_pattern_vxlan_ip_udp(const struct rte_flow_item patterns[],
	struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
{
	const struct rte_flow_item_vxlan *vxlan = NULL;
	const struct rte_flow_item_ipv4 *ipv4 = NULL;
	const struct rte_flow_item_udp *udp = NULL;
	const struct rte_flow_item *item;

	for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (!item->spec || item->mask || item->last) {
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Only items with 'spec' are supported");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan = item->spec;

			rte_memcpy(&parser->key[0], vxlan->vni, 3);
			break;

		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = item->spec;

			rte_memcpy(&parser->key[3], &ipv4->hdr.src_addr, 4);
			break;

		case RTE_FLOW_ITEM_TYPE_UDP:
			udp = item->spec;

			rte_memcpy(&parser->key[7], &udp->hdr.src_port, 2);
			break;

		default:
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"Unsupported item type");
			return -rte_errno;
		}
	}

	if (vxlan != NULL && ipv4 != NULL && udp != NULL) {
		parser->key_len = 24 + 32 + 16;
		return 0;
	}

	rte_flow_error_set(error,
			EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			patterns,
			"Missing required patterns");
	return -rte_errno;
}

static const struct ipn3ke_flow_pattern ipn3ke_supported_patterns[] = {
	[IPN3KE_HASH_KEY_VXLAN] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_VXLAN,
					RTE_FLOW_ITEM_TYPE_ETH),
		.filter = ipn3ke_pattern_vxlan,
	},

	[IPN3KE_HASH_KEY_MAC] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_ETH),
		.filter = ipn3ke_pattern_mac,
	},

	[IPN3KE_HASH_KEY_QINQ] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_VLAN,
					RTE_FLOW_ITEM_TYPE_VLAN),
		.filter = ipn3ke_pattern_qinq,
	},

	[IPN3KE_HASH_KEY_MPLS] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_MPLS,
					RTE_FLOW_ITEM_TYPE_MPLS),
		.filter = ipn3ke_pattern_mpls,
	},

	[IPN3KE_HASH_KEY_IP_TCP] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4,
					RTE_FLOW_ITEM_TYPE_TCP),
		.filter = ipn3ke_pattern_ip_tcp,
	},

	[IPN3KE_HASH_KEY_IP_UDP] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4,
					RTE_FLOW_ITEM_TYPE_UDP),
		.filter = ipn3ke_pattern_ip_udp,
	},

	[IPN3KE_HASH_KEY_IP_NVGRE] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4,
					RTE_FLOW_ITEM_TYPE_UDP,
					RTE_FLOW_ITEM_TYPE_NVGRE),
		.filter = ipn3ke_pattern_ip_nvgre,
	},

	[IPN3KE_HASH_KEY_VXLAN_IP_UDP] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_VXLAN,
					RTE_FLOW_ITEM_TYPE_IPV4,
					RTE_FLOW_ITEM_TYPE_UDP),
		.filter = ipn3ke_pattern_vxlan_ip_udp,
	},
};
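
/*
 * A minimal sketch (assumed application-side usage, not part of this
 * driver) of a pattern that matches the IPN3KE_HASH_KEY_IP_UDP entry:
 *
 *   struct rte_flow_item_ipv4 ipv4_spec = {
 *           .hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *   };
 *   struct rte_flow_item_udp udp_spec = {
 *           .hdr.src_port = RTE_BE16(4789),
 *   };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP,  .spec = &udp_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *
 * ipn3ke_find_filter_func() walks the table above in order and returns
 * the filter whose item-type sequence matches the pattern exactly.
 */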

static int
ipn3ke_flow_convert_attributes(const struct rte_flow_attr *attr,
		struct rte_flow_error *error)
{
	if (!attr) {
		rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR,
				NULL,
				"NULL attribute.");
		return -rte_errno;
	}

	if (attr->group) {
		rte_flow_error_set(error,
				ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				NULL,
				"groups are not supported");
		return -rte_errno;
	}

	if (attr->egress) {
		rte_flow_error_set(error,
				ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				NULL,
				"egress is not supported");
		return -rte_errno;
	}

	if (attr->transfer) {
		rte_flow_error_set(error,
				ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
				NULL,
				"transfer is not supported");
		return -rte_errno;
	}

	if (!attr->ingress) {
		rte_flow_error_set(error,
				ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				NULL,
				"only ingress is supported");
		return -rte_errno;
	}

	return 0;
}

static int
ipn3ke_flow_convert_actions(const struct rte_flow_action actions[],
		struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
{
	const struct rte_flow_action_mark *mark = NULL;

	if (!actions) {
		rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL,
				"NULL action.");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		case RTE_FLOW_ACTION_TYPE_MARK:
			if (mark) {
				rte_flow_error_set(error,
						ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ACTION,
						actions,
						"duplicated mark");
				return -rte_errno;
			}

			mark = actions->conf;
			if (!mark) {
				rte_flow_error_set(error,
						EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						actions,
						"mark must be defined");
				return -rte_errno;
			} else if (mark->id > IPN3KE_FLOW_RESULT_UID_MAX) {
				rte_flow_error_set(error,
						ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ACTION,
						actions,
						"mark id is out of range");
				return -rte_errno;
			}

			parser->mark = 1;
			parser->mark_id = mark->id;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			parser->drop = 1;
			break;

		default:
			rte_flow_error_set(error,
					ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION,
					actions,
					"invalid action");
			return -rte_errno;
		}
	}

	if (!parser->drop && !parser->mark) {
		rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				actions,
				"no valid actions");
		return -rte_errno;
	}

	return 0;
}

static bool
ipn3ke_match_pattern(const enum rte_flow_item_type *patterns,
		const struct rte_flow_item *input)
{
	const struct rte_flow_item *item = input;

	while ((*patterns == item->type) &&
			(*patterns != RTE_FLOW_ITEM_TYPE_END)) {
		patterns++;
		item++;
	}

	return (*patterns == RTE_FLOW_ITEM_TYPE_END &&
			item->type == RTE_FLOW_ITEM_TYPE_END);
}

static pattern_filter_t
ipn3ke_find_filter_func(const struct rte_flow_item *input,
		uint32_t *idx)
{
	pattern_filter_t filter = NULL;
	uint32_t i;

	for (i = 0; i < RTE_DIM(ipn3ke_supported_patterns); i++) {
		if (ipn3ke_match_pattern(ipn3ke_supported_patterns[i].items,
					input)) {
			filter = ipn3ke_supported_patterns[i].filter;
			*idx = i;
			break;
		}
	}

	return filter;
}

static int
ipn3ke_flow_convert_items(const struct rte_flow_item items[],
		struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
{
	pattern_filter_t filter = NULL;
	uint32_t idx;

	if (!items) {
		rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL,
				"NULL pattern.");
		return -rte_errno;
	}

	filter = ipn3ke_find_filter_func(items, &idx);

	if (!filter) {
		rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				items,
				"Unsupported pattern");
		return -rte_errno;
	}

	parser->key_type = idx;

	return filter(items, error, parser);
}

/* Write the least-significant @nbits bits of @data into the bit stream
 * @dst starting at bit position @offset; bit offsets count from the MSB
 * towards the LSB within each byte.
 *
 * MSB                          LSB
 * +------+------+------+------+
 * |      |      |      |      |
 * +------+------+------+------+
 *        ^                 ^
 *        |<- data: nbits ->|
 *        |
 *        offset
 */
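
/*
 * Worked example (illustrative): with a zeroed destination,
 * copy_data_bits(dst, 0x5, 3, 4) yields dst[0] == 0x0a, i.e. the four
 * data bits 0b0101 occupy bit positions 3..6 counted from the MSB of
 * dst[0].
 */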
static void
copy_data_bits(uint8_t *dst, uint64_t data,
		uint32_t offset, uint8_t nbits)
{
	uint8_t set, *p = &dst[offset / BITS_PER_BYTE];
	uint8_t bits_to_set = BITS_PER_BYTE - (offset % BITS_PER_BYTE);
	uint8_t mask_to_set = 0xff >> (offset % BITS_PER_BYTE);
	uint32_t size = offset + nbits;

	if (nbits > (sizeof(data) * BITS_PER_BYTE)) {
		IPN3KE_AFU_PMD_ERR("nbits is out of range");
		return;
	}

	while (nbits >= bits_to_set) {
		set = data >> (nbits - bits_to_set);

		*p &= ~mask_to_set;
		*p |= (set & mask_to_set);

		nbits -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = 0xff;
		p++;
	}

	if (nbits) {
		uint8_t shift = BITS_PER_BYTE - (size % BITS_PER_BYTE);

		set = data << shift;
		mask_to_set = 0xff << shift;

		*p &= ~mask_to_set;
		*p |= (set & mask_to_set);
	}
}

static void
ipn3ke_flow_key_generation(struct ipn3ke_flow_parse *parser,
		struct rte_flow *flow)
{
	uint32_t i, shift_bytes, len_in_bytes, offset;
	uint64_t key;
	uint8_t *dst;

	dst = flow->rule.key;

	copy_data_bits(dst,
			parser->key_type,
			IPN3KE_FLOW_KEY_ID_OFFSET,
			IPN3KE_FLOW_KEY_ID_BITS);

	/* The MSBs of the key are zero-filled when the key is shorter than
	 * IPN3KE_FLOW_KEY_DATA_BITS. The parsed key data is stored MSB byte
	 * first in the array, so the bits have to be shifted into place
	 * before they are formatted.
	 */
	key = 0;
	shift_bytes = 0;
	len_in_bytes = BITS_TO_BYTES(parser->key_len);
	offset = (IPN3KE_FLOW_KEY_DATA_OFFSET +
			IPN3KE_FLOW_KEY_DATA_BITS -
			parser->key_len);

	for (i = 0; i < len_in_bytes; i++) {
		key = (key << 8) | parser->key[i];

		if (++shift_bytes == sizeof(key)) {
			shift_bytes = 0;

			copy_data_bits(dst, key, offset,
					sizeof(key) * BITS_PER_BYTE);
			offset += sizeof(key) * BITS_PER_BYTE;
			key = 0;
		}
	}

	if (shift_bytes != 0) {
		uint32_t rem_bits;

		rem_bits = parser->key_len % (sizeof(key) * BITS_PER_BYTE);
		key >>= (shift_bytes * 8 - rem_bits);
		copy_data_bits(dst, key, offset, rem_bits);
	}
}
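
/*
 * Worked example (illustrative): for a MAC key (key_len = 48),
 * len_in_bytes = 6, so the loop above never fills all 8 bytes of 'key'.
 * After the loop, shift_bytes = 6 and rem_bits = 48 % 64 = 48, so the
 * final copy_data_bits() writes all 48 key bits at
 * offset = IPN3KE_FLOW_KEY_DATA_OFFSET + IPN3KE_FLOW_KEY_DATA_BITS - 48.
 */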

static void
ipn3ke_flow_result_generation(struct ipn3ke_flow_parse *parser,
		struct rte_flow *flow)
{
	uint8_t *dst;

	if (parser->drop)
		return;

	dst = flow->rule.result;

	copy_data_bits(dst,
			1,
			IPN3KE_FLOW_RESULT_ACL_OFFSET,
			IPN3KE_FLOW_RESULT_ACL_BITS);

	copy_data_bits(dst,
			parser->mark_id,
			IPN3KE_FLOW_RESULT_UID_OFFSET,
			IPN3KE_FLOW_RESULT_UID_BITS);
}

#define MHL_COMMAND_TIME_COUNT 0xFFFF
#define MHL_COMMAND_TIME_INTERVAL_US 10

static int
ipn3ke_flow_hw_update(struct ipn3ke_hw *hw,
		struct rte_flow *flow, uint32_t is_add)
{
	uint32_t *pdata = NULL;
	uint32_t data;
	uint32_t time_out = MHL_COMMAND_TIME_COUNT;
	uint32_t i;

	IPN3KE_AFU_PMD_DEBUG("IPN3KE flow dump start\n");

	pdata = (uint32_t *)flow->rule.key;
	IPN3KE_AFU_PMD_DEBUG(" - key :");

	for (i = 0; i < RTE_DIM(flow->rule.key); i++)
		IPN3KE_AFU_PMD_DEBUG(" %02x", flow->rule.key[i]);

	for (i = 0; i < 4; i++)
		IPN3KE_AFU_PMD_DEBUG(" %02x", ipn3ke_swap32(pdata[3 - i]));
	IPN3KE_AFU_PMD_DEBUG("\n");

	pdata = (uint32_t *)flow->rule.result;
	IPN3KE_AFU_PMD_DEBUG(" - result:");

	for (i = 0; i < RTE_DIM(flow->rule.result); i++)
		IPN3KE_AFU_PMD_DEBUG(" %02x", flow->rule.result[i]);

	for (i = 0; i < 1; i++)
		IPN3KE_AFU_PMD_DEBUG(" %02x", pdata[i]);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE flow dump end\n");

	pdata = (uint32_t *)flow->rule.key;

	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_KEY_0,
			0,
			ipn3ke_swap32(pdata[3]),
			IPN3KE_CLF_MHL_KEY_MASK);

	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_KEY_1,
			0,
			ipn3ke_swap32(pdata[2]),
			IPN3KE_CLF_MHL_KEY_MASK);

	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_KEY_2,
			0,
			ipn3ke_swap32(pdata[1]),
			IPN3KE_CLF_MHL_KEY_MASK);

	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_KEY_3,
			0,
			ipn3ke_swap32(pdata[0]),
			IPN3KE_CLF_MHL_KEY_MASK);

	pdata = (uint32_t *)flow->rule.result;
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_RES,
			0,
			ipn3ke_swap32(pdata[0]),
			IPN3KE_CLF_MHL_RES_MASK);

	/* insert/delete the key and result */
	data = 0;
	data = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CLF_MHL_MGMT_CTRL,
			0,
			0x80000000);
	time_out = MHL_COMMAND_TIME_COUNT;
	while (IPN3KE_BIT_ISSET(data, IPN3KE_CLF_MHL_MGMT_CTRL_BIT_BUSY) &&
			(time_out > 0)) {
		data = IPN3KE_MASK_READ_REG(hw,
				IPN3KE_CLF_MHL_MGMT_CTRL,
				0,
				0x80000000);
		time_out--;
		rte_delay_us(MHL_COMMAND_TIME_INTERVAL_US);
	}
	if (!time_out)
		return -1;
	if (is_add)
		IPN3KE_MASK_WRITE_REG(hw,
				IPN3KE_CLF_MHL_MGMT_CTRL,
				0,
				IPN3KE_CLF_MHL_MGMT_CTRL_INSERT,
				0x3);
	else
		IPN3KE_MASK_WRITE_REG(hw,
				IPN3KE_CLF_MHL_MGMT_CTRL,
				0,
				IPN3KE_CLF_MHL_MGMT_CTRL_DELETE,
				0x3);

	return 0;
}

static int
ipn3ke_flow_hw_flush(struct ipn3ke_hw *hw)
{
	uint32_t data;
	uint32_t time_out = MHL_COMMAND_TIME_COUNT;

	/* flush the MHL lookup table */
	data = 0;
	data = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CLF_MHL_MGMT_CTRL,
			0,
			0x80000000);
	time_out = MHL_COMMAND_TIME_COUNT;
	while (IPN3KE_BIT_ISSET(data, IPN3KE_CLF_MHL_MGMT_CTRL_BIT_BUSY) &&
			(time_out > 0)) {
		data = IPN3KE_MASK_READ_REG(hw,
				IPN3KE_CLF_MHL_MGMT_CTRL,
				0,
				0x80000000);
		time_out--;
		rte_delay_us(MHL_COMMAND_TIME_INTERVAL_US);
	}
	if (!time_out)
		return -1;
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_MGMT_CTRL,
			0,
			IPN3KE_CLF_MHL_MGMT_CTRL_FLUSH,
			0x3);

	return 0;
}

static void
ipn3ke_flow_convert_finalise(struct ipn3ke_hw *hw,
		struct ipn3ke_flow_parse *parser, struct rte_flow *flow)
{
	ipn3ke_flow_key_generation(parser, flow);
	ipn3ke_flow_result_generation(parser, flow);
	ipn3ke_flow_hw_update(hw, flow, 1);
}

static int
ipn3ke_flow_convert(const struct rte_flow_attr *attr,
	const struct rte_flow_item items[],
	const struct rte_flow_action actions[], struct rte_flow_error *error,
	struct ipn3ke_flow_parse *parser)
{
	int ret;

	ret = ipn3ke_flow_convert_attributes(attr, error);
	if (ret)
		return ret;

	ret = ipn3ke_flow_convert_actions(actions, error, parser);
	if (ret)
		return ret;

	ret = ipn3ke_flow_convert_items(items, error, parser);
	if (ret)
		return ret;

	return 0;
}

static int
ipn3ke_flow_validate(__rte_unused struct rte_eth_dev *dev,
	const struct rte_flow_attr *attr, const struct rte_flow_item pattern[],
	const struct rte_flow_action actions[], struct rte_flow_error *error)
{
	struct ipn3ke_flow_parse parser = {0};

	return ipn3ke_flow_convert(attr, pattern, actions, error, &parser);
}

static struct rte_flow *
ipn3ke_flow_create(struct rte_eth_dev *dev,
	const struct rte_flow_attr *attr, const struct rte_flow_item pattern[],
	const struct rte_flow_action actions[], struct rte_flow_error *error)
{
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_flow_parse parser = {0};
	struct rte_flow *flow;
	int ret;

	if (hw->flow_num_entries == hw->flow_max_entries) {
		rte_flow_error_set(error,
				ENOBUFS,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL,
				"The flow table is full.");
		return NULL;
	}

	ret = ipn3ke_flow_convert(attr, pattern, actions, error, &parser);
	if (ret < 0) {
		rte_flow_error_set(error,
				-ret,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL,
				"Failed to create flow.");
		return NULL;
	}

	flow = rte_zmalloc("ipn3ke_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error,
				ENOMEM,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL,
				"Failed to allocate memory");
		return flow;
	}

	ipn3ke_flow_convert_finalise(hw, &parser, flow);

	TAILQ_INSERT_TAIL(&hw->flow_list, flow, next);

	return flow;
}

static int
ipn3ke_flow_destroy(struct rte_eth_dev *dev,
	struct rte_flow *flow, struct rte_flow_error *error)
{
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	int ret = 0;

	ret = ipn3ke_flow_hw_update(hw, flow, 0);
	if (!ret) {
		TAILQ_REMOVE(&hw->flow_list, flow, next);
		rte_free(flow);
	} else {
		rte_flow_error_set(error,
				-ret,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL,
				"Failed to destroy flow.");
	}

	return ret;
}

static int
ipn3ke_flow_flush(struct rte_eth_dev *dev,
	__rte_unused struct rte_flow_error *error)
{
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct rte_flow *flow, *temp;

	TAILQ_FOREACH_SAFE(flow, &hw->flow_list, next, temp) {
		TAILQ_REMOVE(&hw->flow_list, flow, next);
		rte_free(flow);
	}

	return ipn3ke_flow_hw_flush(hw);
}

int ipn3ke_flow_init(void *dev)
{
	struct ipn3ke_hw *hw = (struct ipn3ke_hw *)dev;
	uint32_t data;

	/* disable rx classifier bypass */
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_RX_TEST,
			0, 0, 0x1);

	data = 0;
	data = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CLF_RX_TEST,
			0,
			0x1);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_RX_TEST: %x\n", data);

	/* configure base mac address */
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_BASE_DST_MAC_ADDR_HI,
			0,
			0x2457,
			0xFFFF);

	data = 0;
	data = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CLF_BASE_DST_MAC_ADDR_HI,
			0,
			0xFFFF);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_BASE_DST_MAC_ADDR_HI: %x\n", data);

	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW,
			0,
			0x9bdf1000,
			0xFFFFFFFF);

	data = 0;
	data = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW,
			0,
			0xFFFFFFFF);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW: %x\n", data);

	/* configure hash lookup rules enable */
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_LKUP_ENABLE,
			0,
			0xFD,
			0xFF);

	data = 0;
	data = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CLF_LKUP_ENABLE,
			0,
			0xFF);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_LKUP_ENABLE: %x\n", data);

	/* configure rx parse config, settings associated with VxLAN */
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_RX_PARSE_CFG,
			0,
			0x212b5,
			0x3FFFF);

	data = 0;
	data = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CLF_RX_PARSE_CFG,
			0,
			0x3FFFF);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_RX_PARSE_CFG: %x\n", data);

	/* configure QinQ S-Tag */
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_QINQ_STAG,
			0,
			0x88a8,
			0xFFFF);

	data = 0;
	data = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CLF_QINQ_STAG,
			0,
			0xFFFF);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_QINQ_STAG: %x\n", data);

	/* configure gen ctrl */
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_GEN_CTRL,
			0,
			0x3,
			0x3);

	data = 0;
	data = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CLF_MHL_GEN_CTRL,
			0,
			0x1F);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_MHL_GEN_CTRL: %x\n", data);

	/* clear monitoring register */
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_MON_0,
			0,
			0xFFFFFFFF,
			0xFFFFFFFF);

	data = 0;
	data = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CLF_MHL_MON_0,
			0,
			0xFFFFFFFF);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_MHL_MON_0: %x\n", data);

	ipn3ke_flow_hw_flush(hw);

	TAILQ_INIT(&hw->flow_list);
	hw->flow_max_entries = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CLF_EM_NUM,
			0,
			0xFFFFFFFF);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_EM_NUM: %x\n", hw->flow_max_entries);
	hw->flow_num_entries = 0;

	return 0;
}

const struct rte_flow_ops ipn3ke_flow_ops = {
	.validate = ipn3ke_flow_validate,
	.create = ipn3ke_flow_create,
	.destroy = ipn3ke_flow_destroy,
	.flush = ipn3ke_flow_flush,
};