1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd.
3 * Copyright(c) 2010-2017 Intel Corporation
4 */
5
6 #include <sys/queue.h>
7 #include <rte_bus_pci.h>
8 #include <rte_malloc.h>
9 #include <rte_flow.h>
10 #include <rte_flow_driver.h>
11
12 #include "txgbe_ethdev.h"
13
14 #define TXGBE_MIN_N_TUPLE_PRIO 1
15 #define TXGBE_MAX_N_TUPLE_PRIO 7
16 #define TXGBE_MAX_FLX_SOURCE_OFF 62
17
18 /* ntuple filter list structure */
19 struct txgbe_ntuple_filter_ele {
20 TAILQ_ENTRY(txgbe_ntuple_filter_ele) entries;
21 struct rte_eth_ntuple_filter filter_info;
22 };
23 /* ethertype filter list structure */
24 struct txgbe_ethertype_filter_ele {
25 TAILQ_ENTRY(txgbe_ethertype_filter_ele) entries;
26 struct rte_eth_ethertype_filter filter_info;
27 };
28 /* syn filter list structure */
29 struct txgbe_eth_syn_filter_ele {
30 TAILQ_ENTRY(txgbe_eth_syn_filter_ele) entries;
31 struct rte_eth_syn_filter filter_info;
32 };
33 /* fdir filter list structure */
34 struct txgbe_fdir_rule_ele {
35 TAILQ_ENTRY(txgbe_fdir_rule_ele) entries;
36 struct txgbe_fdir_rule filter_info;
37 };
38 /* l2_tunnel filter list structure */
39 struct txgbe_eth_l2_tunnel_conf_ele {
40 TAILQ_ENTRY(txgbe_eth_l2_tunnel_conf_ele) entries;
41 struct txgbe_l2_tunnel_conf filter_info;
42 };
43 /* rss filter list structure */
44 struct txgbe_rss_conf_ele {
45 TAILQ_ENTRY(txgbe_rss_conf_ele) entries;
46 struct txgbe_rte_flow_rss_conf filter_info;
47 };
48 /* txgbe_flow memory list structure */
49 struct txgbe_flow_mem {
50 TAILQ_ENTRY(txgbe_flow_mem) entries;
51 struct rte_flow *flow;
52 };
53
54 TAILQ_HEAD(txgbe_ntuple_filter_list, txgbe_ntuple_filter_ele);
55 TAILQ_HEAD(txgbe_ethertype_filter_list, txgbe_ethertype_filter_ele);
56 TAILQ_HEAD(txgbe_syn_filter_list, txgbe_eth_syn_filter_ele);
57 TAILQ_HEAD(txgbe_fdir_rule_filter_list, txgbe_fdir_rule_ele);
58 TAILQ_HEAD(txgbe_l2_tunnel_filter_list, txgbe_eth_l2_tunnel_conf_ele);
59 TAILQ_HEAD(txgbe_rss_filter_list, txgbe_rss_conf_ele);
60 TAILQ_HEAD(txgbe_flow_mem_list, txgbe_flow_mem);
61
62 static struct txgbe_ntuple_filter_list filter_ntuple_list;
63 static struct txgbe_ethertype_filter_list filter_ethertype_list;
64 static struct txgbe_syn_filter_list filter_syn_list;
65 static struct txgbe_fdir_rule_filter_list filter_fdir_list;
66 static struct txgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
67 static struct txgbe_rss_filter_list filter_rss_list;
68 static struct txgbe_flow_mem_list txgbe_flow_list;
69
70 /**
71  * An endless loop cannot happen given the assumptions below:
72  * 1. there is at least one non-void item (END).
73  * 2. cur is before END.
74 */
75 static inline
76 const struct rte_flow_item *next_no_void_pattern(
77 const struct rte_flow_item pattern[],
78 const struct rte_flow_item *cur)
79 {
80 const struct rte_flow_item *next =
81 cur ? cur + 1 : &pattern[0];
82 while (1) {
83 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
84 return next;
85 next++;
86 }
87 }
88
89 static inline
90 const struct rte_flow_action *next_no_void_action(
91 const struct rte_flow_action actions[],
92 const struct rte_flow_action *cur)
93 {
94 const struct rte_flow_action *next =
95 cur ? cur + 1 : &actions[0];
96 while (1) {
97 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
98 return next;
99 next++;
100 }
101 }
102
103 /**
104  * Please be aware of an assumption shared by all the parsers:
105  * rte_flow_item uses big endian, while rte_flow_attr and
106  * rte_flow_action use CPU order.
107  * Because the pattern is used to describe packets,
108  * the packets normally use network order.
109 */
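/*
 * A minimal application-side sketch of that convention (illustrative only,
 * not part of this driver): item fields are filled in network order while
 * attr/action fields stay in CPU order.
 *
 *   struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *   struct rte_flow_item_ipv4 ip_spec = {
 *           .hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50)),
 *   };
 *   struct rte_flow_action_queue queue = { .index = 3 };
 */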
110
111 /**
112  * Parse the rule to see if it is an n-tuple rule,
113  * and extract the n-tuple filter info along the way.
114 * pattern:
115 * The first not void item can be ETH or IPV4.
116 * The second not void item must be IPV4 if the first one is ETH.
117 * The third not void item must be UDP or TCP.
118 * The next not void item must be END.
119 * action:
120 * The first not void action should be QUEUE.
121 * The next not void action should be END.
122 * pattern example:
123 * ITEM Spec Mask
124 * ETH NULL NULL
125 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
126 * dst_addr 192.167.3.50 0xFFFFFFFF
127 * next_proto_id 17 0xFF
128 * UDP/TCP/ src_port 80 0xFFFF
129 * SCTP dst_port 80 0xFFFF
130 * END
131  * other members in mask and spec should be set to 0x00.
132 * item->last should be NULL.
133 *
134 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
135 *
136 */
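/*
 * Illustrative sketch of an application-side rule that matches the pattern
 * described above (hypothetical port_id and queue index, not part of this
 * driver; error handling omitted):
 *
 *   struct rte_flow_error err;
 *   struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *   struct rte_flow_item_ipv4 ip_spec = {
 *           .hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
 *           .hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50)),
 *           .hdr.next_proto_id = IPPROTO_UDP,
 *   };
 *   struct rte_flow_item_ipv4 ip_mask = {
 *           .hdr.src_addr = UINT32_MAX,
 *           .hdr.dst_addr = UINT32_MAX,
 *           .hdr.next_proto_id = UINT8_MAX,
 *   };
 *   struct rte_flow_item_udp udp_spec = {
 *           .hdr.src_port = rte_cpu_to_be_16(80),
 *           .hdr.dst_port = rte_cpu_to_be_16(80),
 *   };
 *   struct rte_flow_item_udp udp_mask = {
 *           .hdr.src_port = UINT16_MAX,
 *           .hdr.dst_port = UINT16_MAX,
 *   };
 *   struct rte_flow_action_queue queue = { .index = 3 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &ip_spec, .mask = &ip_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *             .spec = &udp_spec, .mask = &udp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *                                           actions, &err);
 */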
137 static int
138 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
139 const struct rte_flow_item pattern[],
140 const struct rte_flow_action actions[],
141 struct rte_eth_ntuple_filter *filter,
142 struct rte_flow_error *error)
143 {
144 const struct rte_flow_item *item;
145 const struct rte_flow_action *act;
146 const struct rte_flow_item_ipv4 *ipv4_spec;
147 const struct rte_flow_item_ipv4 *ipv4_mask;
148 const struct rte_flow_item_tcp *tcp_spec;
149 const struct rte_flow_item_tcp *tcp_mask;
150 const struct rte_flow_item_udp *udp_spec;
151 const struct rte_flow_item_udp *udp_mask;
152 const struct rte_flow_item_sctp *sctp_spec;
153 const struct rte_flow_item_sctp *sctp_mask;
154 const struct rte_flow_item_eth *eth_spec;
155 const struct rte_flow_item_eth *eth_mask;
156 const struct rte_flow_item_vlan *vlan_spec;
157 const struct rte_flow_item_vlan *vlan_mask;
158 struct rte_flow_item_eth eth_null;
159 struct rte_flow_item_vlan vlan_null;
160
161 if (!pattern) {
162 rte_flow_error_set(error,
163 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
164 NULL, "NULL pattern.");
165 return -rte_errno;
166 }
167
168 if (!actions) {
169 rte_flow_error_set(error, EINVAL,
170 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
171 NULL, "NULL action.");
172 return -rte_errno;
173 }
174 if (!attr) {
175 rte_flow_error_set(error, EINVAL,
176 RTE_FLOW_ERROR_TYPE_ATTR,
177 NULL, "NULL attribute.");
178 return -rte_errno;
179 }
180
181 memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
182 memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
183
184 #ifdef RTE_LIB_SECURITY
185 /**
186 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
187 */
188 act = next_no_void_action(actions, NULL);
189 if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
190 const void *conf = act->conf;
191 /* check if the next not void item is END */
192 act = next_no_void_action(actions, act);
193 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
194 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
195 rte_flow_error_set(error, EINVAL,
196 RTE_FLOW_ERROR_TYPE_ACTION,
197 act, "Not supported action.");
198 return -rte_errno;
199 }
200
201 /* get the IP pattern */
202 item = next_no_void_pattern(pattern, NULL);
203 while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
204 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
205 if (item->last ||
206 item->type == RTE_FLOW_ITEM_TYPE_END) {
207 rte_flow_error_set(error, EINVAL,
208 RTE_FLOW_ERROR_TYPE_ITEM,
209 item, "IP pattern missing.");
210 return -rte_errno;
211 }
212 item = next_no_void_pattern(pattern, item);
213 }
214
215 filter->proto = IPPROTO_ESP;
216 return txgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
217 item->type == RTE_FLOW_ITEM_TYPE_IPV6);
218 }
219 #endif
220
221 /* the first not void item can be MAC or IPv4 */
222 item = next_no_void_pattern(pattern, NULL);
223
224 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
225 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
226 rte_flow_error_set(error, EINVAL,
227 RTE_FLOW_ERROR_TYPE_ITEM,
228 item, "Not supported by ntuple filter");
229 return -rte_errno;
230 }
231 /* Skip Ethernet */
232 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
233 eth_spec = item->spec;
234 eth_mask = item->mask;
235 /*Not supported last point for range*/
236 if (item->last) {
237 rte_flow_error_set(error,
238 EINVAL,
239 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
240 item, "Not supported last point for range");
241 return -rte_errno;
242 }
243 /* if the first item is MAC, the content should be NULL */
244 if ((item->spec && memcmp(eth_spec, &eth_null,
245 sizeof(struct rte_flow_item_eth))) ||
246 (item->mask && memcmp(eth_mask, &eth_null,
247 sizeof(struct rte_flow_item_eth)))) {
248 rte_flow_error_set(error, EINVAL,
249 RTE_FLOW_ERROR_TYPE_ITEM,
250 item, "Not supported by ntuple filter");
251 return -rte_errno;
252 }
253 /* check if the next not void item is IPv4 or Vlan */
254 item = next_no_void_pattern(pattern, item);
255 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
256 item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
257 rte_flow_error_set(error,
258 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
259 item, "Not supported by ntuple filter");
260 return -rte_errno;
261 }
262 }
263
264 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
265 vlan_spec = item->spec;
266 vlan_mask = item->mask;
267 /*Not supported last point for range*/
268 if (item->last) {
269 rte_flow_error_set(error,
270 EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
271 item, "Not supported last point for range");
272 return -rte_errno;
273 }
274 /* the content should be NULL */
275 if ((item->spec && memcmp(vlan_spec, &vlan_null,
276 sizeof(struct rte_flow_item_vlan))) ||
277 (item->mask && memcmp(vlan_mask, &vlan_null,
278 sizeof(struct rte_flow_item_vlan)))) {
279 rte_flow_error_set(error, EINVAL,
280 RTE_FLOW_ERROR_TYPE_ITEM,
281 item, "Not supported by ntuple filter");
282 return -rte_errno;
283 }
284 /* check if the next not void item is IPv4 */
285 item = next_no_void_pattern(pattern, item);
286 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
287 rte_flow_error_set(error,
288 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
289 item, "Not supported by ntuple filter");
290 return -rte_errno;
291 }
292 }
293
294 if (item->mask) {
295 /* get the IPv4 info */
296 if (!item->spec || !item->mask) {
297 rte_flow_error_set(error, EINVAL,
298 RTE_FLOW_ERROR_TYPE_ITEM,
299 item, "Invalid ntuple mask");
300 return -rte_errno;
301 }
302 /*Not supported last point for range*/
303 if (item->last) {
304 rte_flow_error_set(error, EINVAL,
305 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
306 item, "Not supported last point for range");
307 return -rte_errno;
308 }
309
310 ipv4_mask = item->mask;
311 /**
312 * Only support src & dst addresses, protocol,
313 * others should be masked.
314 */
315 if (ipv4_mask->hdr.version_ihl ||
316 ipv4_mask->hdr.type_of_service ||
317 ipv4_mask->hdr.total_length ||
318 ipv4_mask->hdr.packet_id ||
319 ipv4_mask->hdr.fragment_offset ||
320 ipv4_mask->hdr.time_to_live ||
321 ipv4_mask->hdr.hdr_checksum) {
322 rte_flow_error_set(error,
323 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
324 item, "Not supported by ntuple filter");
325 return -rte_errno;
326 }
327 if ((ipv4_mask->hdr.src_addr != 0 &&
328 ipv4_mask->hdr.src_addr != UINT32_MAX) ||
329 (ipv4_mask->hdr.dst_addr != 0 &&
330 ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
331 (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
332 ipv4_mask->hdr.next_proto_id != 0)) {
333 rte_flow_error_set(error,
334 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
335 item, "Not supported by ntuple filter");
336 return -rte_errno;
337 }
338
339 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
340 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
341 filter->proto_mask = ipv4_mask->hdr.next_proto_id;
342
343 ipv4_spec = item->spec;
344 filter->dst_ip = ipv4_spec->hdr.dst_addr;
345 filter->src_ip = ipv4_spec->hdr.src_addr;
346 filter->proto = ipv4_spec->hdr.next_proto_id;
347 }
348
349 /* check if the next not void item is TCP or UDP */
350 item = next_no_void_pattern(pattern, item);
351 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
352 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
353 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
354 item->type != RTE_FLOW_ITEM_TYPE_END) {
355 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
356 rte_flow_error_set(error, EINVAL,
357 RTE_FLOW_ERROR_TYPE_ITEM,
358 item, "Not supported by ntuple filter");
359 return -rte_errno;
360 }
361
362 if (item->type != RTE_FLOW_ITEM_TYPE_END &&
363 (!item->spec && !item->mask)) {
364 goto action;
365 }
366
367 /* get the TCP/UDP/SCTP info */
368 if (item->type != RTE_FLOW_ITEM_TYPE_END &&
369 (!item->spec || !item->mask)) {
370 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
371 rte_flow_error_set(error, EINVAL,
372 RTE_FLOW_ERROR_TYPE_ITEM,
373 item, "Invalid ntuple mask");
374 return -rte_errno;
375 }
376
377 /*Not supported last point for range*/
378 if (item->last) {
379 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
380 rte_flow_error_set(error, EINVAL,
381 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
382 item, "Not supported last point for range");
383 return -rte_errno;
384 }
385
386 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
387 tcp_mask = item->mask;
388
389 /**
390 * Only support src & dst ports, tcp flags,
391 * others should be masked.
392 */
393 if (tcp_mask->hdr.sent_seq ||
394 tcp_mask->hdr.recv_ack ||
395 tcp_mask->hdr.data_off ||
396 tcp_mask->hdr.rx_win ||
397 tcp_mask->hdr.cksum ||
398 tcp_mask->hdr.tcp_urp) {
399 memset(filter, 0,
400 sizeof(struct rte_eth_ntuple_filter));
401 rte_flow_error_set(error, EINVAL,
402 RTE_FLOW_ERROR_TYPE_ITEM,
403 item, "Not supported by ntuple filter");
404 return -rte_errno;
405 }
406 if ((tcp_mask->hdr.src_port != 0 &&
407 tcp_mask->hdr.src_port != UINT16_MAX) ||
408 (tcp_mask->hdr.dst_port != 0 &&
409 tcp_mask->hdr.dst_port != UINT16_MAX)) {
410 rte_flow_error_set(error,
411 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
412 item, "Not supported by ntuple filter");
413 return -rte_errno;
414 }
415
416 filter->dst_port_mask = tcp_mask->hdr.dst_port;
417 filter->src_port_mask = tcp_mask->hdr.src_port;
418 if (tcp_mask->hdr.tcp_flags == 0xFF) {
419 filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
420 } else if (!tcp_mask->hdr.tcp_flags) {
421 filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
422 } else {
423 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
424 rte_flow_error_set(error, EINVAL,
425 RTE_FLOW_ERROR_TYPE_ITEM,
426 item, "Not supported by ntuple filter");
427 return -rte_errno;
428 }
429
430 tcp_spec = item->spec;
431 filter->dst_port = tcp_spec->hdr.dst_port;
432 filter->src_port = tcp_spec->hdr.src_port;
433 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
434 } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
435 udp_mask = item->mask;
436
437 /**
438 * Only support src & dst ports,
439 * others should be masked.
440 */
441 if (udp_mask->hdr.dgram_len ||
442 udp_mask->hdr.dgram_cksum) {
443 memset(filter, 0,
444 sizeof(struct rte_eth_ntuple_filter));
445 rte_flow_error_set(error, EINVAL,
446 RTE_FLOW_ERROR_TYPE_ITEM,
447 item, "Not supported by ntuple filter");
448 return -rte_errno;
449 }
450 if ((udp_mask->hdr.src_port != 0 &&
451 udp_mask->hdr.src_port != UINT16_MAX) ||
452 (udp_mask->hdr.dst_port != 0 &&
453 udp_mask->hdr.dst_port != UINT16_MAX)) {
454 rte_flow_error_set(error,
455 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
456 item, "Not supported by ntuple filter");
457 return -rte_errno;
458 }
459
460 filter->dst_port_mask = udp_mask->hdr.dst_port;
461 filter->src_port_mask = udp_mask->hdr.src_port;
462
463 udp_spec = item->spec;
464 filter->dst_port = udp_spec->hdr.dst_port;
465 filter->src_port = udp_spec->hdr.src_port;
466 } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
467 sctp_mask = item->mask;
468
469 /**
470 * Only support src & dst ports,
471 * others should be masked.
472 */
473 if (sctp_mask->hdr.tag ||
474 sctp_mask->hdr.cksum) {
475 memset(filter, 0,
476 sizeof(struct rte_eth_ntuple_filter));
477 rte_flow_error_set(error, EINVAL,
478 RTE_FLOW_ERROR_TYPE_ITEM,
479 item, "Not supported by ntuple filter");
480 return -rte_errno;
481 }
482
483 filter->dst_port_mask = sctp_mask->hdr.dst_port;
484 filter->src_port_mask = sctp_mask->hdr.src_port;
485
486 sctp_spec = item->spec;
487 filter->dst_port = sctp_spec->hdr.dst_port;
488 filter->src_port = sctp_spec->hdr.src_port;
489 } else {
490 goto action;
491 }
492
493 /* check if the next not void item is END */
494 item = next_no_void_pattern(pattern, item);
495 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
496 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
497 rte_flow_error_set(error, EINVAL,
498 RTE_FLOW_ERROR_TYPE_ITEM,
499 item, "Not supported by ntuple filter");
500 return -rte_errno;
501 }
502
503 action:
504
505 /**
506 * n-tuple only supports forwarding,
507 * check if the first not void action is QUEUE.
508 */
509 act = next_no_void_action(actions, NULL);
510 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
511 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
512 rte_flow_error_set(error, EINVAL,
513 RTE_FLOW_ERROR_TYPE_ACTION,
514 act, "Not supported action.");
515 return -rte_errno;
516 }
517 filter->queue =
518 ((const struct rte_flow_action_queue *)act->conf)->index;
519
520 /* check if the next not void item is END */
521 act = next_no_void_action(actions, act);
522 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
523 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
524 rte_flow_error_set(error, EINVAL,
525 RTE_FLOW_ERROR_TYPE_ACTION,
526 act, "Not supported action.");
527 return -rte_errno;
528 }
529
530 /* parse attr */
531 /* must be input direction */
532 if (!attr->ingress) {
533 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
534 rte_flow_error_set(error, EINVAL,
535 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
536 attr, "Only support ingress.");
537 return -rte_errno;
538 }
539
540 /* not supported */
541 if (attr->egress) {
542 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
543 rte_flow_error_set(error, EINVAL,
544 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
545 attr, "Not support egress.");
546 return -rte_errno;
547 }
548
549 /* not supported */
550 if (attr->transfer) {
551 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
552 rte_flow_error_set(error, EINVAL,
553 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
554 attr, "No support for transfer.");
555 return -rte_errno;
556 }
557
558 if (attr->priority > 0xFFFF) {
559 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
560 rte_flow_error_set(error, EINVAL,
561 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
562 attr, "Error priority.");
563 return -rte_errno;
564 }
565 filter->priority = (uint16_t)attr->priority;
566 if (attr->priority < TXGBE_MIN_N_TUPLE_PRIO ||
567 attr->priority > TXGBE_MAX_N_TUPLE_PRIO)
568 filter->priority = 1;
569
570 return 0;
571 }
572
573 /* a txgbe-specific function because the flags field is specific to txgbe */
574 static int
575 txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
576 const struct rte_flow_attr *attr,
577 const struct rte_flow_item pattern[],
578 const struct rte_flow_action actions[],
579 struct rte_eth_ntuple_filter *filter,
580 struct rte_flow_error *error)
581 {
582 int ret;
583
584 ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
585
586 if (ret)
587 return ret;
588
589 #ifdef RTE_LIB_SECURITY
590 /* ESP flow not really a flow */
591 if (filter->proto == IPPROTO_ESP)
592 return 0;
593 #endif
594
595 /* txgbe doesn't support tcp flags */
596 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
597 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
598 rte_flow_error_set(error, EINVAL,
599 RTE_FLOW_ERROR_TYPE_ITEM,
600 NULL, "Not supported by ntuple filter");
601 return -rte_errno;
602 }
603
604 /* txgbe only supports a limited range of priorities */
605 if (filter->priority < TXGBE_MIN_N_TUPLE_PRIO ||
606 filter->priority > TXGBE_MAX_N_TUPLE_PRIO) {
607 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
608 rte_flow_error_set(error, EINVAL,
609 RTE_FLOW_ERROR_TYPE_ITEM,
610 NULL, "Priority not supported by ntuple filter");
611 return -rte_errno;
612 }
613
614 if (filter->queue >= dev->data->nb_rx_queues) {
615 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
616 rte_flow_error_set(error, EINVAL,
617 RTE_FLOW_ERROR_TYPE_ITEM,
618 NULL, "Not supported by ntuple filter");
619 return -rte_errno;
620 }
621
622 /* fixed value for txgbe */
623 filter->flags = RTE_5TUPLE_FLAGS;
624 return 0;
625 }
626
627 /**
628  * Parse the rule to see if it is an ethertype rule,
629  * and extract the ethertype filter info along the way.
630 * pattern:
631 * The first not void item can be ETH.
632 * The next not void item must be END.
633 * action:
634 * The first not void action should be QUEUE.
635 * The next not void action should be END.
636 * pattern example:
637 * ITEM Spec Mask
638 * ETH type 0x0807 0xFFFF
639 * END
640  * other members in mask and spec should be set to 0x00.
641 * item->last should be NULL.
642 */
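/*
 * Illustrative application-side sketch of such a rule (hypothetical
 * queue index; field names follow the rte_flow_item_eth layout used
 * elsewhere in this file):
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item_eth eth_spec = {
 *           .type = rte_cpu_to_be_16(0x0807),
 *   };
 *   struct rte_flow_item_eth eth_mask = {
 *           .type = RTE_BE16(0xFFFF),
 *   };
 *   struct rte_flow_action_queue queue = { .index = 3 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *             .spec = &eth_spec, .mask = &eth_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */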
643 static int
644 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
645 const struct rte_flow_item *pattern,
646 const struct rte_flow_action *actions,
647 struct rte_eth_ethertype_filter *filter,
648 struct rte_flow_error *error)
649 {
650 const struct rte_flow_item *item;
651 const struct rte_flow_action *act;
652 const struct rte_flow_item_eth *eth_spec;
653 const struct rte_flow_item_eth *eth_mask;
654 const struct rte_flow_action_queue *act_q;
655
656 if (!pattern) {
657 rte_flow_error_set(error, EINVAL,
658 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
659 NULL, "NULL pattern.");
660 return -rte_errno;
661 }
662
663 if (!actions) {
664 rte_flow_error_set(error, EINVAL,
665 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
666 NULL, "NULL action.");
667 return -rte_errno;
668 }
669
670 if (!attr) {
671 rte_flow_error_set(error, EINVAL,
672 RTE_FLOW_ERROR_TYPE_ATTR,
673 NULL, "NULL attribute.");
674 return -rte_errno;
675 }
676
677 item = next_no_void_pattern(pattern, NULL);
678 /* The first non-void item should be MAC. */
679 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
680 rte_flow_error_set(error, EINVAL,
681 RTE_FLOW_ERROR_TYPE_ITEM,
682 item, "Not supported by ethertype filter");
683 return -rte_errno;
684 }
685
686 /*Not supported last point for range*/
687 if (item->last) {
688 rte_flow_error_set(error, EINVAL,
689 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
690 item, "Not supported last point for range");
691 return -rte_errno;
692 }
693
694 /* Get the MAC info. */
695 if (!item->spec || !item->mask) {
696 rte_flow_error_set(error, EINVAL,
697 RTE_FLOW_ERROR_TYPE_ITEM,
698 item, "Not supported by ethertype filter");
699 return -rte_errno;
700 }
701
702 eth_spec = item->spec;
703 eth_mask = item->mask;
704
705 /* Mask bits of source MAC address must be full of 0.
706 * Mask bits of destination MAC address must be full
707 * of 1 or full of 0.
708 */
709 if (!rte_is_zero_ether_addr(&eth_mask->src) ||
710 (!rte_is_zero_ether_addr(&eth_mask->dst) &&
711 !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
712 rte_flow_error_set(error, EINVAL,
713 RTE_FLOW_ERROR_TYPE_ITEM,
714 item, "Invalid ether address mask");
715 return -rte_errno;
716 }
717
718 if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
719 rte_flow_error_set(error, EINVAL,
720 RTE_FLOW_ERROR_TYPE_ITEM,
721 item, "Invalid ethertype mask");
722 return -rte_errno;
723 }
724
725 /* If mask bits of destination MAC address
726 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
727 */
728 if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
729 filter->mac_addr = eth_spec->dst;
730 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
731 } else {
732 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
733 }
734 filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
735
736 /* Check if the next non-void item is END. */
737 item = next_no_void_pattern(pattern, item);
738 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
739 rte_flow_error_set(error, EINVAL,
740 RTE_FLOW_ERROR_TYPE_ITEM,
741 item, "Not supported by ethertype filter.");
742 return -rte_errno;
743 }
744
745 /* Parse action */
746
747 act = next_no_void_action(actions, NULL);
748 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
749 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
750 rte_flow_error_set(error, EINVAL,
751 RTE_FLOW_ERROR_TYPE_ACTION,
752 act, "Not supported action.");
753 return -rte_errno;
754 }
755
756 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
757 act_q = (const struct rte_flow_action_queue *)act->conf;
758 filter->queue = act_q->index;
759 } else {
760 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
761 }
762
763 /* Check if the next non-void item is END */
764 act = next_no_void_action(actions, act);
765 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
766 rte_flow_error_set(error, EINVAL,
767 RTE_FLOW_ERROR_TYPE_ACTION,
768 act, "Not supported action.");
769 return -rte_errno;
770 }
771
772 /* Parse attr */
773 /* Must be input direction */
774 if (!attr->ingress) {
775 rte_flow_error_set(error, EINVAL,
776 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
777 attr, "Only support ingress.");
778 return -rte_errno;
779 }
780
781 /* Not supported */
782 if (attr->egress) {
783 rte_flow_error_set(error, EINVAL,
784 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
785 attr, "Not support egress.");
786 return -rte_errno;
787 }
788
789 /* Not supported */
790 if (attr->transfer) {
791 rte_flow_error_set(error, EINVAL,
792 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
793 attr, "No support for transfer.");
794 return -rte_errno;
795 }
796
797 /* Not supported */
798 if (attr->priority) {
799 rte_flow_error_set(error, EINVAL,
800 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
801 attr, "Not support priority.");
802 return -rte_errno;
803 }
804
805 /* Not supported */
806 if (attr->group) {
807 rte_flow_error_set(error, EINVAL,
808 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
809 attr, "Not support group.");
810 return -rte_errno;
811 }
812
813 return 0;
814 }
815
816 static int
817 txgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
818 const struct rte_flow_attr *attr,
819 const struct rte_flow_item pattern[],
820 const struct rte_flow_action actions[],
821 struct rte_eth_ethertype_filter *filter,
822 struct rte_flow_error *error)
823 {
824 int ret;
825
826 ret = cons_parse_ethertype_filter(attr, pattern,
827 actions, filter, error);
828
829 if (ret)
830 return ret;
831
832 if (filter->queue >= dev->data->nb_rx_queues) {
833 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
834 rte_flow_error_set(error, EINVAL,
835 RTE_FLOW_ERROR_TYPE_ITEM,
836 NULL, "queue index much too big");
837 return -rte_errno;
838 }
839
840 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
841 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
842 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
843 rte_flow_error_set(error, EINVAL,
844 RTE_FLOW_ERROR_TYPE_ITEM,
845 NULL, "IPv4/IPv6 not supported by ethertype filter");
846 return -rte_errno;
847 }
848
849 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
850 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
851 rte_flow_error_set(error, EINVAL,
852 RTE_FLOW_ERROR_TYPE_ITEM,
853 NULL, "mac compare is unsupported");
854 return -rte_errno;
855 }
856
857 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
858 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
859 rte_flow_error_set(error, EINVAL,
860 RTE_FLOW_ERROR_TYPE_ITEM,
861 NULL, "drop option is unsupported");
862 return -rte_errno;
863 }
864
865 return 0;
866 }
867
868 /**
869  * Parse the rule to see if it is a TCP SYN rule,
870  * and extract the TCP SYN filter info along the way.
871 * pattern:
872 * The first not void item must be ETH.
873 * The second not void item must be IPV4 or IPV6.
874 * The third not void item must be TCP.
875 * The next not void item must be END.
876 * action:
877 * The first not void action should be QUEUE.
878 * The next not void action should be END.
879 * pattern example:
880 * ITEM Spec Mask
881 * ETH NULL NULL
882 * IPV4/IPV6 NULL NULL
883  * TCP tcp_flags 0x02 0x02
884 * END
885  * other members in mask and spec should be set to 0x00.
886 * item->last should be NULL.
887 */
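/*
 * Illustrative application-side sketch (hypothetical queue index; note the
 * parser below requires the tcp_flags mask to be exactly the SYN flag):
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item_tcp tcp_spec = {
 *           .hdr.tcp_flags = RTE_TCP_SYN_FLAG,
 *   };
 *   struct rte_flow_item_tcp tcp_mask = {
 *           .hdr.tcp_flags = RTE_TCP_SYN_FLAG,
 *   };
 *   struct rte_flow_action_queue queue = { .index = 3 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *             .spec = &tcp_spec, .mask = &tcp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */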
888 static int
889 cons_parse_syn_filter(const struct rte_flow_attr *attr,
890 const struct rte_flow_item pattern[],
891 const struct rte_flow_action actions[],
892 struct rte_eth_syn_filter *filter,
893 struct rte_flow_error *error)
894 {
895 const struct rte_flow_item *item;
896 const struct rte_flow_action *act;
897 const struct rte_flow_item_tcp *tcp_spec;
898 const struct rte_flow_item_tcp *tcp_mask;
899 const struct rte_flow_action_queue *act_q;
900
901 if (!pattern) {
902 rte_flow_error_set(error, EINVAL,
903 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
904 NULL, "NULL pattern.");
905 return -rte_errno;
906 }
907
908 if (!actions) {
909 rte_flow_error_set(error, EINVAL,
910 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
911 NULL, "NULL action.");
912 return -rte_errno;
913 }
914
915 if (!attr) {
916 rte_flow_error_set(error, EINVAL,
917 RTE_FLOW_ERROR_TYPE_ATTR,
918 NULL, "NULL attribute.");
919 return -rte_errno;
920 }
921
922
923 /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
924 item = next_no_void_pattern(pattern, NULL);
925 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
926 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
927 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
928 item->type != RTE_FLOW_ITEM_TYPE_TCP) {
929 rte_flow_error_set(error, EINVAL,
930 RTE_FLOW_ERROR_TYPE_ITEM,
931 item, "Not supported by syn filter");
932 return -rte_errno;
933 }
934 /*Not supported last point for range*/
935 if (item->last) {
936 rte_flow_error_set(error, EINVAL,
937 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
938 item, "Not supported last point for range");
939 return -rte_errno;
940 }
941
942 /* Skip Ethernet */
943 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
944 /* if the item is MAC, the content should be NULL */
945 if (item->spec || item->mask) {
946 rte_flow_error_set(error, EINVAL,
947 RTE_FLOW_ERROR_TYPE_ITEM,
948 item, "Invalid SYN address mask");
949 return -rte_errno;
950 }
951
952 /* check if the next not void item is IPv4 or IPv6 */
953 item = next_no_void_pattern(pattern, item);
954 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
955 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
956 rte_flow_error_set(error, EINVAL,
957 RTE_FLOW_ERROR_TYPE_ITEM,
958 item, "Not supported by syn filter");
959 return -rte_errno;
960 }
961 }
962
963 /* Skip IP */
964 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
965 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
966 /* if the item is IP, the content should be NULL */
967 if (item->spec || item->mask) {
968 rte_flow_error_set(error, EINVAL,
969 RTE_FLOW_ERROR_TYPE_ITEM,
970 item, "Invalid SYN mask");
971 return -rte_errno;
972 }
973
974 /* check if the next not void item is TCP */
975 item = next_no_void_pattern(pattern, item);
976 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
977 rte_flow_error_set(error, EINVAL,
978 RTE_FLOW_ERROR_TYPE_ITEM,
979 item, "Not supported by syn filter");
980 return -rte_errno;
981 }
982 }
983
984 /* Get the TCP info. Only support SYN. */
985 if (!item->spec || !item->mask) {
986 rte_flow_error_set(error, EINVAL,
987 RTE_FLOW_ERROR_TYPE_ITEM,
988 item, "Invalid SYN mask");
989 return -rte_errno;
990 }
991 /*Not supported last point for range*/
992 if (item->last) {
993 rte_flow_error_set(error, EINVAL,
994 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
995 item, "Not supported last point for range");
996 return -rte_errno;
997 }
998
999 tcp_spec = item->spec;
1000 tcp_mask = item->mask;
1001 if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
1002 tcp_mask->hdr.src_port ||
1003 tcp_mask->hdr.dst_port ||
1004 tcp_mask->hdr.sent_seq ||
1005 tcp_mask->hdr.recv_ack ||
1006 tcp_mask->hdr.data_off ||
1007 tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
1008 tcp_mask->hdr.rx_win ||
1009 tcp_mask->hdr.cksum ||
1010 tcp_mask->hdr.tcp_urp) {
1011 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1012 rte_flow_error_set(error, EINVAL,
1013 RTE_FLOW_ERROR_TYPE_ITEM,
1014 item, "Not supported by syn filter");
1015 return -rte_errno;
1016 }
1017
1018 /* check if the next not void item is END */
1019 item = next_no_void_pattern(pattern, item);
1020 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1021 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1022 rte_flow_error_set(error, EINVAL,
1023 RTE_FLOW_ERROR_TYPE_ITEM,
1024 item, "Not supported by syn filter");
1025 return -rte_errno;
1026 }
1027
1028 /* check if the first not void action is QUEUE. */
1029 act = next_no_void_action(actions, NULL);
1030 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1031 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1032 rte_flow_error_set(error, EINVAL,
1033 RTE_FLOW_ERROR_TYPE_ACTION,
1034 act, "Not supported action.");
1035 return -rte_errno;
1036 }
1037
1038 act_q = (const struct rte_flow_action_queue *)act->conf;
1039 filter->queue = act_q->index;
1040 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) {
1041 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1042 rte_flow_error_set(error, EINVAL,
1043 RTE_FLOW_ERROR_TYPE_ACTION,
1044 act, "Not supported action.");
1045 return -rte_errno;
1046 }
1047
1048 /* check if the next not void item is END */
1049 act = next_no_void_action(actions, act);
1050 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1051 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1052 rte_flow_error_set(error, EINVAL,
1053 RTE_FLOW_ERROR_TYPE_ACTION,
1054 act, "Not supported action.");
1055 return -rte_errno;
1056 }
1057
1058 /* parse attr */
1059 /* must be input direction */
1060 if (!attr->ingress) {
1061 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1062 rte_flow_error_set(error, EINVAL,
1063 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1064 attr, "Only support ingress.");
1065 return -rte_errno;
1066 }
1067
1068 /* not supported */
1069 if (attr->egress) {
1070 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1071 rte_flow_error_set(error, EINVAL,
1072 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1073 attr, "Not support egress.");
1074 return -rte_errno;
1075 }
1076
1077 /* not supported */
1078 if (attr->transfer) {
1079 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1080 rte_flow_error_set(error, EINVAL,
1081 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1082 attr, "No support for transfer.");
1083 return -rte_errno;
1084 }
1085
1086 /* Support 2 priorities, the lowest or highest. */
1087 if (!attr->priority) {
1088 filter->hig_pri = 0;
1089 } else if (attr->priority == (uint32_t)~0U) {
1090 filter->hig_pri = 1;
1091 } else {
1092 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1093 rte_flow_error_set(error, EINVAL,
1094 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1095 attr, "Not support priority.");
1096 return -rte_errno;
1097 }
1098
1099 return 0;
1100 }
1101
1102 static int
1103 txgbe_parse_syn_filter(struct rte_eth_dev *dev,
1104 const struct rte_flow_attr *attr,
1105 const struct rte_flow_item pattern[],
1106 const struct rte_flow_action actions[],
1107 struct rte_eth_syn_filter *filter,
1108 struct rte_flow_error *error)
1109 {
1110 int ret;
1111
1112 ret = cons_parse_syn_filter(attr, pattern,
1113 actions, filter, error);
1114
1115 if (filter->queue >= dev->data->nb_rx_queues)
1116 return -rte_errno;
1117
1118 if (ret)
1119 return ret;
1120
1121 return 0;
1122 }
1123
1124 /**
1125  * Parse the rule to see if it is an L2 tunnel rule,
1126  * and extract the L2 tunnel filter info along the way.
1127  * Only E-tag is supported for now.
1128 * pattern:
1129 * The first not void item can be E_TAG.
1130 * The next not void item must be END.
1131 * action:
1132 * The first not void action should be VF or PF.
1133 * The next not void action should be END.
1134 * pattern example:
1135 * ITEM Spec Mask
1136 * E_TAG grp 0x1 0x3
1137  *           e_cid_base 0x309 0xFFF
1138 * END
1139  * other members in mask and spec should be set to 0x00.
1140 * item->last should be NULL.
1141 */
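/*
 * Illustrative application-side sketch of such a rule (hypothetical VF id;
 * GRP sits in bits 13:12 and E-CID base in bits 11:0 of rsvd_grp_ecid_b,
 * so GRP 0x1 with E-CID base 0x309 is encoded as 0x1309):
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item_e_tag etag_spec = {
 *           .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x1309),
 *   };
 *   struct rte_flow_item_e_tag etag_mask = {
 *           .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
 *   };
 *   struct rte_flow_action_vf vf = { .id = 1 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *             .spec = &etag_spec, .mask = &etag_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */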
1142 static int
1143 cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
1144 const struct rte_flow_attr *attr,
1145 const struct rte_flow_item pattern[],
1146 const struct rte_flow_action actions[],
1147 struct txgbe_l2_tunnel_conf *filter,
1148 struct rte_flow_error *error)
1149 {
1150 const struct rte_flow_item *item;
1151 const struct rte_flow_item_e_tag *e_tag_spec;
1152 const struct rte_flow_item_e_tag *e_tag_mask;
1153 const struct rte_flow_action *act;
1154 const struct rte_flow_action_vf *act_vf;
1155 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1156
1157 if (!pattern) {
1158 rte_flow_error_set(error, EINVAL,
1159 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1160 NULL, "NULL pattern.");
1161 return -rte_errno;
1162 }
1163
1164 if (!actions) {
1165 rte_flow_error_set(error, EINVAL,
1166 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1167 NULL, "NULL action.");
1168 return -rte_errno;
1169 }
1170
1171 if (!attr) {
1172 rte_flow_error_set(error, EINVAL,
1173 RTE_FLOW_ERROR_TYPE_ATTR,
1174 NULL, "NULL attribute.");
1175 return -rte_errno;
1176 }
1177
1178 /* The first not void item should be e-tag. */
1179 item = next_no_void_pattern(pattern, NULL);
1180 if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1181 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1182 rte_flow_error_set(error, EINVAL,
1183 RTE_FLOW_ERROR_TYPE_ITEM,
1184 item, "Not supported by L2 tunnel filter");
1185 return -rte_errno;
1186 }
1187
1188 if (!item->spec || !item->mask) {
1189 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1190 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1191 item, "Not supported by L2 tunnel filter");
1192 return -rte_errno;
1193 }
1194
1195 /*Not supported last point for range*/
1196 if (item->last) {
1197 rte_flow_error_set(error, EINVAL,
1198 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1199 item, "Not supported last point for range");
1200 return -rte_errno;
1201 }
1202
1203 e_tag_spec = item->spec;
1204 e_tag_mask = item->mask;
1205
1206 /* Only care about GRP and E cid base. */
1207 if (e_tag_mask->epcp_edei_in_ecid_b ||
1208 e_tag_mask->in_ecid_e ||
1209 e_tag_mask->ecid_e ||
1210 e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1211 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1212 rte_flow_error_set(error, EINVAL,
1213 RTE_FLOW_ERROR_TYPE_ITEM,
1214 item, "Not supported by L2 tunnel filter");
1215 return -rte_errno;
1216 }
1217
1218 filter->l2_tunnel_type = RTE_ETH_L2_TUNNEL_TYPE_E_TAG;
1219 /**
1220 * grp and e_cid_base are bit fields and only use 14 bits.
1221 * e-tag id is taken as little endian by HW.
1222 */
1223 filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1224
1225 /* check if the next not void item is END */
1226 item = next_no_void_pattern(pattern, item);
1227 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1228 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1229 rte_flow_error_set(error, EINVAL,
1230 RTE_FLOW_ERROR_TYPE_ITEM,
1231 item, "Not supported by L2 tunnel filter");
1232 return -rte_errno;
1233 }
1234
1235 /* parse attr */
1236 /* must be input direction */
1237 if (!attr->ingress) {
1238 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1239 rte_flow_error_set(error, EINVAL,
1240 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1241 attr, "Only support ingress.");
1242 return -rte_errno;
1243 }
1244
1245 /* not supported */
1246 if (attr->egress) {
1247 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1248 rte_flow_error_set(error, EINVAL,
1249 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1250 attr, "Not support egress.");
1251 return -rte_errno;
1252 }
1253
1254 /* not supported */
1255 if (attr->transfer) {
1256 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1257 rte_flow_error_set(error, EINVAL,
1258 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1259 attr, "No support for transfer.");
1260 return -rte_errno;
1261 }
1262
1263 /* not supported */
1264 if (attr->priority) {
1265 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1266 rte_flow_error_set(error, EINVAL,
1267 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1268 attr, "Not support priority.");
1269 return -rte_errno;
1270 }
1271
1272 /* check if the first not void action is VF or PF. */
1273 act = next_no_void_action(actions, NULL);
1274 if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
1275 act->type != RTE_FLOW_ACTION_TYPE_PF) {
1276 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1277 rte_flow_error_set(error, EINVAL,
1278 RTE_FLOW_ERROR_TYPE_ACTION,
1279 act, "Not supported action.");
1280 return -rte_errno;
1281 }
1282
1283 if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1284 act_vf = (const struct rte_flow_action_vf *)act->conf;
1285 filter->pool = act_vf->id;
1286 } else {
1287 filter->pool = pci_dev->max_vfs;
1288 }
1289
1290 /* check if the next not void item is END */
1291 act = next_no_void_action(actions, act);
1292 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1293 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1294 rte_flow_error_set(error, EINVAL,
1295 RTE_FLOW_ERROR_TYPE_ACTION,
1296 act, "Not supported action.");
1297 return -rte_errno;
1298 }
1299
1300 return 0;
1301 }
1302
1303 static int
1304 txgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1305 const struct rte_flow_attr *attr,
1306 const struct rte_flow_item pattern[],
1307 const struct rte_flow_action actions[],
1308 struct txgbe_l2_tunnel_conf *l2_tn_filter,
1309 struct rte_flow_error *error)
1310 {
1311 int ret = 0;
1312 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1313 uint16_t vf_num;
1314
1315 ret = cons_parse_l2_tn_filter(dev, attr, pattern,
1316 actions, l2_tn_filter, error);
1317
1318 vf_num = pci_dev->max_vfs;
1319
1320 if (l2_tn_filter->pool > vf_num)
1321 return -rte_errno;
1322
1323 return ret;
1324 }
1325
1326 /* Parse to get the attr and action info of a flow director rule. */
1327 static int
1328 txgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1329 const struct rte_flow_action actions[],
1330 struct txgbe_fdir_rule *rule,
1331 struct rte_flow_error *error)
1332 {
1333 const struct rte_flow_action *act;
1334 const struct rte_flow_action_queue *act_q;
1335 const struct rte_flow_action_mark *mark;
1336
1337 /* parse attr */
1338 /* must be input direction */
1339 if (!attr->ingress) {
1340 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1341 rte_flow_error_set(error, EINVAL,
1342 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1343 attr, "Only support ingress.");
1344 return -rte_errno;
1345 }
1346
1347 /* not supported */
1348 if (attr->egress) {
1349 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1350 rte_flow_error_set(error, EINVAL,
1351 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1352 attr, "Not support egress.");
1353 return -rte_errno;
1354 }
1355
1356 /* not supported */
1357 if (attr->transfer) {
1358 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1359 rte_flow_error_set(error, EINVAL,
1360 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1361 attr, "No support for transfer.");
1362 return -rte_errno;
1363 }
1364
1365 /* not supported */
1366 if (attr->priority) {
1367 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1368 rte_flow_error_set(error, EINVAL,
1369 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1370 attr, "Not support priority.");
1371 return -rte_errno;
1372 }
1373
1374 /* check if the first not void action is QUEUE or DROP. */
1375 act = next_no_void_action(actions, NULL);
1376 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1377 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1378 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1379 rte_flow_error_set(error, EINVAL,
1380 RTE_FLOW_ERROR_TYPE_ACTION,
1381 act, "Not supported action.");
1382 return -rte_errno;
1383 }
1384
1385 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1386 act_q = (const struct rte_flow_action_queue *)act->conf;
1387 rule->queue = act_q->index;
1388 } else { /* drop */
1389 /* signature mode does not support drop action. */
1390 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1391 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1392 rte_flow_error_set(error, EINVAL,
1393 RTE_FLOW_ERROR_TYPE_ACTION,
1394 act, "Not supported action.");
1395 return -rte_errno;
1396 }
1397 rule->fdirflags = TXGBE_FDIRPICMD_DROP;
1398 }
1399
1400 /* check if the next not void item is MARK */
1401 act = next_no_void_action(actions, act);
1402 if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
1403 act->type != RTE_FLOW_ACTION_TYPE_END) {
1404 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1405 rte_flow_error_set(error, EINVAL,
1406 RTE_FLOW_ERROR_TYPE_ACTION,
1407 act, "Not supported action.");
1408 return -rte_errno;
1409 }
1410
1411 rule->soft_id = 0;
1412
1413 if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1414 mark = (const struct rte_flow_action_mark *)act->conf;
1415 rule->soft_id = mark->id;
1416 act = next_no_void_action(actions, act);
1417 }
1418
1419 /* check if the next not void item is END */
1420 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1421 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1422 rte_flow_error_set(error, EINVAL,
1423 RTE_FLOW_ERROR_TYPE_ACTION,
1424 act, "Not supported action.");
1425 return -rte_errno;
1426 }
1427
1428 return 0;
1429 }
1430
1431 /* search the next non-void pattern item and skip fuzzy items */
1432 static inline
1433 const struct rte_flow_item *next_no_fuzzy_pattern(
1434 const struct rte_flow_item pattern[],
1435 const struct rte_flow_item *cur)
1436 {
1437 const struct rte_flow_item *next =
1438 next_no_void_pattern(pattern, cur);
1439 while (1) {
1440 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1441 return next;
1442 next = next_no_void_pattern(pattern, next);
1443 }
1444 }
1445
1446 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1447 {
1448 const struct rte_flow_item_fuzzy *spec, *last, *mask;
1449 const struct rte_flow_item *item;
1450 uint32_t sh, lh, mh;
1451 int i = 0;
1452
1453 while (1) {
1454 item = pattern + i;
1455 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1456 break;
1457
1458 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1459 spec = item->spec;
1460 last = item->last;
1461 mask = item->mask;
1462
1463 if (!spec || !mask)
1464 return 0;
1465
1466 sh = spec->thresh;
1467
1468 if (!last)
1469 lh = sh;
1470 else
1471 lh = last->thresh;
1472
1473 mh = mask->thresh;
1474 sh = sh & mh;
1475 lh = lh & mh;
1476
1477 if (!sh || sh > lh)
1478 return 0;
1479
1480 return 1;
1481 }
1482
1483 i++;
1484 }
1485
1486 return 0;
1487 }
1488
1489 /**
1490  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1491  * and extract the flow director filter info along the way.
1492 * UDP/TCP/SCTP PATTERN:
1493 * The first not void item can be ETH or IPV4 or IPV6
1494 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1495 * The next not void item could be UDP or TCP or SCTP (optional)
1496 * The next not void item could be RAW (for flexbyte, optional)
1497 * The next not void item must be END.
1498 * A Fuzzy Match pattern can appear at any place before END.
1499 * Fuzzy Match is optional for IPV4 but is required for IPV6
1500 * MAC VLAN PATTERN:
1501 * The first not void item must be ETH.
1502 * The second not void item must be MAC VLAN.
1503 * The next not void item must be END.
1504 * ACTION:
1505 * The first not void action should be QUEUE or DROP.
1506 * The second not void optional action should be MARK,
1507 * mark_id is a uint32_t number.
1508 * The next not void action should be END.
1509 * UDP/TCP/SCTP pattern example:
1510 * ITEM Spec Mask
1511 * ETH NULL NULL
1512 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
1513 * dst_addr 192.167.3.50 0xFFFFFFFF
1514 * UDP/TCP/SCTP src_port 80 0xFFFF
1515 * dst_port 80 0xFFFF
1516 * FLEX relative 0 0x1
1517 * search 0 0x1
1518 * reserved 0 0
1519 * offset 12 0xFFFFFFFF
1520 * limit 0 0xFFFF
1521 * length 2 0xFFFF
1522 * pattern[0] 0x86 0xFF
1523 * pattern[1] 0xDD 0xFF
1524 * END
1525 * MAC VLAN pattern example:
1526 * ITEM Spec Mask
1527 * ETH dst_addr
1528  *           {0xAC, 0x7B, 0xA1, {0xFF, 0xFF, 0xFF,
1529  *           0x2C, 0x6D, 0x36}  0xFF, 0xFF, 0xFF}
1530 * MAC VLAN tci 0x2016 0xEFFF
1531 * END
1532  * Other members in mask and spec should be set to 0x00.
1533 * Item->last should be NULL.
1534 */
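/*
 * Illustrative application-side sketch of a perfect-mode UDP/IPv4 rule with
 * a MARK action (hypothetical queue index and mark id; the flexbyte RAW
 * item is omitted for brevity):
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item_ipv4 ip_spec = {
 *           .hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
 *           .hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50)),
 *   };
 *   struct rte_flow_item_ipv4 ip_mask = {
 *           .hdr.src_addr = UINT32_MAX,
 *           .hdr.dst_addr = UINT32_MAX,
 *   };
 *   struct rte_flow_item_udp udp_spec = {
 *           .hdr.src_port = rte_cpu_to_be_16(80),
 *           .hdr.dst_port = rte_cpu_to_be_16(80),
 *   };
 *   struct rte_flow_item_udp udp_mask = {
 *           .hdr.src_port = UINT16_MAX,
 *           .hdr.dst_port = UINT16_MAX,
 *   };
 *   struct rte_flow_action_queue queue = { .index = 3 };
 *   struct rte_flow_action_mark mark = { .id = 0x1234 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &ip_spec, .mask = &ip_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *             .spec = &udp_spec, .mask = &udp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */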
1535 static int
1536 txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
1537 const struct rte_flow_attr *attr,
1538 const struct rte_flow_item pattern[],
1539 const struct rte_flow_action actions[],
1540 struct txgbe_fdir_rule *rule,
1541 struct rte_flow_error *error)
1542 {
1543 const struct rte_flow_item *item;
1544 const struct rte_flow_item_eth *eth_mask;
1545 const struct rte_flow_item_ipv4 *ipv4_spec;
1546 const struct rte_flow_item_ipv4 *ipv4_mask;
1547 const struct rte_flow_item_ipv6 *ipv6_spec;
1548 const struct rte_flow_item_ipv6 *ipv6_mask;
1549 const struct rte_flow_item_tcp *tcp_spec;
1550 const struct rte_flow_item_tcp *tcp_mask;
1551 const struct rte_flow_item_udp *udp_spec;
1552 const struct rte_flow_item_udp *udp_mask;
1553 const struct rte_flow_item_sctp *sctp_spec;
1554 const struct rte_flow_item_sctp *sctp_mask;
1555 const struct rte_flow_item_raw *raw_mask;
1556 const struct rte_flow_item_raw *raw_spec;
1557 u32 ptype = 0;
1558 uint8_t j;
1559
1560 if (!pattern) {
1561 rte_flow_error_set(error, EINVAL,
1562 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1563 NULL, "NULL pattern.");
1564 return -rte_errno;
1565 }
1566
1567 if (!actions) {
1568 rte_flow_error_set(error, EINVAL,
1569 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1570 NULL, "NULL action.");
1571 return -rte_errno;
1572 }
1573
1574 if (!attr) {
1575 rte_flow_error_set(error, EINVAL,
1576 RTE_FLOW_ERROR_TYPE_ATTR,
1577 NULL, "NULL attribute.");
1578 return -rte_errno;
1579 }
1580
1581 /**
1582 * Some fields may not be provided. Set spec to 0 and mask to default
1583  * value, so we need not do anything later for the fields that are not provided.
1584 */
1585 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1586 memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
1587 rule->mask.vlan_tci_mask = 0;
1588 rule->mask.flex_bytes_mask = 0;
1589
1590 /**
1591 * The first not void item should be
1592 * MAC or IPv4 or TCP or UDP or SCTP.
1593 */
1594 item = next_no_fuzzy_pattern(pattern, NULL);
1595 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1596 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1597 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1598 item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1599 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1600 item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1601 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1602 rte_flow_error_set(error, EINVAL,
1603 RTE_FLOW_ERROR_TYPE_ITEM,
1604 item, "Not supported by fdir filter");
1605 return -rte_errno;
1606 }
1607
1608 if (signature_match(pattern))
1609 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1610 else
1611 rule->mode = RTE_FDIR_MODE_PERFECT;
1612
1613 /*Not supported last point for range*/
1614 if (item->last) {
1615 rte_flow_error_set(error, EINVAL,
1616 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1617 item, "Not supported last point for range");
1618 return -rte_errno;
1619 }
1620
1621 /* Get the MAC info. */
1622 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1623 /**
1624 * Only support vlan and dst MAC address,
1625 * others should be masked.
1626 */
1627 if (item->spec && !item->mask) {
1628 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1629 rte_flow_error_set(error, EINVAL,
1630 RTE_FLOW_ERROR_TYPE_ITEM,
1631 item, "Not supported by fdir filter");
1632 return -rte_errno;
1633 }
1634
1635 if (item->mask) {
1636 rule->b_mask = TRUE;
1637 eth_mask = item->mask;
1638
1639 /* Ether type should be masked. */
1640 if (eth_mask->type ||
1641 rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1642 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1643 rte_flow_error_set(error, EINVAL,
1644 RTE_FLOW_ERROR_TYPE_ITEM,
1645 item, "Not supported by fdir filter");
1646 return -rte_errno;
1647 }
1648
1649 /* If ethernet has meaning, it means MAC VLAN mode. */
1650 rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1651
1652 /**
1653 * src MAC address must be masked,
1654 * and don't support dst MAC address mask.
1655 */
1656 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
1657 if (eth_mask->src.addr_bytes[j] ||
1658 eth_mask->dst.addr_bytes[j] != 0xFF) {
1659 memset(rule, 0,
1660 sizeof(struct txgbe_fdir_rule));
1661 rte_flow_error_set(error, EINVAL,
1662 RTE_FLOW_ERROR_TYPE_ITEM,
1663 item, "Not supported by fdir filter");
1664 return -rte_errno;
1665 }
1666 }
1667
1668 /* When no VLAN, considered as full mask. */
1669 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1670 }
1671 /* If both spec and mask are NULL,
1672  * it means we don't care about ETH.
1673  * Do nothing.
1674 */
1675
1676 /**
1677 * Check if the next not void item is vlan or ipv4.
1678 * IPv6 is not supported.
1679 */
1680 item = next_no_fuzzy_pattern(pattern, item);
1681 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1682 if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1683 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1684 rte_flow_error_set(error, EINVAL,
1685 RTE_FLOW_ERROR_TYPE_ITEM,
1686 item, "Not supported by fdir filter");
1687 return -rte_errno;
1688 }
1689 } else {
1690 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1691 item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1692 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1693 rte_flow_error_set(error, EINVAL,
1694 RTE_FLOW_ERROR_TYPE_ITEM,
1695 item, "Not supported by fdir filter");
1696 return -rte_errno;
1697 }
1698 }
1699 }
1700
1701 /* Get the IPV4 info. */
1702 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1703 /**
1704 * Set the flow type even if there's no content
1705 * as we must have a flow type.
1706 */
1707 rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV4;
1708 ptype = txgbe_ptype_table[TXGBE_PT_IPV4];
1709 /*Not supported last point for range*/
1710 if (item->last) {
1711 rte_flow_error_set(error, EINVAL,
1712 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1713 item, "Not supported last point for range");
1714 return -rte_errno;
1715 }
1716 /**
1717 * Only care about src & dst addresses,
1718 * others should be masked.
1719 */
1720 if (!item->mask) {
1721 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1722 rte_flow_error_set(error, EINVAL,
1723 RTE_FLOW_ERROR_TYPE_ITEM,
1724 item, "Not supported by fdir filter");
1725 return -rte_errno;
1726 }
1727 rule->b_mask = TRUE;
1728 ipv4_mask = item->mask;
1729 if (ipv4_mask->hdr.version_ihl ||
1730 ipv4_mask->hdr.type_of_service ||
1731 ipv4_mask->hdr.total_length ||
1732 ipv4_mask->hdr.packet_id ||
1733 ipv4_mask->hdr.fragment_offset ||
1734 ipv4_mask->hdr.time_to_live ||
1735 ipv4_mask->hdr.next_proto_id ||
1736 ipv4_mask->hdr.hdr_checksum) {
1737 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1738 rte_flow_error_set(error, EINVAL,
1739 RTE_FLOW_ERROR_TYPE_ITEM,
1740 item, "Not supported by fdir filter");
1741 return -rte_errno;
1742 }
1743 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1744 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1745
1746 if (item->spec) {
1747 rule->b_spec = TRUE;
1748 ipv4_spec = item->spec;
1749 rule->input.dst_ip[0] =
1750 ipv4_spec->hdr.dst_addr;
1751 rule->input.src_ip[0] =
1752 ipv4_spec->hdr.src_addr;
1753 }
1754
1755 /**
1756 * Check if the next not void item is
1757 * TCP or UDP or SCTP or END.
1758 */
1759 item = next_no_fuzzy_pattern(pattern, item);
1760 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1761 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1762 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1763 item->type != RTE_FLOW_ITEM_TYPE_END &&
1764 item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1765 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1766 rte_flow_error_set(error, EINVAL,
1767 RTE_FLOW_ERROR_TYPE_ITEM,
1768 item, "Not supported by fdir filter");
1769 return -rte_errno;
1770 }
1771 }
1772
1773 /* Get the IPV6 info. */
1774 if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1775 /**
1776 * Set the flow type even if there's no content
1777 * as we must have a flow type.
1778 */
1779 rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV6;
1780 ptype = txgbe_ptype_table[TXGBE_PT_IPV6];
1781
1782 /**
1783 		 * 1. must be signature match mode
1784 		 * 2. 'last' is not supported
1785 		 * 3. mask must not be NULL
1786 */
1787 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1788 item->last ||
1789 !item->mask) {
1790 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1791 rte_flow_error_set(error, EINVAL,
1792 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1793 item, "Not supported last point for range");
1794 return -rte_errno;
1795 }
1796
1797 rule->b_mask = TRUE;
1798 ipv6_mask = item->mask;
1799 if (ipv6_mask->hdr.vtc_flow ||
1800 ipv6_mask->hdr.payload_len ||
1801 ipv6_mask->hdr.proto ||
1802 ipv6_mask->hdr.hop_limits) {
1803 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1804 rte_flow_error_set(error, EINVAL,
1805 RTE_FLOW_ERROR_TYPE_ITEM,
1806 item, "Not supported by fdir filter");
1807 return -rte_errno;
1808 }
1809
1810 /* check src addr mask */
1811 for (j = 0; j < 16; j++) {
1812 if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1813 rule->mask.src_ipv6_mask |= 1 << j;
1814 } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1815 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1816 rte_flow_error_set(error, EINVAL,
1817 RTE_FLOW_ERROR_TYPE_ITEM,
1818 item, "Not supported by fdir filter");
1819 return -rte_errno;
1820 }
1821 }
1822
1823 /* check dst addr mask */
1824 for (j = 0; j < 16; j++) {
1825 if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1826 rule->mask.dst_ipv6_mask |= 1 << j;
1827 } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1828 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1829 rte_flow_error_set(error, EINVAL,
1830 RTE_FLOW_ERROR_TYPE_ITEM,
1831 item, "Not supported by fdir filter");
1832 return -rte_errno;
1833 }
1834 }
1835
1836 if (item->spec) {
1837 rule->b_spec = TRUE;
1838 ipv6_spec = item->spec;
1839 rte_memcpy(rule->input.src_ip,
1840 ipv6_spec->hdr.src_addr, 16);
1841 rte_memcpy(rule->input.dst_ip,
1842 ipv6_spec->hdr.dst_addr, 16);
1843 }
1844
1845 /**
1846 * Check if the next not void item is
1847 * TCP or UDP or SCTP or END.
1848 */
1849 item = next_no_fuzzy_pattern(pattern, item);
1850 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1851 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1852 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1853 item->type != RTE_FLOW_ITEM_TYPE_END &&
1854 item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1855 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1856 rte_flow_error_set(error, EINVAL,
1857 RTE_FLOW_ERROR_TYPE_ITEM,
1858 item, "Not supported by fdir filter");
1859 return -rte_errno;
1860 }
1861 }
1862
1863 /* Get the TCP info. */
1864 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1865 /**
1866 * Set the flow type even if there's no content
1867 * as we must have a flow type.
1868 */
1869 rule->input.flow_type |= TXGBE_ATR_L4TYPE_TCP;
1870 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_TCP];
1871 /*Not supported last point for range*/
1872 if (item->last) {
1873 rte_flow_error_set(error, EINVAL,
1874 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1875 item, "Not supported last point for range");
1876 return -rte_errno;
1877 }
1878 /**
1879 * Only care about src & dst ports,
1880 * others should be masked.
1881 */
1882 if (!item->mask) {
1883 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1884 rte_flow_error_set(error, EINVAL,
1885 RTE_FLOW_ERROR_TYPE_ITEM,
1886 item, "Not supported by fdir filter");
1887 return -rte_errno;
1888 }
1889 rule->b_mask = TRUE;
1890 tcp_mask = item->mask;
1891 if (tcp_mask->hdr.sent_seq ||
1892 tcp_mask->hdr.recv_ack ||
1893 tcp_mask->hdr.data_off ||
1894 tcp_mask->hdr.tcp_flags ||
1895 tcp_mask->hdr.rx_win ||
1896 tcp_mask->hdr.cksum ||
1897 tcp_mask->hdr.tcp_urp) {
1898 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1899 rte_flow_error_set(error, EINVAL,
1900 RTE_FLOW_ERROR_TYPE_ITEM,
1901 item, "Not supported by fdir filter");
1902 return -rte_errno;
1903 }
1904 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1905 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1906
1907 if (item->spec) {
1908 rule->b_spec = TRUE;
1909 tcp_spec = item->spec;
1910 rule->input.src_port =
1911 tcp_spec->hdr.src_port;
1912 rule->input.dst_port =
1913 tcp_spec->hdr.dst_port;
1914 }
1915
1916 item = next_no_fuzzy_pattern(pattern, item);
1917 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1918 item->type != RTE_FLOW_ITEM_TYPE_END) {
1919 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1920 rte_flow_error_set(error, EINVAL,
1921 RTE_FLOW_ERROR_TYPE_ITEM,
1922 item, "Not supported by fdir filter");
1923 return -rte_errno;
1924 }
1925 }
1926
1927 /* Get the UDP info */
1928 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1929 /**
1930 * Set the flow type even if there's no content
1931 * as we must have a flow type.
1932 */
1933 rule->input.flow_type |= TXGBE_ATR_L4TYPE_UDP;
1934 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_UDP];
1935 /*Not supported last point for range*/
1936 if (item->last) {
1937 rte_flow_error_set(error, EINVAL,
1938 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1939 item, "Not supported last point for range");
1940 return -rte_errno;
1941 }
1942 /**
1943 * Only care about src & dst ports,
1944 * others should be masked.
1945 */
1946 if (!item->mask) {
1947 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1948 rte_flow_error_set(error, EINVAL,
1949 RTE_FLOW_ERROR_TYPE_ITEM,
1950 item, "Not supported by fdir filter");
1951 return -rte_errno;
1952 }
1953 rule->b_mask = TRUE;
1954 udp_mask = item->mask;
1955 if (udp_mask->hdr.dgram_len ||
1956 udp_mask->hdr.dgram_cksum) {
1957 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1958 rte_flow_error_set(error, EINVAL,
1959 RTE_FLOW_ERROR_TYPE_ITEM,
1960 item, "Not supported by fdir filter");
1961 return -rte_errno;
1962 }
1963 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1964 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1965
1966 if (item->spec) {
1967 rule->b_spec = TRUE;
1968 udp_spec = item->spec;
1969 rule->input.src_port =
1970 udp_spec->hdr.src_port;
1971 rule->input.dst_port =
1972 udp_spec->hdr.dst_port;
1973 }
1974
1975 item = next_no_fuzzy_pattern(pattern, item);
1976 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1977 item->type != RTE_FLOW_ITEM_TYPE_END) {
1978 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1979 rte_flow_error_set(error, EINVAL,
1980 RTE_FLOW_ERROR_TYPE_ITEM,
1981 item, "Not supported by fdir filter");
1982 return -rte_errno;
1983 }
1984 }
1985
1986 /* Get the SCTP info */
1987 if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1988 /**
1989 * Set the flow type even if there's no content
1990 * as we must have a flow type.
1991 */
1992 rule->input.flow_type |= TXGBE_ATR_L4TYPE_SCTP;
1993 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_SCTP];
1994 /*Not supported last point for range*/
1995 if (item->last) {
1996 rte_flow_error_set(error, EINVAL,
1997 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1998 item, "Not supported last point for range");
1999 return -rte_errno;
2000 }
2001
2002 /**
2003 * Only care about src & dst ports,
2004 * others should be masked.
2005 */
2006 if (!item->mask) {
2007 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2008 rte_flow_error_set(error, EINVAL,
2009 RTE_FLOW_ERROR_TYPE_ITEM,
2010 item, "Not supported by fdir filter");
2011 return -rte_errno;
2012 }
2013 rule->b_mask = TRUE;
2014 sctp_mask = item->mask;
2015 if (sctp_mask->hdr.tag ||
2016 sctp_mask->hdr.cksum) {
2017 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2018 rte_flow_error_set(error, EINVAL,
2019 RTE_FLOW_ERROR_TYPE_ITEM,
2020 item, "Not supported by fdir filter");
2021 return -rte_errno;
2022 }
2023 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
2024 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
2025
2026 if (item->spec) {
2027 rule->b_spec = TRUE;
2028 sctp_spec = item->spec;
2029 rule->input.src_port =
2030 sctp_spec->hdr.src_port;
2031 rule->input.dst_port =
2032 sctp_spec->hdr.dst_port;
2033 }
2034 		/* other fields, and even the SCTP ports, are not supported */
2035 sctp_mask = item->mask;
2036 if (sctp_mask &&
2037 (sctp_mask->hdr.src_port ||
2038 sctp_mask->hdr.dst_port ||
2039 sctp_mask->hdr.tag ||
2040 sctp_mask->hdr.cksum)) {
2041 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2042 rte_flow_error_set(error, EINVAL,
2043 RTE_FLOW_ERROR_TYPE_ITEM,
2044 item, "Not supported by fdir filter");
2045 return -rte_errno;
2046 }
2047
2048 item = next_no_fuzzy_pattern(pattern, item);
2049 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2050 item->type != RTE_FLOW_ITEM_TYPE_END) {
2051 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2052 rte_flow_error_set(error, EINVAL,
2053 RTE_FLOW_ERROR_TYPE_ITEM,
2054 item, "Not supported by fdir filter");
2055 return -rte_errno;
2056 }
2057 }
2058
2059 /* Get the flex byte info */
2060 if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2061 /* Not supported last point for range*/
2062 if (item->last) {
2063 rte_flow_error_set(error, EINVAL,
2064 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2065 item, "Not supported last point for range");
2066 return -rte_errno;
2067 }
2068 		/* mask and spec should not be NULL */
2069 if (!item->mask || !item->spec) {
2070 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2071 rte_flow_error_set(error, EINVAL,
2072 RTE_FLOW_ERROR_TYPE_ITEM,
2073 item, "Not supported by fdir filter");
2074 return -rte_errno;
2075 }
2076
2077 raw_mask = item->mask;
2078
2079 /* check mask */
2080 if (raw_mask->relative != 0x1 ||
2081 raw_mask->search != 0x1 ||
2082 raw_mask->reserved != 0x0 ||
2083 (uint32_t)raw_mask->offset != 0xffffffff ||
2084 raw_mask->limit != 0xffff ||
2085 raw_mask->length != 0xffff) {
2086 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2087 rte_flow_error_set(error, EINVAL,
2088 RTE_FLOW_ERROR_TYPE_ITEM,
2089 item, "Not supported by fdir filter");
2090 return -rte_errno;
2091 }
2092
2093 raw_spec = item->spec;
2094
2095 /* check spec */
2096 if (raw_spec->relative != 0 ||
2097 raw_spec->search != 0 ||
2098 raw_spec->reserved != 0 ||
2099 raw_spec->offset > TXGBE_MAX_FLX_SOURCE_OFF ||
2100 raw_spec->offset % 2 ||
2101 raw_spec->limit != 0 ||
2102 raw_spec->length != 2 ||
2103 /* pattern can't be 0xffff */
2104 (raw_spec->pattern[0] == 0xff &&
2105 raw_spec->pattern[1] == 0xff)) {
2106 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2107 rte_flow_error_set(error, EINVAL,
2108 RTE_FLOW_ERROR_TYPE_ITEM,
2109 item, "Not supported by fdir filter");
2110 return -rte_errno;
2111 }
2112
2113 /* check pattern mask */
2114 if (raw_mask->pattern[0] != 0xff ||
2115 raw_mask->pattern[1] != 0xff) {
2116 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2117 rte_flow_error_set(error, EINVAL,
2118 RTE_FLOW_ERROR_TYPE_ITEM,
2119 item, "Not supported by fdir filter");
2120 return -rte_errno;
2121 }
2122
2123 rule->mask.flex_bytes_mask = 0xffff;
2124 rule->input.flex_bytes =
2125 (((uint16_t)raw_spec->pattern[1]) << 8) |
2126 raw_spec->pattern[0];
2127 rule->flex_bytes_offset = raw_spec->offset;
2128 }
2129
2130 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2131 /* check if the next not void item is END */
2132 item = next_no_fuzzy_pattern(pattern, item);
2133 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2134 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2135 rte_flow_error_set(error, EINVAL,
2136 RTE_FLOW_ERROR_TYPE_ITEM,
2137 item, "Not supported by fdir filter");
2138 return -rte_errno;
2139 }
2140 }
2141
2142 rule->input.pkt_type = cpu_to_be16(txgbe_encode_ptype(ptype));
2143
2144 return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
2145 }
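/**
 * Illustrative note (not driver code): the flex-byte (RAW) checks above accept
 * a two-byte pattern at an even offset within the first 62 bytes of the
 * packet, with relative/search/limit cleared in the spec and fully set in the
 * mask. A minimal application-side sketch of such an item, using an arbitrary
 * offset and pattern purely as an example, could look like:
 *
 *	static const uint8_t flex_pattern[2] = { 0x12, 0x34 };
 *	static const uint8_t flex_pattern_mask[2] = { 0xFF, 0xFF };
 *	struct rte_flow_item_raw raw_spec = {
 *		.relative = 0,
 *		.search = 0,
 *		.offset = 12,
 *		.limit = 0,
 *		.length = 2,
 *		.pattern = flex_pattern,
 *	};
 *	struct rte_flow_item_raw raw_mask = {
 *		.relative = 1,
 *		.search = 1,
 *		.offset = -1,
 *		.limit = 0xFFFF,
 *		.length = 0xFFFF,
 *		.pattern = flex_pattern_mask,
 *	};
 *	struct rte_flow_item raw_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_RAW,
 *		.spec = &raw_spec,
 *		.mask = &raw_mask,
 *	};
 */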
2146
2147 /**
2148 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
2149  * And get the flow director filter info along the way.
2150 * VxLAN PATTERN:
2151 * The first not void item must be ETH.
2152 * The second not void item must be IPV4/ IPV6.
2153  * The third not void item must be UDP, and the fourth must be VXLAN.
2154 * The next not void item must be END.
2155 * NVGRE PATTERN:
2156 * The first not void item must be ETH.
2157 * The second not void item must be IPV4/ IPV6.
2158 * The third not void item must be NVGRE.
2159 * The next not void item must be END.
2160 * ACTION:
2161 * The first not void action should be QUEUE or DROP.
2162 * The second not void optional action should be MARK,
2163 * mark_id is a uint32_t number.
2164 * The next not void action should be END.
2165 * VxLAN pattern example:
2166 * ITEM Spec Mask
2167 * ETH NULL NULL
2168 * IPV4/IPV6 NULL NULL
2169 * UDP NULL NULL
2170 * VxLAN vni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
2171 * MAC VLAN tci 0x2016 0xEFFF
2172 * END
2173  * NVGRE pattern example:
2174 * ITEM Spec Mask
2175 * ETH NULL NULL
2176 * IPV4/IPV6 NULL NULL
2177 * NVGRE protocol 0x6558 0xFFFF
2178 * tni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
2179 * MAC VLAN tci 0x2016 0xEFFF
2180 * END
2181  * Other members in mask and spec should be set to 0x00.
2182 * item->last should be NULL.
2183 */
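/**
 * Illustrative application-side sketch (not driver code) of how the VxLAN
 * pattern documented above might be expressed through the generic rte_flow
 * API. port_id, the queue index, VNI, inner MAC address and VLAN TCI are
 * arbitrary example values assumed to be supplied by the application; whether
 * a given rule is accepted still depends on the checks in
 * txgbe_parse_fdir_filter_tunnel() below.
 *
 *	struct rte_flow_item_vxlan vxlan_spec = {
 *		.vni = { 0x00, 0x32, 0x54 },
 *	};
 *	struct rte_flow_item_vxlan vxlan_mask = {
 *		.vni = { 0xFF, 0xFF, 0xFF },
 *	};
 *	struct rte_flow_item_eth inner_eth_spec = {
 *		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *	};
 *	struct rte_flow_item_eth inner_eth_mask = {
 *		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 *	};
 *	struct rte_flow_item_vlan vlan_spec = {
 *		.tci = RTE_BE16(0x2016),
 *	};
 *	struct rte_flow_item_vlan vlan_mask = {
 *		.tci = RTE_BE16(0xEFFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &vlan_spec, .mask = &vlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 */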
2184 static int
2185 txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2186 const struct rte_flow_item pattern[],
2187 const struct rte_flow_action actions[],
2188 struct txgbe_fdir_rule *rule,
2189 struct rte_flow_error *error)
2190 {
2191 const struct rte_flow_item *item;
2192 const struct rte_flow_item_eth *eth_mask;
2193 uint32_t j;
2194
2195 if (!pattern) {
2196 rte_flow_error_set(error, EINVAL,
2197 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2198 NULL, "NULL pattern.");
2199 return -rte_errno;
2200 }
2201
2202 if (!actions) {
2203 rte_flow_error_set(error, EINVAL,
2204 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2205 NULL, "NULL action.");
2206 return -rte_errno;
2207 }
2208
2209 if (!attr) {
2210 rte_flow_error_set(error, EINVAL,
2211 RTE_FLOW_ERROR_TYPE_ATTR,
2212 NULL, "NULL attribute.");
2213 return -rte_errno;
2214 }
2215
2216 /**
2217 	 * Some fields may not be provided. Set spec to 0 and mask to the default
2218 	 * value, so we need not do anything later for the fields that are not provided.
2219 */
2220 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2221 memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
2222 rule->mask.vlan_tci_mask = 0;
2223
2224 /**
2225 * The first not void item should be
2226 * MAC or IPv4 or IPv6 or UDP or VxLAN.
2227 */
2228 item = next_no_void_pattern(pattern, NULL);
2229 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2230 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2231 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2232 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2233 item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2234 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2235 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2236 rte_flow_error_set(error, EINVAL,
2237 RTE_FLOW_ERROR_TYPE_ITEM,
2238 item, "Not supported by fdir filter");
2239 return -rte_errno;
2240 }
2241
2242 rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2243
2244 /* Skip MAC. */
2245 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2246 /* Only used to describe the protocol stack. */
2247 if (item->spec || item->mask) {
2248 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2249 rte_flow_error_set(error, EINVAL,
2250 RTE_FLOW_ERROR_TYPE_ITEM,
2251 item, "Not supported by fdir filter");
2252 return -rte_errno;
2253 }
2254 /* Not supported last point for range*/
2255 if (item->last) {
2256 rte_flow_error_set(error, EINVAL,
2257 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2258 item, "Not supported last point for range");
2259 return -rte_errno;
2260 }
2261
2262 /* Check if the next not void item is IPv4 or IPv6. */
2263 item = next_no_void_pattern(pattern, item);
2264 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2265 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2266 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2267 rte_flow_error_set(error, EINVAL,
2268 RTE_FLOW_ERROR_TYPE_ITEM,
2269 item, "Not supported by fdir filter");
2270 return -rte_errno;
2271 }
2272 }
2273
2274 /* Skip IP. */
2275 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2276 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2277 /* Only used to describe the protocol stack. */
2278 if (item->spec || item->mask) {
2279 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2280 rte_flow_error_set(error, EINVAL,
2281 RTE_FLOW_ERROR_TYPE_ITEM,
2282 item, "Not supported by fdir filter");
2283 return -rte_errno;
2284 }
2285 /*Not supported last point for range*/
2286 if (item->last) {
2287 rte_flow_error_set(error, EINVAL,
2288 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2289 item, "Not supported last point for range");
2290 return -rte_errno;
2291 }
2292
2293 /* Check if the next not void item is UDP or NVGRE. */
2294 item = next_no_void_pattern(pattern, item);
2295 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2296 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2297 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2298 rte_flow_error_set(error, EINVAL,
2299 RTE_FLOW_ERROR_TYPE_ITEM,
2300 item, "Not supported by fdir filter");
2301 return -rte_errno;
2302 }
2303 }
2304
2305 /* Skip UDP. */
2306 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2307 /* Only used to describe the protocol stack. */
2308 if (item->spec || item->mask) {
2309 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2310 rte_flow_error_set(error, EINVAL,
2311 RTE_FLOW_ERROR_TYPE_ITEM,
2312 item, "Not supported by fdir filter");
2313 return -rte_errno;
2314 }
2315 /*Not supported last point for range*/
2316 if (item->last) {
2317 rte_flow_error_set(error, EINVAL,
2318 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2319 item, "Not supported last point for range");
2320 return -rte_errno;
2321 }
2322
2323 /* Check if the next not void item is VxLAN. */
2324 item = next_no_void_pattern(pattern, item);
2325 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2326 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2327 rte_flow_error_set(error, EINVAL,
2328 RTE_FLOW_ERROR_TYPE_ITEM,
2329 item, "Not supported by fdir filter");
2330 return -rte_errno;
2331 }
2332 }
2333
2334 /* check if the next not void item is MAC */
2335 item = next_no_void_pattern(pattern, item);
2336 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2337 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2338 rte_flow_error_set(error, EINVAL,
2339 RTE_FLOW_ERROR_TYPE_ITEM,
2340 item, "Not supported by fdir filter");
2341 return -rte_errno;
2342 }
2343
2344 /**
2345 	 * Only the VLAN and dst MAC address are supported;
2346 	 * others should be masked.
2347 */
2348
2349 if (!item->mask) {
2350 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2351 rte_flow_error_set(error, EINVAL,
2352 RTE_FLOW_ERROR_TYPE_ITEM,
2353 item, "Not supported by fdir filter");
2354 return -rte_errno;
2355 }
2356 /*Not supported last point for range*/
2357 if (item->last) {
2358 rte_flow_error_set(error, EINVAL,
2359 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2360 item, "Not supported last point for range");
2361 return -rte_errno;
2362 }
2363 rule->b_mask = TRUE;
2364 eth_mask = item->mask;
2365
2366 /* Ether type should be masked. */
2367 if (eth_mask->type) {
2368 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2369 rte_flow_error_set(error, EINVAL,
2370 RTE_FLOW_ERROR_TYPE_ITEM,
2371 item, "Not supported by fdir filter");
2372 return -rte_errno;
2373 }
2374
2375 /* src MAC address should be masked. */
2376 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
2377 if (eth_mask->src.addr_bytes[j]) {
2378 memset(rule, 0,
2379 sizeof(struct txgbe_fdir_rule));
2380 rte_flow_error_set(error, EINVAL,
2381 RTE_FLOW_ERROR_TYPE_ITEM,
2382 item, "Not supported by fdir filter");
2383 return -rte_errno;
2384 }
2385 }
2386 rule->mask.mac_addr_byte_mask = 0;
2387 for (j = 0; j < ETH_ADDR_LEN; j++) {
2388 /* It's a per byte mask. */
2389 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2390 rule->mask.mac_addr_byte_mask |= 0x1 << j;
2391 } else if (eth_mask->dst.addr_bytes[j]) {
2392 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2393 rte_flow_error_set(error, EINVAL,
2394 RTE_FLOW_ERROR_TYPE_ITEM,
2395 item, "Not supported by fdir filter");
2396 return -rte_errno;
2397 }
2398 }
2399
2400 	/* When no VLAN is given, treat it as a full mask. */
2401 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2402
2403 /**
2404 * Check if the next not void item is vlan or ipv4.
2405 * IPv6 is not supported.
2406 */
2407 item = next_no_void_pattern(pattern, item);
2408 if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
2409 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
2410 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2411 rte_flow_error_set(error, EINVAL,
2412 RTE_FLOW_ERROR_TYPE_ITEM,
2413 item, "Not supported by fdir filter");
2414 return -rte_errno;
2415 }
2416 /*Not supported last point for range*/
2417 if (item->last) {
2418 rte_flow_error_set(error, EINVAL,
2419 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2420 item, "Not supported last point for range");
2421 return -rte_errno;
2422 }
2423
2424 /**
2425 	 * If the VLAN TCI is 0, it means we don't care about the VLAN.
2426 * Do nothing.
2427 */
2428
2429 return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
2430 }
2431
2432 static int
2433 txgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2434 const struct rte_flow_attr *attr,
2435 const struct rte_flow_item pattern[],
2436 const struct rte_flow_action actions[],
2437 struct txgbe_fdir_rule *rule,
2438 struct rte_flow_error *error)
2439 {
2440 int ret;
2441 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2442 enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2443
2444 ret = txgbe_parse_fdir_filter_normal(dev, attr, pattern,
2445 actions, rule, error);
2446 if (!ret)
2447 goto step_next;
2448
2449 ret = txgbe_parse_fdir_filter_tunnel(attr, pattern,
2450 actions, rule, error);
2451 if (ret)
2452 return ret;
2453
2454 step_next:
2455
2456 if (hw->mac.type == txgbe_mac_raptor &&
2457 rule->fdirflags == TXGBE_FDIRPICMD_DROP &&
2458 (rule->input.src_port != 0 || rule->input.dst_port != 0))
2459 return -ENOTSUP;
2460
2461 if (fdir_mode == RTE_FDIR_MODE_NONE ||
2462 fdir_mode != rule->mode)
2463 return -ENOTSUP;
2464
2465 if (rule->queue >= dev->data->nb_rx_queues)
2466 return -ENOTSUP;
2467
2468 return ret;
2469 }
2470
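/**
 * Illustrative application-side sketch (not driver code) of an RSS action
 * that satisfies the checks in txgbe_parse_rss_filter() below: the default
 * hash function, encapsulation level 0, either no key or a 40-byte key, and
 * a queue list that fits the device. The queue ids and hash-type flags are
 * arbitrary examples, and the exact macro spelling (ETH_RSS_* or
 * RTE_ETH_RSS_*) depends on the DPDK release.
 *
 *	uint16_t queues[] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss rss = {
 *		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *		.level = 0,
 *		.types = ETH_RSS_IP | ETH_RSS_TCP,
 *		.key_len = 0,
 *		.key = NULL,
 *		.queue_num = RTE_DIM(queues),
 *		.queue = queues,
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_attr attr = { .ingress = 1 };
 */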
2471 static int
2472 txgbe_parse_rss_filter(struct rte_eth_dev *dev,
2473 const struct rte_flow_attr *attr,
2474 const struct rte_flow_action actions[],
2475 struct txgbe_rte_flow_rss_conf *rss_conf,
2476 struct rte_flow_error *error)
2477 {
2478 const struct rte_flow_action *act;
2479 const struct rte_flow_action_rss *rss;
2480 uint16_t n;
2481
2482 /**
2483 	 * RSS only supports forwarding;
2484 	 * check that the first not void action is RSS.
2485 */
2486 act = next_no_void_action(actions, NULL);
2487 if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
2488 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2489 rte_flow_error_set(error, EINVAL,
2490 RTE_FLOW_ERROR_TYPE_ACTION,
2491 act, "Not supported action.");
2492 return -rte_errno;
2493 }
2494
2495 rss = (const struct rte_flow_action_rss *)act->conf;
2496
2497 if (!rss || !rss->queue_num) {
2498 rte_flow_error_set(error, EINVAL,
2499 RTE_FLOW_ERROR_TYPE_ACTION,
2500 act,
2501 "no valid queues");
2502 return -rte_errno;
2503 }
2504
2505 for (n = 0; n < rss->queue_num; n++) {
2506 if (rss->queue[n] >= dev->data->nb_rx_queues) {
2507 rte_flow_error_set(error, EINVAL,
2508 RTE_FLOW_ERROR_TYPE_ACTION,
2509 act,
2510 "queue id > max number of queues");
2511 return -rte_errno;
2512 }
2513 }
2514
2515 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
2516 return rte_flow_error_set
2517 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2518 "non-default RSS hash functions are not supported");
2519 if (rss->level)
2520 return rte_flow_error_set
2521 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2522 "a nonzero RSS encapsulation level is not supported");
2523 if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
2524 return rte_flow_error_set
2525 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2526 "RSS hash key must be exactly 40 bytes");
2527 if (rss->queue_num > RTE_DIM(rss_conf->queue))
2528 return rte_flow_error_set
2529 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2530 "too many queues for RSS context");
2531 if (txgbe_rss_conf_init(rss_conf, rss))
2532 return rte_flow_error_set
2533 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
2534 "RSS context initialization failure");
2535
2536 /* check if the next not void item is END */
2537 act = next_no_void_action(actions, act);
2538 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2539 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2540 rte_flow_error_set(error, EINVAL,
2541 RTE_FLOW_ERROR_TYPE_ACTION,
2542 act, "Not supported action.");
2543 return -rte_errno;
2544 }
2545
2546 /* parse attr */
2547 /* must be input direction */
2548 if (!attr->ingress) {
2549 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2550 rte_flow_error_set(error, EINVAL,
2551 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2552 attr, "Only support ingress.");
2553 return -rte_errno;
2554 }
2555
2556 /* not supported */
2557 if (attr->egress) {
2558 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2559 rte_flow_error_set(error, EINVAL,
2560 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2561 attr, "Not support egress.");
2562 return -rte_errno;
2563 }
2564
2565 /* not supported */
2566 if (attr->transfer) {
2567 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2568 rte_flow_error_set(error, EINVAL,
2569 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2570 attr, "No support for transfer.");
2571 return -rte_errno;
2572 }
2573
2574 if (attr->priority > 0xFFFF) {
2575 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2576 rte_flow_error_set(error, EINVAL,
2577 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2578 attr, "Error priority.");
2579 return -rte_errno;
2580 }
2581
2582 return 0;
2583 }
2584
2585 /* remove the rss filter */
2586 static void
2587 txgbe_clear_rss_filter(struct rte_eth_dev *dev)
2588 {
2589 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
2590
2591 if (filter_info->rss_info.conf.queue_num)
2592 txgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
2593 }
2594
2595 void
2596 txgbe_filterlist_init(void)
2597 {
2598 TAILQ_INIT(&filter_ntuple_list);
2599 TAILQ_INIT(&filter_ethertype_list);
2600 TAILQ_INIT(&filter_syn_list);
2601 TAILQ_INIT(&filter_fdir_list);
2602 TAILQ_INIT(&filter_l2_tunnel_list);
2603 TAILQ_INIT(&filter_rss_list);
2604 TAILQ_INIT(&txgbe_flow_list);
2605 }
2606
2607 void
2608 txgbe_filterlist_flush(void)
2609 {
2610 struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
2611 struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
2612 struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
2613 struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2614 struct txgbe_fdir_rule_ele *fdir_rule_ptr;
2615 struct txgbe_flow_mem *txgbe_flow_mem_ptr;
2616 struct txgbe_rss_conf_ele *rss_filter_ptr;
2617
2618 while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2619 TAILQ_REMOVE(&filter_ntuple_list,
2620 ntuple_filter_ptr,
2621 entries);
2622 rte_free(ntuple_filter_ptr);
2623 }
2624
2625 while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2626 TAILQ_REMOVE(&filter_ethertype_list,
2627 ethertype_filter_ptr,
2628 entries);
2629 rte_free(ethertype_filter_ptr);
2630 }
2631
2632 while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2633 TAILQ_REMOVE(&filter_syn_list,
2634 syn_filter_ptr,
2635 entries);
2636 rte_free(syn_filter_ptr);
2637 }
2638
2639 while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2640 TAILQ_REMOVE(&filter_l2_tunnel_list,
2641 l2_tn_filter_ptr,
2642 entries);
2643 rte_free(l2_tn_filter_ptr);
2644 }
2645
2646 while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2647 TAILQ_REMOVE(&filter_fdir_list,
2648 fdir_rule_ptr,
2649 entries);
2650 rte_free(fdir_rule_ptr);
2651 }
2652
2653 while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
2654 TAILQ_REMOVE(&filter_rss_list,
2655 rss_filter_ptr,
2656 entries);
2657 rte_free(rss_filter_ptr);
2658 }
2659
2660 while ((txgbe_flow_mem_ptr = TAILQ_FIRST(&txgbe_flow_list))) {
2661 TAILQ_REMOVE(&txgbe_flow_list,
2662 txgbe_flow_mem_ptr,
2663 entries);
2664 rte_free(txgbe_flow_mem_ptr->flow);
2665 rte_free(txgbe_flow_mem_ptr);
2666 }
2667 }
2668
2669 /**
2670  * Create a flow rule.
2671  * Theoretically one rule can match more than one kind of filter.
2672  * We will let it use the filter type it hits first,
2673  * so the sequence of the parsers below matters.
2674 */
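/*
 * For example (illustrative only): a plain pattern such as
 *	eth / ipv4 / tcp / end
 * combined with a QUEUE action can typically be expressed both as an ntuple
 * filter and as a flow director rule, depending on the masks and attributes
 * supplied; because txgbe_parse_ntuple_filter() is tried first below, such a
 * rule is consumed by the ntuple filter path.
 */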
2675 static struct rte_flow *
2676 txgbe_flow_create(struct rte_eth_dev *dev,
2677 const struct rte_flow_attr *attr,
2678 const struct rte_flow_item pattern[],
2679 const struct rte_flow_action actions[],
2680 struct rte_flow_error *error)
2681 {
2682 int ret;
2683 struct rte_eth_ntuple_filter ntuple_filter;
2684 struct rte_eth_ethertype_filter ethertype_filter;
2685 struct rte_eth_syn_filter syn_filter;
2686 struct txgbe_fdir_rule fdir_rule;
2687 struct txgbe_l2_tunnel_conf l2_tn_filter;
2688 struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
2689 struct txgbe_rte_flow_rss_conf rss_conf;
2690 struct rte_flow *flow = NULL;
2691 struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
2692 struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
2693 struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
2694 struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2695 struct txgbe_fdir_rule_ele *fdir_rule_ptr;
2696 struct txgbe_rss_conf_ele *rss_filter_ptr;
2697 struct txgbe_flow_mem *txgbe_flow_mem_ptr;
2698 uint8_t first_mask = FALSE;
2699
2700 flow = rte_zmalloc("txgbe_rte_flow", sizeof(struct rte_flow), 0);
2701 if (!flow) {
2702 PMD_DRV_LOG(ERR, "failed to allocate memory");
2703 		return NULL;
2704 }
2705 txgbe_flow_mem_ptr = rte_zmalloc("txgbe_flow_mem",
2706 sizeof(struct txgbe_flow_mem), 0);
2707 if (!txgbe_flow_mem_ptr) {
2708 PMD_DRV_LOG(ERR, "failed to allocate memory");
2709 rte_free(flow);
2710 return NULL;
2711 }
2712 txgbe_flow_mem_ptr->flow = flow;
2713 TAILQ_INSERT_TAIL(&txgbe_flow_list,
2714 txgbe_flow_mem_ptr, entries);
2715
2716 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2717 ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
2718 actions, &ntuple_filter, error);
2719
2720 #ifdef RTE_LIB_SECURITY
2721 	/* An ESP flow is not really a flow */
2722 if (ntuple_filter.proto == IPPROTO_ESP)
2723 return flow;
2724 #endif
2725
2726 if (!ret) {
2727 ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2728 if (!ret) {
2729 ntuple_filter_ptr = rte_zmalloc("txgbe_ntuple_filter",
2730 sizeof(struct txgbe_ntuple_filter_ele), 0);
2731 if (!ntuple_filter_ptr) {
2732 PMD_DRV_LOG(ERR, "failed to allocate memory");
2733 goto out;
2734 }
2735 rte_memcpy(&ntuple_filter_ptr->filter_info,
2736 &ntuple_filter,
2737 sizeof(struct rte_eth_ntuple_filter));
2738 TAILQ_INSERT_TAIL(&filter_ntuple_list,
2739 ntuple_filter_ptr, entries);
2740 flow->rule = ntuple_filter_ptr;
2741 flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2742 return flow;
2743 }
2744 goto out;
2745 }
2746
2747 	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2748 	ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
2749 				actions, &ethertype_filter, error);
2750 if (!ret) {
2751 ret = txgbe_add_del_ethertype_filter(dev,
2752 				&ethertype_filter, TRUE);
2753 if (!ret) {
2754 ethertype_filter_ptr =
2755 rte_zmalloc("txgbe_ethertype_filter",
2756 sizeof(struct txgbe_ethertype_filter_ele), 0);
2757 if (!ethertype_filter_ptr) {
2758 PMD_DRV_LOG(ERR, "failed to allocate memory");
2759 goto out;
2760 }
2761 			rte_memcpy(&ethertype_filter_ptr->filter_info,
2762 				&ethertype_filter,
2763 sizeof(struct rte_eth_ethertype_filter));
2764 TAILQ_INSERT_TAIL(&filter_ethertype_list,
2765 ethertype_filter_ptr, entries);
2766 flow->rule = ethertype_filter_ptr;
2767 flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2768 return flow;
2769 }
2770 goto out;
2771 }
2772
2773 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2774 ret = txgbe_parse_syn_filter(dev, attr, pattern,
2775 actions, &syn_filter, error);
2776 if (!ret) {
2777 ret = txgbe_syn_filter_set(dev, &syn_filter, TRUE);
2778 if (!ret) {
2779 syn_filter_ptr = rte_zmalloc("txgbe_syn_filter",
2780 sizeof(struct txgbe_eth_syn_filter_ele), 0);
2781 if (!syn_filter_ptr) {
2782 PMD_DRV_LOG(ERR, "failed to allocate memory");
2783 goto out;
2784 }
2785 rte_memcpy(&syn_filter_ptr->filter_info,
2786 &syn_filter,
2787 sizeof(struct rte_eth_syn_filter));
2788 TAILQ_INSERT_TAIL(&filter_syn_list,
2789 syn_filter_ptr,
2790 entries);
2791 flow->rule = syn_filter_ptr;
2792 flow->filter_type = RTE_ETH_FILTER_SYN;
2793 return flow;
2794 }
2795 goto out;
2796 }
2797
2798 memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
2799 ret = txgbe_parse_fdir_filter(dev, attr, pattern,
2800 actions, &fdir_rule, error);
2801 if (!ret) {
2802 /* A mask cannot be deleted. */
2803 if (fdir_rule.b_mask) {
2804 if (!fdir_info->mask_added) {
2805 /* It's the first time the mask is set. */
2806 rte_memcpy(&fdir_info->mask,
2807 &fdir_rule.mask,
2808 sizeof(struct txgbe_hw_fdir_mask));
2809 fdir_info->flex_bytes_offset =
2810 fdir_rule.flex_bytes_offset;
2811
2812 if (fdir_rule.mask.flex_bytes_mask)
2813 txgbe_fdir_set_flexbytes_offset(dev,
2814 fdir_rule.flex_bytes_offset);
2815
2816 ret = txgbe_fdir_set_input_mask(dev);
2817 if (ret)
2818 goto out;
2819
2820 fdir_info->mask_added = TRUE;
2821 first_mask = TRUE;
2822 } else {
2823 /**
2824 * Only support one global mask,
2825 * all the masks should be the same.
2826 */
2827 ret = memcmp(&fdir_info->mask,
2828 &fdir_rule.mask,
2829 sizeof(struct txgbe_hw_fdir_mask));
2830 if (ret)
2831 goto out;
2832
2833 if (fdir_info->flex_bytes_offset !=
2834 fdir_rule.flex_bytes_offset)
2835 goto out;
2836 }
2837 }
2838
2839 if (fdir_rule.b_spec) {
2840 ret = txgbe_fdir_filter_program(dev, &fdir_rule,
2841 FALSE, FALSE);
2842 if (!ret) {
2843 fdir_rule_ptr = rte_zmalloc("txgbe_fdir_filter",
2844 sizeof(struct txgbe_fdir_rule_ele), 0);
2845 if (!fdir_rule_ptr) {
2846 PMD_DRV_LOG(ERR,
2847 "failed to allocate memory");
2848 goto out;
2849 }
2850 rte_memcpy(&fdir_rule_ptr->filter_info,
2851 &fdir_rule,
2852 sizeof(struct txgbe_fdir_rule));
2853 TAILQ_INSERT_TAIL(&filter_fdir_list,
2854 fdir_rule_ptr, entries);
2855 flow->rule = fdir_rule_ptr;
2856 flow->filter_type = RTE_ETH_FILTER_FDIR;
2857
2858 return flow;
2859 }
2860
2861 if (ret) {
2862 /**
2863 				 * Clear the mask_added flag if programming
2864 				 * the filter fails.
2865 				 */
2866 if (first_mask)
2867 fdir_info->mask_added = FALSE;
2868 goto out;
2869 }
2870 }
2871
2872 goto out;
2873 }
2874
2875 memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
2876 ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
2877 actions, &l2_tn_filter, error);
2878 if (!ret) {
2879 ret = txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2880 if (!ret) {
2881 l2_tn_filter_ptr = rte_zmalloc("txgbe_l2_tn_filter",
2882 sizeof(struct txgbe_eth_l2_tunnel_conf_ele), 0);
2883 if (!l2_tn_filter_ptr) {
2884 PMD_DRV_LOG(ERR, "failed to allocate memory");
2885 goto out;
2886 }
2887 rte_memcpy(&l2_tn_filter_ptr->filter_info,
2888 &l2_tn_filter,
2889 sizeof(struct txgbe_l2_tunnel_conf));
2890 TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2891 l2_tn_filter_ptr, entries);
2892 flow->rule = l2_tn_filter_ptr;
2893 flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2894 return flow;
2895 }
2896 }
2897
2898 memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2899 ret = txgbe_parse_rss_filter(dev, attr,
2900 actions, &rss_conf, error);
2901 if (!ret) {
2902 ret = txgbe_config_rss_filter(dev, &rss_conf, TRUE);
2903 if (!ret) {
2904 rss_filter_ptr = rte_zmalloc("txgbe_rss_filter",
2905 sizeof(struct txgbe_rss_conf_ele), 0);
2906 if (!rss_filter_ptr) {
2907 PMD_DRV_LOG(ERR, "failed to allocate memory");
2908 goto out;
2909 }
2910 txgbe_rss_conf_init(&rss_filter_ptr->filter_info,
2911 &rss_conf.conf);
2912 TAILQ_INSERT_TAIL(&filter_rss_list,
2913 rss_filter_ptr, entries);
2914 flow->rule = rss_filter_ptr;
2915 flow->filter_type = RTE_ETH_FILTER_HASH;
2916 return flow;
2917 }
2918 }
2919
2920 out:
2921 TAILQ_REMOVE(&txgbe_flow_list,
2922 txgbe_flow_mem_ptr, entries);
2923 rte_flow_error_set(error, -ret,
2924 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2925 "Failed to create flow.");
2926 rte_free(txgbe_flow_mem_ptr);
2927 rte_free(flow);
2928 return NULL;
2929 }
2930
2931 /**
2932 * Check if the flow rule is supported by txgbe.
2933  * It only checks the format; it does not guarantee that the rule can be
2934  * programmed into the HW, because there may not be enough room for it.
2935 */
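/**
 * Illustrative application-side usage (not driver code): validate the rule
 * first so that a malformed rule is reported without touching the HW, then
 * create it. port_id, attr, pattern and actions are assumed to have been set
 * up by the caller, e.g. as in the sketches above.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (f == NULL)
 *		printf("flow not created: %s\n",
 *		       err.message ? err.message : "(no error message)");
 */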
2936 static int
2937 txgbe_flow_validate(struct rte_eth_dev *dev,
2938 const struct rte_flow_attr *attr,
2939 const struct rte_flow_item pattern[],
2940 const struct rte_flow_action actions[],
2941 struct rte_flow_error *error)
2942 {
2943 struct rte_eth_ntuple_filter ntuple_filter;
2944 struct rte_eth_ethertype_filter ethertype_filter;
2945 struct rte_eth_syn_filter syn_filter;
2946 struct txgbe_l2_tunnel_conf l2_tn_filter;
2947 struct txgbe_fdir_rule fdir_rule;
2948 struct txgbe_rte_flow_rss_conf rss_conf;
2949 int ret = 0;
2950
2951 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2952 ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
2953 actions, &ntuple_filter, error);
2954 if (!ret)
2955 return 0;
2956
2957 	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2958 	ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
2959 				actions, &ethertype_filter, error);
2960 if (!ret)
2961 return 0;
2962
2963 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2964 ret = txgbe_parse_syn_filter(dev, attr, pattern,
2965 actions, &syn_filter, error);
2966 if (!ret)
2967 return 0;
2968
2969 memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
2970 ret = txgbe_parse_fdir_filter(dev, attr, pattern,
2971 actions, &fdir_rule, error);
2972 if (!ret)
2973 return 0;
2974
2975 memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
2976 ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
2977 actions, &l2_tn_filter, error);
2978 if (!ret)
2979 return 0;
2980
2981 memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2982 ret = txgbe_parse_rss_filter(dev, attr,
2983 actions, &rss_conf, error);
2984
2985 return ret;
2986 }
2987
2988 /* Destroy a flow rule on txgbe. */
2989 static int
2990 txgbe_flow_destroy(struct rte_eth_dev *dev,
2991 struct rte_flow *flow,
2992 struct rte_flow_error *error)
2993 {
2994 int ret = 0;
2995 struct rte_flow *pmd_flow = flow;
2996 enum rte_filter_type filter_type = pmd_flow->filter_type;
2997 struct rte_eth_ntuple_filter ntuple_filter;
2998 struct rte_eth_ethertype_filter ethertype_filter;
2999 struct rte_eth_syn_filter syn_filter;
3000 struct txgbe_fdir_rule fdir_rule;
3001 struct txgbe_l2_tunnel_conf l2_tn_filter;
3002 struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
3003 struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
3004 struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
3005 struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3006 struct txgbe_fdir_rule_ele *fdir_rule_ptr;
3007 struct txgbe_flow_mem *txgbe_flow_mem_ptr;
3008 struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
3009 struct txgbe_rss_conf_ele *rss_filter_ptr;
3010
3011 switch (filter_type) {
3012 case RTE_ETH_FILTER_NTUPLE:
3013 ntuple_filter_ptr = (struct txgbe_ntuple_filter_ele *)
3014 pmd_flow->rule;
3015 rte_memcpy(&ntuple_filter,
3016 &ntuple_filter_ptr->filter_info,
3017 sizeof(struct rte_eth_ntuple_filter));
3018 ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3019 if (!ret) {
3020 TAILQ_REMOVE(&filter_ntuple_list,
3021 ntuple_filter_ptr, entries);
3022 rte_free(ntuple_filter_ptr);
3023 }
3024 break;
3025 case RTE_ETH_FILTER_ETHERTYPE:
3026 ethertype_filter_ptr = (struct txgbe_ethertype_filter_ele *)
3027 pmd_flow->rule;
3028 		rte_memcpy(&ethertype_filter,
3029 			&ethertype_filter_ptr->filter_info,
3030 sizeof(struct rte_eth_ethertype_filter));
3031 ret = txgbe_add_del_ethertype_filter(dev,
3032 					&ethertype_filter, FALSE);
3033 if (!ret) {
3034 TAILQ_REMOVE(&filter_ethertype_list,
3035 ethertype_filter_ptr, entries);
3036 rte_free(ethertype_filter_ptr);
3037 }
3038 break;
3039 case RTE_ETH_FILTER_SYN:
3040 syn_filter_ptr = (struct txgbe_eth_syn_filter_ele *)
3041 pmd_flow->rule;
3042 rte_memcpy(&syn_filter,
3043 &syn_filter_ptr->filter_info,
3044 sizeof(struct rte_eth_syn_filter));
3045 ret = txgbe_syn_filter_set(dev, &syn_filter, FALSE);
3046 if (!ret) {
3047 TAILQ_REMOVE(&filter_syn_list,
3048 syn_filter_ptr, entries);
3049 rte_free(syn_filter_ptr);
3050 }
3051 break;
3052 case RTE_ETH_FILTER_FDIR:
3053 fdir_rule_ptr = (struct txgbe_fdir_rule_ele *)pmd_flow->rule;
3054 rte_memcpy(&fdir_rule,
3055 &fdir_rule_ptr->filter_info,
3056 sizeof(struct txgbe_fdir_rule));
3057 ret = txgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3058 if (!ret) {
3059 TAILQ_REMOVE(&filter_fdir_list,
3060 fdir_rule_ptr, entries);
3061 rte_free(fdir_rule_ptr);
3062 if (TAILQ_EMPTY(&filter_fdir_list))
3063 fdir_info->mask_added = false;
3064 }
3065 break;
3066 case RTE_ETH_FILTER_L2_TUNNEL:
3067 l2_tn_filter_ptr = (struct txgbe_eth_l2_tunnel_conf_ele *)
3068 pmd_flow->rule;
3069 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3070 sizeof(struct txgbe_l2_tunnel_conf));
3071 ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3072 if (!ret) {
3073 TAILQ_REMOVE(&filter_l2_tunnel_list,
3074 l2_tn_filter_ptr, entries);
3075 rte_free(l2_tn_filter_ptr);
3076 }
3077 break;
3078 case RTE_ETH_FILTER_HASH:
3079 rss_filter_ptr = (struct txgbe_rss_conf_ele *)
3080 pmd_flow->rule;
3081 ret = txgbe_config_rss_filter(dev,
3082 &rss_filter_ptr->filter_info, FALSE);
3083 if (!ret) {
3084 TAILQ_REMOVE(&filter_rss_list,
3085 rss_filter_ptr, entries);
3086 rte_free(rss_filter_ptr);
3087 }
3088 break;
3089 default:
3090 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3091 filter_type);
3092 ret = -EINVAL;
3093 break;
3094 }
3095
3096 if (ret) {
3097 rte_flow_error_set(error, EINVAL,
3098 RTE_FLOW_ERROR_TYPE_HANDLE,
3099 NULL, "Failed to destroy flow");
3100 return ret;
3101 }
3102
3103 TAILQ_FOREACH(txgbe_flow_mem_ptr, &txgbe_flow_list, entries) {
3104 if (txgbe_flow_mem_ptr->flow == pmd_flow) {
3105 TAILQ_REMOVE(&txgbe_flow_list,
3106 txgbe_flow_mem_ptr, entries);
3107 			rte_free(txgbe_flow_mem_ptr);
			/* stop: TAILQ_FOREACH must not step over the freed node */
			break;
3108 		}
3109 }
3110 rte_free(flow);
3111
3112 return ret;
3113 }
3114
3115 /* Destroy all flow rules associated with a port on txgbe. */
3116 static int
3117 txgbe_flow_flush(struct rte_eth_dev *dev,
3118 struct rte_flow_error *error)
3119 {
3120 int ret = 0;
3121
3122 txgbe_clear_all_ntuple_filter(dev);
3123 txgbe_clear_all_ethertype_filter(dev);
3124 txgbe_clear_syn_filter(dev);
3125
3126 ret = txgbe_clear_all_fdir_filter(dev);
3127 if (ret < 0) {
3128 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3129 NULL, "Failed to flush rule");
3130 return ret;
3131 }
3132
3133 ret = txgbe_clear_all_l2_tn_filter(dev);
3134 if (ret < 0) {
3135 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3136 NULL, "Failed to flush rule");
3137 return ret;
3138 }
3139
3140 txgbe_clear_rss_filter(dev);
3141
3142 txgbe_filterlist_flush();
3143
3144 return 0;
3145 }
3146
3147 const struct rte_flow_ops txgbe_flow_ops = {
3148 .validate = txgbe_flow_validate,
3149 .create = txgbe_flow_create,
3150 .destroy = txgbe_flow_destroy,
3151 .flush = txgbe_flow_flush,
3152 };
3153