/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2020 Intel Corporation
 */

#include "rte_malloc.h"
#include "igc_logs.h"
#include "igc_txrx.h"
#include "igc_filter.h"
#include "igc_flow.h"

/*******************************************************************************
 * All Supported Rule Types
 *
 * Notes:
 * `para` or `(para)`, the parameter must be set
 * `[para]`, the parameter is optional
 * `([para1][para2]...)`, all parameters are optional, but at least one of
 *     them must be set
 * `para1 | para2 | ...`, only one of the parameters can be set
 *
 * ether-type filter
 * pattern: ETH(type)/END
 * action: QUEUE/END
 * attribute:
 *
 * n-tuple filter
 * pattern: [ETH/]([IPv4(protocol)|IPv6(protocol)/][UDP(dst_port)|
 *          TCP([dst_port],[flags])|SCTP(dst_port)/])END
 * action: QUEUE/END
 * attribute: [priority(0-7)]
 *
 * SYN filter
 * pattern: [ETH/][IPv4|IPv6/]TCP(flags=SYN)/END
 * action: QUEUE/END
 * attribute: [priority(0,1)]
 *
 * RSS filter
 * pattern:
 * action: RSS/END
 * attribute:
 ******************************************************************************/
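
/*
 * Illustrative sketch (not part of the driver): building an ether-type
 * rule matching EtherType 0x8906 and steering it to queue 1 through the
 * generic rte_flow API. The port id, EtherType and queue index are
 * assumptions made for the example.
 *
 *	struct rte_flow_item_eth eth_spec = { .type = RTE_BE16(0x8906) };
 *	struct rte_flow_item_eth eth_mask = { .type = RTE_BE16(0xffff) };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_error err;
 *	struct rte_flow *flow =
 *		rte_flow_create(port_id, &attr, pattern, actions, &err);
 */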

/* Structure to store all filters */
struct igc_all_filter {
	struct igc_ethertype_filter ethertype;
	struct igc_ntuple_filter ntuple;
	struct igc_syn_filter syn;
	struct igc_rss_filter rss;
	uint32_t mask; /* see IGC_FILTER_MASK_* definition */
};

#define IGC_FILTER_MASK_ETHER	(1u << IGC_FILTER_TYPE_ETHERTYPE)
#define IGC_FILTER_MASK_NTUPLE	(1u << IGC_FILTER_TYPE_NTUPLE)
#define IGC_FILTER_MASK_TCP_SYN	(1u << IGC_FILTER_TYPE_SYN)
#define IGC_FILTER_MASK_RSS	(1u << IGC_FILTER_TYPE_HASH)
#define IGC_FILTER_MASK_ALL	(IGC_FILTER_MASK_ETHER |	\
				IGC_FILTER_MASK_NTUPLE |	\
				IGC_FILTER_MASK_TCP_SYN |	\
				IGC_FILTER_MASK_RSS)

#define IGC_SET_FILTER_MASK(_filter, _mask_bits)	\
		((_filter)->mask &= (_mask_bits))

#define IGC_IS_ALL_BITS_SET(_val)	((_val) == (typeof(_val))~0)
#define IGC_NOT_ALL_BITS_SET(_val)	((_val) != (typeof(_val))~0)
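
/*
 * Note: IGC_SET_FILTER_MASK() narrows the set of candidate filter types;
 * each parsing step ANDs away the types that cannot match. A quick sketch
 * of the all-bits-set helpers: for a uint16_t field, (typeof(_val))~0 is
 * 0xffff, so IGC_IS_ALL_BITS_SET() is true only for a fully-exact mask:
 *
 *	uint16_t m = 0xffff;
 *	IGC_IS_ALL_BITS_SET(m);   // true: match the whole field
 *	m = 0xff00;
 *	IGC_NOT_ALL_BITS_SET(m);  // true: partial masks are rejected
 */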

/* Parse rule attribute */
static int
igc_parse_attribute(const struct rte_flow_attr *attr,
	struct igc_all_filter *filter, struct rte_flow_error *error)
{
	if (!attr)
		return 0;

	if (attr->group)
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
			"Not supported");

	if (attr->egress)
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
			"Not supported");

	if (attr->transfer)
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
			"Not supported");

	if (!attr->ingress)
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
			"A rule must apply to ingress traffic");

	if (attr->priority == 0)
		return 0;

	/* only n-tuple and SYN filters have a priority level */
	IGC_SET_FILTER_MASK(filter,
		IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN);

	if (IGC_IS_ALL_BITS_SET(attr->priority)) {
		/* only the SYN filter matches this value */
		IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_TCP_SYN);
		filter->syn.hig_pri = 1;
		return 0;
	}

	if (attr->priority > IGC_NTUPLE_MAX_PRI)
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
			"Priority value is invalid.");

	if (attr->priority > 1) {
		/* only the n-tuple filter matches this value */
		IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);

		/* get priority */
		filter->ntuple.tuple_info.priority = (uint8_t)attr->priority;
		return 0;
	}

	/* priority is 1: both n-tuple and SYN filters still match */
	filter->ntuple.tuple_info.priority = (uint8_t)attr->priority;
	filter->syn.hig_pri = (uint8_t)attr->priority;

	return 0;
}
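
/*
 * Summary of how the attribute priority narrows the candidate filters
 * (derived from the checks above):
 *
 *	priority 0          -> no narrowing, all filter types remain
 *	priority 1          -> n-tuple (priority 1) or SYN (high priority)
 *	priority 2..7       -> n-tuple only
 *	priority all-bits-1 -> SYN only, with high priority set
 */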

/* Function type for pattern parsing */
typedef int (*igc_pattern_parse)(const struct rte_flow_item *,
		struct igc_all_filter *, struct rte_flow_error *);

static int igc_parse_pattern_void(__rte_unused const struct rte_flow_item *item,
		__rte_unused struct igc_all_filter *filter,
		__rte_unused struct rte_flow_error *error);
static int igc_parse_pattern_ether(const struct rte_flow_item *item,
		struct igc_all_filter *filter, struct rte_flow_error *error);
static int igc_parse_pattern_ip(const struct rte_flow_item *item,
		struct igc_all_filter *filter, struct rte_flow_error *error);
static int igc_parse_pattern_ipv6(const struct rte_flow_item *item,
		struct igc_all_filter *filter, struct rte_flow_error *error);
static int igc_parse_pattern_udp(const struct rte_flow_item *item,
		struct igc_all_filter *filter, struct rte_flow_error *error);
static int igc_parse_pattern_tcp(const struct rte_flow_item *item,
		struct igc_all_filter *filter, struct rte_flow_error *error);

static igc_pattern_parse pattern_parse_list[] = {
	[RTE_FLOW_ITEM_TYPE_VOID] = igc_parse_pattern_void,
	[RTE_FLOW_ITEM_TYPE_ETH] = igc_parse_pattern_ether,
	[RTE_FLOW_ITEM_TYPE_IPV4] = igc_parse_pattern_ip,
	[RTE_FLOW_ITEM_TYPE_IPV6] = igc_parse_pattern_ipv6,
	[RTE_FLOW_ITEM_TYPE_UDP] = igc_parse_pattern_udp,
	[RTE_FLOW_ITEM_TYPE_TCP] = igc_parse_pattern_tcp,
};
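
/*
 * The table above is indexed directly by item type, so walking a pattern
 * such as ETH/IPV4/TCP/END dispatches, in order, to igc_parse_pattern_ether,
 * igc_parse_pattern_ip and igc_parse_pattern_tcp; each call further narrows
 * filter->mask until a single filter type (or none) is left.
 */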

/* Parse rule patterns */
static int
igc_parse_patterns(const struct rte_flow_item patterns[],
	struct igc_all_filter *filter, struct rte_flow_error *error)
{
	const struct rte_flow_item *item = patterns;

	if (item == NULL) {
		/* only the RSS filter matches an empty pattern */
		IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_RSS);
		return 0;
	}

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		int ret;

		if (item->type >= RTE_DIM(pattern_parse_list))
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Not supported");

		if (item->last)
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
				"Range not supported");

		/* check that the pattern format is valid: spec and mask
		 * must be both set or both unset
		 */
		if (!!item->spec ^ !!item->mask)
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Format error");

		/* get the pattern-type callback */
		igc_pattern_parse parse_func =
				pattern_parse_list[item->type];
		if (!parse_func)
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Not supported");

		/* call the pattern-type function */
		ret = parse_func(item, filter, error);
		if (ret)
			return ret;

		/* no filter matches the pattern */
		if (filter->mask == 0)
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Not supported");
	}

	return 0;
}

static int igc_parse_action_queue(struct rte_eth_dev *dev,
		const struct rte_flow_action *act,
		struct igc_all_filter *filter, struct rte_flow_error *error);
static int igc_parse_action_rss(struct rte_eth_dev *dev,
		const struct rte_flow_action *act,
		struct igc_all_filter *filter, struct rte_flow_error *error);

/* Parse flow actions */
static int
igc_parse_actions(struct rte_eth_dev *dev,
		const struct rte_flow_action actions[],
		struct igc_all_filter *filter,
		struct rte_flow_error *error)
{
	const struct rte_flow_action *act = actions;
	int ret;

	if (act == NULL)
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM, act,
			"An action is needed");

	for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
		switch (act->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = igc_parse_action_queue(dev, act, filter, error);
			if (ret)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = igc_parse_action_rss(dev, act, filter, error);
			if (ret)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		default:
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, act,
				"Not supported");
		}

		/* no filter matches the action */
		if (filter->mask == 0)
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION, act,
				"Not supported");
	}

	return 0;
}

/* Parse a flow rule */
static int
igc_parse_flow(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item patterns[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error,
		struct igc_all_filter *filter)
{
	int ret;

	/* clear all filters */
	memset(filter, 0, sizeof(*filter));

	/* set default filter mask */
	filter->mask = IGC_FILTER_MASK_ALL;

	ret = igc_parse_attribute(attr, filter, error);
	if (ret)
		return ret;

	ret = igc_parse_patterns(patterns, filter, error);
	if (ret)
		return ret;

	ret = igc_parse_actions(dev, actions, filter, error);
	if (ret)
		return ret;

	/* fail if no filter matched this flow, or if more than one did:
	 * (mask & (mask - 1)) is non-zero when more than one bit is set
	 */
	if (filter->mask == 0 || (filter->mask & (filter->mask - 1)))
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			"Flow can't be recognized");
	return 0;
}

/* Parse pattern type of void */
static int
igc_parse_pattern_void(__rte_unused const struct rte_flow_item *item,
		__rte_unused struct igc_all_filter *filter,
		__rte_unused struct rte_flow_error *error)
{
	return 0;
}

/* Parse pattern type of Ethernet header */
static int
igc_parse_pattern_ether(const struct rte_flow_item *item,
		struct igc_all_filter *filter,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *spec = item->spec;
	const struct rte_flow_item_eth *mask = item->mask;
	struct igc_ethertype_filter *ether;

	if (mask == NULL) {
		/* only n-tuple and SYN filters match the pattern */
		IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE |
			IGC_FILTER_MASK_TCP_SYN);
		return 0;
	}

	/* only the ether-type filter matches the pattern */
	IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_ETHER);

	/* destination and source MAC addresses are not supported */
	if (!rte_is_zero_ether_addr(&mask->src) ||
		!rte_is_zero_ether_addr(&mask->dst))
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
			"Only ether-type is supported");

	/* ether-type mask bits must be all 1 */
	if (IGC_NOT_ALL_BITS_SET(mask->type))
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
			"Ethernet type mask bits must be all 1");

	ether = &filter->ethertype;

	/* get ether-type */
	ether->ether_type = rte_be_to_cpu_16(spec->type);

	/* ether-type must not be IPv4, IPv6 or 0 */
	if (ether->ether_type == RTE_ETHER_TYPE_IPV4 ||
		ether->ether_type == RTE_ETHER_TYPE_IPV6 ||
		ether->ether_type == 0)
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			"IPv4/IPv6/0 not supported by ethertype filter");
	return 0;
}

/* Parse pattern type of IPv4 */
static int
igc_parse_pattern_ip(const struct rte_flow_item *item,
		struct igc_all_filter *filter,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;

	if (mask == NULL) {
		/* only n-tuple and SYN filters match this pattern */
		IGC_SET_FILTER_MASK(filter,
			IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN);
		return 0;
	}

	/* only the n-tuple filter matches this pattern */
	IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);

	/* only the protocol field may be matched */
	if (mask->hdr.version_ihl ||
		mask->hdr.type_of_service ||
		mask->hdr.total_length ||
		mask->hdr.packet_id ||
		mask->hdr.fragment_offset ||
		mask->hdr.time_to_live ||
		mask->hdr.hdr_checksum ||
		mask->hdr.dst_addr ||
		mask->hdr.src_addr)
		return rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
			"IPv4 only supports the protocol field");

	if (mask->hdr.next_proto_id == 0)
		return 0;

	if (IGC_NOT_ALL_BITS_SET(mask->hdr.next_proto_id))
		return rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
			"IPv4 protocol mask bits must be all 0 or 1");

	/* get protocol type */
	filter->ntuple.tuple_info.proto_mask = 1;
	filter->ntuple.tuple_info.proto = spec->hdr.next_proto_id;
	return 0;
}

/*
 * Check whether an IPv6 address is all zeros.
 * Return true if so, false otherwise.
 */
static inline bool
igc_is_zero_ipv6_addr(const void *ipv6_addr)
{
	const uint64_t *ddw = ipv6_addr;
	return ddw[0] == 0 && ddw[1] == 0;
}
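
/*
 * Note: the helper above reads the 16-byte address as two 64-bit loads.
 * On platforms with strict alignment requirements this relies on the
 * item buffers being sufficiently aligned; architectures that tolerate
 * unaligned access are unaffected.
 */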

/* Parse pattern type of IPv6 */
static int
igc_parse_pattern_ipv6(const struct rte_flow_item *item,
		struct igc_all_filter *filter,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv6 *spec = item->spec;
	const struct rte_flow_item_ipv6 *mask = item->mask;

	if (mask == NULL) {
		/* only n-tuple and SYN filters match this pattern */
		IGC_SET_FILTER_MASK(filter,
			IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN);
		return 0;
	}

	/* only the n-tuple filter matches this pattern */
	IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);

	/* only the protocol field may be matched */
	if (mask->hdr.vtc_flow ||
		mask->hdr.payload_len ||
		mask->hdr.hop_limits ||
		!igc_is_zero_ipv6_addr(mask->hdr.src_addr) ||
		!igc_is_zero_ipv6_addr(mask->hdr.dst_addr))
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"IPv6 only supports the protocol field");

	if (mask->hdr.proto == 0)
		return 0;

	if (IGC_NOT_ALL_BITS_SET(mask->hdr.proto))
		return rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
			"IPv6 protocol mask bits must be all 0 or 1");

	/* get protocol type */
	filter->ntuple.tuple_info.proto_mask = 1;
	filter->ntuple.tuple_info.proto = spec->hdr.proto;

	return 0;
}

/* Parse pattern type of UDP */
static int
igc_parse_pattern_udp(const struct rte_flow_item *item,
		struct igc_all_filter *filter,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_udp *spec = item->spec;
	const struct rte_flow_item_udp *mask = item->mask;

	/* only the n-tuple filter matches this pattern */
	IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);

	if (mask == NULL)
		return 0;

	/* only the destination port may be matched */
	if (mask->hdr.dgram_len || mask->hdr.dgram_cksum || mask->hdr.src_port)
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
			"UDP only supports the destination port");

	if (mask->hdr.dst_port == 0)
		return 0;

	if (IGC_NOT_ALL_BITS_SET(mask->hdr.dst_port))
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
			"UDP port mask bits must be all 0 or 1");

	/* get destination port info. */
	filter->ntuple.tuple_info.dst_port_mask = 1;
	filter->ntuple.tuple_info.dst_port = spec->hdr.dst_port;

	return 0;
}

/* Parse pattern type of TCP */
static int
igc_parse_pattern_tcp(const struct rte_flow_item *item,
		struct igc_all_filter *filter,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_tcp *spec = item->spec;
	const struct rte_flow_item_tcp *mask = item->mask;
	struct igc_ntuple_info *tuple_info = &filter->ntuple.tuple_info;

	if (mask == NULL) {
		/* only the n-tuple filter matches this pattern */
		IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);
		return 0;
	}

	/* only n-tuple and SYN filters match this pattern */
	IGC_SET_FILTER_MASK(filter,
		IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN);

	/* only the destination port and TCP flags may be matched */
	if (mask->hdr.sent_seq ||
		mask->hdr.recv_ack ||
		mask->hdr.data_off ||
		mask->hdr.rx_win ||
		mask->hdr.cksum ||
		mask->hdr.tcp_urp ||
		mask->hdr.src_port)
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
			"TCP only supports the destination port and flags");

	/* if the destination port is used */
	if (mask->hdr.dst_port) {
		/* only the n-tuple filter matches this pattern */
		IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);

		if (IGC_NOT_ALL_BITS_SET(mask->hdr.dst_port))
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
				"TCP port mask bits must be all 1");

		/* get destination port info. */
		tuple_info->dst_port = spec->hdr.dst_port;
		tuple_info->dst_port_mask = 1;
	}

	/* if TCP flags are used */
	if (mask->hdr.tcp_flags) {
		if (IGC_IS_ALL_BITS_SET(mask->hdr.tcp_flags)) {
			/* only the n-tuple filter matches this pattern */
			IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);

			/* get TCP flags */
			tuple_info->tcp_flags = spec->hdr.tcp_flags;
		} else if (mask->hdr.tcp_flags == RTE_TCP_SYN_FLAG) {
			/* only the TCP SYN filter matches this pattern */
			IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_TCP_SYN);
		} else {
			/* no filter matches this pattern */
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
				"TCP flags can't match");
		}
	} else {
		/* only the n-tuple filter matches this pattern */
		IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_NTUPLE);
	}

	return 0;
}
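
/*
 * A sketch of how the TCP flags mask selects the filter type in the
 * function above (values are illustrative):
 *
 *	mask.hdr.tcp_flags = 0xff;               // exact flags: n-tuple
 *	mask.hdr.tcp_flags = RTE_TCP_SYN_FLAG;   // SYN-only mask: SYN filter
 *	mask.hdr.tcp_flags = RTE_TCP_SYN_FLAG |
 *			     RTE_TCP_ACK_FLAG;   // other partial mask: rejected
 */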

/* Parse action of queue */
static int
igc_parse_action_queue(struct rte_eth_dev *dev,
		const struct rte_flow_action *act,
		struct igc_all_filter *filter,
		struct rte_flow_error *error)
{
	uint16_t queue_idx;

	if (act->conf == NULL)
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
			"NULL pointer");

	/* only ether-type, n-tuple and SYN filters match the action */
	IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_ETHER |
		IGC_FILTER_MASK_NTUPLE | IGC_FILTER_MASK_TCP_SYN);

	/* get queue index */
	queue_idx = ((const struct rte_flow_action_queue *)act->conf)->index;

	/* check that the queue index is valid */
	if (queue_idx >= dev->data->nb_rx_queues)
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
			"Queue id is invalid");

	/* get queue info. */
	filter->ethertype.queue = queue_idx;
	filter->ntuple.queue = queue_idx;
	filter->syn.queue = queue_idx;
	return 0;
}

/* Parse action of RSS */
static int
igc_parse_action_rss(struct rte_eth_dev *dev,
		const struct rte_flow_action *act,
		struct igc_all_filter *filter,
		struct rte_flow_error *error)
{
	const struct rte_flow_action_rss *rss = act->conf;
	uint32_t i;

	if (act->conf == NULL)
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
			"NULL pointer");

	/* only the RSS filter matches the action */
	IGC_SET_FILTER_MASK(filter, IGC_FILTER_MASK_RSS);

	/* the RSS queue count can't be zero and can't exceed the
	 * redirection table size (IGC_RSS_RDT_SIZD)
	 */
	if (!rss || !rss->queue_num || rss->queue_num > IGC_RSS_RDT_SIZD)
		return rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
			"No valid queues");

	/* a queue index can't exceed the number of RX queues */
	for (i = 0; i < rss->queue_num; i++) {
		if (rss->queue[i] >= dev->data->nb_rx_queues)
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
				"Queue id is invalid");
	}

	/* only the default RSS hash function is supported */
	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
		return rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
			"Only the default RSS hash function is supported");

	if (rss->level)
		return rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
			"Only RSS encapsulation level 0 is supported");

	/* check that the key length is valid */
	if (rss->key_len && rss->key_len != sizeof(filter->rss.key))
		return rte_flow_error_set(error, ENOTSUP,
			RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
			"RSS hash key must be exactly 40 bytes");

	/* get RSS info. */
	igc_rss_conf_set(&filter->rss, rss);
	return 0;
}
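
/*
 * Illustrative sketch (not part of the driver): an RSS action spreading
 * traffic over queues 0-3 that would pass the checks above. The queue
 * array and hash types are assumptions made for the example.
 *
 *	uint16_t queues[] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss rss_conf = {
 *		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *		.level = 0,
 *		.types = ETH_RSS_IP | ETH_RSS_TCP,
 *		.key_len = 0,	// use the default 40-byte key
 *		.queue_num = RTE_DIM(queues),
 *		.queue = queues,
 *	};
 */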

/**
 * Allocate a rte_flow from the heap.
 * Return a pointer to the flow, or NULL on failure.
 **/
static inline struct rte_flow *
igc_alloc_flow(const void *filter, enum igc_filter_type type, uint inbytes)
{
	/* allocate memory, 8-byte boundary aligned */
	struct rte_flow *flow = rte_malloc("igc flow filter",
			sizeof(struct rte_flow) + inbytes, 8);
	if (flow == NULL) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return NULL;
	}

	flow->filter_type = type;

	/* copy filter data */
	memcpy(flow->filter, filter, inbytes);
	return flow;
}

/* Append a rte_flow to the list */
static inline void
igc_append_flow(struct igc_flow_list *list, struct rte_flow *flow)
{
	TAILQ_INSERT_TAIL(list, flow, node);
}

/**
 * Remove the flow from the list and free the flow buffer.
 * The caller must make sure the flow really exists in the list.
 **/
static inline void
igc_remove_flow(struct igc_flow_list *list, struct rte_flow *flow)
{
	TAILQ_REMOVE(list, flow, node);
	rte_free(flow);
}

/* Check whether the flow is really in the list or not */
static inline bool
igc_is_flow_in_list(struct igc_flow_list *list, struct rte_flow *flow)
{
	struct rte_flow *it;

	TAILQ_FOREACH(it, list, node) {
		if (it == flow)
			return true;
	}

	return false;
}

/**
 * Create a flow rule.
 * Theoretically one rule can match more than one filter type;
 * the first filter type it hits is used, so the order of the
 * cases below matters.
 **/
static struct rte_flow *
igc_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item patterns[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_flow *flow = NULL;
	struct igc_all_filter filter;
	int ret;

	ret = igc_parse_flow(dev, attr, patterns, actions, error, &filter);
	if (ret)
		return NULL;
	ret = -ENOMEM;

	switch (filter.mask) {
	case IGC_FILTER_MASK_ETHER:
		flow = igc_alloc_flow(&filter.ethertype,
				IGC_FILTER_TYPE_ETHERTYPE,
				sizeof(filter.ethertype));
		if (flow)
			ret = igc_add_ethertype_filter(dev, &filter.ethertype);
		break;
	case IGC_FILTER_MASK_NTUPLE:
		/* check that the n-tuple filter is valid */
		if (filter.ntuple.tuple_info.dst_port_mask == 0 &&
			filter.ntuple.tuple_info.proto_mask == 0) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_NONE, NULL,
				"Flow can't be recognized");
			return NULL;
		}

		flow = igc_alloc_flow(&filter.ntuple, IGC_FILTER_TYPE_NTUPLE,
				sizeof(filter.ntuple));
		if (flow)
			ret = igc_add_ntuple_filter(dev, &filter.ntuple);
		break;
	case IGC_FILTER_MASK_TCP_SYN:
		flow = igc_alloc_flow(&filter.syn, IGC_FILTER_TYPE_SYN,
				sizeof(filter.syn));
		if (flow)
			ret = igc_set_syn_filter(dev, &filter.syn);
		break;
	case IGC_FILTER_MASK_RSS:
		flow = igc_alloc_flow(&filter.rss, IGC_FILTER_TYPE_HASH,
				sizeof(filter.rss));
		if (flow) {
			struct igc_rss_filter *rss =
					(struct igc_rss_filter *)flow->filter;
			/* re-point the copied conf at the copied storage */
			rss->conf.key = rss->key;
			rss->conf.queue = rss->queue;
			ret = igc_add_rss_filter(dev, &filter.rss);
		}
		break;
	default:
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_NONE, NULL,
			"Flow can't be recognized");
		return NULL;
	}

	if (ret) {
		/* check and free the memory */
		if (flow)
			rte_free(flow);

		rte_flow_error_set(error, -ret,
			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			"Failed to create flow.");
		return NULL;
	}

	/* append the flow to the tail of the list */
	igc_append_flow(IGC_DEV_PRIVATE_FLOW_LIST(dev), flow);
	return flow;
}

/**
 * Check whether the flow rule is supported by the device.
 * This only checks the format; it does not guarantee the rule can be
 * programmed into the HW, since there may not be enough room for it.
 **/
static int
igc_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item patterns[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct igc_all_filter filter;
	int ret;

	ret = igc_parse_flow(dev, attr, patterns, actions, error, &filter);
	if (ret)
		return ret;

	switch (filter.mask) {
	case IGC_FILTER_MASK_NTUPLE:
		/* check that the n-tuple filter is valid */
		if (filter.ntuple.tuple_info.dst_port_mask == 0 &&
			filter.ntuple.tuple_info.proto_mask == 0)
			return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_NONE, NULL,
				"Flow can't be recognized");
		break;
	}

	return 0;
}

/**
 * Disable a valid flow. The flow must not be NULL and must be
 * chained in the device flow list.
 **/
static int
igc_disable_flow(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	int ret = 0;

	switch (flow->filter_type) {
	case IGC_FILTER_TYPE_ETHERTYPE:
		ret = igc_del_ethertype_filter(dev,
			(struct igc_ethertype_filter *)&flow->filter);
		break;
	case IGC_FILTER_TYPE_NTUPLE:
		ret = igc_del_ntuple_filter(dev,
			(struct igc_ntuple_filter *)&flow->filter);
		break;
	case IGC_FILTER_TYPE_SYN:
		igc_clear_syn_filter(dev);
		break;
	case IGC_FILTER_TYPE_HASH:
		ret = igc_del_rss_filter(dev);
		break;
	default:
		PMD_DRV_LOG(ERR, "Filter type (%d) not supported",
				flow->filter_type);
		ret = -EINVAL;
	}

	return ret;
}

/* Destroy a flow rule */
static int
igc_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct igc_flow_list *list = IGC_DEV_PRIVATE_FLOW_LIST(dev);
	int ret;

	if (!flow) {
		PMD_DRV_LOG(ERR, "NULL flow!");
		return -EINVAL;
	}

	/* check that the flow was created by the IGC PMD */
	if (!igc_is_flow_in_list(list, flow)) {
		PMD_DRV_LOG(ERR, "Flow (%p) not found!", flow);
		return -ENOENT;
	}

	ret = igc_disable_flow(dev, flow);
	if (ret)
		rte_flow_error_set(error, -ret,
			RTE_FLOW_ERROR_TYPE_HANDLE,
			NULL, "Failed to destroy flow");

	igc_remove_flow(list, flow);
	return ret;
}

/* Initialize the device flow list head */
void
igc_flow_init(struct rte_eth_dev *dev)
{
	TAILQ_INIT(IGC_DEV_PRIVATE_FLOW_LIST(dev));
}

/* Destroy all flows in the list and free their memory */
int
igc_flow_flush(struct rte_eth_dev *dev,
		__rte_unused struct rte_flow_error *error)
{
	struct igc_flow_list *list = IGC_DEV_PRIVATE_FLOW_LIST(dev);
	struct rte_flow *flow;

	while ((flow = TAILQ_FIRST(list)) != NULL) {
		igc_disable_flow(dev, flow);
		igc_remove_flow(list, flow);
	}

	return 0;
}

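/*
 * Applications reach the callbacks below through the generic rte_flow
 * API once the PMD hands this table back from its flow-ops query hook.
 * A minimal sketch (port_id, attr, pattern and actions are assumptions
 * for the example):
 *
 *	struct rte_flow_error err;
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern,
 *				       actions, &err);
 */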
const struct rte_flow_ops igc_flow_ops = {
	.validate = igc_flow_validate,
	.create = igc_flow_create,
	.destroy = igc_flow_destroy,
	.flush = igc_flow_flush,
};