/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <ctype.h>
#include <limits.h>
#include <inttypes.h>
#include <getopt.h>

#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_flow.h>
#include <rte_flow_classify.h>
#include <rte_table_acl.h>

#define RX_RING_SIZE 1024
#define TX_RING_SIZE 1024

#define NUM_MBUFS 8191
#define MBUF_CACHE_SIZE 250
#define BURST_SIZE 32

#define MAX_NUM_CLASSIFY 30
#define FLOW_CLASSIFY_MAX_RULE_NUM 91
#define FLOW_CLASSIFY_MAX_PRIORITY 8
#define FLOW_CLASSIFIER_NAME_SIZE 64

#define COMMENT_LEAD_CHAR	('#')
#define OPTION_RULE_IPV4	"rule_ipv4"
#define RTE_LOGTYPE_FLOW_CLASSIFY	RTE_LOGTYPE_USER3
#define flow_classify_log(format, ...) \
		RTE_LOG(ERR, FLOW_CLASSIFY, format, ##__VA_ARGS__)

#define uint32_t_to_char(ip, a, b, c, d) do {\
		*a = (unsigned char)(ip >> 24 & 0xff);\
		*b = (unsigned char)(ip >> 16 & 0xff);\
		*c = (unsigned char)(ip >> 8 & 0xff);\
		*d = (unsigned char)(ip & 0xff);\
	} while (0)

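/* Offsets of the whitespace-separated fields on one rule line, in the
 * order expected by parse_ipv4_5tuple_rule().
 */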
enum {
	CB_FLD_SRC_ADDR,
	CB_FLD_DST_ADDR,
	CB_FLD_SRC_PORT,
	CB_FLD_SRC_PORT_DLM,
	CB_FLD_SRC_PORT_MASK,
	CB_FLD_DST_PORT,
	CB_FLD_DST_PORT_DLM,
	CB_FLD_DST_PORT_MASK,
	CB_FLD_PROTO,
	CB_FLD_PRIORITY,
	CB_FLD_NUM,
};

static struct {
	const char *rule_ipv4_name;
} parm_config;
const char cb_port_delim[] = ":";

static const struct rte_eth_conf port_conf_default = {
	.rxmode = {
		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
	},
};

struct flow_classifier {
	struct rte_flow_classifier *cls;
};

struct flow_classifier_acl {
	struct flow_classifier cls;
} __rte_cache_aligned;

/* ACL field definitions for IPv4 5 tuple rule */

enum {
	PROTO_FIELD_IPV4,
	SRC_FIELD_IPV4,
	DST_FIELD_IPV4,
	SRCP_FIELD_IPV4,
	DSTP_FIELD_IPV4,
	NUM_FIELDS_IPV4
};

enum {
	PROTO_INPUT_IPV4,
	SRC_INPUT_IPV4,
	DST_INPUT_IPV4,
	SRCP_DESTP_INPUT_IPV4
};
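
/* rte_acl groups the fields of a lookup key into 4-byte words by input
 * index; the two 16-bit port fields below therefore share
 * SRCP_DESTP_INPUT_IPV4, as noted in the array comments.
 */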

static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
	/* first input field - always one byte long. */
	{
		.type = RTE_ACL_FIELD_TYPE_BITMASK,
		.size = sizeof(uint8_t),
		.field_index = PROTO_FIELD_IPV4,
		.input_index = PROTO_INPUT_IPV4,
		.offset = sizeof(struct rte_ether_hdr) +
			offsetof(struct rte_ipv4_hdr, next_proto_id),
	},
	/* next input field (IPv4 source address) - 4 consecutive bytes. */
	{
		/* rte_flow uses a bit mask for IPv4 addresses */
		.type = RTE_ACL_FIELD_TYPE_BITMASK,
		.size = sizeof(uint32_t),
		.field_index = SRC_FIELD_IPV4,
		.input_index = SRC_INPUT_IPV4,
		.offset = sizeof(struct rte_ether_hdr) +
			offsetof(struct rte_ipv4_hdr, src_addr),
	},
	/* next input field (IPv4 destination address) - 4 consecutive bytes. */
	{
		/* rte_flow uses a bit mask for IPv4 addresses */
		.type = RTE_ACL_FIELD_TYPE_BITMASK,
		.size = sizeof(uint32_t),
		.field_index = DST_FIELD_IPV4,
		.input_index = DST_INPUT_IPV4,
		.offset = sizeof(struct rte_ether_hdr) +
			offsetof(struct rte_ipv4_hdr, dst_addr),
	},
	/*
	 * Next 2 fields (src & dst ports) form 4 consecutive bytes.
	 * They share the same input index.
	 */
	{
		/* rte_flow uses a bit mask for protocol ports */
		.type = RTE_ACL_FIELD_TYPE_BITMASK,
		.size = sizeof(uint16_t),
		.field_index = SRCP_FIELD_IPV4,
		.input_index = SRCP_DESTP_INPUT_IPV4,
		.offset = sizeof(struct rte_ether_hdr) +
			sizeof(struct rte_ipv4_hdr) +
			offsetof(struct rte_tcp_hdr, src_port),
	},
	{
		/* rte_flow uses a bit mask for protocol ports */
		.type = RTE_ACL_FIELD_TYPE_BITMASK,
		.size = sizeof(uint16_t),
		.field_index = DSTP_FIELD_IPV4,
		.input_index = SRCP_DESTP_INPUT_IPV4,
		.offset = sizeof(struct rte_ether_hdr) +
			sizeof(struct rte_ipv4_hdr) +
			offsetof(struct rte_tcp_hdr, dst_port),
	},
};

/* flow classify data */
static int num_classify_rules;
static struct rte_flow_classify_rule *rules[MAX_NUM_CLASSIFY];
static struct rte_flow_classify_ipv4_5tuple_stats ntuple_stats;
static struct rte_flow_classify_stats classify_stats = {
		.stats = (void **)&ntuple_stats
};
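
/* classify_stats wraps ntuple_stats so that rte_flow_classifier_query()
 * can return the matched 5-tuple and hit counter for a rule.
 */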

/* parameters for rte_flow_classify_validate and
 * rte_flow_classify_table_entry_add functions
 */

static struct rte_flow_item eth_item = { RTE_FLOW_ITEM_TYPE_ETH,
	0, 0, 0 };
static struct rte_flow_item end_item = { RTE_FLOW_ITEM_TYPE_END,
	0, 0, 0 };

/* sample actions:
 * "actions count / end"
 */
struct rte_flow_query_count count = {
	.reset = 1,
	.hits_set = 1,
	.bytes_set = 1,
	.hits = 0,
	.bytes = 0,
};
static struct rte_flow_action count_action = { RTE_FLOW_ACTION_TYPE_COUNT,
	&count };
static struct rte_flow_action end_action = { RTE_FLOW_ACTION_TYPE_END, 0 };
static struct rte_flow_action actions[2];

/* sample attributes; ingress and priority are filled in per rule
 * by add_classify_rule()
 */
static struct rte_flow_attr attr;

/* flow_classify.c: Based on the DPDK skeleton forwarding example. */

/*
 * Initializes a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_conf port_conf = port_conf_default;
	struct rte_ether_addr addr;
	const uint16_t rx_rings = 1, tx_rings = 1;
	int retval;
	uint16_t q;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

	retval = rte_eth_dev_info_get(port, &dev_info);
	if (retval != 0) {
		printf("Error getting device (port %u) info: %s\n",
				port, strerror(-retval));
		return retval;
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	/* Configure the Ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	/* Allocate and set up 1 RX queue per Ethernet port. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
				rte_eth_dev_socket_id(port), NULL, mbuf_pool);
		if (retval < 0)
			return retval;
	}

	txconf = dev_info.default_txconf;
	txconf.offloads = port_conf.txmode.offloads;
	/* Allocate and set up 1 TX queue per Ethernet port. */
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
				rte_eth_dev_socket_id(port), &txconf);
		if (retval < 0)
			return retval;
	}

	/* Start the Ethernet port. */
	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	/* Display the port MAC address. */
	rte_eth_macaddr_get(port, &addr);
	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
			   " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
			port,
			addr.addr_bytes[0], addr.addr_bytes[1],
			addr.addr_bytes[2], addr.addr_bytes[3],
			addr.addr_bytes[4], addr.addr_bytes[5]);

	/* Enable RX in promiscuous mode for the Ethernet device. */
	retval = rte_eth_promiscuous_enable(port);
	if (retval != 0)
		return retval;

	return 0;
}

/*
 * The lcore main. This is the main thread that does the work, reading from
 * an input port, classifying the packets, and writing to an output port.
 */
static __attribute__((noreturn)) void
lcore_main(struct flow_classifier *cls_app)
{
	uint16_t port;
	int ret;
	int i = 0;

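	/* Demonstrate run-time rule deletion: delete the eighth rule
	 * added from the rules file and report the result.
	 */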
	ret = rte_flow_classify_table_entry_delete(cls_app->cls,
			rules[7]);
	if (ret)
		printf("table_entry_delete failed [7] %d\n\n", ret);
	else
		printf("table_entry_delete succeeded [7]\n\n");

	/*
	 * Check that the port is on the same NUMA node as the polling thread
	 * for best performance.
	 */
	RTE_ETH_FOREACH_DEV(port)
		if (rte_eth_dev_socket_id(port) > 0 &&
			rte_eth_dev_socket_id(port) != (int)rte_socket_id()) {
			printf("\n\n");
			printf("WARNING: port %u is on a remote NUMA node to the polling thread.\n",
			       port);
			printf("Performance will not be optimal.\n");
		}
	printf("\nCore %u forwarding packets. ", rte_lcore_id());
	printf("[Ctrl+C to quit]\n");

	/* Run until the application is quit or killed. */
	for (;;) {
		/*
		 * Receive packets on a port, classify them and forward them
		 * on the paired port.
		 * The mapping is 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, etc.
		 */
		RTE_ETH_FOREACH_DEV(port) {
			/* Get burst of RX packets, from first port of pair. */
			struct rte_mbuf *bufs[BURST_SIZE];
			const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
					bufs, BURST_SIZE);

			if (unlikely(nb_rx == 0))
				continue;

			/* Query each installed rule's hit stats against
			 * this burst.
			 */
			for (i = 0; i < MAX_NUM_CLASSIFY; i++) {
				if (rules[i]) {
					ret = rte_flow_classifier_query(
						cls_app->cls,
						bufs, nb_rx, rules[i],
						&classify_stats);
					if (ret)
						printf("rule [%d] query failed ret [%d]\n\n",
							i, ret);
					else {
						printf("rule[%d] count=%" PRIu64 "\n",
							i, ntuple_stats.counter1);
						printf("proto = %d\n",
							ntuple_stats.ipv4_5tuple.proto);
					}
				}
			}

			/* Send burst of TX packets, to second port of pair. */
			const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
					bufs, nb_rx);

			/* Free any unsent packets. */
			if (unlikely(nb_tx < nb_rx)) {
				uint16_t buf;

				for (buf = nb_tx; buf < nb_rx; buf++)
					rte_pktmbuf_free(bufs[buf]);
			}
		}
	}
}

/*
 * Parse IPv4 5 tuple rules file, ipv4_rules_file.txt.
 * Expected format:
 * <src_ipv4_addr>'/'<masklen> <space> \
 * <dst_ipv4_addr>'/'<masklen> <space> \
 * <src_port> <space> ":" <src_port_mask> <space> \
 * <dst_port> <space> ":" <dst_port_mask> <space> \
 * <proto>'/'<proto_mask> <space> \
 * <priority>
 */
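
/* An illustrative rule line in this format (17 = UDP) might be:
 * 2.2.2.3/24 2.2.2.7/24 32 : 0xffff 33 : 0xffff 17/0xff 0
 */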
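/* Parse one number from the head of *in, checking that it is terminated
 * by dlm and does not exceed lim; on success store the value in *fd and
 * advance *in past the delimiter.
 */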
static int
get_cb_field(char **in, uint32_t *fd, int base, unsigned long lim,
		char dlm)
{
	unsigned long val;
	char *end;

	errno = 0;
	val = strtoul(*in, &end, base);
	if (errno != 0 || end[0] != dlm || val > lim)
		return -EINVAL;
	*fd = (uint32_t)val;
	*in = end + 1;
	return 0;
}

static int
parse_ipv4_net(char *in, uint32_t *addr, uint32_t *mask_len)
{
	uint32_t a, b, c, d, m;

	if (get_cb_field(&in, &a, 0, UINT8_MAX, '.'))
		return -EINVAL;
	if (get_cb_field(&in, &b, 0, UINT8_MAX, '.'))
		return -EINVAL;
	if (get_cb_field(&in, &c, 0, UINT8_MAX, '.'))
		return -EINVAL;
	if (get_cb_field(&in, &d, 0, UINT8_MAX, '/'))
		return -EINVAL;
	if (get_cb_field(&in, &m, 0, sizeof(uint32_t) * CHAR_BIT, 0))
		return -EINVAL;

	addr[0] = RTE_IPV4(a, b, c, d);
	mask_len[0] = m;
	return 0;
}

static int
parse_ipv4_5tuple_rule(char *str, struct rte_eth_ntuple_filter *ntuple_filter)
{
	int i, ret;
	char *s, *sp, *in[CB_FLD_NUM];
	static const char *dlm = " \t\n";
	int dim = CB_FLD_NUM;
	uint32_t temp;

	s = str;
	for (i = 0; i != dim; i++, s = NULL) {
		in[i] = strtok_r(s, dlm, &sp);
		if (in[i] == NULL)
			return -EINVAL;
	}

	ret = parse_ipv4_net(in[CB_FLD_SRC_ADDR],
			&ntuple_filter->src_ip,
			&ntuple_filter->src_ip_mask);
	if (ret != 0) {
		flow_classify_log("failed to read source address/mask: %s\n",
			in[CB_FLD_SRC_ADDR]);
		return ret;
	}

	ret = parse_ipv4_net(in[CB_FLD_DST_ADDR],
			&ntuple_filter->dst_ip,
			&ntuple_filter->dst_ip_mask);
	if (ret != 0) {
		flow_classify_log("failed to read destination address/mask: %s\n",
			in[CB_FLD_DST_ADDR]);
		return ret;
	}

	if (get_cb_field(&in[CB_FLD_SRC_PORT], &temp, 0, UINT16_MAX, 0))
		return -EINVAL;
	ntuple_filter->src_port = (uint16_t)temp;

	if (strncmp(in[CB_FLD_SRC_PORT_DLM], cb_port_delim,
			sizeof(cb_port_delim)) != 0)
		return -EINVAL;

	if (get_cb_field(&in[CB_FLD_SRC_PORT_MASK], &temp, 0, UINT16_MAX, 0))
		return -EINVAL;
	ntuple_filter->src_port_mask = (uint16_t)temp;

	if (get_cb_field(&in[CB_FLD_DST_PORT], &temp, 0, UINT16_MAX, 0))
		return -EINVAL;
	ntuple_filter->dst_port = (uint16_t)temp;

	if (strncmp(in[CB_FLD_DST_PORT_DLM], cb_port_delim,
			sizeof(cb_port_delim)) != 0)
		return -EINVAL;

	if (get_cb_field(&in[CB_FLD_DST_PORT_MASK], &temp, 0, UINT16_MAX, 0))
		return -EINVAL;
	ntuple_filter->dst_port_mask = (uint16_t)temp;

	/* two reads of the same token: <proto> up to '/', then <proto_mask> */
	if (get_cb_field(&in[CB_FLD_PROTO], &temp, 0, UINT8_MAX, '/'))
		return -EINVAL;
	ntuple_filter->proto = (uint8_t)temp;

	if (get_cb_field(&in[CB_FLD_PROTO], &temp, 0, UINT8_MAX, 0))
		return -EINVAL;
	ntuple_filter->proto_mask = (uint8_t)temp;

	if (get_cb_field(&in[CB_FLD_PRIORITY], &temp, 0, UINT16_MAX, 0))
		return -EINVAL;
	ntuple_filter->priority = (uint16_t)temp;
	if (ntuple_filter->priority > FLOW_CLASSIFY_MAX_PRIORITY)
		ret = -EINVAL;

	return ret;
}

/* Bypass comment and empty lines */
static inline int
is_bypass_line(char *buff)
{
	int i = 0;

	/* comment line */
	if (buff[0] == COMMENT_LEAD_CHAR)
		return 1;
	/* empty line */
	while (buff[i] != '\0') {
		if (!isspace(buff[i]))
			return 0;
		i++;
	}
	return 1;
}

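/* Convert an IPv4 prefix length into a 32-bit mask with the top
 * depth_val bits set, e.g. a depth of 24 yields 0xffffff00.
 */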
static uint32_t
convert_depth_to_bitmask(uint32_t depth_val)
{
	uint32_t bitmask = 0;
	int i, j;

	for (i = depth_val, j = 0; i > 0; i--, j++)
		bitmask |= (1 << (31 - j));
	return bitmask;
}

static int
add_classify_rule(struct rte_eth_ntuple_filter *ntuple_filter,
		struct flow_classifier *cls_app)
{
	int ret = -1;
	int key_found;
	struct rte_flow_error error;
	struct rte_flow_item_ipv4 ipv4_spec;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item ipv4_udp_item;
	struct rte_flow_item ipv4_tcp_item;
	struct rte_flow_item ipv4_sctp_item;
	struct rte_flow_item_udp udp_spec;
	struct rte_flow_item_udp udp_mask;
	struct rte_flow_item udp_item;
	struct rte_flow_item_tcp tcp_spec;
	struct rte_flow_item_tcp tcp_mask;
	struct rte_flow_item tcp_item;
	struct rte_flow_item_sctp sctp_spec;
	struct rte_flow_item_sctp sctp_mask;
	struct rte_flow_item sctp_item;
	struct rte_flow_item pattern_ipv4_5tuple[4];
	struct rte_flow_classify_rule *rule;
	uint8_t ipv4_proto;

	if (num_classify_rules >= MAX_NUM_CLASSIFY) {
		printf("\nINFO: classify rule capacity %d reached\n",
			num_classify_rules);
		return ret;
	}

	/* set up parameters for validate and add */
	memset(&ipv4_spec, 0, sizeof(ipv4_spec));
	ipv4_spec.hdr.next_proto_id = ntuple_filter->proto;
	ipv4_spec.hdr.src_addr = ntuple_filter->src_ip;
	ipv4_spec.hdr.dst_addr = ntuple_filter->dst_ip;
	ipv4_proto = ipv4_spec.hdr.next_proto_id;

	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	ipv4_mask.hdr.next_proto_id = ntuple_filter->proto_mask;
	/* the ntuple filter carries prefix lengths; convert to bitmasks */
	ipv4_mask.hdr.src_addr = ntuple_filter->src_ip_mask;
	ipv4_mask.hdr.src_addr =
		convert_depth_to_bitmask(ipv4_mask.hdr.src_addr);
	ipv4_mask.hdr.dst_addr = ntuple_filter->dst_ip_mask;
	ipv4_mask.hdr.dst_addr =
		convert_depth_to_bitmask(ipv4_mask.hdr.dst_addr);

	switch (ipv4_proto) {
	case IPPROTO_UDP:
		ipv4_udp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		ipv4_udp_item.spec = &ipv4_spec;
		ipv4_udp_item.mask = &ipv4_mask;
		ipv4_udp_item.last = NULL;

		udp_spec.hdr.src_port = ntuple_filter->src_port;
		udp_spec.hdr.dst_port = ntuple_filter->dst_port;
		udp_spec.hdr.dgram_len = 0;
		udp_spec.hdr.dgram_cksum = 0;

		udp_mask.hdr.src_port = ntuple_filter->src_port_mask;
		udp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;
		udp_mask.hdr.dgram_len = 0;
		udp_mask.hdr.dgram_cksum = 0;

		udp_item.type = RTE_FLOW_ITEM_TYPE_UDP;
		udp_item.spec = &udp_spec;
		udp_item.mask = &udp_mask;
		udp_item.last = NULL;

		attr.priority = ntuple_filter->priority;
		pattern_ipv4_5tuple[1] = ipv4_udp_item;
		pattern_ipv4_5tuple[2] = udp_item;
		break;
	case IPPROTO_TCP:
		ipv4_tcp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		ipv4_tcp_item.spec = &ipv4_spec;
		ipv4_tcp_item.mask = &ipv4_mask;
		ipv4_tcp_item.last = NULL;

		memset(&tcp_spec, 0, sizeof(tcp_spec));
		tcp_spec.hdr.src_port = ntuple_filter->src_port;
		tcp_spec.hdr.dst_port = ntuple_filter->dst_port;

		memset(&tcp_mask, 0, sizeof(tcp_mask));
		tcp_mask.hdr.src_port = ntuple_filter->src_port_mask;
		tcp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;

		tcp_item.type = RTE_FLOW_ITEM_TYPE_TCP;
		tcp_item.spec = &tcp_spec;
		tcp_item.mask = &tcp_mask;
		tcp_item.last = NULL;

		attr.priority = ntuple_filter->priority;
		pattern_ipv4_5tuple[1] = ipv4_tcp_item;
		pattern_ipv4_5tuple[2] = tcp_item;
		break;
	case IPPROTO_SCTP:
		ipv4_sctp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		ipv4_sctp_item.spec = &ipv4_spec;
		ipv4_sctp_item.mask = &ipv4_mask;
		ipv4_sctp_item.last = NULL;

		sctp_spec.hdr.src_port = ntuple_filter->src_port;
		sctp_spec.hdr.dst_port = ntuple_filter->dst_port;
		sctp_spec.hdr.cksum = 0;
		sctp_spec.hdr.tag = 0;

		sctp_mask.hdr.src_port = ntuple_filter->src_port_mask;
		sctp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;
		sctp_mask.hdr.cksum = 0;
		sctp_mask.hdr.tag = 0;

		sctp_item.type = RTE_FLOW_ITEM_TYPE_SCTP;
		sctp_item.spec = &sctp_spec;
		sctp_item.mask = &sctp_mask;
		sctp_item.last = NULL;

		attr.priority = ntuple_filter->priority;
		pattern_ipv4_5tuple[1] = ipv4_sctp_item;
		pattern_ipv4_5tuple[2] = sctp_item;
		break;
	default:
		return ret;
	}

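	/* Complete the shared parts of the pattern (eth / ipv4 / L4 / end)
	 * and the count + end actions used by validate and add below.
	 */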
	attr.ingress = 1;
	pattern_ipv4_5tuple[0] = eth_item;
	pattern_ipv4_5tuple[3] = end_item;
	actions[0] = count_action;
	actions[1] = end_action;

	/* Validate and add rule */
	ret = rte_flow_classify_validate(cls_app->cls, &attr,
			pattern_ipv4_5tuple, actions, &error);
	if (ret) {
		printf("table entry validate failed ipv4_proto = %u\n",
			ipv4_proto);
		return ret;
	}

	rule = rte_flow_classify_table_entry_add(
			cls_app->cls, &attr, pattern_ipv4_5tuple,
			actions, &key_found, &error);
	if (rule == NULL) {
		printf("table entry add failed ipv4_proto = %u\n",
			ipv4_proto);
		ret = -1;
		return ret;
	}

	rules[num_classify_rules] = rule;
	num_classify_rules++;
	return 0;
}

static int
add_rules(const char *rule_path, struct flow_classifier *cls_app)
{
	FILE *fh;
	char buff[LINE_MAX];
	unsigned int i = 0;
	unsigned int total_num = 0;
	struct rte_eth_ntuple_filter ntuple_filter;
	int ret;

	fh = fopen(rule_path, "rb");
	if (fh == NULL)
		rte_exit(EXIT_FAILURE, "%s: fopen %s failed\n", __func__,
			rule_path);

	ret = fseek(fh, 0, SEEK_SET);
	if (ret)
		rte_exit(EXIT_FAILURE, "%s: fseek %d failed\n", __func__,
			ret);

	i = 0;
	while (fgets(buff, LINE_MAX, fh) != NULL) {
		i++;

		if (is_bypass_line(buff))
			continue;

		if (total_num >= FLOW_CLASSIFY_MAX_RULE_NUM - 1) {
			printf("\nINFO: classify rule capacity %d reached\n",
				total_num);
			break;
		}

		if (parse_ipv4_5tuple_rule(buff, &ntuple_filter) != 0)
			rte_exit(EXIT_FAILURE,
				"%s Line %u: parse rules error\n",
				rule_path, i);

		if (add_classify_rule(&ntuple_filter, cls_app) != 0)
			rte_exit(EXIT_FAILURE, "add rule error\n");

		total_num++;
	}

	fclose(fh);
	return 0;
}

/* display usage */
static void
print_usage(const char *prgname)
{
	printf("%s usage:\n", prgname);
	printf("[EAL options] -- --"OPTION_RULE_IPV4"=FILE: ");
	printf("specify the ipv4 rules file.\n");
	printf("Each rule occupies one line in the file.\n");
}

/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{OPTION_RULE_IPV4, 1, 0, 0},
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "",
				lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* long options */
		case 0:
			if (!strncmp(lgopts[option_index].name,
					OPTION_RULE_IPV4,
					sizeof(OPTION_RULE_IPV4)))
				parm_config.rule_ipv4_name = optarg;
			break;
		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind - 1] = prgname;

	ret = optind - 1;
	optind = 1; /* reset getopt lib */
	return ret;
}

/*
 * The main function, which does initialization and calls the lcore_main
 * function.
 */
int
main(int argc, char *argv[])
{
	struct rte_mempool *mbuf_pool;
	uint16_t nb_ports;
	uint16_t portid;
	int ret;
	int socket_id;
	struct rte_table_acl_params table_acl_params;
	struct rte_flow_classify_table_params cls_table_params;
	struct flow_classifier *cls_app;
	struct rte_flow_classifier_params cls_params;
	uint32_t size;

	/* Initialize the Environment Abstraction Layer (EAL). */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid flow_classify parameters\n");

	/* The rules file is mandatory; fail early rather than passing a
	 * NULL path to fopen() in add_rules().
	 */
	if (parm_config.rule_ipv4_name == NULL)
		rte_exit(EXIT_FAILURE,
			"Error: IPv4 rules file not specified (--"OPTION_RULE_IPV4")\n");

	/* Check that there is an even number of ports to send/receive on. */
	nb_ports = rte_eth_dev_count_avail();
	if (nb_ports < 2 || (nb_ports & 1))
		rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");

	/* Creates a new mempool in memory to hold the mbufs. */
	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NUM_MBUFS * nb_ports,
		MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());

	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* Initialize all ports. */
	RTE_ETH_FOREACH_DEV(portid)
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot init port %" PRIu16 "\n",
					portid);

	if (rte_lcore_count() > 1)
		printf("\nWARNING: Too many lcores enabled. Only 1 used.\n");

	socket_id = rte_eth_dev_socket_id(0);

	/* Memory allocation */
	size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct flow_classifier_acl));
	cls_app = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
	if (cls_app == NULL)
		rte_exit(EXIT_FAILURE, "Cannot allocate classifier memory\n");

	cls_params.name = "flow_classifier";
	cls_params.socket_id = socket_id;

	cls_app->cls = rte_flow_classifier_create(&cls_params);
	if (cls_app->cls == NULL) {
		rte_free(cls_app);
		rte_exit(EXIT_FAILURE, "Cannot create classifier\n");
	}

	/* initialise ACL table params */
	table_acl_params.name = "table_acl_ipv4_5tuple";
	table_acl_params.n_rules = FLOW_CLASSIFY_MAX_RULE_NUM;
	table_acl_params.n_rule_fields = RTE_DIM(ipv4_defs);
	memcpy(table_acl_params.field_format, ipv4_defs, sizeof(ipv4_defs));

	/* initialise table create params */
	cls_table_params.ops = &rte_table_acl_ops;
	cls_table_params.arg_create = &table_acl_params;
	cls_table_params.type = RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE;

	ret = rte_flow_classify_table_create(cls_app->cls, &cls_table_params);
	if (ret) {
		rte_flow_classifier_free(cls_app->cls);
		rte_free(cls_app);
		rte_exit(EXIT_FAILURE, "Failed to create classifier table\n");
	}

	/* read file of IPv4 5 tuple rules and initialize parameters
	 * for rte_flow_classify_validate and rte_flow_classify_table_entry_add
	 * API's.
	 */
	if (add_rules(parm_config.rule_ipv4_name, cls_app)) {
		rte_flow_classifier_free(cls_app->cls);
		rte_free(cls_app);
		rte_exit(EXIT_FAILURE, "Failed to add rules\n");
	}

	/* Call lcore_main on the master core only. */
	lcore_main(cls_app);

	return 0;
}