1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2020 Mellanox Technologies, Ltd
3  *
4  * The file contains the implementations of actions generators.
5  * Each generator is responsible for preparing it's action instance
6  * and initializing it with needed data.
7  */
8 
9 #include <sys/types.h>
10 #include <rte_malloc.h>
11 #include <rte_flow.h>
12 #include <rte_ethdev.h>
13 #include <rte_vxlan.h>
14 #include <rte_gtp.h>
15 #include <rte_gre.h>
16 #include <rte_geneve.h>
17 
18 #include "actions_gen.h"
19 #include "flow_gen.h"
20 #include "config.h"
21 
22 
/* Storage for additional parameters for actions */
struct additional_para {
	uint16_t queue;          /* Queue index used by the QUEUE action. */
	uint16_t next_table;     /* Target group used by the JUMP action. */
	uint16_t *queues;        /* Queue list used by the RSS action. */
	uint16_t queues_number;  /* Number of entries in queues[]. */
	uint32_t counter;        /* Per-flow counter used to derive values. */
	uint64_t encap_data;     /* Item-mask of headers for RAW_ENCAP. */
	uint64_t decap_data;     /* Item-mask of headers for RAW_DECAP. */
};
33 
/* Storage for struct rte_flow_action_raw_encap including external data. */
struct action_raw_encap_data {
	struct rte_flow_action_raw_encap conf; /* conf.data points at data[]. */
	uint8_t data[128];     /* Raw bytes of the headers to prepend. */
	uint8_t preserve[128]; /* NOTE(review): never written in this file — confirm intent. */
	uint16_t idx;
};
41 
/* Storage for struct rte_flow_action_raw_decap including external data. */
struct action_raw_decap_data {
	struct rte_flow_action_raw_decap conf; /* conf.data points at data[]. */
	uint8_t data[128]; /* Raw bytes of the headers to strip. */
	uint16_t idx;
};
48 
/* Storage for struct rte_flow_action_rss including external data. */
struct action_rss_data {
	struct rte_flow_action_rss conf; /* conf.key/conf.queue point below. */
	uint8_t key[40];     /* RSS hash key (40 bytes). */
	uint16_t queue[128]; /* Destination queue list. */
};
55 
56 static void
add_mark(struct rte_flow_action * actions,uint8_t actions_counter,struct additional_para para)57 add_mark(struct rte_flow_action *actions,
58 	uint8_t actions_counter,
59 	struct additional_para para)
60 {
61 	static struct rte_flow_action_mark mark_action;
62 	uint32_t counter = para.counter;
63 
64 	do {
65 		/* Random values from 1 to 256 */
66 		mark_action.id = (counter % 255) + 1;
67 	} while (0);
68 
69 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_MARK;
70 	actions[actions_counter].conf = &mark_action;
71 }
72 
73 static void
add_queue(struct rte_flow_action * actions,uint8_t actions_counter,struct additional_para para)74 add_queue(struct rte_flow_action *actions,
75 	uint8_t actions_counter,
76 	struct additional_para para)
77 {
78 	static struct rte_flow_action_queue queue_action;
79 
80 	do {
81 		queue_action.index = para.queue;
82 	} while (0);
83 
84 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_QUEUE;
85 	actions[actions_counter].conf = &queue_action;
86 }
87 
88 static void
add_jump(struct rte_flow_action * actions,uint8_t actions_counter,struct additional_para para)89 add_jump(struct rte_flow_action *actions,
90 	uint8_t actions_counter,
91 	struct additional_para para)
92 {
93 	static struct rte_flow_action_jump jump_action;
94 
95 	do {
96 		jump_action.group = para.next_table;
97 	} while (0);
98 
99 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_JUMP;
100 	actions[actions_counter].conf = &jump_action;
101 }
102 
103 static void
add_rss(struct rte_flow_action * actions,uint8_t actions_counter,struct additional_para para)104 add_rss(struct rte_flow_action *actions,
105 	uint8_t actions_counter,
106 	struct additional_para para)
107 {
108 	static struct rte_flow_action_rss *rss_action;
109 	static struct action_rss_data *rss_data;
110 
111 	uint16_t queue;
112 
113 	if (rss_data == NULL)
114 		rss_data = rte_malloc("rss_data",
115 			sizeof(struct action_rss_data), 0);
116 
117 	if (rss_data == NULL)
118 		rte_exit(EXIT_FAILURE, "No Memory available!");
119 
120 	*rss_data = (struct action_rss_data){
121 		.conf = (struct rte_flow_action_rss){
122 			.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
123 			.level = 0,
124 			.types = GET_RSS_HF(),
125 			.key_len = sizeof(rss_data->key),
126 			.queue_num = para.queues_number,
127 			.key = rss_data->key,
128 			.queue = rss_data->queue,
129 		},
130 		.key = { 1 },
131 		.queue = { 0 },
132 	};
133 
134 	for (queue = 0; queue < para.queues_number; queue++)
135 		rss_data->queue[queue] = para.queues[queue];
136 
137 	rss_action = &rss_data->conf;
138 
139 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RSS;
140 	actions[actions_counter].conf = rss_action;
141 }
142 
143 static void
add_set_meta(struct rte_flow_action * actions,uint8_t actions_counter,__rte_unused struct additional_para para)144 add_set_meta(struct rte_flow_action *actions,
145 	uint8_t actions_counter,
146 	__rte_unused struct additional_para para)
147 {
148 	static struct rte_flow_action_set_meta meta_action;
149 
150 	do {
151 		meta_action.data = RTE_BE32(META_DATA);
152 		meta_action.mask = RTE_BE32(0xffffffff);
153 	} while (0);
154 
155 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_META;
156 	actions[actions_counter].conf = &meta_action;
157 }
158 
159 static void
add_set_tag(struct rte_flow_action * actions,uint8_t actions_counter,__rte_unused struct additional_para para)160 add_set_tag(struct rte_flow_action *actions,
161 	uint8_t actions_counter,
162 	__rte_unused struct additional_para para)
163 {
164 	static struct rte_flow_action_set_tag tag_action;
165 
166 	do {
167 		tag_action.data = RTE_BE32(META_DATA);
168 		tag_action.mask = RTE_BE32(0xffffffff);
169 		tag_action.index = TAG_INDEX;
170 	} while (0);
171 
172 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TAG;
173 	actions[actions_counter].conf = &tag_action;
174 }
175 
176 static void
add_port_id(struct rte_flow_action * actions,uint8_t actions_counter,__rte_unused struct additional_para para)177 add_port_id(struct rte_flow_action *actions,
178 	uint8_t actions_counter,
179 	__rte_unused struct additional_para para)
180 {
181 	static struct rte_flow_action_port_id port_id;
182 
183 	do {
184 		port_id.id = PORT_ID_DST;
185 	} while (0);
186 
187 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
188 	actions[actions_counter].conf = &port_id;
189 }
190 
/* Append a DROP action; it takes no configuration. */
static void
add_drop(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DROP;
}
198 
/* Append a COUNT action using a shared, zero-initialized config. */
static void
add_count(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_action_count count_action;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_COUNT;
	actions[actions_counter].conf = &count_action;
}
209 
210 static void
add_set_src_mac(struct rte_flow_action * actions,uint8_t actions_counter,__rte_unused struct additional_para para)211 add_set_src_mac(struct rte_flow_action *actions,
212 	uint8_t actions_counter,
213 	__rte_unused struct additional_para para)
214 {
215 	static struct rte_flow_action_set_mac set_mac;
216 	uint32_t mac = para.counter;
217 	uint16_t i;
218 
219 	/* Fixed value */
220 	if (FIXED_VALUES)
221 		mac = 1;
222 
223 	/* Mac address to be set is random each time */
224 	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
225 		set_mac.mac_addr[i] = mac & 0xff;
226 		mac = mac >> 8;
227 	}
228 
229 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC;
230 	actions[actions_counter].conf = &set_mac;
231 }
232 
233 static void
add_set_dst_mac(struct rte_flow_action * actions,uint8_t actions_counter,__rte_unused struct additional_para para)234 add_set_dst_mac(struct rte_flow_action *actions,
235 	uint8_t actions_counter,
236 	__rte_unused struct additional_para para)
237 {
238 	static struct rte_flow_action_set_mac set_mac;
239 	uint32_t mac = para.counter;
240 	uint16_t i;
241 
242 	/* Fixed value */
243 	if (FIXED_VALUES)
244 		mac = 1;
245 
246 	/* Mac address to be set is random each time */
247 	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
248 		set_mac.mac_addr[i] = mac & 0xff;
249 		mac = mac >> 8;
250 	}
251 
252 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_DST;
253 	actions[actions_counter].conf = &set_mac;
254 }
255 
256 static void
add_set_src_ipv4(struct rte_flow_action * actions,uint8_t actions_counter,__rte_unused struct additional_para para)257 add_set_src_ipv4(struct rte_flow_action *actions,
258 	uint8_t actions_counter,
259 	__rte_unused struct additional_para para)
260 {
261 	static struct rte_flow_action_set_ipv4 set_ipv4;
262 	uint32_t ip = para.counter;
263 
264 	/* Fixed value */
265 	if (FIXED_VALUES)
266 		ip = 1;
267 
268 	/* IPv4 value to be set is random each time */
269 	set_ipv4.ipv4_addr = RTE_BE32(ip + 1);
270 
271 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC;
272 	actions[actions_counter].conf = &set_ipv4;
273 }
274 
275 static void
add_set_dst_ipv4(struct rte_flow_action * actions,uint8_t actions_counter,__rte_unused struct additional_para para)276 add_set_dst_ipv4(struct rte_flow_action *actions,
277 	uint8_t actions_counter,
278 	__rte_unused struct additional_para para)
279 {
280 	static struct rte_flow_action_set_ipv4 set_ipv4;
281 	uint32_t ip = para.counter;
282 
283 	/* Fixed value */
284 	if (FIXED_VALUES)
285 		ip = 1;
286 
287 	/* IPv4 value to be set is random each time */
288 	set_ipv4.ipv4_addr = RTE_BE32(ip + 1);
289 
290 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DST;
291 	actions[actions_counter].conf = &set_ipv4;
292 }
293 
294 static void
add_set_src_ipv6(struct rte_flow_action * actions,uint8_t actions_counter,__rte_unused struct additional_para para)295 add_set_src_ipv6(struct rte_flow_action *actions,
296 	uint8_t actions_counter,
297 	__rte_unused struct additional_para para)
298 {
299 	static struct rte_flow_action_set_ipv6 set_ipv6;
300 	uint32_t ipv6 = para.counter;
301 	uint8_t i;
302 
303 	/* Fixed value */
304 	if (FIXED_VALUES)
305 		ipv6 = 1;
306 
307 	/* IPv6 value to set is random each time */
308 	for (i = 0; i < 16; i++) {
309 		set_ipv6.ipv6_addr[i] = ipv6 & 0xff;
310 		ipv6 = ipv6 >> 8;
311 	}
312 
313 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC;
314 	actions[actions_counter].conf = &set_ipv6;
315 }
316 
317 static void
add_set_dst_ipv6(struct rte_flow_action * actions,uint8_t actions_counter,__rte_unused struct additional_para para)318 add_set_dst_ipv6(struct rte_flow_action *actions,
319 	uint8_t actions_counter,
320 	__rte_unused struct additional_para para)
321 {
322 	static struct rte_flow_action_set_ipv6 set_ipv6;
323 	uint32_t ipv6 = para.counter;
324 	uint8_t i;
325 
326 	/* Fixed value */
327 	if (FIXED_VALUES)
328 		ipv6 = 1;
329 
330 	/* IPv6 value to set is random each time */
331 	for (i = 0; i < 16; i++) {
332 		set_ipv6.ipv6_addr[i] = ipv6 & 0xff;
333 		ipv6 = ipv6 >> 8;
334 	}
335 
336 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DST;
337 	actions[actions_counter].conf = &set_ipv6;
338 }
339 
340 static void
add_set_src_tp(struct rte_flow_action * actions,uint8_t actions_counter,__rte_unused struct additional_para para)341 add_set_src_tp(struct rte_flow_action *actions,
342 	uint8_t actions_counter,
343 	__rte_unused struct additional_para para)
344 {
345 	static struct rte_flow_action_set_tp set_tp;
346 	uint32_t tp = para.counter;
347 
348 	/* Fixed value */
349 	if (FIXED_VALUES)
350 		tp = 100;
351 
352 	/* TP src port is random each time */
353 	tp = tp % 0xffff;
354 
355 	set_tp.port = RTE_BE16(tp & 0xffff);
356 
357 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_SRC;
358 	actions[actions_counter].conf = &set_tp;
359 }
360 
361 static void
add_set_dst_tp(struct rte_flow_action * actions,uint8_t actions_counter,__rte_unused struct additional_para para)362 add_set_dst_tp(struct rte_flow_action *actions,
363 	uint8_t actions_counter,
364 	__rte_unused struct additional_para para)
365 {
366 	static struct rte_flow_action_set_tp set_tp;
367 	uint32_t tp = para.counter;
368 
369 	/* Fixed value */
370 	if (FIXED_VALUES)
371 		tp = 100;
372 
373 	/* TP src port is random each time */
374 	if (tp > 0xffff)
375 		tp = tp >> 16;
376 
377 	set_tp.port = RTE_BE16(tp & 0xffff);
378 
379 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_DST;
380 	actions[actions_counter].conf = &set_tp;
381 }
382 
383 static void
add_inc_tcp_ack(struct rte_flow_action * actions,uint8_t actions_counter,__rte_unused struct additional_para para)384 add_inc_tcp_ack(struct rte_flow_action *actions,
385 	uint8_t actions_counter,
386 	__rte_unused struct additional_para para)
387 {
388 	static rte_be32_t value;
389 	uint32_t ack_value = para.counter;
390 
391 	/* Fixed value */
392 	if (FIXED_VALUES)
393 		ack_value = 1;
394 
395 	value = RTE_BE32(ack_value);
396 
397 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_ACK;
398 	actions[actions_counter].conf = &value;
399 }
400 
401 static void
add_dec_tcp_ack(struct rte_flow_action * actions,uint8_t actions_counter,__rte_unused struct additional_para para)402 add_dec_tcp_ack(struct rte_flow_action *actions,
403 	uint8_t actions_counter,
404 	__rte_unused struct additional_para para)
405 {
406 	static rte_be32_t value;
407 	uint32_t ack_value = para.counter;
408 
409 	/* Fixed value */
410 	if (FIXED_VALUES)
411 		ack_value = 1;
412 
413 	value = RTE_BE32(ack_value);
414 
415 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK;
416 	actions[actions_counter].conf = &value;
417 }
418 
419 static void
add_inc_tcp_seq(struct rte_flow_action * actions,uint8_t actions_counter,__rte_unused struct additional_para para)420 add_inc_tcp_seq(struct rte_flow_action *actions,
421 	uint8_t actions_counter,
422 	__rte_unused struct additional_para para)
423 {
424 	static rte_be32_t value;
425 	uint32_t seq_value = para.counter;
426 
427 	/* Fixed value */
428 	if (FIXED_VALUES)
429 		seq_value = 1;
430 
431 	value = RTE_BE32(seq_value);
432 
433 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ;
434 	actions[actions_counter].conf = &value;
435 }
436 
437 static void
add_dec_tcp_seq(struct rte_flow_action * actions,uint8_t actions_counter,__rte_unused struct additional_para para)438 add_dec_tcp_seq(struct rte_flow_action *actions,
439 	uint8_t actions_counter,
440 	__rte_unused struct additional_para para)
441 {
442 	static rte_be32_t value;
443 	uint32_t seq_value = para.counter;
444 
445 	/* Fixed value */
446 	if (FIXED_VALUES)
447 		seq_value = 1;
448 
449 	value	= RTE_BE32(seq_value);
450 
451 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ;
452 	actions[actions_counter].conf = &value;
453 }
454 
455 static void
add_set_ttl(struct rte_flow_action * actions,uint8_t actions_counter,__rte_unused struct additional_para para)456 add_set_ttl(struct rte_flow_action *actions,
457 	uint8_t actions_counter,
458 	__rte_unused struct additional_para para)
459 {
460 	static struct rte_flow_action_set_ttl set_ttl;
461 	uint32_t ttl_value = para.counter;
462 
463 	/* Fixed value */
464 	if (FIXED_VALUES)
465 		ttl_value = 1;
466 
467 	/* Set ttl to random value each time */
468 	ttl_value = ttl_value % 0xff;
469 
470 	set_ttl.ttl_value = ttl_value;
471 
472 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TTL;
473 	actions[actions_counter].conf = &set_ttl;
474 }
475 
/* Append a DEC_TTL action; it takes no configuration. */
static void
add_dec_ttl(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TTL;
}
483 
484 static void
add_set_ipv4_dscp(struct rte_flow_action * actions,uint8_t actions_counter,__rte_unused struct additional_para para)485 add_set_ipv4_dscp(struct rte_flow_action *actions,
486 	uint8_t actions_counter,
487 	__rte_unused struct additional_para para)
488 {
489 	static struct rte_flow_action_set_dscp set_dscp;
490 	uint32_t dscp_value = para.counter;
491 
492 	/* Fixed value */
493 	if (FIXED_VALUES)
494 		dscp_value = 1;
495 
496 	/* Set dscp to random value each time */
497 	dscp_value = dscp_value % 0xff;
498 
499 	set_dscp.dscp = dscp_value;
500 
501 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP;
502 	actions[actions_counter].conf = &set_dscp;
503 }
504 
505 static void
add_set_ipv6_dscp(struct rte_flow_action * actions,uint8_t actions_counter,__rte_unused struct additional_para para)506 add_set_ipv6_dscp(struct rte_flow_action *actions,
507 	uint8_t actions_counter,
508 	__rte_unused struct additional_para para)
509 {
510 	static struct rte_flow_action_set_dscp set_dscp;
511 	uint32_t dscp_value = para.counter;
512 
513 	/* Fixed value */
514 	if (FIXED_VALUES)
515 		dscp_value = 1;
516 
517 	/* Set dscp to random value each time */
518 	dscp_value = dscp_value % 0xff;
519 
520 	set_dscp.dscp = dscp_value;
521 
522 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP;
523 	actions[actions_counter].conf = &set_dscp;
524 }
525 
/* Append a FLAG action; it takes no configuration. */
static void
add_flag(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_FLAG;
}
533 
534 static void
add_ether_header(uint8_t ** header,uint64_t data,__rte_unused struct additional_para para)535 add_ether_header(uint8_t **header, uint64_t data,
536 	__rte_unused struct additional_para para)
537 {
538 	struct rte_ether_hdr eth_hdr;
539 
540 	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH)))
541 		return;
542 
543 	memset(&eth_hdr, 0, sizeof(struct rte_ether_hdr));
544 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN))
545 		eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
546 	else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
547 		eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
548 	else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
549 		eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
550 	memcpy(*header, &eth_hdr, sizeof(eth_hdr));
551 	*header += sizeof(eth_hdr);
552 }
553 
554 static void
add_vlan_header(uint8_t ** header,uint64_t data,__rte_unused struct additional_para para)555 add_vlan_header(uint8_t **header, uint64_t data,
556 	__rte_unused struct additional_para para)
557 {
558 	struct rte_vlan_hdr vlan_hdr;
559 	uint16_t vlan_value;
560 
561 	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN)))
562 		return;
563 
564 	vlan_value = VLAN_VALUE;
565 
566 	memset(&vlan_hdr, 0, sizeof(struct rte_vlan_hdr));
567 	vlan_hdr.vlan_tci = RTE_BE16(vlan_value);
568 
569 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
570 		vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
571 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
572 		vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
573 	memcpy(*header, &vlan_hdr, sizeof(vlan_hdr));
574 	*header += sizeof(vlan_hdr);
575 }
576 
577 static void
add_ipv4_header(uint8_t ** header,uint64_t data,struct additional_para para)578 add_ipv4_header(uint8_t **header, uint64_t data,
579 	struct additional_para para)
580 {
581 	struct rte_ipv4_hdr ipv4_hdr;
582 	uint32_t ip_dst = para.counter;
583 
584 	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4)))
585 		return;
586 
587 	/* Fixed value */
588 	if (FIXED_VALUES)
589 		ip_dst = 1;
590 
591 	memset(&ipv4_hdr, 0, sizeof(struct rte_ipv4_hdr));
592 	ipv4_hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
593 	ipv4_hdr.dst_addr = RTE_BE32(ip_dst);
594 	ipv4_hdr.version_ihl = RTE_IPV4_VHL_DEF;
595 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
596 		ipv4_hdr.next_proto_id = RTE_IP_TYPE_UDP;
597 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
598 		ipv4_hdr.next_proto_id = RTE_IP_TYPE_GRE;
599 	memcpy(*header, &ipv4_hdr, sizeof(ipv4_hdr));
600 	*header += sizeof(ipv4_hdr);
601 }
602 
603 static void
add_ipv6_header(uint8_t ** header,uint64_t data,__rte_unused struct additional_para para)604 add_ipv6_header(uint8_t **header, uint64_t data,
605 	__rte_unused struct additional_para para)
606 {
607 	struct rte_ipv6_hdr ipv6_hdr;
608 
609 	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6)))
610 		return;
611 
612 	memset(&ipv6_hdr, 0, sizeof(struct rte_ipv6_hdr));
613 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
614 		ipv6_hdr.proto = RTE_IP_TYPE_UDP;
615 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
616 		ipv6_hdr.proto = RTE_IP_TYPE_GRE;
617 	memcpy(*header, &ipv6_hdr, sizeof(ipv6_hdr));
618 	*header += sizeof(ipv6_hdr);
619 }
620 
621 static void
add_udp_header(uint8_t ** header,uint64_t data,__rte_unused struct additional_para para)622 add_udp_header(uint8_t **header, uint64_t data,
623 	__rte_unused struct additional_para para)
624 {
625 	struct rte_udp_hdr udp_hdr;
626 
627 	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP)))
628 		return;
629 
630 	memset(&udp_hdr, 0, sizeof(struct rte_flow_item_udp));
631 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN))
632 		udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
633 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE))
634 		udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_GPE_UDP_PORT);
635 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE))
636 		udp_hdr.dst_port = RTE_BE16(RTE_GENEVE_UDP_PORT);
637 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP))
638 		udp_hdr.dst_port = RTE_BE16(RTE_GTPU_UDP_PORT);
639 	 memcpy(*header, &udp_hdr, sizeof(udp_hdr));
640 	 *header += sizeof(udp_hdr);
641 }
642 
/* Write a VXLAN header into *header and advance the cursor. No-op
 * unless the VXLAN item is requested; the VNI follows the flow counter.
 */
static void
add_vxlan_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_vxlan_hdr vxlan_hdr;
	uint32_t vni_value = para.counter;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN)))
		return;

	/* Fixed value */
	if (FIXED_VALUES)
		vni_value = 1;

	memset(&vxlan_hdr, 0, sizeof(struct rte_vxlan_hdr));

	/* Byte-swap then shift so the 24-bit VNI occupies the upper
	 * bytes of vx_vni. NOTE(review): only the low 24 bits of
	 * vni_value survive — confirm the resulting on-wire layout
	 * against RFC 7348 if exact VNI values matter. */
	vxlan_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
	/* 0x8: "VNI present" flag byte; stored without a byte swap —
	 * NOTE(review): verify endianness of vx_flags. */
	vxlan_hdr.vx_flags = 0x8;

	memcpy(*header, &vxlan_hdr, sizeof(vxlan_hdr));
	*header += sizeof(vxlan_hdr);
}
665 
/* Write a VXLAN-GPE header into *header and advance the cursor. No-op
 * unless the VXLAN_GPE item is requested; the VNI follows the flow
 * counter.
 */
static void
add_vxlan_gpe_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_vxlan_gpe_hdr vxlan_gpe_hdr;
	uint32_t vni_value = para.counter;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE)))
		return;

	/* Fixed value */
	if (FIXED_VALUES)
		vni_value = 1;

	memset(&vxlan_gpe_hdr, 0, sizeof(struct rte_vxlan_gpe_hdr));

	/* Same VNI packing as the VXLAN generator above — only the low
	 * 24 bits of vni_value survive; NOTE(review): confirm layout. */
	vxlan_gpe_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
	/* 0x0c: VNI-present + next-protocol flags byte; stored without
	 * a byte swap — NOTE(review): verify endianness of vx_flags. */
	vxlan_gpe_hdr.vx_flags = 0x0c;

	memcpy(*header, &vxlan_gpe_hdr, sizeof(vxlan_gpe_hdr));
	*header += sizeof(vxlan_gpe_hdr);
}
688 
689 static void
add_gre_header(uint8_t ** header,uint64_t data,__rte_unused struct additional_para para)690 add_gre_header(uint8_t **header, uint64_t data,
691 	__rte_unused struct additional_para para)
692 {
693 	struct rte_gre_hdr gre_hdr;
694 
695 	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE)))
696 		return;
697 
698 	memset(&gre_hdr, 0, sizeof(struct rte_gre_hdr));
699 
700 	gre_hdr.proto = RTE_BE16(RTE_ETHER_TYPE_TEB);
701 
702 	memcpy(*header, &gre_hdr, sizeof(gre_hdr));
703 	*header += sizeof(gre_hdr);
704 }
705 
706 static void
add_geneve_header(uint8_t ** header,uint64_t data,struct additional_para para)707 add_geneve_header(uint8_t **header, uint64_t data,
708 	struct additional_para para)
709 {
710 	struct rte_geneve_hdr geneve_hdr;
711 	uint32_t vni_value = para.counter;
712 	uint8_t i;
713 
714 	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE)))
715 		return;
716 
717 	/* Fixed value */
718 	if (FIXED_VALUES)
719 		vni_value = 1;
720 
721 	memset(&geneve_hdr, 0, sizeof(struct rte_geneve_hdr));
722 
723 	for (i = 0; i < 3; i++)
724 		geneve_hdr.vni[2 - i] = vni_value >> (i * 8);
725 
726 	memcpy(*header, &geneve_hdr, sizeof(geneve_hdr));
727 	*header += sizeof(geneve_hdr);
728 }
729 
730 static void
add_gtp_header(uint8_t ** header,uint64_t data,struct additional_para para)731 add_gtp_header(uint8_t **header, uint64_t data,
732 	struct additional_para para)
733 {
734 	struct rte_gtp_hdr gtp_hdr;
735 	uint32_t teid_value = para.counter;
736 
737 	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP)))
738 		return;
739 
740 	/* Fixed value */
741 	if (FIXED_VALUES)
742 		teid_value = 1;
743 
744 	memset(&gtp_hdr, 0, sizeof(struct rte_flow_item_gtp));
745 
746 	gtp_hdr.teid = RTE_BE32(teid_value);
747 	gtp_hdr.msg_type = 255;
748 
749 	memcpy(*header, &gtp_hdr, sizeof(gtp_hdr));
750 	*header += sizeof(gtp_hdr);
751 }
752 
/* Ordered list of header generators used to build the raw encap/decap
 * buffer. Each generator appends its header (if its item bit is set in
 * 'data') and advances *header; the order is outermost-first.
 */
static const struct encap_decap_headers {
	void (*funct)(
		uint8_t **header,
		uint64_t data,
		struct additional_para para
		);
} headers[] = {
	{.funct = add_ether_header},
	{.funct = add_vlan_header},
	{.funct = add_ipv4_header},
	{.funct = add_ipv6_header},
	{.funct = add_udp_header},
	{.funct = add_vxlan_header},
	{.funct = add_vxlan_gpe_header},
	{.funct = add_gre_header},
	{.funct = add_geneve_header},
	{.funct = add_gtp_header},
};
771 
772 static void
add_raw_encap(struct rte_flow_action * actions,uint8_t actions_counter,struct additional_para para)773 add_raw_encap(struct rte_flow_action *actions,
774 	uint8_t actions_counter,
775 	struct additional_para para)
776 {
777 	static struct action_raw_encap_data *action_encap_data;
778 	uint64_t encap_data = para.encap_data;
779 	uint8_t *header;
780 	uint8_t i;
781 
782 	/* Avoid double allocation. */
783 	if (action_encap_data == NULL)
784 		action_encap_data = rte_malloc("encap_data",
785 			sizeof(struct action_raw_encap_data), 0);
786 
787 	/* Check if allocation failed. */
788 	if (action_encap_data == NULL)
789 		rte_exit(EXIT_FAILURE, "No Memory available!");
790 
791 	*action_encap_data = (struct action_raw_encap_data) {
792 		.conf = (struct rte_flow_action_raw_encap) {
793 			.data = action_encap_data->data,
794 		},
795 			.data = {},
796 	};
797 	header = action_encap_data->data;
798 
799 	for (i = 0; i < RTE_DIM(headers); i++)
800 		headers[i].funct(&header, encap_data, para);
801 
802 	action_encap_data->conf.size = header -
803 		action_encap_data->data;
804 
805 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
806 	actions[actions_counter].conf = &action_encap_data->conf;
807 }
808 
809 static void
add_raw_decap(struct rte_flow_action * actions,uint8_t actions_counter,struct additional_para para)810 add_raw_decap(struct rte_flow_action *actions,
811 	uint8_t actions_counter,
812 	struct additional_para para)
813 {
814 	static struct action_raw_decap_data *action_decap_data;
815 	uint64_t decap_data = para.decap_data;
816 	uint8_t *header;
817 	uint8_t i;
818 
819 	/* Avoid double allocation. */
820 	if (action_decap_data == NULL)
821 		action_decap_data = rte_malloc("decap_data",
822 			sizeof(struct action_raw_decap_data), 0);
823 
824 	/* Check if allocation failed. */
825 	if (action_decap_data == NULL)
826 		rte_exit(EXIT_FAILURE, "No Memory available!");
827 
828 	*action_decap_data = (struct action_raw_decap_data) {
829 		.conf = (struct rte_flow_action_raw_decap) {
830 			.data = action_decap_data->data,
831 		},
832 			.data = {},
833 	};
834 	header = action_decap_data->data;
835 
836 	for (i = 0; i < RTE_DIM(headers); i++)
837 		headers[i].funct(&header, decap_data, para);
838 
839 	action_decap_data->conf.size = header -
840 		action_decap_data->data;
841 
842 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_DECAP;
843 	actions[actions_counter].conf = &action_decap_data->conf;
844 }
845 
846 static void
add_vxlan_encap(struct rte_flow_action * actions,uint8_t actions_counter,__rte_unused struct additional_para para)847 add_vxlan_encap(struct rte_flow_action *actions,
848 	uint8_t actions_counter,
849 	__rte_unused struct additional_para para)
850 {
851 	static struct rte_flow_action_vxlan_encap vxlan_encap;
852 	static struct rte_flow_item items[5];
853 	static struct rte_flow_item_eth item_eth;
854 	static struct rte_flow_item_ipv4 item_ipv4;
855 	static struct rte_flow_item_udp item_udp;
856 	static struct rte_flow_item_vxlan item_vxlan;
857 	uint32_t ip_dst = para.counter;
858 
859 	/* Fixed value */
860 	if (FIXED_VALUES)
861 		ip_dst = 1;
862 
863 	items[0].spec = &item_eth;
864 	items[0].mask = &item_eth;
865 	items[0].type = RTE_FLOW_ITEM_TYPE_ETH;
866 
867 	item_ipv4.hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
868 	item_ipv4.hdr.dst_addr = RTE_BE32(ip_dst);
869 	item_ipv4.hdr.version_ihl = RTE_IPV4_VHL_DEF;
870 	items[1].spec = &item_ipv4;
871 	items[1].mask = &item_ipv4;
872 	items[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
873 
874 
875 	item_udp.hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
876 	items[2].spec = &item_udp;
877 	items[2].mask = &item_udp;
878 	items[2].type = RTE_FLOW_ITEM_TYPE_UDP;
879 
880 
881 	item_vxlan.vni[2] = 1;
882 	items[3].spec = &item_vxlan;
883 	items[3].mask = &item_vxlan;
884 	items[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;
885 
886 	items[4].type = RTE_FLOW_ITEM_TYPE_END;
887 
888 	vxlan_encap.definition = items;
889 
890 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
891 	actions[actions_counter].conf = &vxlan_encap;
892 }
893 
/* Append a VXLAN_DECAP action; it takes no configuration. */
static void
add_vxlan_decap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
}
901 
902 void
fill_actions(struct rte_flow_action * actions,uint64_t * flow_actions,uint32_t counter,uint16_t next_table,uint16_t hairpinq,uint64_t encap_data,uint64_t decap_data)903 fill_actions(struct rte_flow_action *actions, uint64_t *flow_actions,
904 	uint32_t counter, uint16_t next_table, uint16_t hairpinq,
905 	uint64_t encap_data, uint64_t decap_data)
906 {
907 	struct additional_para additional_para_data;
908 	uint8_t actions_counter = 0;
909 	uint16_t hairpin_queues[hairpinq];
910 	uint16_t queues[RXQ_NUM];
911 	uint16_t i, j;
912 
913 	for (i = 0; i < RXQ_NUM; i++)
914 		queues[i] = i;
915 
916 	for (i = 0; i < hairpinq; i++)
917 		hairpin_queues[i] = i + RXQ_NUM;
918 
919 	additional_para_data = (struct additional_para){
920 		.queue = counter % RXQ_NUM,
921 		.next_table = next_table,
922 		.queues = queues,
923 		.queues_number = RXQ_NUM,
924 		.counter = counter,
925 		.encap_data = encap_data,
926 		.decap_data = decap_data,
927 	};
928 
929 	if (hairpinq != 0) {
930 		additional_para_data.queues = hairpin_queues;
931 		additional_para_data.queues_number = hairpinq;
932 		additional_para_data.queue = (counter % hairpinq) + RXQ_NUM;
933 	}
934 
935 	static const struct actions_dict {
936 		uint64_t mask;
937 		void (*funct)(
938 			struct rte_flow_action *actions,
939 			uint8_t actions_counter,
940 			struct additional_para para
941 			);
942 	} actions_list[] = {
943 		{
944 			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_MARK),
945 			.funct = add_mark,
946 		},
947 		{
948 			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_COUNT),
949 			.funct = add_count,
950 		},
951 		{
952 			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_META),
953 			.funct = add_set_meta,
954 		},
955 		{
956 			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_TAG),
957 			.funct = add_set_tag,
958 		},
959 		{
960 			.mask = FLOW_ACTION_MASK(
961 				RTE_FLOW_ACTION_TYPE_FLAG
962 			),
963 			.funct = add_flag,
964 		},
965 		{
966 			.mask = FLOW_ACTION_MASK(
967 				RTE_FLOW_ACTION_TYPE_SET_MAC_SRC
968 			),
969 			.funct = add_set_src_mac,
970 		},
971 		{
972 			.mask = FLOW_ACTION_MASK(
973 				RTE_FLOW_ACTION_TYPE_SET_MAC_DST
974 			),
975 			.funct = add_set_dst_mac,
976 		},
977 		{
978 			.mask = FLOW_ACTION_MASK(
979 				RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC
980 			),
981 			.funct = add_set_src_ipv4,
982 		},
983 		{
984 			.mask =	FLOW_ACTION_MASK(
985 				RTE_FLOW_ACTION_TYPE_SET_IPV4_DST
986 			),
987 			.funct = add_set_dst_ipv4,
988 		},
989 		{
990 			.mask = FLOW_ACTION_MASK(
991 				RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC
992 			),
993 			.funct = add_set_src_ipv6,
994 		},
995 		{
996 			.mask = FLOW_ACTION_MASK(
997 				RTE_FLOW_ACTION_TYPE_SET_IPV6_DST
998 			),
999 			.funct = add_set_dst_ipv6,
1000 		},
1001 		{
1002 			.mask = FLOW_ACTION_MASK(
1003 				RTE_FLOW_ACTION_TYPE_SET_TP_SRC
1004 			),
1005 			.funct = add_set_src_tp,
1006 		},
1007 		{
1008 			.mask = FLOW_ACTION_MASK(
1009 				RTE_FLOW_ACTION_TYPE_SET_TP_DST
1010 			),
1011 			.funct = add_set_dst_tp,
1012 		},
1013 		{
1014 			.mask = FLOW_ACTION_MASK(
1015 				RTE_FLOW_ACTION_TYPE_INC_TCP_ACK
1016 			),
1017 			.funct = add_inc_tcp_ack,
1018 		},
1019 		{
1020 			.mask = FLOW_ACTION_MASK(
1021 				RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK
1022 			),
1023 			.funct = add_dec_tcp_ack,
1024 		},
1025 		{
1026 			.mask = FLOW_ACTION_MASK(
1027 				RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ
1028 			),
1029 			.funct = add_inc_tcp_seq,
1030 		},
1031 		{
1032 			.mask = FLOW_ACTION_MASK(
1033 				RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ
1034 			),
1035 			.funct = add_dec_tcp_seq,
1036 		},
1037 		{
1038 			.mask = FLOW_ACTION_MASK(
1039 				RTE_FLOW_ACTION_TYPE_SET_TTL
1040 			),
1041 			.funct = add_set_ttl,
1042 		},
1043 		{
1044 			.mask = FLOW_ACTION_MASK(
1045 				RTE_FLOW_ACTION_TYPE_DEC_TTL
1046 			),
1047 			.funct = add_dec_ttl,
1048 		},
1049 		{
1050 			.mask = FLOW_ACTION_MASK(
1051 				RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP
1052 			),
1053 			.funct = add_set_ipv4_dscp,
1054 		},
1055 		{
1056 			.mask = FLOW_ACTION_MASK(
1057 				RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP
1058 			),
1059 			.funct = add_set_ipv6_dscp,
1060 		},
1061 		{
1062 			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_QUEUE),
1063 			.funct = add_queue,
1064 		},
1065 		{
1066 			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_RSS),
1067 			.funct = add_rss,
1068 		},
1069 		{
1070 			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_JUMP),
1071 			.funct = add_jump,
1072 		},
1073 		{
1074 			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_PORT_ID),
1075 			.funct = add_port_id
1076 		},
1077 		{
1078 			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_DROP),
1079 			.funct = add_drop,
1080 		},
1081 		{
1082 			.mask = HAIRPIN_QUEUE_ACTION,
1083 			.funct = add_queue,
1084 		},
1085 		{
1086 			.mask = HAIRPIN_RSS_ACTION,
1087 			.funct = add_rss,
1088 		},
1089 		{
1090 			.mask = FLOW_ACTION_MASK(
1091 				RTE_FLOW_ACTION_TYPE_RAW_ENCAP
1092 			),
1093 			.funct = add_raw_encap,
1094 		},
1095 		{
1096 			.mask = FLOW_ACTION_MASK(
1097 				RTE_FLOW_ACTION_TYPE_RAW_DECAP
1098 			),
1099 			.funct = add_raw_decap,
1100 		},
1101 		{
1102 			.mask = FLOW_ACTION_MASK(
1103 				RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
1104 			),
1105 			.funct = add_vxlan_encap,
1106 		},
1107 		{
1108 			.mask = FLOW_ACTION_MASK(
1109 				RTE_FLOW_ACTION_TYPE_VXLAN_DECAP
1110 			),
1111 			.funct = add_vxlan_decap,
1112 		},
1113 	};
1114 
1115 	for (j = 0; j < MAX_ACTIONS_NUM; j++) {
1116 		if (flow_actions[j] == 0)
1117 			break;
1118 		for (i = 0; i < RTE_DIM(actions_list); i++) {
1119 			if ((flow_actions[j] &
1120 				actions_list[i].mask) == 0)
1121 				continue;
1122 			actions_list[i].funct(
1123 				actions, actions_counter++,
1124 				additional_para_data
1125 			);
1126 			break;
1127 		}
1128 	}
1129 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_END;
1130 }
1131