/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 *
 * This file contains the implementations of the action generators.
 * Each generator is responsible for preparing its action instance
 * and initializing it with the needed data.
 */

#include <sys/types.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_ethdev.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_gre.h>
#include <rte_geneve.h>

#include "actions_gen.h"
#include "flow_gen.h"
#include "config.h"

/* Storage for additional parameters for actions */
struct additional_para {
	uint16_t queue;
	uint16_t next_table;
	uint16_t *queues;
	uint16_t queues_number;
	uint32_t counter;
	uint64_t encap_data;
	uint64_t decap_data;
	uint16_t dst_port;
	uint8_t core_idx;
	bool unique_data;
};

/* Storage for struct rte_flow_action_raw_encap including external data. */
struct action_raw_encap_data {
	struct rte_flow_action_raw_encap conf;
	uint8_t data[128];
	uint8_t preserve[128];
	uint16_t idx;
};

/* Storage for struct rte_flow_action_raw_decap including external data. */
struct action_raw_decap_data {
	struct rte_flow_action_raw_decap conf;
	uint8_t data[128];
	uint16_t idx;
};

/* Storage for struct rte_flow_action_rss including external data. */
struct action_rss_data {
	struct rte_flow_action_rss conf;
	uint8_t key[40];
	uint16_t queue[128];
};

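/*
 * Generate a MARK action. One configuration per lcore; the mark id is
 * derived from the per-flow counter and kept in the 1..255 range.
 */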
static void
add_mark(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_mark mark_actions[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t counter = para.counter;

	do {
		/* Values from 1 to 255, derived from the flow counter */
		mark_actions[para.core_idx].id = (counter % 255) + 1;
	} while (0);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_MARK;
	actions[actions_counter].conf = &mark_actions[para.core_idx];
}

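/* Generate a QUEUE action steering matched packets to para.queue. */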
static void
add_queue(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_queue queue_actions[RTE_MAX_LCORE] __rte_cache_aligned;

	do {
		queue_actions[para.core_idx].index = para.queue;
	} while (0);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[actions_counter].conf = &queue_actions[para.core_idx];
}

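/* Generate a JUMP action redirecting matched packets to para.next_table. */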
static void
add_jump(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_jump jump_action;

	do {
		jump_action.group = para.next_table;
	} while (0);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_JUMP;
	actions[actions_counter].conf = &jump_action;
}

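/*
 * Generate an RSS action spreading traffic over all queues in para.queues.
 * The configuration lives in per-lcore storage allocated on first use.
 */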
static void
add_rss(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct action_rss_data *rss_data[RTE_MAX_LCORE] __rte_cache_aligned;

	uint16_t queue;

	if (rss_data[para.core_idx] == NULL)
		rss_data[para.core_idx] = rte_malloc("rss_data",
			sizeof(struct action_rss_data), 0);

	if (rss_data[para.core_idx] == NULL)
		rte_exit(EXIT_FAILURE, "No Memory available!");

	*rss_data[para.core_idx] = (struct action_rss_data){
		.conf = (struct rte_flow_action_rss){
			.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
			.level = 0,
			.types = GET_RSS_HF(),
			.key_len = sizeof(rss_data[para.core_idx]->key),
			.queue_num = para.queues_number,
			.key = rss_data[para.core_idx]->key,
			.queue = rss_data[para.core_idx]->queue,
		},
		.key = { 1 },
		.queue = { 0 },
	};

	for (queue = 0; queue < para.queues_number; queue++)
		rss_data[para.core_idx]->queue[queue] = para.queues[queue];

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RSS;
	actions[actions_counter].conf = &rss_data[para.core_idx]->conf;
}

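/*
 * Generators for the simple single-config actions: SET_META and SET_TAG
 * write fixed metadata values, PORT_ID redirects to the configured
 * destination port, DROP and COUNT need little or no configuration.
 */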
static void
add_set_meta(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_action_set_meta meta_action = {
		.data = RTE_BE32(META_DATA),
		.mask = RTE_BE32(0xffffffff),
	};

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_META;
	actions[actions_counter].conf = &meta_action;
}

static void
add_set_tag(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_action_set_tag tag_action = {
		.data = RTE_BE32(META_DATA),
		.mask = RTE_BE32(0xffffffff),
		.index = TAG_INDEX,
	};

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TAG;
	actions[actions_counter].conf = &tag_action;
}

static void
add_port_id(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_port_id port_id = {
		.id = PORT_ID_DST,
	};

	port_id.id = para.dst_port;
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
	actions[actions_counter].conf = &port_id;
}

static void
add_drop(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DROP;
}

static void
add_count(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	static struct rte_flow_action_count count_action;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_COUNT;
	actions[actions_counter].conf = &count_action;
}

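/*
 * Generate SET_MAC_SRC/SET_MAC_DST actions. The address bytes are derived
 * from the per-flow counter when unique per-flow data is enabled,
 * otherwise a fixed value is used.
 */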
static void
add_set_src_mac(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_mac set_macs[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t mac = para.counter;
	uint16_t i;

	/* Fixed value */
	if (!para.unique_data)
		mac = 1;

	/* MAC address derived from the flow counter, unique per flow */
	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
		set_macs[para.core_idx].mac_addr[i] = mac & 0xff;
		mac = mac >> 8;
	}

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC;
	actions[actions_counter].conf = &set_macs[para.core_idx];
}

static void
add_set_dst_mac(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_mac set_macs[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t mac = para.counter;
	uint16_t i;

	/* Fixed value */
	if (!para.unique_data)
		mac = 1;

	/* MAC address derived from the flow counter, unique per flow */
	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
		set_macs[para.core_idx].mac_addr[i] = mac & 0xff;
		mac = mac >> 8;
	}

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_DST;
	actions[actions_counter].conf = &set_macs[para.core_idx];
}

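/*
 * Generators for the SET_IPV4_SRC/DST and SET_IPV6_SRC/DST actions; the
 * address is likewise counter-derived or fixed.
 */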
static void
add_set_src_ipv4(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_ipv4 set_ipv4[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t ip = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		ip = 1;

	/* IPv4 address derived from the flow counter, unique per flow */
	set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC;
	actions[actions_counter].conf = &set_ipv4[para.core_idx];
}

static void
add_set_dst_ipv4(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_ipv4 set_ipv4[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t ip = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		ip = 1;

	/* IPv4 address derived from the flow counter, unique per flow */
	set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DST;
	actions[actions_counter].conf = &set_ipv4[para.core_idx];
}

static void
add_set_src_ipv6(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_ipv6 set_ipv6[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t ipv6 = para.counter;
	uint8_t i;

	/* Fixed value */
	if (!para.unique_data)
		ipv6 = 1;

	/* IPv6 address derived from the flow counter, unique per flow */
	for (i = 0; i < 16; i++) {
		set_ipv6[para.core_idx].ipv6_addr[i] = ipv6 & 0xff;
		ipv6 = ipv6 >> 8;
	}

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC;
	actions[actions_counter].conf = &set_ipv6[para.core_idx];
}

static void
add_set_dst_ipv6(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_ipv6 set_ipv6[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t ipv6 = para.counter;
	uint8_t i;

	/* Fixed value */
	if (!para.unique_data)
		ipv6 = 1;

	/* IPv6 address derived from the flow counter, unique per flow */
	for (i = 0; i < 16; i++) {
		set_ipv6[para.core_idx].ipv6_addr[i] = ipv6 & 0xff;
		ipv6 = ipv6 >> 8;
	}

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DST;
	actions[actions_counter].conf = &set_ipv6[para.core_idx];
}

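/*
 * Generate SET_TP_SRC/SET_TP_DST actions rewriting the transport-layer
 * (TCP/UDP) port, folded into the 16-bit port range.
 */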
static void
add_set_src_tp(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_tp set_tp[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t tp = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		tp = 100;

	/* Fold the counter into the valid 16-bit port range */
	tp = tp % 0xffff;

	set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_SRC;
	actions[actions_counter].conf = &set_tp[para.core_idx];
}

static void
add_set_dst_tp(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_tp set_tp[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t tp = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		tp = 100;

	/* Fold the counter into the valid 16-bit port range */
	if (tp > 0xffff)
		tp = tp >> 16;

	set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_DST;
	actions[actions_counter].conf = &set_tp[para.core_idx];
}

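/*
 * Generators for INC/DEC_TCP_ACK and INC/DEC_TCP_SEQ. The conf is a
 * big-endian 32-bit value by which the ACK/SEQ number is adjusted.
 */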
static void
add_inc_tcp_ack(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t ack_value = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		ack_value = 1;

	value[para.core_idx] = RTE_BE32(ack_value);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_ACK;
	actions[actions_counter].conf = &value[para.core_idx];
}

static void
add_dec_tcp_ack(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t ack_value = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		ack_value = 1;

	value[para.core_idx] = RTE_BE32(ack_value);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK;
	actions[actions_counter].conf = &value[para.core_idx];
}

static void
add_inc_tcp_seq(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t seq_value = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		seq_value = 1;

	value[para.core_idx] = RTE_BE32(seq_value);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ;
	actions[actions_counter].conf = &value[para.core_idx];
}

static void
add_dec_tcp_seq(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t seq_value = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		seq_value = 1;

	value[para.core_idx] = RTE_BE32(seq_value);

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ;
	actions[actions_counter].conf = &value[para.core_idx];
}

static void
add_set_ttl(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_ttl set_ttl[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t ttl_value = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		ttl_value = 1;

	/* Fold the counter into the 8-bit TTL range */
	ttl_value = ttl_value % 0xff;

	set_ttl[para.core_idx].ttl_value = ttl_value;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TTL;
	actions[actions_counter].conf = &set_ttl[para.core_idx];
}

static void
add_dec_ttl(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TTL;
}

static void
add_set_ipv4_dscp(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_dscp set_dscp[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t dscp_value = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		dscp_value = 1;

	/* Fold the counter into the 8-bit DSCP field */
	dscp_value = dscp_value % 0xff;

	set_dscp[para.core_idx].dscp = dscp_value;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP;
	actions[actions_counter].conf = &set_dscp[para.core_idx];
}

static void
add_set_ipv6_dscp(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_set_dscp set_dscp[RTE_MAX_LCORE] __rte_cache_aligned;
	uint32_t dscp_value = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		dscp_value = 1;

	/* Fold the counter into the 8-bit DSCP field */
	dscp_value = dscp_value % 0xff;

	set_dscp[para.core_idx].dscp = dscp_value;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP;
	actions[actions_counter].conf = &set_dscp[para.core_idx];
}

static void
add_flag(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_FLAG;
}

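/*
 * The add_*_header() helpers below build the raw buffer consumed by the
 * RAW_ENCAP/RAW_DECAP actions. Each helper checks its item flag in the
 * encap/decap bitmap, appends its header at *header and advances the
 * pointer, so calling them in table order yields an outer-to-inner stack.
 */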
static void
add_ether_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_ether_hdr eth_hdr;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH)))
		return;

	memset(&eth_hdr, 0, sizeof(struct rte_ether_hdr));
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN))
		eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
	else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
		eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
	else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
		eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
	memcpy(*header, &eth_hdr, sizeof(eth_hdr));
	*header += sizeof(eth_hdr);
}

static void
add_vlan_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_vlan_hdr vlan_hdr;
	uint16_t vlan_value;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN)))
		return;

	vlan_value = VLAN_VALUE;

	memset(&vlan_hdr, 0, sizeof(struct rte_vlan_hdr));
	vlan_hdr.vlan_tci = RTE_BE16(vlan_value);

	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
		vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
		vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
	memcpy(*header, &vlan_hdr, sizeof(vlan_hdr));
	*header += sizeof(vlan_hdr);
}

static void
add_ipv4_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_ipv4_hdr ipv4_hdr;
	uint32_t ip_dst = para.counter;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4)))
		return;

	/* Fixed value */
	if (!para.unique_data)
		ip_dst = 1;

	memset(&ipv4_hdr, 0, sizeof(struct rte_ipv4_hdr));
	ipv4_hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
	ipv4_hdr.dst_addr = RTE_BE32(ip_dst);
	ipv4_hdr.version_ihl = RTE_IPV4_VHL_DEF;
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
		ipv4_hdr.next_proto_id = RTE_IP_TYPE_UDP;
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
		ipv4_hdr.next_proto_id = RTE_IP_TYPE_GRE;
	memcpy(*header, &ipv4_hdr, sizeof(ipv4_hdr));
	*header += sizeof(ipv4_hdr);
}

static void
add_ipv6_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_ipv6_hdr ipv6_hdr;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6)))
		return;

	memset(&ipv6_hdr, 0, sizeof(struct rte_ipv6_hdr));
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
		ipv6_hdr.proto = RTE_IP_TYPE_UDP;
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
		ipv6_hdr.proto = RTE_IP_TYPE_GRE;
	memcpy(*header, &ipv6_hdr, sizeof(ipv6_hdr));
	*header += sizeof(ipv6_hdr);
}

static void
add_udp_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_udp_hdr udp_hdr;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP)))
		return;

	memset(&udp_hdr, 0, sizeof(udp_hdr));
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN))
		udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE))
		udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_GPE_UDP_PORT);
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE))
		udp_hdr.dst_port = RTE_BE16(RTE_GENEVE_UDP_PORT);
	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP))
		udp_hdr.dst_port = RTE_BE16(RTE_GTPU_UDP_PORT);
	memcpy(*header, &udp_hdr, sizeof(udp_hdr));
	*header += sizeof(udp_hdr);
}

static void
add_vxlan_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_vxlan_hdr vxlan_hdr;
	uint32_t vni_value = para.counter;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN)))
		return;

	/* Fixed value */
	if (!para.unique_data)
		vni_value = 1;

	memset(&vxlan_hdr, 0, sizeof(struct rte_vxlan_hdr));

	vxlan_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
	vxlan_hdr.vx_flags = 0x8;

	memcpy(*header, &vxlan_hdr, sizeof(vxlan_hdr));
	*header += sizeof(vxlan_hdr);
}

static void
add_vxlan_gpe_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_vxlan_gpe_hdr vxlan_gpe_hdr;
	uint32_t vni_value = para.counter;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE)))
		return;

	/* Fixed value */
	if (!para.unique_data)
		vni_value = 1;

	memset(&vxlan_gpe_hdr, 0, sizeof(struct rte_vxlan_gpe_hdr));

	vxlan_gpe_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
	vxlan_gpe_hdr.vx_flags = 0x0c;

	memcpy(*header, &vxlan_gpe_hdr, sizeof(vxlan_gpe_hdr));
	*header += sizeof(vxlan_gpe_hdr);
}

static void
add_gre_header(uint8_t **header, uint64_t data,
	__rte_unused struct additional_para para)
{
	struct rte_gre_hdr gre_hdr;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE)))
		return;

	memset(&gre_hdr, 0, sizeof(struct rte_gre_hdr));

	gre_hdr.proto = RTE_BE16(RTE_ETHER_TYPE_TEB);

	memcpy(*header, &gre_hdr, sizeof(gre_hdr));
	*header += sizeof(gre_hdr);
}

static void
add_geneve_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_geneve_hdr geneve_hdr;
	uint32_t vni_value = para.counter;
	uint8_t i;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE)))
		return;

	/* Fixed value */
	if (!para.unique_data)
		vni_value = 1;

	memset(&geneve_hdr, 0, sizeof(struct rte_geneve_hdr));

	for (i = 0; i < 3; i++)
		geneve_hdr.vni[2 - i] = vni_value >> (i * 8);

	memcpy(*header, &geneve_hdr, sizeof(geneve_hdr));
	*header += sizeof(geneve_hdr);
}

static void
add_gtp_header(uint8_t **header, uint64_t data,
	struct additional_para para)
{
	struct rte_gtp_hdr gtp_hdr;
	uint32_t teid_value = para.counter;

	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP)))
		return;

	/* Fixed value */
	if (!para.unique_data)
		teid_value = 1;

	memset(&gtp_hdr, 0, sizeof(gtp_hdr));

	gtp_hdr.teid = RTE_BE32(teid_value);
	gtp_hdr.msg_type = 255;

	memcpy(*header, &gtp_hdr, sizeof(gtp_hdr));
	*header += sizeof(gtp_hdr);
}

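/* Dispatch table of header builders, kept in outer-to-inner order. */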
static const struct encap_decap_headers {
	void (*funct)(
		uint8_t **header,
		uint64_t data,
		struct additional_para para
	);
} headers[] = {
	{.funct = add_ether_header},
	{.funct = add_vlan_header},
	{.funct = add_ipv4_header},
	{.funct = add_ipv6_header},
	{.funct = add_udp_header},
	{.funct = add_vxlan_header},
	{.funct = add_vxlan_gpe_header},
	{.funct = add_gre_header},
	{.funct = add_geneve_header},
	{.funct = add_gtp_header},
};

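/*
 * Generate a RAW_ENCAP action. The per-lcore buffer is allocated lazily,
 * then rebuilt on every call from the header builders selected by
 * para.encap_data; conf.size is the number of bytes actually written.
 */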
static void
add_raw_encap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct action_raw_encap_data *action_encap_data[RTE_MAX_LCORE] __rte_cache_aligned;
	uint64_t encap_data = para.encap_data;
	uint8_t *header;
	uint8_t i;

	/* Avoid double allocation. */
	if (action_encap_data[para.core_idx] == NULL)
		action_encap_data[para.core_idx] = rte_malloc("encap_data",
			sizeof(struct action_raw_encap_data), 0);

	/* Check if allocation failed. */
	if (action_encap_data[para.core_idx] == NULL)
		rte_exit(EXIT_FAILURE, "No Memory available!");

	*action_encap_data[para.core_idx] = (struct action_raw_encap_data) {
		.conf = (struct rte_flow_action_raw_encap) {
			.data = action_encap_data[para.core_idx]->data,
		},
		.data = {},
	};
	header = action_encap_data[para.core_idx]->data;

	for (i = 0; i < RTE_DIM(headers); i++)
		headers[i].funct(&header, encap_data, para);

	action_encap_data[para.core_idx]->conf.size = header -
		action_encap_data[para.core_idx]->data;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
	actions[actions_counter].conf = &action_encap_data[para.core_idx]->conf;
}

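/*
 * Generate a RAW_DECAP action. Mirrors add_raw_encap(): the same header
 * builders describe the bytes to strip, selected by para.decap_data.
 */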
static void
add_raw_decap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct action_raw_decap_data *action_decap_data[RTE_MAX_LCORE] __rte_cache_aligned;
	uint64_t decap_data = para.decap_data;
	uint8_t *header;
	uint8_t i;

	/* Avoid double allocation. */
	if (action_decap_data[para.core_idx] == NULL)
		action_decap_data[para.core_idx] = rte_malloc("decap_data",
			sizeof(struct action_raw_decap_data), 0);

	/* Check if allocation failed. */
	if (action_decap_data[para.core_idx] == NULL)
		rte_exit(EXIT_FAILURE, "No Memory available!");

	*action_decap_data[para.core_idx] = (struct action_raw_decap_data) {
		.conf = (struct rte_flow_action_raw_decap) {
			.data = action_decap_data[para.core_idx]->data,
		},
		.data = {},
	};
	header = action_decap_data[para.core_idx]->data;

	for (i = 0; i < RTE_DIM(headers); i++)
		headers[i].funct(&header, decap_data, para);

	action_decap_data[para.core_idx]->conf.size = header -
		action_decap_data[para.core_idx]->data;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_DECAP;
	actions[actions_counter].conf = &action_decap_data[para.core_idx]->conf;
}

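/*
 * Generate a VXLAN_ENCAP action. Unlike RAW_ENCAP, the encapsulation is
 * described as a flow item pattern (ETH / IPV4 / UDP / VXLAN / END)
 * rather than as raw bytes.
 */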
static void
add_vxlan_encap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_vxlan_encap vxlan_encap[RTE_MAX_LCORE] __rte_cache_aligned;
	static struct rte_flow_item items[5];
	static struct rte_flow_item_eth item_eth;
	static struct rte_flow_item_ipv4 item_ipv4;
	static struct rte_flow_item_udp item_udp;
	static struct rte_flow_item_vxlan item_vxlan;
	uint32_t ip_dst = para.counter;

	/* Fixed value */
	if (!para.unique_data)
		ip_dst = 1;

	items[0].spec = &item_eth;
	items[0].mask = &item_eth;
	items[0].type = RTE_FLOW_ITEM_TYPE_ETH;

	item_ipv4.hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
	item_ipv4.hdr.dst_addr = RTE_BE32(ip_dst);
	item_ipv4.hdr.version_ihl = RTE_IPV4_VHL_DEF;
	items[1].spec = &item_ipv4;
	items[1].mask = &item_ipv4;
	items[1].type = RTE_FLOW_ITEM_TYPE_IPV4;

	item_udp.hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
	items[2].spec = &item_udp;
	items[2].mask = &item_udp;
	items[2].type = RTE_FLOW_ITEM_TYPE_UDP;

	item_vxlan.vni[2] = 1;
	items[3].spec = &item_vxlan;
	items[3].mask = &item_vxlan;
	items[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;

	items[4].type = RTE_FLOW_ITEM_TYPE_END;

	vxlan_encap[para.core_idx].definition = items;

	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
	actions[actions_counter].conf = &vxlan_encap[para.core_idx];
}

static void
add_vxlan_decap(struct rte_flow_action *actions,
	uint8_t actions_counter,
	__rte_unused struct additional_para para)
{
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
}

static void
add_meter(struct rte_flow_action *actions,
	uint8_t actions_counter,
	struct additional_para para)
{
	static struct rte_flow_action_meter
		meters[RTE_MAX_LCORE] __rte_cache_aligned;

	meters[para.core_idx].mtr_id = para.counter;
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_METER;
	actions[actions_counter].conf = &meters[para.core_idx];
}

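/*
 * Build the action array for a single flow rule. Each non-zero entry in
 * flow_actions[] selects the first generator in actions_list whose mask
 * it matches; the array is terminated with RTE_FLOW_ACTION_TYPE_END.
 * When hairpinq is non-zero, QUEUE and RSS target the hairpin queues.
 *
 * Minimal usage sketch (variable names here are illustrative, not part
 * of this file):
 *
 *	struct rte_flow_action actions[MAX_ACTIONS_NUM];
 *	uint64_t flow_actions[MAX_ACTIONS_NUM] = {
 *		FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_QUEUE),
 *	};
 *	fill_actions(actions, flow_actions, counter, 0, 0, 0, 0,
 *		core_idx, true, rx_queues_count, dst_port);
 */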
void
fill_actions(struct rte_flow_action *actions, uint64_t *flow_actions,
	uint32_t counter, uint16_t next_table, uint16_t hairpinq,
	uint64_t encap_data, uint64_t decap_data, uint8_t core_idx,
	bool unique_data, uint8_t rx_queues_count, uint16_t dst_port)
{
	struct additional_para additional_para_data;
	uint8_t actions_counter = 0;
	uint16_t hairpin_queues[hairpinq];
	uint16_t queues[rx_queues_count];
	uint16_t i, j;

	for (i = 0; i < rx_queues_count; i++)
		queues[i] = i;

	for (i = 0; i < hairpinq; i++)
		hairpin_queues[i] = i + rx_queues_count;

	additional_para_data = (struct additional_para){
		.queue = counter % rx_queues_count,
		.next_table = next_table,
		.queues = queues,
		.queues_number = rx_queues_count,
		.counter = counter,
		.encap_data = encap_data,
		.decap_data = decap_data,
		.core_idx = core_idx,
		.unique_data = unique_data,
		.dst_port = dst_port,
	};

	if (hairpinq != 0) {
		additional_para_data.queues = hairpin_queues;
		additional_para_data.queues_number = hairpinq;
		additional_para_data.queue = (counter % hairpinq) + rx_queues_count;
	}

	static const struct actions_dict {
		uint64_t mask;
		void (*funct)(
			struct rte_flow_action *actions,
			uint8_t actions_counter,
			struct additional_para para
		);
	} actions_list[] = {
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_MARK),
			.funct = add_mark,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_COUNT),
			.funct = add_count,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_META),
			.funct = add_set_meta,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_TAG),
			.funct = add_set_tag,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_FLAG
			),
			.funct = add_flag,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_MAC_SRC
			),
			.funct = add_set_src_mac,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_MAC_DST
			),
			.funct = add_set_dst_mac,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC
			),
			.funct = add_set_src_ipv4,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_IPV4_DST
			),
			.funct = add_set_dst_ipv4,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC
			),
			.funct = add_set_src_ipv6,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_IPV6_DST
			),
			.funct = add_set_dst_ipv6,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_TP_SRC
			),
			.funct = add_set_src_tp,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_TP_DST
			),
			.funct = add_set_dst_tp,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_INC_TCP_ACK
			),
			.funct = add_inc_tcp_ack,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK
			),
			.funct = add_dec_tcp_ack,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ
			),
			.funct = add_inc_tcp_seq,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ
			),
			.funct = add_dec_tcp_seq,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_TTL
			),
			.funct = add_set_ttl,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_DEC_TTL
			),
			.funct = add_dec_ttl,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP
			),
			.funct = add_set_ipv4_dscp,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP
			),
			.funct = add_set_ipv6_dscp,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_QUEUE),
			.funct = add_queue,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_RSS),
			.funct = add_rss,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_JUMP),
			.funct = add_jump,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_PORT_ID),
			.funct = add_port_id,
		},
		{
			.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_DROP),
			.funct = add_drop,
		},
		{
			.mask = HAIRPIN_QUEUE_ACTION,
			.funct = add_queue,
		},
		{
			.mask = HAIRPIN_RSS_ACTION,
			.funct = add_rss,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_RAW_ENCAP
			),
			.funct = add_raw_encap,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_RAW_DECAP
			),
			.funct = add_raw_decap,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
			),
			.funct = add_vxlan_encap,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_VXLAN_DECAP
			),
			.funct = add_vxlan_decap,
		},
		{
			.mask = FLOW_ACTION_MASK(
				RTE_FLOW_ACTION_TYPE_METER
			),
			.funct = add_meter,
		},
	};

	for (j = 0; j < MAX_ACTIONS_NUM; j++) {
		if (flow_actions[j] == 0)
			break;
		for (i = 0; i < RTE_DIM(actions_list); i++) {
			if ((flow_actions[j] &
				actions_list[i].mask) == 0)
				continue;
			actions_list[i].funct(
				actions, actions_counter++,
				additional_para_data
			);
			break;
		}
	}
	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_END;
}
