/* xref: /f-stack/dpdk/app/test-flow-perf/items_gen.c (revision 2d9fd380) */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 *
 * This file contains the implementations of the item-related
 * methods. Each item has a method that prepares the item and
 * adds it into the items array at the given index.
 */
8 
#include <stdint.h>
#include <string.h>

#include <rte_flow.h>

#include "items_gen.h"
#include "config.h"
14 
15 /* Storage for additional parameters for items */
16 struct additional_para {
17 	rte_be32_t src_ip;
18 };
19 
20 static void
add_ether(struct rte_flow_item * items,uint8_t items_counter,__rte_unused struct additional_para para)21 add_ether(struct rte_flow_item *items,
22 	uint8_t items_counter,
23 	__rte_unused struct additional_para para)
24 {
25 	static struct rte_flow_item_eth eth_spec;
26 	static struct rte_flow_item_eth eth_mask;
27 
28 	memset(&eth_spec, 0, sizeof(struct rte_flow_item_eth));
29 	memset(&eth_mask, 0, sizeof(struct rte_flow_item_eth));
30 
31 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_ETH;
32 	items[items_counter].spec = &eth_spec;
33 	items[items_counter].mask = &eth_mask;
34 }
35 
36 static void
add_vlan(struct rte_flow_item * items,uint8_t items_counter,__rte_unused struct additional_para para)37 add_vlan(struct rte_flow_item *items,
38 	uint8_t items_counter,
39 	__rte_unused struct additional_para para)
40 {
41 	static struct rte_flow_item_vlan vlan_spec;
42 	static struct rte_flow_item_vlan vlan_mask;
43 
44 	uint16_t vlan_value = VLAN_VALUE;
45 
46 	memset(&vlan_spec, 0, sizeof(struct rte_flow_item_vlan));
47 	memset(&vlan_mask, 0, sizeof(struct rte_flow_item_vlan));
48 
49 	vlan_spec.tci = RTE_BE16(vlan_value);
50 	vlan_mask.tci = RTE_BE16(0xffff);
51 
52 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_VLAN;
53 	items[items_counter].spec = &vlan_spec;
54 	items[items_counter].mask = &vlan_mask;
55 }
56 
57 static void
add_ipv4(struct rte_flow_item * items,uint8_t items_counter,struct additional_para para)58 add_ipv4(struct rte_flow_item *items,
59 	uint8_t items_counter, struct additional_para para)
60 {
61 	static struct rte_flow_item_ipv4 ipv4_spec;
62 	static struct rte_flow_item_ipv4 ipv4_mask;
63 
64 	memset(&ipv4_spec, 0, sizeof(struct rte_flow_item_ipv4));
65 	memset(&ipv4_mask, 0, sizeof(struct rte_flow_item_ipv4));
66 
67 	ipv4_spec.hdr.src_addr = RTE_BE32(para.src_ip);
68 	ipv4_mask.hdr.src_addr = RTE_BE32(0xffffffff);
69 
70 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV4;
71 	items[items_counter].spec = &ipv4_spec;
72 	items[items_counter].mask = &ipv4_mask;
73 }
74 
75 
76 static void
add_ipv6(struct rte_flow_item * items,uint8_t items_counter,struct additional_para para)77 add_ipv6(struct rte_flow_item *items,
78 	uint8_t items_counter, struct additional_para para)
79 {
80 	static struct rte_flow_item_ipv6 ipv6_spec;
81 	static struct rte_flow_item_ipv6 ipv6_mask;
82 
83 	memset(&ipv6_spec, 0, sizeof(struct rte_flow_item_ipv6));
84 	memset(&ipv6_mask, 0, sizeof(struct rte_flow_item_ipv6));
85 
86 	/** Set ipv6 src **/
87 	memset(&ipv6_spec.hdr.src_addr, para.src_ip,
88 		sizeof(ipv6_spec.hdr.src_addr) / 2);
89 
90 	/** Full mask **/
91 	memset(&ipv6_mask.hdr.src_addr, 0xff,
92 		sizeof(ipv6_spec.hdr.src_addr));
93 
94 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_IPV6;
95 	items[items_counter].spec = &ipv6_spec;
96 	items[items_counter].mask = &ipv6_mask;
97 }
98 
99 static void
add_tcp(struct rte_flow_item * items,uint8_t items_counter,__rte_unused struct additional_para para)100 add_tcp(struct rte_flow_item *items,
101 	uint8_t items_counter,
102 	__rte_unused struct additional_para para)
103 {
104 	static struct rte_flow_item_tcp tcp_spec;
105 	static struct rte_flow_item_tcp tcp_mask;
106 
107 	memset(&tcp_spec, 0, sizeof(struct rte_flow_item_tcp));
108 	memset(&tcp_mask, 0, sizeof(struct rte_flow_item_tcp));
109 
110 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_TCP;
111 	items[items_counter].spec = &tcp_spec;
112 	items[items_counter].mask = &tcp_mask;
113 }
114 
115 static void
add_udp(struct rte_flow_item * items,uint8_t items_counter,__rte_unused struct additional_para para)116 add_udp(struct rte_flow_item *items,
117 	uint8_t items_counter,
118 	__rte_unused struct additional_para para)
119 {
120 	static struct rte_flow_item_udp udp_spec;
121 	static struct rte_flow_item_udp udp_mask;
122 
123 	memset(&udp_spec, 0, sizeof(struct rte_flow_item_udp));
124 	memset(&udp_mask, 0, sizeof(struct rte_flow_item_udp));
125 
126 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_UDP;
127 	items[items_counter].spec = &udp_spec;
128 	items[items_counter].mask = &udp_mask;
129 }
130 
131 static void
add_vxlan(struct rte_flow_item * items,uint8_t items_counter,__rte_unused struct additional_para para)132 add_vxlan(struct rte_flow_item *items,
133 	uint8_t items_counter,
134 	__rte_unused struct additional_para para)
135 {
136 	static struct rte_flow_item_vxlan vxlan_spec;
137 	static struct rte_flow_item_vxlan vxlan_mask;
138 
139 	uint32_t vni_value;
140 	uint8_t i;
141 
142 	vni_value = VNI_VALUE;
143 
144 	memset(&vxlan_spec, 0, sizeof(struct rte_flow_item_vxlan));
145 	memset(&vxlan_mask, 0, sizeof(struct rte_flow_item_vxlan));
146 
147 	/* Set standard vxlan vni */
148 	for (i = 0; i < 3; i++) {
149 		vxlan_spec.vni[2 - i] = vni_value >> (i * 8);
150 		vxlan_mask.vni[2 - i] = 0xff;
151 	}
152 
153 	/* Standard vxlan flags */
154 	vxlan_spec.flags = 0x8;
155 
156 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_VXLAN;
157 	items[items_counter].spec = &vxlan_spec;
158 	items[items_counter].mask = &vxlan_mask;
159 }
160 
161 static void
add_vxlan_gpe(struct rte_flow_item * items,uint8_t items_counter,__rte_unused struct additional_para para)162 add_vxlan_gpe(struct rte_flow_item *items,
163 	uint8_t items_counter,
164 	__rte_unused struct additional_para para)
165 {
166 	static struct rte_flow_item_vxlan_gpe vxlan_gpe_spec;
167 	static struct rte_flow_item_vxlan_gpe vxlan_gpe_mask;
168 
169 	uint32_t vni_value;
170 	uint8_t i;
171 
172 	vni_value = VNI_VALUE;
173 
174 	memset(&vxlan_gpe_spec, 0, sizeof(struct rte_flow_item_vxlan_gpe));
175 	memset(&vxlan_gpe_mask, 0, sizeof(struct rte_flow_item_vxlan_gpe));
176 
177 	/* Set vxlan-gpe vni */
178 	for (i = 0; i < 3; i++) {
179 		vxlan_gpe_spec.vni[2 - i] = vni_value >> (i * 8);
180 		vxlan_gpe_mask.vni[2 - i] = 0xff;
181 	}
182 
183 	/* vxlan-gpe flags */
184 	vxlan_gpe_spec.flags = 0x0c;
185 
186 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE;
187 	items[items_counter].spec = &vxlan_gpe_spec;
188 	items[items_counter].mask = &vxlan_gpe_mask;
189 }
190 
191 static void
add_gre(struct rte_flow_item * items,uint8_t items_counter,__rte_unused struct additional_para para)192 add_gre(struct rte_flow_item *items,
193 	uint8_t items_counter,
194 	__rte_unused struct additional_para para)
195 {
196 	static struct rte_flow_item_gre gre_spec;
197 	static struct rte_flow_item_gre gre_mask;
198 
199 	uint16_t proto;
200 
201 	proto = RTE_ETHER_TYPE_TEB;
202 
203 	memset(&gre_spec, 0, sizeof(struct rte_flow_item_gre));
204 	memset(&gre_mask, 0, sizeof(struct rte_flow_item_gre));
205 
206 	gre_spec.protocol = RTE_BE16(proto);
207 	gre_mask.protocol = RTE_BE16(0xffff);
208 
209 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_GRE;
210 	items[items_counter].spec = &gre_spec;
211 	items[items_counter].mask = &gre_mask;
212 }
213 
214 static void
add_geneve(struct rte_flow_item * items,uint8_t items_counter,__rte_unused struct additional_para para)215 add_geneve(struct rte_flow_item *items,
216 	uint8_t items_counter,
217 	__rte_unused struct additional_para para)
218 {
219 	static struct rte_flow_item_geneve geneve_spec;
220 	static struct rte_flow_item_geneve geneve_mask;
221 
222 	uint32_t vni_value;
223 	uint8_t i;
224 
225 	vni_value = VNI_VALUE;
226 
227 	memset(&geneve_spec, 0, sizeof(struct rte_flow_item_geneve));
228 	memset(&geneve_mask, 0, sizeof(struct rte_flow_item_geneve));
229 
230 	for (i = 0; i < 3; i++) {
231 		geneve_spec.vni[2 - i] = vni_value >> (i * 8);
232 		geneve_mask.vni[2 - i] = 0xff;
233 	}
234 
235 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_GENEVE;
236 	items[items_counter].spec = &geneve_spec;
237 	items[items_counter].mask = &geneve_mask;
238 }
239 
240 static void
add_gtp(struct rte_flow_item * items,uint8_t items_counter,__rte_unused struct additional_para para)241 add_gtp(struct rte_flow_item *items,
242 	uint8_t items_counter,
243 	__rte_unused struct additional_para para)
244 {
245 	static struct rte_flow_item_gtp gtp_spec;
246 	static struct rte_flow_item_gtp gtp_mask;
247 
248 	uint32_t teid_value;
249 
250 	teid_value = TEID_VALUE;
251 
252 	memset(&gtp_spec, 0, sizeof(struct rte_flow_item_gtp));
253 	memset(&gtp_mask, 0, sizeof(struct rte_flow_item_gtp));
254 
255 	gtp_spec.teid = RTE_BE32(teid_value);
256 	gtp_mask.teid = RTE_BE32(0xffffffff);
257 
258 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_GTP;
259 	items[items_counter].spec = &gtp_spec;
260 	items[items_counter].mask = &gtp_mask;
261 }
262 
263 static void
add_meta_data(struct rte_flow_item * items,uint8_t items_counter,__rte_unused struct additional_para para)264 add_meta_data(struct rte_flow_item *items,
265 	uint8_t items_counter,
266 	__rte_unused struct additional_para para)
267 {
268 	static struct rte_flow_item_meta meta_spec;
269 	static struct rte_flow_item_meta meta_mask;
270 
271 	uint32_t data;
272 
273 	data = META_DATA;
274 
275 	memset(&meta_spec, 0, sizeof(struct rte_flow_item_meta));
276 	memset(&meta_mask, 0, sizeof(struct rte_flow_item_meta));
277 
278 	meta_spec.data = RTE_BE32(data);
279 	meta_mask.data = RTE_BE32(0xffffffff);
280 
281 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_META;
282 	items[items_counter].spec = &meta_spec;
283 	items[items_counter].mask = &meta_mask;
284 }
285 
286 
287 static void
add_meta_tag(struct rte_flow_item * items,uint8_t items_counter,__rte_unused struct additional_para para)288 add_meta_tag(struct rte_flow_item *items,
289 	uint8_t items_counter,
290 	__rte_unused struct additional_para para)
291 {
292 	static struct rte_flow_item_tag tag_spec;
293 	static struct rte_flow_item_tag tag_mask;
294 	uint32_t data;
295 	uint8_t index;
296 
297 	data = META_DATA;
298 	index = TAG_INDEX;
299 
300 	memset(&tag_spec, 0, sizeof(struct rte_flow_item_tag));
301 	memset(&tag_mask, 0, sizeof(struct rte_flow_item_tag));
302 
303 	tag_spec.data = RTE_BE32(data);
304 	tag_mask.data = RTE_BE32(0xffffffff);
305 	tag_spec.index = index;
306 	tag_mask.index = 0xff;
307 
308 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_TAG;
309 	items[items_counter].spec = &tag_spec;
310 	items[items_counter].mask = &tag_mask;
311 }
312 
313 static void
add_icmpv4(struct rte_flow_item * items,uint8_t items_counter,__rte_unused struct additional_para para)314 add_icmpv4(struct rte_flow_item *items,
315 	uint8_t items_counter,
316 	__rte_unused struct additional_para para)
317 {
318 	static struct rte_flow_item_icmp icmpv4_spec;
319 	static struct rte_flow_item_icmp icmpv4_mask;
320 
321 	memset(&icmpv4_spec, 0, sizeof(struct rte_flow_item_icmp));
322 	memset(&icmpv4_mask, 0, sizeof(struct rte_flow_item_icmp));
323 
324 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_ICMP;
325 	items[items_counter].spec = &icmpv4_spec;
326 	items[items_counter].mask = &icmpv4_mask;
327 }
328 
329 static void
add_icmpv6(struct rte_flow_item * items,uint8_t items_counter,__rte_unused struct additional_para para)330 add_icmpv6(struct rte_flow_item *items,
331 	uint8_t items_counter,
332 	__rte_unused struct additional_para para)
333 {
334 	static struct rte_flow_item_icmp6 icmpv6_spec;
335 	static struct rte_flow_item_icmp6 icmpv6_mask;
336 
337 	memset(&icmpv6_spec, 0, sizeof(struct rte_flow_item_icmp6));
338 	memset(&icmpv6_mask, 0, sizeof(struct rte_flow_item_icmp6));
339 
340 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_ICMP6;
341 	items[items_counter].spec = &icmpv6_spec;
342 	items[items_counter].mask = &icmpv6_mask;
343 }
344 
345 void
fill_items(struct rte_flow_item * items,uint64_t * flow_items,uint32_t outer_ip_src)346 fill_items(struct rte_flow_item *items,
347 	uint64_t *flow_items, uint32_t outer_ip_src)
348 {
349 	uint8_t items_counter = 0;
350 	uint8_t i, j;
351 	struct additional_para additional_para_data = {
352 		.src_ip = outer_ip_src,
353 	};
354 
355 	/* Support outer items up to tunnel layer only. */
356 	static const struct items_dict {
357 		uint64_t mask;
358 		void (*funct)(
359 			struct rte_flow_item *items,
360 			uint8_t items_counter,
361 			struct additional_para para
362 			);
363 	} items_list[] = {
364 		{
365 			.mask = RTE_FLOW_ITEM_TYPE_META,
366 			.funct = add_meta_data,
367 		},
368 		{
369 			.mask = RTE_FLOW_ITEM_TYPE_TAG,
370 			.funct = add_meta_tag,
371 		},
372 		{
373 			.mask = RTE_FLOW_ITEM_TYPE_ETH,
374 			.funct = add_ether,
375 		},
376 		{
377 			.mask = RTE_FLOW_ITEM_TYPE_VLAN,
378 			.funct = add_vlan,
379 		},
380 		{
381 			.mask = RTE_FLOW_ITEM_TYPE_IPV4,
382 			.funct = add_ipv4,
383 		},
384 		{
385 			.mask = RTE_FLOW_ITEM_TYPE_IPV6,
386 			.funct = add_ipv6,
387 		},
388 		{
389 			.mask = RTE_FLOW_ITEM_TYPE_TCP,
390 			.funct = add_tcp,
391 		},
392 		{
393 			.mask = RTE_FLOW_ITEM_TYPE_UDP,
394 			.funct = add_udp,
395 		},
396 		{
397 			.mask = RTE_FLOW_ITEM_TYPE_VXLAN,
398 			.funct = add_vxlan,
399 		},
400 		{
401 			.mask = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
402 			.funct = add_vxlan_gpe,
403 		},
404 		{
405 			.mask = RTE_FLOW_ITEM_TYPE_GRE,
406 			.funct = add_gre,
407 		},
408 		{
409 			.mask = RTE_FLOW_ITEM_TYPE_GENEVE,
410 			.funct = add_geneve,
411 		},
412 		{
413 			.mask = RTE_FLOW_ITEM_TYPE_GTP,
414 			.funct = add_gtp,
415 		},
416 		{
417 			.mask = RTE_FLOW_ITEM_TYPE_ICMP,
418 			.funct = add_icmpv4,
419 		},
420 		{
421 			.mask = RTE_FLOW_ITEM_TYPE_ICMP6,
422 			.funct = add_icmpv6,
423 		},
424 	};
425 
426 	for (j = 0; j < MAX_ITEMS_NUM; j++) {
427 		if (flow_items[j] == 0)
428 			break;
429 		for (i = 0; i < RTE_DIM(items_list); i++) {
430 			if ((flow_items[j] &
431 				FLOW_ITEM_MASK(items_list[i].mask)) == 0)
432 				continue;
433 			items_list[i].funct(
434 				items, items_counter++,
435 				additional_para_data
436 			);
437 			break;
438 		}
439 	}
440 
441 	items[items_counter].type = RTE_FLOW_ITEM_TYPE_END;
442 }
443