/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2019 Vladimir Medvedkin <[email protected]>
 */

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_ip.h>
#include <rte_random.h>
#include <rte_malloc.h>

#include "test.h"

#include <rte_thash.h>

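/*
 * HASH_MSK() keeps only the low reta_sz bits of a hash, i.e. the bits
 * used to index a RETA of 2^reta_sz entries. RTE_THASH_V4_L4_LEN is
 * expressed in 32-bit words, so the "* 4" yields a size in bytes.
 */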
#define HASH_MSK(reta_sz)	((1 << reta_sz) - 1)
#define TUPLE_SZ	(RTE_THASH_V4_L4_LEN * 4)

struct test_thash_v4 {
	uint32_t	dst_ip;
	uint32_t	src_ip;
	uint16_t	dst_port;
	uint16_t	src_port;
	uint32_t	hash_l3;
	uint32_t	hash_l3l4;
};

struct test_thash_v6 {
	uint8_t		dst_ip[16];
	uint8_t		src_ip[16];
	uint16_t	dst_port;
	uint16_t	src_port;
	uint32_t	hash_l3;
	uint32_t	hash_l3l4;
};

/* From the 82599 datasheet, section 7.1.2.8.3 "RSS Verification Suite" */
struct test_thash_v4 v4_tbl[] = {
{RTE_IPV4(161, 142, 100, 80), RTE_IPV4(66, 9, 149, 187),
	1766, 2794, 0x323e8fc2, 0x51ccc178},
{RTE_IPV4(65, 69, 140, 83), RTE_IPV4(199, 92, 111, 2),
	4739, 14230, 0xd718262a, 0xc626b0ea},
{RTE_IPV4(12, 22, 207, 184), RTE_IPV4(24, 19, 198, 95),
	38024, 12898, 0xd2d0a5de, 0x5c2b394a},
{RTE_IPV4(209, 142, 163, 6), RTE_IPV4(38, 27, 205, 30),
	2217, 48228, 0x82989176, 0xafc7327f},
{RTE_IPV4(202, 188, 127, 2), RTE_IPV4(153, 39, 163, 191),
	1303, 44251, 0x5d1809c5, 0x10e828a2},
};

struct test_thash_v6 v6_tbl[] = {
/* 3ffe:2501:200:3::1 */
{{0x3f, 0xfe, 0x25, 0x01, 0x02, 0x00, 0x00, 0x03,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,},
/* 3ffe:2501:200:1fff::7 */
{0x3f, 0xfe, 0x25, 0x01, 0x02, 0x00, 0x1f, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,},
1766, 2794, 0x2cc18cd5, 0x40207d3d},
/* ff02::1 */
{{0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,},
/* 3ffe:501:8::260:97ff:fe40:efab */
{0x3f, 0xfe, 0x05, 0x01, 0x00, 0x08, 0x00, 0x00,
0x02, 0x60, 0x97, 0xff, 0xfe, 0x40, 0xef, 0xab,},
4739, 14230, 0x0f0c461c, 0xdde51bbf},
/* fe80::200:f8ff:fe21:67cf */
{{0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x02, 0x00, 0xf8, 0xff, 0xfe, 0x21, 0x67, 0xcf,},
/* 3ffe:1900:4545:3:200:f8ff:fe21:67cf */
{0x3f, 0xfe, 0x19, 0x00, 0x45, 0x45, 0x00, 0x03,
0x02, 0x00, 0xf8, 0xff, 0xfe, 0x21, 0x67, 0xcf,},
38024, 44251, 0x4b61e985, 0x02d1feef},
};

uint8_t default_rss_key[] = {
0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
};

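/*
 * The 40-byte default key repeated five times (200 bytes in total),
 * used by test_big_tuple_gfni() to exercise the GFNI path with a key
 * longer than the classic 40-byte RSS key.
 */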
static const uint8_t big_rss_key[] = {
	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
};

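/*
 * Known-answer test: hash the reference tuples with rte_softrss() and,
 * using the pre-converted key, with rte_softrss_be(); both the L3 and
 * the L3L4 hashes must match the datasheet values.
 */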
static int
test_toeplitz_hash_calc(void)
{
	uint32_t i, j;
	union rte_thash_tuple tuple;
	uint32_t rss_l3, rss_l3l4;
	uint8_t rss_key_be[RTE_DIM(default_rss_key)];
	struct rte_ipv6_hdr ipv6_hdr;

	/* Convert RSS key */
	rte_convert_rss_key((uint32_t *)&default_rss_key,
		(uint32_t *)rss_key_be, RTE_DIM(default_rss_key));

	for (i = 0; i < RTE_DIM(v4_tbl); i++) {
		tuple.v4.src_addr = v4_tbl[i].src_ip;
		tuple.v4.dst_addr = v4_tbl[i].dst_ip;
		tuple.v4.sport = v4_tbl[i].src_port;
		tuple.v4.dport = v4_tbl[i].dst_port;
		/* Calculate hash with original key */
		rss_l3 = rte_softrss((uint32_t *)&tuple,
				RTE_THASH_V4_L3_LEN, default_rss_key);
		rss_l3l4 = rte_softrss((uint32_t *)&tuple,
				RTE_THASH_V4_L4_LEN, default_rss_key);
		if ((rss_l3 != v4_tbl[i].hash_l3) ||
				(rss_l3l4 != v4_tbl[i].hash_l3l4))
			return -TEST_FAILED;
		/* Calculate hash with converted key */
		rss_l3 = rte_softrss_be((uint32_t *)&tuple,
				RTE_THASH_V4_L3_LEN, rss_key_be);
		rss_l3l4 = rte_softrss_be((uint32_t *)&tuple,
				RTE_THASH_V4_L4_LEN, rss_key_be);
		if ((rss_l3 != v4_tbl[i].hash_l3) ||
				(rss_l3l4 != v4_tbl[i].hash_l3l4))
			return -TEST_FAILED;
	}
	for (i = 0; i < RTE_DIM(v6_tbl); i++) {
		/* Fill the IPv6 header */
		for (j = 0; j < RTE_DIM(ipv6_hdr.src_addr); j++)
			ipv6_hdr.src_addr[j] = v6_tbl[i].src_ip[j];
		for (j = 0; j < RTE_DIM(ipv6_hdr.dst_addr); j++)
			ipv6_hdr.dst_addr[j] = v6_tbl[i].dst_ip[j];
		/* Load and convert the IPv6 addresses into the tuple */
		rte_thash_load_v6_addrs(&ipv6_hdr, &tuple);
		tuple.v6.sport = v6_tbl[i].src_port;
		tuple.v6.dport = v6_tbl[i].dst_port;
		/* Calculate hash with original key */
		rss_l3 = rte_softrss((uint32_t *)&tuple,
				RTE_THASH_V6_L3_LEN, default_rss_key);
		rss_l3l4 = rte_softrss((uint32_t *)&tuple,
				RTE_THASH_V6_L4_LEN, default_rss_key);
		if ((rss_l3 != v6_tbl[i].hash_l3) ||
				(rss_l3l4 != v6_tbl[i].hash_l3l4))
			return -TEST_FAILED;
		/* Calculate hash with converted key */
		rss_l3 = rte_softrss_be((uint32_t *)&tuple,
				RTE_THASH_V6_L3_LEN, rss_key_be);
		rss_l3l4 = rte_softrss_be((uint32_t *)&tuple,
				RTE_THASH_V6_L4_LEN, rss_key_be);
		if ((rss_l3 != v6_tbl[i].hash_l3) ||
				(rss_l3l4 != v6_tbl[i].hash_l3l4))
			return -TEST_FAILED;
	}
	return TEST_SUCCESS;
}

static int
test_toeplitz_hash_gfni(void)
{
	uint32_t i, j;
	union rte_thash_tuple tuple;
	uint32_t rss_l3, rss_l3l4;
	uint64_t rss_key_matrixes[RTE_DIM(default_rss_key)];

	if (!rte_thash_gfni_supported())
		return TEST_SKIPPED;

	/* Convert RSS key into matrices */
	rte_thash_complete_matrix(rss_key_matrixes, default_rss_key,
		RTE_DIM(default_rss_key));

	for (i = 0; i < RTE_DIM(v4_tbl); i++) {
		tuple.v4.src_addr = rte_cpu_to_be_32(v4_tbl[i].src_ip);
		tuple.v4.dst_addr = rte_cpu_to_be_32(v4_tbl[i].dst_ip);
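		/*
		 * The seeming src/dst port swap below is deliberate: in
		 * struct rte_ipv4_tuple the dport field precedes sport in
		 * memory, while the Toeplitz byte stream carries the source
		 * port first, so filling sport with dst_port and dport with
		 * src_port lays the bytes out in the order the byte-oriented
		 * GFNI implementation reads them.
		 */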
		tuple.v4.sport = rte_cpu_to_be_16(v4_tbl[i].dst_port);
		tuple.v4.dport = rte_cpu_to_be_16(v4_tbl[i].src_port);

		rss_l3 = rte_thash_gfni(rss_key_matrixes, (uint8_t *)&tuple,
				RTE_THASH_V4_L3_LEN * 4);
		rss_l3l4 = rte_thash_gfni(rss_key_matrixes, (uint8_t *)&tuple,
				RTE_THASH_V4_L4_LEN * 4);
		if ((rss_l3 != v4_tbl[i].hash_l3) ||
				(rss_l3l4 != v4_tbl[i].hash_l3l4))
			return -TEST_FAILED;
	}

	for (i = 0; i < RTE_DIM(v6_tbl); i++) {
		for (j = 0; j < RTE_DIM(tuple.v6.src_addr); j++)
			tuple.v6.src_addr[j] = v6_tbl[i].src_ip[j];
		for (j = 0; j < RTE_DIM(tuple.v6.dst_addr); j++)
			tuple.v6.dst_addr[j] = v6_tbl[i].dst_ip[j];
		tuple.v6.sport = rte_cpu_to_be_16(v6_tbl[i].dst_port);
		tuple.v6.dport = rte_cpu_to_be_16(v6_tbl[i].src_port);
		rss_l3 = rte_thash_gfni(rss_key_matrixes, (uint8_t *)&tuple,
				RTE_THASH_V6_L3_LEN * 4);
		rss_l3l4 = rte_thash_gfni(rss_key_matrixes, (uint8_t *)&tuple,
				RTE_THASH_V6_L4_LEN * 4);
		if ((rss_l3 != v6_tbl[i].hash_l3) ||
				(rss_l3l4 != v6_tbl[i].hash_l3l4))
			return -TEST_FAILED;
	}

	return TEST_SUCCESS;
}

#define DATA_SZ		4
#define ITER		1000

enum {
	SCALAR_DATA_BUF_1_HASH_IDX = 0,
	SCALAR_DATA_BUF_2_HASH_IDX,
	GFNI_DATA_BUF_1_HASH_IDX,
	GFNI_DATA_BUF_2_HASH_IDX,
	GFNI_BULK_DATA_BUF_1_HASH_IDX,
	GFNI_BULK_DATA_BUF_2_HASH_IDX,
	HASH_IDXES
};

static int
test_toeplitz_hash_rand_data(void)
{
	uint32_t data[2][DATA_SZ];
	uint32_t scalar_data[2][DATA_SZ];
	uint32_t hash[HASH_IDXES] = { 0 };
	uint64_t rss_key_matrixes[RTE_DIM(default_rss_key)];
	int i, j;
	uint8_t *bulk_data[2];

	if (!rte_thash_gfni_supported())
		return TEST_SKIPPED;

	rte_thash_complete_matrix(rss_key_matrixes, default_rss_key,
		RTE_DIM(default_rss_key));

	for (i = 0; i < 2; i++)
		bulk_data[i] = (uint8_t *)data[i];

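	/*
	 * rte_softrss() consumes 32-bit words in host byte order, while
	 * the GFNI variants consume a raw byte stream, so each random word
	 * is byte-swapped for the scalar calls to present every
	 * implementation with the same input bytes.
	 */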
	for (i = 0; i < ITER; i++) {
		for (j = 0; j < DATA_SZ; j++) {
			data[0][j] = rte_rand();
			data[1][j] = rte_rand();
			scalar_data[0][j] = rte_cpu_to_be_32(data[0][j]);
			scalar_data[1][j] = rte_cpu_to_be_32(data[1][j]);
		}

		hash[SCALAR_DATA_BUF_1_HASH_IDX] = rte_softrss(scalar_data[0],
			DATA_SZ, default_rss_key);
		hash[SCALAR_DATA_BUF_2_HASH_IDX] = rte_softrss(scalar_data[1],
			DATA_SZ, default_rss_key);
		hash[GFNI_DATA_BUF_1_HASH_IDX] = rte_thash_gfni(
			rss_key_matrixes, (uint8_t *)data[0],
			DATA_SZ * sizeof(uint32_t));
		hash[GFNI_DATA_BUF_2_HASH_IDX] = rte_thash_gfni(
			rss_key_matrixes, (uint8_t *)data[1],
			DATA_SZ * sizeof(uint32_t));
		rte_thash_gfni_bulk(rss_key_matrixes,
			DATA_SZ * sizeof(uint32_t), bulk_data,
			&hash[GFNI_BULK_DATA_BUF_1_HASH_IDX], 2);

		if ((hash[SCALAR_DATA_BUF_1_HASH_IDX] !=
				hash[GFNI_DATA_BUF_1_HASH_IDX]) ||
				(hash[SCALAR_DATA_BUF_1_HASH_IDX] !=
				hash[GFNI_BULK_DATA_BUF_1_HASH_IDX]) ||
				(hash[SCALAR_DATA_BUF_2_HASH_IDX] !=
				hash[GFNI_DATA_BUF_2_HASH_IDX]) ||
				(hash[SCALAR_DATA_BUF_2_HASH_IDX] !=
				hash[GFNI_BULK_DATA_BUF_2_HASH_IDX]))
			return -TEST_FAILED;
	}

	return TEST_SUCCESS;
}

enum {
	RSS_V4_IDX,
	RSS_V6_IDX
};

static int
test_toeplitz_hash_gfni_bulk(void)
{
	uint32_t i, j;
	union rte_thash_tuple tuple[2];
	uint8_t *tuples[2];
	uint32_t rss[2] = { 0 };
	uint64_t rss_key_matrixes[RTE_DIM(default_rss_key)];

	if (!rte_thash_gfni_supported())
		return TEST_SKIPPED;

	/* Convert RSS key into matrices */
	rte_thash_complete_matrix(rss_key_matrixes, default_rss_key,
		RTE_DIM(default_rss_key));

	for (i = 0; i < RTE_DIM(tuples); i++) {
		/* Allocate enough memory for the biggest tuple */
		tuples[i] = rte_zmalloc(NULL, RTE_THASH_V6_L4_LEN * 4, 0);
		if (tuples[i] == NULL)
			return -TEST_FAILED;
	}

	for (i = 0; i < RTE_MIN(RTE_DIM(v4_tbl), RTE_DIM(v6_tbl)); i++) {
		/* Load IPv4 header fields and copy them into the corresponding tuple */
		tuple[0].v4.src_addr = rte_cpu_to_be_32(v4_tbl[i].src_ip);
		tuple[0].v4.dst_addr = rte_cpu_to_be_32(v4_tbl[i].dst_ip);
		tuple[0].v4.sport = rte_cpu_to_be_16(v4_tbl[i].dst_port);
		tuple[0].v4.dport = rte_cpu_to_be_16(v4_tbl[i].src_port);
		rte_memcpy(tuples[0], &tuple[0], RTE_THASH_V4_L4_LEN * 4);

		/* Load IPv6 header fields and copy them into the corresponding tuple */
		for (j = 0; j < RTE_DIM(tuple[1].v6.src_addr); j++)
			tuple[1].v6.src_addr[j] = v6_tbl[i].src_ip[j];
		for (j = 0; j < RTE_DIM(tuple[1].v6.dst_addr); j++)
			tuple[1].v6.dst_addr[j] = v6_tbl[i].dst_ip[j];
		tuple[1].v6.sport = rte_cpu_to_be_16(v6_tbl[i].dst_port);
		tuple[1].v6.dport = rte_cpu_to_be_16(v6_tbl[i].src_port);
		rte_memcpy(tuples[1], &tuple[1], RTE_THASH_V6_L4_LEN * 4);

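		/*
		 * Both buffers are hashed with the longer IPv6 length; the
		 * IPv4 buffer was zero-allocated, and trailing zero bytes do
		 * not change a Toeplitz hash, so the IPv4 reference hash
		 * still applies.
		 */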
		rte_thash_gfni_bulk(rss_key_matrixes, RTE_THASH_V6_L4_LEN * 4,
			tuples, rss, 2);

		if ((rss[RSS_V4_IDX] != v4_tbl[i].hash_l3l4) ||
				(rss[RSS_V6_IDX] != v6_tbl[i].hash_l3l4))
			return -TEST_FAILED;
	}

	return TEST_SUCCESS;
}

static int
test_big_tuple_gfni(void)
{
	uint32_t arr[16];
	uint32_t arr_softrss[16];
	uint32_t hash_1, hash_2;
	uint64_t rss_key_matrixes[RTE_DIM(big_rss_key)];
	unsigned int i, size = RTE_DIM(arr) * sizeof(uint32_t);

	if (!rte_thash_gfni_supported())
		return TEST_SKIPPED;

	/* Convert RSS key into matrices */
	rte_thash_complete_matrix(rss_key_matrixes, big_rss_key,
		RTE_DIM(big_rss_key));

	for (i = 0; i < RTE_DIM(arr); i++) {
		arr[i] = rte_rand();
		arr_softrss[i] = rte_be_to_cpu_32(arr[i]);
	}

	hash_1 = rte_softrss(arr_softrss, RTE_DIM(arr), big_rss_key);
	hash_2 = rte_thash_gfni(rss_key_matrixes, (uint8_t *)arr, size);

	if (hash_1 != hash_2)
		return -TEST_FAILED;

	return TEST_SUCCESS;
}

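/* Negative parameter tests: every rte_thash_init_ctx() call below must fail. */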
static int
test_create_invalid(void)
{
	struct rte_thash_ctx *ctx;
	int key_len = 40;
	int reta_sz = 7;

	ctx = rte_thash_init_ctx(NULL, key_len, reta_sz, NULL, 0);
	RTE_TEST_ASSERT(ctx == NULL,
		"Call succeeded with invalid parameters\n");

	ctx = rte_thash_init_ctx("test", 0, reta_sz, NULL, 0);
	RTE_TEST_ASSERT(ctx == NULL,
		"Call succeeded with invalid parameters\n");

	ctx = rte_thash_init_ctx(NULL, key_len, 1, NULL, 0);
	RTE_TEST_ASSERT(ctx == NULL,
		"Call succeeded with invalid parameters\n");

	ctx = rte_thash_init_ctx(NULL, key_len, 17, NULL, 0);
	RTE_TEST_ASSERT(ctx == NULL,
		"Call succeeded with invalid parameters\n");

	return TEST_SUCCESS;
}

static int
test_multiple_create(void)
{
	struct rte_thash_ctx *ctx;
	int key_len = 40;
	int reta_sz = 7;
	int i;

	for (i = 0; i < 100; i++) {
		ctx = rte_thash_init_ctx("test", key_len, reta_sz, NULL, 0);
		RTE_TEST_ASSERT(ctx != NULL, "Can not create CTX\n");

		rte_thash_free_ctx(ctx);
	}

	return TEST_SUCCESS;
}

static int
test_free_null(void)
{
	struct rte_thash_ctx *ctx;

	ctx = rte_thash_init_ctx("test", 40, 7, NULL, 0);
	RTE_TEST_ASSERT(ctx != NULL, "Can not create CTX\n");

	rte_thash_free_ctx(ctx);
	rte_thash_free_ctx(NULL);

	return TEST_SUCCESS;
}

static int
test_add_invalid_helper(void)
{
	struct rte_thash_ctx *ctx;
	const int key_len = 40;
	int reta_sz = 7;
	int ret;

	ctx = rte_thash_init_ctx("test", key_len, reta_sz, NULL, 0);
	RTE_TEST_ASSERT(ctx != NULL, "can not create thash ctx\n");

	ret = rte_thash_add_helper(NULL, "test", reta_sz, 0);
	RTE_TEST_ASSERT(ret == -EINVAL,
		"Call succeeded with invalid parameters\n");

	ret = rte_thash_add_helper(ctx, NULL, reta_sz, 0);
	RTE_TEST_ASSERT(ret == -EINVAL,
		"Call succeeded with invalid parameters\n");

	ret = rte_thash_add_helper(ctx, "test", reta_sz - 1, 0);
	RTE_TEST_ASSERT(ret == -EINVAL,
		"Call succeeded with invalid parameters\n");

	ret = rte_thash_add_helper(ctx, "test", reta_sz, key_len * 8);
	RTE_TEST_ASSERT(ret == -EINVAL,
		"Call succeeded with invalid parameters\n");

	ret = rte_thash_add_helper(ctx, "first_range", reta_sz, 0);
	RTE_TEST_ASSERT(ret == 0, "Can not create helper\n");

	ret = rte_thash_add_helper(ctx, "first_range", reta_sz, 0);
	RTE_TEST_ASSERT(ret == -EEXIST,
		"Call succeeded with duplicated name\n");

	/*
	 * Create a second helper at offset 32 + 2 * reta_sz.
	 * Note: a helper occupies the key range
	 * [offset, offset + 32 + len - 1), so with reta_sz = 7 the
	 * first_range helper covers [0, 38) and the second [46, 84).
	 */
	ret = rte_thash_add_helper(ctx, "second_range", reta_sz,
		32 + 2 * reta_sz);
	RTE_TEST_ASSERT(ret == 0, "Can not create helper\n");

	/*
	 * Try to create a helper overlapping both the first_ and
	 * second_ ranges, i.e. [7, 52)
	 */
	ret = rte_thash_add_helper(ctx, "third_range", 2 * reta_sz, reta_sz);
	RTE_TEST_ASSERT(ret == -EEXIST,
		"Call succeeded with overlapping ranges\n");

	rte_thash_free_ctx(ctx);

	return TEST_SUCCESS;
}

static int
test_find_existing(void)
{
	struct rte_thash_ctx *ctx, *ret_ctx;

	ctx = rte_thash_init_ctx("test", 40, 7, NULL, 0);
	RTE_TEST_ASSERT(ctx != NULL, "can not create thash ctx\n");

	ret_ctx = rte_thash_find_existing("test");
	RTE_TEST_ASSERT(ret_ctx != NULL, "can not find existing ctx\n");

	rte_thash_free_ctx(ctx);

	return TEST_SUCCESS;
}

static int
test_get_helper(void)
{
	struct rte_thash_ctx *ctx;
	struct rte_thash_subtuple_helper *h;
	int ret;

	ctx = rte_thash_init_ctx("test", 40, 7, NULL, 0);
	RTE_TEST_ASSERT(ctx != NULL, "Can not create thash ctx\n");

	h = rte_thash_get_helper(NULL, "first_range");
	RTE_TEST_ASSERT(h == NULL, "Call succeeded with invalid parameters\n");

	h = rte_thash_get_helper(ctx, NULL);
	RTE_TEST_ASSERT(h == NULL, "Call succeeded with invalid parameters\n");

	ret = rte_thash_add_helper(ctx, "first_range", 8, 0);
	RTE_TEST_ASSERT(ret == 0, "Can not create helper\n");

	h = rte_thash_get_helper(ctx, "first_range");
	RTE_TEST_ASSERT(h != NULL, "Can not find helper\n");

	rte_thash_free_ctx(ctx);

	return TEST_SUCCESS;
}

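/*
 * A thash context fills its key from a bit sequence generated by a
 * polynomial of degree reta_sz (see the comment below), so unique
 * subtuple values can only be guaranteed within the sequence period of
 * 2^reta_sz - 1 bits. Longer helper ranges are rejected with -ENOSPC
 * unless RTE_THASH_IGNORE_PERIOD_OVERFLOW is passed at init time.
 */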
static int
test_period_overflow(void)
{
	struct rte_thash_ctx *ctx;
	int reta_sz = 7; /* reflects polynomial degree */
	int ret;

	/* First create without the RTE_THASH_IGNORE_PERIOD_OVERFLOW flag */
	ctx = rte_thash_init_ctx("test", 40, reta_sz, NULL, 0);
	RTE_TEST_ASSERT(ctx != NULL, "Can not create thash ctx\n");

	/* Requested range > (2^reta_sz) - 1 */
	ret = rte_thash_add_helper(ctx, "test", (1 << reta_sz), 0);
	RTE_TEST_ASSERT(ret == -ENOSPC,
		"Call succeeded with invalid parameters\n");

	/*
	 * Requested range == len + 32 - 1 == (2^reta_sz) - 1,
	 * i.e. it fits the period exactly
	 */
	ret = rte_thash_add_helper(ctx, "test", (1 << reta_sz) - 32, 0);
	RTE_TEST_ASSERT(ret == 0, "Can not create helper\n");

	rte_thash_free_ctx(ctx);

	/* Create with the RTE_THASH_IGNORE_PERIOD_OVERFLOW flag */
	ctx = rte_thash_init_ctx("test", 40, reta_sz, NULL,
		RTE_THASH_IGNORE_PERIOD_OVERFLOW);
	RTE_TEST_ASSERT(ctx != NULL, "Can not create thash ctx\n");

	/* Requested range > (2^reta_sz) - 1 */
	ret = rte_thash_add_helper(ctx, "test", (1 << reta_sz) + 10, 0);
	RTE_TEST_ASSERT(ret == 0, "Can not create helper\n");

	rte_thash_free_ctx(ctx);

	return TEST_SUCCESS;
}

static int
test_predictable_rss_min_seq(void)
{
	struct rte_thash_ctx *ctx;
	struct rte_thash_subtuple_helper *h;
	const int key_len = 40;
	int reta_sz = 6;
	uint8_t initial_key[key_len];
	const uint8_t *new_key;
	int ret;
	union rte_thash_tuple tuple;
	uint32_t orig_hash, adj_hash, adj;
	unsigned int desired_value = 27 & HASH_MSK(reta_sz);
	uint16_t port_value = 22;

	memset(initial_key, 0, key_len);

	ctx = rte_thash_init_ctx("test", key_len, reta_sz, initial_key,
		RTE_THASH_MINIMAL_SEQ);
	RTE_TEST_ASSERT(ctx != NULL, "can not create thash ctx\n");

	ret = rte_thash_add_helper(ctx, "snat", sizeof(uint16_t) * 8,
		offsetof(union rte_thash_tuple, v4.sport) * 8);
	RTE_TEST_ASSERT(ret == 0, "can not add helper, ret %d\n", ret);

	h = rte_thash_get_helper(ctx, "snat");
	RTE_TEST_ASSERT(h != NULL, "can not find helper\n");

	new_key = rte_thash_get_key(ctx);
	tuple.v4.src_addr = RTE_IPV4(0, 0, 0, 0);
	tuple.v4.dst_addr = RTE_IPV4(0, 0, 0, 0);
	tuple.v4.sport = rte_cpu_to_be_16(port_value);
	tuple.v4.dport = 0;
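	/*
	 * rte_softrss() expects 32-bit words in host byte order, but the
	 * port was written in network order; sctp_tag aliases the 32-bit
	 * word holding both ports, so it is swapped to host order before
	 * each hash and back to network order while the complement is
	 * applied to sport below.
	 */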
	tuple.v4.sctp_tag = rte_be_to_cpu_32(tuple.v4.sctp_tag);

	orig_hash = rte_softrss((uint32_t *)&tuple,
		RTE_THASH_V4_L4_LEN, new_key);
	adj = rte_thash_get_complement(h, orig_hash, desired_value);

	tuple.v4.sctp_tag = rte_cpu_to_be_32(tuple.v4.sctp_tag);
	tuple.v4.sport ^= rte_cpu_to_be_16(adj);
	tuple.v4.sctp_tag = rte_be_to_cpu_32(tuple.v4.sctp_tag);

	adj_hash = rte_softrss((uint32_t *)&tuple,
		RTE_THASH_V4_L4_LEN, new_key);
	RTE_TEST_ASSERT((adj_hash & HASH_MSK(reta_sz)) ==
		desired_value, "bad desired value\n");

	rte_thash_free_ctx(ctx);

	return TEST_SUCCESS;
}

/*
 * This test creates 7 subranges in the following order:
 * range_one	= [56, 95),	len = 8, offset = 56
 * range_two	= [64, 103),	len = 8, offset = 64
 * range_three	= [120, 159),	len = 8, offset = 120
 * range_four	= [48, 87),	len = 8, offset = 48
 * range_six	= [40, 111),	len = 40, offset = 40
 * range_five	= [57, 95),	len = 7, offset = 57
 * range_seven	= [0, 39),	len = 8, offset = 0
 */
struct range {
	const char *name;
	int len;
	int offset;
	int byte_idx;
};

struct range rng_arr[] = {
	{"one",   8,  56,  7},
	{"two",   8,  64,  8},
	{"three", 8,  120, 15},
	{"four",  8,  48,  6},
	{"six",   40, 40,  9},
	{"five",  7,  57,  7},
	{"seven", 8,  0,   0}
};

static int
test_predictable_rss_multirange(void)
{
	struct rte_thash_ctx *ctx;
	struct rte_thash_subtuple_helper *h[RTE_DIM(rng_arr)];
	const uint8_t *new_key;
	const int key_len = 40;
	int reta_sz = 7;
	unsigned int i, j, k;
	int ret;
	uint32_t desired_value = rte_rand() & HASH_MSK(reta_sz);
	uint8_t tuples[RTE_DIM(rng_arr)][16] = { {0} };
	uint32_t *ptr;
	uint32_t hashes[RTE_DIM(rng_arr)];
	uint32_t adj_hashes[RTE_DIM(rng_arr)];
	uint32_t adj;

	ctx = rte_thash_init_ctx("test", key_len, reta_sz, NULL, 0);
	RTE_TEST_ASSERT(ctx != NULL, "can not create thash ctx\n");

	for (i = 0; i < RTE_DIM(rng_arr); i++) {
		ret = rte_thash_add_helper(ctx, rng_arr[i].name,
			rng_arr[i].len, rng_arr[i].offset);
		RTE_TEST_ASSERT(ret == 0, "can not add helper\n");

		h[i] = rte_thash_get_helper(ctx, rng_arr[i].name);
		RTE_TEST_ASSERT(h[i] != NULL, "can not find helper\n");
	}
	new_key = rte_thash_get_key(ctx);

	/*
	 * Calculate hashes and complements, then adjust the tuples with
	 * the complements and recalculate the hashes
	 */
	for (i = 0; i < RTE_DIM(rng_arr); i++) {
		for (k = 0; k < 100; k++) {
			/* Fill the tuple with random data */
			ptr = (uint32_t *)&tuples[i][0];
			for (j = 0; j < 4; j++)
				ptr[j] = rte_rand();
			/* Convert the tuple words to CPU byte order */
			for (j = 0; j < 4; j++)
				ptr[j] = rte_be_to_cpu_32(ptr[j]);

			hashes[i] = rte_softrss(ptr, 4, new_key);
			adj = rte_thash_get_complement(h[i], hashes[i],
				desired_value);
			/* Convert back to BE to adjust the value */
			for (j = 0; j < 4; j++)
				ptr[j] = rte_cpu_to_be_32(ptr[j]);

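			/*
			 * XOR the complement (at most reta_sz = 7 bits, so
			 * it fits in a single byte) into the tuple byte that
			 * lies inside this helper's range.
			 */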
			tuples[i][rng_arr[i].byte_idx] ^= adj;

			for (j = 0; j < 4; j++)
				ptr[j] = rte_be_to_cpu_32(ptr[j]);

			adj_hashes[i] = rte_softrss(ptr, 4, new_key);
			RTE_TEST_ASSERT((adj_hashes[i] & HASH_MSK(reta_sz)) ==
				desired_value,
				"bad desired value for tuple %u\n", i);
		}
	}

	rte_thash_free_ctx(ctx);

	return TEST_SUCCESS;
}

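/*
 * Comparison callback for rte_thash_adjust_tuple(): a return value of 0
 * (memcmp equality with the tuple stashed in userdata) tells the library
 * the adjusted tuple collides with an existing one.
 */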
static int
cmp_tuple_eq(void *userdata, uint8_t *tuple)
{
	return memcmp(userdata, tuple, TUPLE_SZ);
}

static int
test_adjust_tuple(void)
{
	struct rte_thash_ctx *ctx;
	struct rte_thash_subtuple_helper *h;
	const int key_len = 40;
	const uint8_t *new_key;
	uint8_t tuple[TUPLE_SZ];
	uint32_t tmp_tuple[TUPLE_SZ / sizeof(uint32_t)];
	uint32_t tuple_copy[TUPLE_SZ / sizeof(uint32_t)];
	uint32_t hash;
	int reta_sz = CHAR_BIT;
	int ret;
	unsigned int i, desired_value = rte_rand() & HASH_MSK(reta_sz);

	memset(tuple, 0xab, TUPLE_SZ);

	ctx = rte_thash_init_ctx("test", key_len, reta_sz, NULL, 0);
	RTE_TEST_ASSERT(ctx != NULL, "can not create thash ctx\n");

	/*
	 * Set the offset to be in the middle of a byte and set the size
	 * of the subtuple to 2 * reta_sz to leave room for random bits
	 */
	ret = rte_thash_add_helper(ctx, "test", reta_sz * 2,
		(5 * CHAR_BIT) + 4);
	RTE_TEST_ASSERT(ret == 0, "can not add helper, ret %d\n", ret);

	new_key = rte_thash_get_key(ctx);

	h = rte_thash_get_helper(ctx, "test");
	RTE_TEST_ASSERT(h != NULL, "can not find helper\n");

	ret = rte_thash_adjust_tuple(ctx, h, tuple, TUPLE_SZ, desired_value,
		1, NULL, NULL);
	RTE_TEST_ASSERT(ret == 0, "can not adjust tuple, ret %d\n", ret);

	for (i = 0; i < (TUPLE_SZ / 4); i++)
		tmp_tuple[i] =
			rte_be_to_cpu_32(*(uint32_t *)&tuple[i * 4]);

	hash = rte_softrss(tmp_tuple, TUPLE_SZ / 4, new_key);
	RTE_TEST_ASSERT((hash & HASH_MSK(reta_sz)) ==
		desired_value, "bad desired value\n");

	/* Pass the previously adjusted tuple to the callback function */
	memcpy(tuple_copy, tuple, TUPLE_SZ);

	memset(tuple, 0xab, TUPLE_SZ);
	ret = rte_thash_adjust_tuple(ctx, h, tuple, TUPLE_SZ, desired_value,
		1, cmp_tuple_eq, tuple_copy);
	RTE_TEST_ASSERT(ret == -EEXIST,
		"adjust tuple didn't indicate collision\n");

	/*
	 * Make the function generate random bits in the subtuple
	 * after the first adjustment attempt.
	 */
	memset(tuple, 0xab, TUPLE_SZ);
	ret = rte_thash_adjust_tuple(ctx, h, tuple, TUPLE_SZ, desired_value,
		2, cmp_tuple_eq, tuple_copy);
	RTE_TEST_ASSERT(ret == 0, "can not adjust tuple, ret %d\n", ret);

	for (i = 0; i < (TUPLE_SZ / 4); i++)
		tmp_tuple[i] =
			rte_be_to_cpu_32(*(uint32_t *)&tuple[i * 4]);

	hash = rte_softrss(tmp_tuple, TUPLE_SZ / 4, new_key);
	RTE_TEST_ASSERT((hash & HASH_MSK(reta_sz)) ==
		desired_value, "bad desired value\n");

	rte_thash_free_ctx(ctx);

	return TEST_SUCCESS;
}

static struct unit_test_suite thash_tests = {
	.suite_name = "thash autotest",
	.setup = NULL,
	.teardown = NULL,
	.unit_test_cases = {
	TEST_CASE(test_toeplitz_hash_calc),
	TEST_CASE(test_toeplitz_hash_gfni),
	TEST_CASE(test_toeplitz_hash_rand_data),
	TEST_CASE(test_toeplitz_hash_gfni_bulk),
	TEST_CASE(test_big_tuple_gfni),
	TEST_CASE(test_create_invalid),
	TEST_CASE(test_multiple_create),
	TEST_CASE(test_free_null),
	TEST_CASE(test_add_invalid_helper),
	TEST_CASE(test_find_existing),
	TEST_CASE(test_get_helper),
	TEST_CASE(test_period_overflow),
	TEST_CASE(test_predictable_rss_min_seq),
	TEST_CASE(test_predictable_rss_multirange),
	TEST_CASE(test_adjust_tuple),
	TEST_CASES_END()
	}
};

static int
test_thash(void)
{
	return unit_test_suite_runner(&thash_tests);
}

REGISTER_TEST_COMMAND(thash_autotest, test_thash);