xref: /mOS-networking-stack/core/src/util.c (revision 76404edc)
/*
    Borrowed the XXH32 and XXH64 implementations from the xxHash project.

    The corresponding copyright notice is included just above the borrowed
    function definitions.

    TODO 1: Consider implementing our own xxh variant for copyright reasons.
    TODO 2: Consider gathering all copyright notices into a single file.
*/
9 
10 
11 
12 #include <stdint.h>
13 #include <netinet/in.h>
14 #include <arpa/inet.h>
15 #include <string.h>
16 #include <ctype.h>
17 #include <mtcp_util.h>
18 
19 /*-------------------------------------------------------------*/
/* Precompute the per-bit RSS key cache used by GetRSSHash().
 * cache[i] holds the 32-bit window of the key bit-string that starts at
 * bit offset i; GetRSSHash() XORs these windows for every set input bit.
 * NOTE(review): reads key bits up to index (32 + cache_len - 1); with the
 * 40-byte key below, cache_len must stay <= 288 — callers pass 96. */
static void
BuildKeyCache(uint32_t *cache, int cache_len)
{
#ifndef NBBY
#define NBBY 8 /* number of bits per byte */
#endif

	/* DPDK and PSIO both use this key for hash calculation.
	 * Do not change any single bit of this key. */
	static const uint8_t key[] = {
		 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
		 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
		 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
		 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
		 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05
	};

	uint32_t window;  /* sliding 32-bit window over the key bit-string */
	uint32_t bitpos;  /* absolute index of the next key bit to shift in */
	int entry;

	/* Seed the window with key bytes 0..3, big-endian. */
	window  = ((uint32_t)key[0] << 24);
	window |= ((uint32_t)key[1] << 16);
	window |= ((uint32_t)key[2] << 8);
	window |= ((uint32_t)key[3]);

	for (entry = 0, bitpos = 32; entry < cache_len; entry++, bitpos++) {
		uint32_t next_bit;

		cache[entry] = window;
		/* Key bits are numbered MSB-first within each byte. */
		next_bit = (key[bitpos / NBBY] >> (NBBY - 1 - (bitpos % NBBY))) & 1u;
		window = (window << 1) | next_bit;
	}
}
53 /*-------------------------------------------------------------*/
/* Compute the RSS (Toeplitz-style) hash of a 4-tuple: for every set bit of
 * each field, scanned MSB-first, XOR in the matching precomputed key window.
 * Field -> key_cache slice: sip [0..31], dip [32..63], sp [64..79], dp [80..95].
 * NOTE(review): the lazy one-time cache init is not thread-safe; assumes the
 * first call completes before any concurrent callers — confirm with callers. */
uint32_t
GetRSSHash(in_addr_t sip, in_addr_t dip, in_port_t sp, in_port_t dp)
{
#define MSB32 0x80000000
#define MSB16 0x8000
#define KEY_CACHE_LEN 96

	static int first = 1;
	static uint32_t key_cache[KEY_CACHE_LEN] = {0};
	uint32_t hash = 0;
	int bit;

	if (first) {
		BuildKeyCache(key_cache, KEY_CACHE_LEN);
		first = 0;
	}

	for (bit = 0; bit < 32; bit++) {
		if (sip & (MSB32 >> bit))
			hash ^= key_cache[bit];
	}
	for (bit = 0; bit < 32; bit++) {
		if (dip & (MSB32 >> bit))
			hash ^= key_cache[32 + bit];
	}
	for (bit = 0; bit < 16; bit++) {
		if (sp & (MSB16 >> bit))
			hash ^= key_cache[64 + bit];
	}
	for (bit = 0; bit < 16; bit++) {
		if (dp & (MSB16 >> bit))
			hash ^= key_cache[80 + bit];
	}
	return hash;
}
93 /*-------------------------------------------------------------------*/
94 /* RSS redirection table is in the little endian byte order (intel)  */
95 /*                                                                   */
96 /* idx: 0 1 2 3 | 4 5 6 7 | 8 9 10 11 | 12 13 14 15 | 16 17 18 19 ...*/
97 /* val: 3 2 1 0 | 7 6 5 4 | 11 10 9 8 | 15 14 13 12 | 19 18 17 16 ...*/
98 /* qid = val % num_queues */
99 /*-------------------------------------------------------------------*/
/* Map a 4-tuple to a receive-queue id: hash it, mask to the 7-bit
 * redirection-table index, and take it modulo num_queues.
 * Returns -1 if num_queues is not positive (previously undefined behavior:
 * division by zero). */
int
GetRSSCPUCore(in_addr_t sip, in_addr_t dip,
			  in_port_t sp, in_port_t dp, int num_queues)
{
	#define RSS_BIT_MASK 0x0000007F

	uint32_t masked;

	/* BUGFIX: guard against num_queues <= 0; `% 0` below is UB. */
	if (num_queues <= 0)
		return -1;

	masked = GetRSSHash(sip, dip, sp, dp) & RSS_BIT_MASK;

#ifdef ENABLE_PSIO
	/* PSIO's redirection table is byte-swapped (see comment above): swap the
	 * position within each 4-entry group, i.e. 0<->3 and 1<->2.
	 * BUGFIX: the table is signed now; the original uint32_t array held
	 * -1/-3 and silently relied on unsigned wraparound. */
	static const int32_t off[4] = {3, 1, -1, -3};
	masked = (uint32_t)((int32_t)masked + off[masked & 0x3]);
#endif
	return (int)(masked % (uint32_t)num_queues);

}
115 /*-------------------------------------------------------------*/
/* Split str in place into whitespace-separated tokens, honoring single- and
 * double-quoted regions (quotes are stripped; whitespace inside them is kept).
 * Token pointers are stored in argv and *argc is set to the token count;
 * argv is always NULL-terminated. str is modified (separators and closing
 * quotes are overwritten with '\0').
 * At most max_argc tokens are stored, so argv must have room for
 * max_argc + 1 entries (including the NULL terminator).
 * Returns 0. */
int
StrToArgs(char *str, int *argc, char **argv, int max_argc)
{

	uint8_t single_quotes;	/* currently inside a '...' region? */
	uint8_t double_quotes;	/* currently inside a "..." region? */
	uint8_t delim;		/* last character seen was a separator */

	single_quotes = 0;
	double_quotes = 0;
	delim = 1;

	*argc = 0;

	int i;
	int len = strlen(str);
	for (i = 0; i < len; i++) {

		if (str[i] == '\'') {
			if (single_quotes)
				str[i] = '\0';	/* closing quote ends the token */
			else
				i++;		/* opening quote: token starts after it */
			single_quotes = !single_quotes;
			goto non_space;
		} else if (str[i] == '\"') {
			if (double_quotes)
				str[i] = '\0';
			else
				i++;
			double_quotes = !double_quotes;
			goto non_space;
		}

		if (single_quotes || double_quotes)
			continue;

		/* BUGFIX: cast to unsigned char — passing a negative char
		 * to isspace() is undefined behavior. */
		if (isspace((unsigned char)str[i])) {
			delim = 1;
			str[i] = '\0';
			continue;
		}
non_space:	/* renamed from __non_space: double-underscore names are reserved */
		if (delim == 1) {
			/* BUGFIX: bound-check BEFORE storing. The original
			 * stored first and checked `> max_argc` afterwards,
			 * writing up to argv[max_argc + 1] — out of bounds. */
			if (*argc >= max_argc)
				break;
			delim = 0;
			argv[(*argc)++] = &str[i];
		}
	}

	argv[*argc] = NULL;

	return 0;
}
171 
172 /*
173 xxHash - Fast Hash algorithm
174 Copyright (C) 2012-2015, Yann Collet
175 
176 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
177 
178 Redistribution and use in source and binary forms, with or without
179 modification, are permitted provided that the following conditions are
180 met:
181 
182 * Redistributions of source code must retain the above copyright
183 notice, this list of conditions and the following disclaimer.
184 * Redistributions in binary form must reproduce the above
185 copyright notice, this list of conditions and the following disclaimer
186 in the documentation and/or other materials provided with the
187 distribution.
188 
189 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
190 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
191 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
192 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
193 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
194 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
195 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
196 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
197 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
198 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
199 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
200 
201 You can contact the author at :
202 - xxHash source repository : https://github.com/Cyan4973/xxHash
203 */
204 
205 
206 
207 
208 /**************************************
209 *  Tuning parameters
210 **************************************/
/* Unaligned memory access is automatically enabled for "common" CPUs, such as x86.
 * For other CPUs, the compiler will be more cautious and insert extra code to ensure that aligned access is respected.
 * If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance.
 * You can also enable this parameter if you know your input data will always be aligned (on boundaries of 4, for U32).
 */
216 #if defined(__ARM_FEATURE_UNALIGNED) || defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
217 #  define XXH_USE_UNALIGNED_ACCESS 1
218 #endif
219 
220 /* XXH_ACCEPT_NULL_INPUT_POINTER :
221  * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer.
222  * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input.
223  * By default, this option is disabled. To enable it, uncomment below define :
224  */
225 /* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
226 
/* XXH_FORCE_NATIVE_FORMAT :
 * By default, the xxHash library provides endian-independent hash values, based on the little-endian convention.
 * Results are therefore identical for little-endian and big-endian CPUs.
 * This comes at a performance cost for big-endian CPUs, since some swapping is required to emulate the little-endian format.
 * Should endian-independence be of no importance for your application, you may set the #define below to 1.
 * It will improve speed for big-endian CPUs.
 * This option has no impact on little-endian CPUs.
 */
235 #define XXH_FORCE_NATIVE_FORMAT 0
236 
237 
/**************************************
*  Compiler Specific Options
***************************************/
/* FORCE_INLINE: strongest inlining request each compiler offers; falls back
 * to plain `static` for pre-C99 compilers (no `inline` keyword). */
#ifdef _MSC_VER    /* Visual Studio */
#  pragma warning(disable : 4127)      /* disable: C4127: conditional expression is constant */
#  define FORCE_INLINE static __forceinline
#else
#  if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
#    ifdef __GNUC__
#      define FORCE_INLINE static inline __attribute__((always_inline))
#    else
#      define FORCE_INLINE static inline
#    endif
#  else
#    define FORCE_INLINE static
#  endif /* __STDC_VERSION__ */
#endif
255 
256 
257 /**************************************
258 *  Basic Types
259 ***************************************/
#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
# include <stdint.h>
  typedef uint8_t  BYTE;
  typedef uint16_t U16;
  typedef uint32_t U32;
  typedef  int32_t S32;
  typedef uint64_t U64;
#else
  typedef unsigned char      BYTE;
  typedef unsigned short     U16;
  typedef unsigned int       U32;
  typedef   signed int       S32;
  typedef unsigned long long U64;
#endif

/* Portable unaligned 32-bit load: memcpy sidesteps both strict-aliasing and
 * alignment undefined behavior; compilers lower it to a single load. */
static U32 XXH_read32(const void* memPtr)
{
    U32 result;
    memcpy(&result, memPtr, sizeof(result));
    return result;
}

/* Portable unaligned 64-bit load (see XXH_read32). */
static U64 XXH_read64(const void* memPtr)
{
    U64 result;
    memcpy(&result, memPtr, sizeof(result));
    return result;
}
288 
289 
290 
/******************************************
*  Compiler-specific Functions and Macros
******************************************/
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
#if defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#  define XXH_rotl64(x,r) _rotl64(x,r)
#else
/* NOTE(review): the shift form is UB for r == 0 or r >= type width; the hash
 * code below only uses constant rotations in 1..31 (1..63), so that case
 * never occurs here. */
#  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
#  define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
#endif

/* Byte-swap: prefer compiler intrinsics, fall back to portable shift/mask. */
#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap32 _byteswap_ulong
#  define XXH_swap64 _byteswap_uint64
#elif GCC_VERSION >= 403
#  define XXH_swap32 __builtin_bswap32
#  define XXH_swap64 __builtin_bswap64
#else
static U32 XXH_swap32 (U32 x)
{
    return  ((x << 24) & 0xff000000 ) |
            ((x <<  8) & 0x00ff0000 ) |
            ((x >>  8) & 0x0000ff00 ) |
            ((x >> 24) & 0x000000ff );
}
static U64 XXH_swap64 (U64 x)
{
    return  ((x << 56) & 0xff00000000000000ULL) |
            ((x << 40) & 0x00ff000000000000ULL) |
            ((x << 24) & 0x0000ff0000000000ULL) |
            ((x << 8)  & 0x000000ff00000000ULL) |
            ((x >> 8)  & 0x00000000ff000000ULL) |
            ((x >> 24) & 0x0000000000ff0000ULL) |
            ((x >> 40) & 0x000000000000ff00ULL) |
            ((x >> 56) & 0x00000000000000ffULL);
}
#endif
331 
332 
/***************************************
*  Architecture Macros
***************************************/
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
#ifndef XXH_CPU_LITTLE_ENDIAN   /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example using a compiler switch */
/* Runtime endianness probe: the first byte of the int 1 is 1 on
 * little-endian machines and 0 on big-endian ones. */
static const int one = 1;
#   define XXH_CPU_LITTLE_ENDIAN   (*(const char*)(&one))
#endif
341 
342 
343 /*****************************
344 *  Memory reads
345 *****************************/
346 typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
347 
348 FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
349 {
350     if (align==XXH_unaligned)
351         return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
352     else
353         return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
354 }
355 
356 FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
357 {
358     if (align==XXH_unaligned)
359         return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
360     else
361         return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
362 }
363 
/***************************************
*  Macros
***************************************/
/* Compile-time assertion: forces a constant division by zero when c is false. */
#define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(!!(c)) }; }    /* use only *after* variable declarations */


/***************************************
*  Constants
***************************************/
/* Multiplicative mixing constants from the reference xxHash implementation;
 * the hash result depends on these exact values — do not change them. */
#define PRIME32_1   2654435761U
#define PRIME32_2   2246822519U
#define PRIME32_3   3266489917U
#define PRIME32_4    668265263U
#define PRIME32_5    374761393U

#define PRIME64_1 11400714785074694791ULL
#define PRIME64_2 14029467366897019727ULL
#define PRIME64_3  1609587929392839161ULL
#define PRIME64_4  9650029242287828579ULL
#define PRIME64_5  2870177450012600261ULL
384 
385 
386 /*****************************
387 *  Simple Hash Functions
388 *****************************/
/* Core XXH32 implementation (reference xxHash algorithm, kept verbatim).
 * Hashes `len` bytes at `input` with `seed`; `endian`/`align` are compile-time
 * constants at each call site, so the branches below fold away when inlined. */
FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    /* Treat NULL as a zero-length input (p is set to a dummy non-NULL value
     * so the pointer arithmetic below stays well-defined). */
    if (p==NULL)
    {
        len=0;
        bEnd=p=(const BYTE*)(size_t)16;
    }
#endif

    /* Bulk phase: four independent 32-bit accumulator lanes, each consuming
     * one stripe of every 16-byte block. */
    if (len>=16)
    {
        const BYTE* const limit = bEnd - 16;
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
        U32 v2 = seed + PRIME32_2;
        U32 v3 = seed + 0;
        U32 v4 = seed - PRIME32_1;

        do
        {
            v1 += XXH_get32bits(p) * PRIME32_2;
            v1 = XXH_rotl32(v1, 13);
            v1 *= PRIME32_1;
            p+=4;
            v2 += XXH_get32bits(p) * PRIME32_2;
            v2 = XXH_rotl32(v2, 13);
            v2 *= PRIME32_1;
            p+=4;
            v3 += XXH_get32bits(p) * PRIME32_2;
            v3 = XXH_rotl32(v3, 13);
            v3 *= PRIME32_1;
            p+=4;
            v4 += XXH_get32bits(p) * PRIME32_2;
            v4 = XXH_rotl32(v4, 13);
            v4 *= PRIME32_1;
            p+=4;
        }
        while (p<=limit);

        /* Converge the four lanes into a single 32-bit state. */
        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    }
    else
    {
        /* Short input (< 16 bytes): skip the lane phase entirely. */
        h32  = seed + PRIME32_5;
    }

    h32 += (U32) len;

    /* Tail: fold in any remaining 4-byte words... */
    while (p+4<=bEnd)
    {
        h32 += XXH_get32bits(p) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;
        p+=4;
    }

    /* ...then the remaining 0-3 individual bytes. */
    while (p<bEnd)
    {
        h32 += (*p) * PRIME32_5;
        h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
        p++;
    }

    /* Final avalanche: mix every input bit into every output bit. */
    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}
464 
465 
466 unsigned XXH32 (const void* input, size_t len, unsigned seed)
467 {
468 #if 0
469     /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
470     XXH32_state_t state;
471     XXH32_reset(&state, seed);
472     XXH32_update(&state, input, len);
473     return XXH32_digest(&state);
474 #else
475     XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
476 
477 #  if !defined(XXH_USE_UNALIGNED_ACCESS)
478     if ((((size_t)input) & 3) == 0)   /* Input is 4-bytes aligned, leverage the speed benefit */
479     {
480         if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
481             return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
482         else
483             return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
484     }
485 #  endif
486 
487     if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
488         return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
489     else
490         return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
491 #endif
492 }
493 
/* Core XXH64 implementation (reference xxHash algorithm, kept verbatim).
 * Same structure as XXH32_endian_align, but with 64-bit lanes over 32-byte
 * blocks and an extra merge round per lane after the bulk phase. */
FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U64 h64;
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    /* Treat NULL as a zero-length input (dummy non-NULL pointer keeps the
     * arithmetic below well-defined). */
    if (p==NULL)
    {
        len=0;
        bEnd=p=(const BYTE*)(size_t)32;
    }
#endif

    /* Bulk phase: four 64-bit accumulator lanes over 32-byte blocks. */
    if (len>=32)
    {
        const BYTE* const limit = bEnd - 32;
        U64 v1 = seed + PRIME64_1 + PRIME64_2;
        U64 v2 = seed + PRIME64_2;
        U64 v3 = seed + 0;
        U64 v4 = seed - PRIME64_1;

        do
        {
            v1 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v1 = XXH_rotl64(v1, 31);
            v1 *= PRIME64_1;
            v2 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v2 = XXH_rotl64(v2, 31);
            v2 *= PRIME64_1;
            v3 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v3 = XXH_rotl64(v3, 31);
            v3 *= PRIME64_1;
            v4 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v4 = XXH_rotl64(v4, 31);
            v4 *= PRIME64_1;
        }
        while (p<=limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);

        /* Merge round for each lane: re-scramble the lane value, then fold
         * it into h64 (this is where XXH64 differs from XXH32's plain sum). */
        v1 *= PRIME64_2;
        v1 = XXH_rotl64(v1, 31);
        v1 *= PRIME64_1;
        h64 ^= v1;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v2 *= PRIME64_2;
        v2 = XXH_rotl64(v2, 31);
        v2 *= PRIME64_1;
        h64 ^= v2;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v3 *= PRIME64_2;
        v3 = XXH_rotl64(v3, 31);
        v3 *= PRIME64_1;
        h64 ^= v3;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v4 *= PRIME64_2;
        v4 = XXH_rotl64(v4, 31);
        v4 *= PRIME64_1;
        h64 ^= v4;
        h64 = h64 * PRIME64_1 + PRIME64_4;
    }
    else
    {
        /* Short input (< 32 bytes): skip the lane phase entirely. */
        h64  = seed + PRIME64_5;
    }

    h64 += (U64) len;

    /* Tail: fold in remaining 8-byte words... */
    while (p+8<=bEnd)
    {
        U64 k1 = XXH_get64bits(p);
        k1 *= PRIME64_2;
        k1 = XXH_rotl64(k1,31);
        k1 *= PRIME64_1;
        h64 ^= k1;
        h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    /* ...then at most one 4-byte word... */
    if (p+4<=bEnd)
    {
        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    /* ...then the remaining 0-3 individual bytes. */
    while (p<bEnd)
    {
        h64 ^= (*p) * PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    /* Final avalanche. */
    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}
604 
605 
606 unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
607 {
608 #if 0
609     /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
610     XXH64_state_t state;
611     XXH64_reset(&state, seed);
612     XXH64_update(&state, input, len);
613     return XXH64_digest(&state);
614 #else
615     XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
616 
617 #  if !defined(XXH_USE_UNALIGNED_ACCESS)
618     if ((((size_t)input) & 7)==0)   /* Input is aligned, let's leverage the speed advantage */
619     {
620         if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
621             return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
622         else
623             return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
624     }
625 #  endif
626 
627     if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
628         return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
629     else
630         return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
631 #endif
632 }
633 
634