xref: /mOS-networking-stack/core/src/util.c (revision b88bd7d2)
1 /*
2     Borrowed XXH32 and XXH64 implementation from xxHash project
3 
4     Added Copyright notice just above the function definition
5 
    TODO 1 : We may want to implement our own version of xxHash for licensing reasons.
    TODO 2 : We may want to gather all copyright notices into a single file.
8 */
9 
10 
11 
12 #include <stdint.h>
13 #include <netinet/in.h>
14 #include <arpa/inet.h>
15 #include <string.h>
16 #include <ctype.h>
17 #include <mtcp_util.h>
18 #include <limits.h>
19 #include <stdio.h>
20 #include <errno.h>
21 #include <stdlib.h>
22 
23 /*-------------------------------------------------------------*/
static void
BuildKeyCache(uint32_t *cache, int cache_len)
{
#ifndef NBBY
#define NBBY 8 /* number of bits per byte */
#endif

	/* Both DPDK and netmap use this RSS key for hash calculation.
	 * Do not change any single bit of this key. */
	static const uint8_t key[] = {
		 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
		 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
		 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
		 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
		 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05
	};

	/* Seed a 32-bit window with the first four key bytes (big-endian). */
	uint32_t window = ((uint32_t)key[0] << 24) |
			  ((uint32_t)key[1] << 16) |
			  ((uint32_t)key[2] << 8)  |
			   (uint32_t)key[3];
	uint32_t pos;
	int slot;

	/* Slide the window over the key one bit at a time; each cache entry
	 * is the 32-bit key window that XORs into the Toeplitz hash when the
	 * corresponding input bit is set. */
	for (slot = 0, pos = 32; slot < cache_len; slot++, pos++) {
		uint32_t next_bit;

		cache[slot] = window;
		/* Bit 'pos' of the key, counting from the MSB of key[0]. */
		next_bit = (key[pos / NBBY] >> (NBBY - 1 - (pos % NBBY))) & 0x1;
		window = (window << 1) | next_bit;
	}

}
57 /*-------------------------------------------------------------*/
#define MSB32 0x80000000
#define MSB16 0x8000
#define KEY_CACHE_LEN 96

/* Fold one header field into the running Toeplitz hash.
 * 'field' is left-aligned in the 32-bit word (16-bit ports are shifted up
 * by the caller); 'nbits' bits are consumed MSB-first, XORing the matching
 * key-cache entry for every set bit. */
static uint32_t
MixFieldBits(uint32_t hash, uint32_t field, int nbits, const uint32_t *keys)
{
	int b;

	for (b = 0; b < nbits; b++) {
		if (field & MSB32)
			hash ^= keys[b];
		field <<= 1;
	}
	return hash;
}

/* Compute the Toeplitz RSS hash of a 4-tuple, matching the hash the NIC
 * computes with the all-0x05 key (see BuildKeyCache). */
uint32_t
GetRSSHash(in_addr_t sip, in_addr_t dip, in_port_t sp, in_port_t dp)
{
	static int cache_ready = 0;
	static uint32_t key_cache[KEY_CACHE_LEN] = {0};
	uint32_t hash = 0;

	/* Lazily build the key cache on first use.
	 * NOTE(review): not thread-safe on the very first call — confirm all
	 * callers serialize initialization. */
	if (!cache_ready) {
		BuildKeyCache(key_cache, KEY_CACHE_LEN);
		cache_ready = 1;
	}

	hash = MixFieldBits(hash, (uint32_t)sip, 32, &key_cache[0]);
	hash = MixFieldBits(hash, (uint32_t)dip, 32, &key_cache[32]);
	hash = MixFieldBits(hash, (uint32_t)sp << 16, 16, &key_cache[64]);
	hash = MixFieldBits(hash, (uint32_t)dp << 16, 16, &key_cache[80]);

	return hash;
}
97 /*-------------------------------------------------------------------*/
98 /* RSS redirection table is in the little endian byte order (intel)  */
99 /*                                                                   */
100 /* idx: 0 1 2 3 | 4 5 6 7 | 8 9 10 11 | 12 13 14 15 | 16 17 18 19 ...*/
101 /* val: 3 2 1 0 | 7 6 5 4 | 11 10 9 8 | 15 14 13 12 | 19 18 17 16 ...*/
102 /* qid = val % num_queues */
103 /*-------------------------------------------------------------------*/
/* Map a 4-tuple to the RSS queue/core index the NIC would choose.
 * 'endian_type' selects the Intel little-endian redirection-table layout,
 * where each 4-entry group is stored reversed (see table in the comment
 * above). Returns hash-index % num_queues. */
int
GetRSSCPUCore(in_addr_t sip, in_addr_t dip,
	      in_port_t sp, in_port_t dp, int num_queues,
	      int endian_type)
{
	#define RSS_BIT_MASK 0x0000007F

	/* Keep the low 7 bits of the Toeplitz hash: the redirection table
	 * has 128 entries. */
	uint32_t idx = GetRSSHash(sip, dip, sp, dp) & RSS_BIT_MASK;

	if (endian_type) {
		/* Reverse the index within its 4-entry group (0123 -> 3210).
		 * Equivalent to the original {+3,+1,-1,-3} offset table. */
		idx ^= 0x3;
	}

	return (idx % num_queues);

}
121 /*-------------------------------------------------------------*/
/*
 * Parse 'nptr' as an integer in the given 'base' (0, or 2..36, passed
 * straight to strtol). On any conversion error (overflow, underflow, or
 * no digits consumed) this prints a diagnostic and exits the process.
 *
 * Returns the parsed value as an int.
 */
int
mystrtol(const char *nptr, int base)
{
	long rval;
	char *endptr;

	errno = 0;
	/* BUGFIX: 'base' was previously ignored and the conversion was
	 * hard-coded to base 10. */
	rval = strtol(nptr, &endptr, base);
	/* check for strtol errors
	 * BUGFIX: rval must be 'long' — it was 'int', so the comparison
	 * against LONG_MAX/LONG_MIN could never detect ERANGE on LP64. */
	if ((errno == ERANGE && (rval == LONG_MAX ||
				 rval == LONG_MIN))
	    || (errno != 0 && rval == 0)) {
		perror("strtol");
		exit(EXIT_FAILURE);
	}
	/* no digits were consumed at all */
	if (endptr == nptr) {
		fprintf(stderr, "Parsing strtol error!\n");
		exit(EXIT_FAILURE);
	}

	return (int)rval;
}
144 /*---------------------------------------------------------------*/
/*
 * Tokenize 'str' in place into an argv-style vector, honoring single and
 * double quotes ('...' and "..." group words into one argument; the quote
 * characters themselves are stripped).
 *
 * str      : writable, NUL-terminated string; delimiters/quotes are
 *            overwritten with '\0'
 * argc     : out — number of arguments stored
 * argv     : out — must have room for at least max_argc + 1 entries
 *            (the last slot holds the terminating NULL)
 * max_argc : maximum number of arguments to store
 *
 * Returns 0.
 */
int
StrToArgs(char *str, int *argc, char **argv, int max_argc)
{

	uint8_t single_quotes;	/* currently inside '...' */
	uint8_t double_quotes;	/* currently inside "..." */
	uint8_t delim;		/* previous char ended a token */

	single_quotes = 0;
	double_quotes = 0;
	delim = 1;

	*argc = 0;

	int i;
	int len = strlen(str);
	for (i = 0; i < len; i++) {

		if (str[i] == '\'') {
			if (single_quotes)
				str[i] = '\0';	/* closing quote ends the token */
			else
				i++;		/* token starts after the opening quote */
			single_quotes = !single_quotes;
			goto __non_space;
		} else if (str[i] == '\"') {
			if (double_quotes)
				str[i] = '\0';
			else
				i++;
			double_quotes = !double_quotes;
			goto __non_space;
		}

		/* Inside quotes, everything (including spaces) is literal. */
		if (single_quotes || double_quotes)
			continue;

		/* BUGFIX: cast to unsigned char — isspace() on a negative
		 * char value is undefined behavior. */
		if (isspace((unsigned char)str[i])) {
			delim = 1;
			str[i] = '\0';
			continue;
		}
__non_space:
		if (delim == 1) {
			/* BUGFIX: bound-check BEFORE storing; the old code
			 * stored first and could write argv[max_argc] and
			 * then the NULL terminator at argv[max_argc + 1]. */
			if (*argc >= max_argc)
				break;
			delim = 0;
			argv[(*argc)++] = &str[i];
		}
	}

	argv[*argc] = NULL;

	return 0;
}
200 
201 /*
202 xxHash - Fast Hash algorithm
203 Copyright (C) 2012-2015, Yann Collet
204 
205 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
206 
207 Redistribution and use in source and binary forms, with or without
208 modification, are permitted provided that the following conditions are
209 met:
210 
211 * Redistributions of source code must retain the above copyright
212 notice, this list of conditions and the following disclaimer.
213 * Redistributions in binary form must reproduce the above
214 copyright notice, this list of conditions and the following disclaimer
215 in the documentation and/or other materials provided with the
216 distribution.
217 
218 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
219 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
220 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
221 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
222 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
223 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
224 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
225 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
226 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
227 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
228 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
229 
230 You can contact the author at :
231 - xxHash source repository : https://github.com/Cyan4973/xxHash
232 */
233 
234 
235 
236 
237 /**************************************
238 *  Tuning parameters
239 **************************************/
240 /* Unaligned memory access is automatically enabled for "common" CPU, such as x86.
 * For other CPUs, the compiler will be more cautious and insert extra code to ensure that aligned accesses are respected.
242  * If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance.
243  * You can also enable this parameter if you know your input data will always be aligned (boundaries of 4, for U32).
244  */
245 #if defined(__ARM_FEATURE_UNALIGNED) || defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
246 #  define XXH_USE_UNALIGNED_ACCESS 1
247 #endif
248 
249 /* XXH_ACCEPT_NULL_INPUT_POINTER :
250  * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer.
251  * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input.
252  * By default, this option is disabled. To enable it, uncomment below define :
253  */
254 /* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
255 
/* XXH_FORCE_NATIVE_FORMAT :
 * By default, the xxHash library provides endian-independent hash values, based on the little-endian convention.
 * Results are therefore identical for little-endian and big-endian CPUs.
 * This comes at a performance cost for big-endian CPUs, since some byte swapping is required to emulate the little-endian format.
 * Should endian-independence be of no importance to your application, you may set the #define below to 1.
 * It will improve speed for big-endian CPUs.
 * This option has no impact on little-endian CPUs.
 */
264 #define XXH_FORCE_NATIVE_FORMAT 0
265 
266 
267 /**************************************
268 *  Compiler Specific Options
269 ***************************************/
270 #ifdef _MSC_VER    /* Visual Studio */
271 #  pragma warning(disable : 4127)      /* disable: C4127: conditional expression is constant */
272 #  define FORCE_INLINE static __forceinline
273 #else
274 #  if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
275 #    ifdef __GNUC__
276 #      define FORCE_INLINE static inline __attribute__((always_inline))
277 #    else
278 #      define FORCE_INLINE static inline
279 #    endif
280 #  else
281 #    define FORCE_INLINE static
282 #  endif /* __STDC_VERSION__ */
283 #endif
284 
285 
286 /**************************************
287 *  Basic Types
288 ***************************************/
289 #if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
290 # include <stdint.h>
291   typedef uint8_t  BYTE;
292   typedef uint16_t U16;
293   typedef uint32_t U32;
294   typedef  int32_t S32;
295   typedef uint64_t U64;
296 #else
297   typedef unsigned char      BYTE;
298   typedef unsigned short     U16;
299   typedef unsigned int       U32;
300   typedef   signed int       S32;
301   typedef unsigned long long U64;
302 #endif
303 
304 static U32 XXH_read32(const void* memPtr)
305 {
306     U32 val32;
307     memcpy(&val32, memPtr, 4);
308     return val32;
309 }
310 
311 static U64 XXH_read64(const void* memPtr)
312 {
313     U64 val64;
314     memcpy(&val64, memPtr, 8);
315     return val64;
316 }
317 
318 
319 
320 /******************************************
321 *  Compiler-specific Functions and Macros
322 ******************************************/
323 #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
324 
325 /* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
326 #if defined(_MSC_VER)
327 #  define XXH_rotl32(x,r) _rotl(x,r)
328 #  define XXH_rotl64(x,r) _rotl64(x,r)
329 #else
330 #  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
331 #  define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
332 #endif
333 
334 #if defined(_MSC_VER)     /* Visual Studio */
335 #  define XXH_swap32 _byteswap_ulong
336 #  define XXH_swap64 _byteswap_uint64
337 #elif GCC_VERSION >= 403
338 #  define XXH_swap32 __builtin_bswap32
339 #  define XXH_swap64 __builtin_bswap64
340 #else
341 static U32 XXH_swap32 (U32 x)
342 {
343     return  ((x << 24) & 0xff000000 ) |
344             ((x <<  8) & 0x00ff0000 ) |
345             ((x >>  8) & 0x0000ff00 ) |
346             ((x >> 24) & 0x000000ff );
347 }
348 static U64 XXH_swap64 (U64 x)
349 {
350     return  ((x << 56) & 0xff00000000000000ULL) |
351             ((x << 40) & 0x00ff000000000000ULL) |
352             ((x << 24) & 0x0000ff0000000000ULL) |
353             ((x << 8)  & 0x000000ff00000000ULL) |
354             ((x >> 8)  & 0x00000000ff000000ULL) |
355             ((x >> 24) & 0x0000000000ff0000ULL) |
356             ((x >> 40) & 0x000000000000ff00ULL) |
357             ((x >> 56) & 0x00000000000000ffULL);
358 }
359 #endif
360 
361 
362 /***************************************
363 *  Architecture Macros
364 ***************************************/
365 typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
366 #ifndef XXH_CPU_LITTLE_ENDIAN   /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example using a compiler switch */
367 static const int one = 1;
368 #   define XXH_CPU_LITTLE_ENDIAN   (*(const char*)(&one))
369 #endif
370 
371 
372 /*****************************
373 *  Memory reads
374 *****************************/
375 typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
376 
377 FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
378 {
379     if (align==XXH_unaligned)
380         return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
381     else
382         return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
383 }
384 
385 FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
386 {
387     if (align==XXH_unaligned)
388         return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
389     else
390         return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
391 }
392 
393 /***************************************
394 *  Macros
395 ***************************************/
396 #define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(!!(c)) }; }    /* use only *after* variable declarations */
397 
398 
399 /***************************************
400 *  Constants
401 ***************************************/
402 #define PRIME32_1   2654435761U
403 #define PRIME32_2   2246822519U
404 #define PRIME32_3   3266489917U
405 #define PRIME32_4    668265263U
406 #define PRIME32_5    374761393U
407 
408 #define PRIME64_1 11400714785074694791ULL
409 #define PRIME64_2 14029467366897019727ULL
410 #define PRIME64_3  1609587929392839161ULL
411 #define PRIME64_4  9650029242287828579ULL
412 #define PRIME64_5  2870177450012600261ULL
413 
414 
415 /*****************************
416 *  Simple Hash Functions
417 *****************************/
/*
 * Core XXH32 implementation: hash 'len' bytes of 'input' with 'seed'.
 * 'endian' and 'align' are constant at each call site; because this
 * function is force-inlined, the optimizer removes the branches on them,
 * yielding one specialized code path per caller.
 */
FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p==NULL)
    {
        /* Treat NULL as a zero-length input; use a non-NULL sentinel so
         * the pointer arithmetic below stays well-defined. */
        len=0;
        bEnd=p=(const BYTE*)(size_t)16;
    }
#endif

    if (len>=16)
    {
        const BYTE* const limit = bEnd - 16;
        /* Four independent accumulator lanes allow instruction-level
         * parallelism in the main loop. */
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
        U32 v2 = seed + PRIME32_2;
        U32 v3 = seed + 0;
        U32 v4 = seed - PRIME32_1;

        /* Main loop: consume 16 bytes per iteration, 4 bytes per lane. */
        do
        {
            v1 += XXH_get32bits(p) * PRIME32_2;
            v1 = XXH_rotl32(v1, 13);
            v1 *= PRIME32_1;
            p+=4;
            v2 += XXH_get32bits(p) * PRIME32_2;
            v2 = XXH_rotl32(v2, 13);
            v2 *= PRIME32_1;
            p+=4;
            v3 += XXH_get32bits(p) * PRIME32_2;
            v3 = XXH_rotl32(v3, 13);
            v3 *= PRIME32_1;
            p+=4;
            v4 += XXH_get32bits(p) * PRIME32_2;
            v4 = XXH_rotl32(v4, 13);
            v4 *= PRIME32_1;
            p+=4;
        }
        while (p<=limit);

        /* Merge the four lanes into a single 32-bit value. */
        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    }
    else
    {
        /* Inputs shorter than 16 bytes skip the lane machinery. */
        h32  = seed + PRIME32_5;
    }

    h32 += (U32) len;

    /* Consume remaining whole 4-byte words. */
    while (p+4<=bEnd)
    {
        h32 += XXH_get32bits(p) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;
        p+=4;
    }

    /* Consume the trailing 0-3 bytes one at a time. */
    while (p<bEnd)
    {
        h32 += (*p) * PRIME32_5;
        h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
        p++;
    }

    /* Final avalanche: shift-xor/multiply mixing so every input bit can
     * affect every output bit. */
    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}
493 
494 
/*
 * Public XXH32 entry point: hash 'len' bytes of 'input' with 'seed'.
 * Dispatches to a specialized XXH32_endian_align() instantiation chosen
 * by host endianness and the input pointer's 4-byte alignment.
 */
unsigned XXH32 (const void* input, size_t len, unsigned seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH32_state_t state;
    XXH32_reset(&state, seed);
    XXH32_update(&state, input, len);
    return XXH32_digest(&state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

#  if !defined(XXH_USE_UNALIGNED_ACCESS)
    if ((((size_t)input) & 3) == 0)   /* Input is 4-bytes aligned, leverage the speed benefit */
    {
        if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
            return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
        else
            return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }
#  endif

    /* Unaligned (or unaligned-access-capable CPU) path. */
    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
522 
/*
 * Core XXH64 implementation: hash 'len' bytes of 'input' with 'seed'.
 * Same specialization scheme as XXH32_endian_align: 'endian' and 'align'
 * are compile-time constants at each inlined call site.
 */
FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U64 h64;
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    if (p==NULL)
    {
        /* Treat NULL as a zero-length input; non-NULL sentinel keeps the
         * pointer arithmetic below well-defined. */
        len=0;
        bEnd=p=(const BYTE*)(size_t)32;
    }
#endif

    if (len>=32)
    {
        const BYTE* const limit = bEnd - 32;
        /* Four independent 64-bit accumulator lanes. */
        U64 v1 = seed + PRIME64_1 + PRIME64_2;
        U64 v2 = seed + PRIME64_2;
        U64 v3 = seed + 0;
        U64 v4 = seed - PRIME64_1;

        /* Main loop: consume 32 bytes per iteration, 8 bytes per lane. */
        do
        {
            v1 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v1 = XXH_rotl64(v1, 31);
            v1 *= PRIME64_1;
            v2 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v2 = XXH_rotl64(v2, 31);
            v2 *= PRIME64_1;
            v3 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v3 = XXH_rotl64(v3, 31);
            v3 *= PRIME64_1;
            v4 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v4 = XXH_rotl64(v4, 31);
            v4 *= PRIME64_1;
        }
        while (p<=limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);

        /* Unlike XXH32, each lane is re-scrambled and folded into h64. */
        v1 *= PRIME64_2;
        v1 = XXH_rotl64(v1, 31);
        v1 *= PRIME64_1;
        h64 ^= v1;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v2 *= PRIME64_2;
        v2 = XXH_rotl64(v2, 31);
        v2 *= PRIME64_1;
        h64 ^= v2;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v3 *= PRIME64_2;
        v3 = XXH_rotl64(v3, 31);
        v3 *= PRIME64_1;
        h64 ^= v3;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v4 *= PRIME64_2;
        v4 = XXH_rotl64(v4, 31);
        v4 *= PRIME64_1;
        h64 ^= v4;
        h64 = h64 * PRIME64_1 + PRIME64_4;
    }
    else
    {
        /* Inputs shorter than 32 bytes skip the lane machinery. */
        h64  = seed + PRIME64_5;
    }

    h64 += (U64) len;

    /* Consume remaining whole 8-byte words. */
    while (p+8<=bEnd)
    {
        U64 k1 = XXH_get64bits(p);
        k1 *= PRIME64_2;
        k1 = XXH_rotl64(k1,31);
        k1 *= PRIME64_1;
        h64 ^= k1;
        h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    /* Consume one remaining 4-byte word, if any. */
    if (p+4<=bEnd)
    {
        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    /* Consume the trailing 0-3 bytes one at a time. */
    while (p<bEnd)
    {
        h64 ^= (*p) * PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    /* Final avalanche. */
    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}
633 
634 
/*
 * Public XXH64 entry point: hash 'len' bytes of 'input' with 'seed'.
 * Dispatches to a specialized XXH64_endian_align() instantiation chosen
 * by host endianness and the input pointer's 8-byte alignment.
 */
unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH64_state_t state;
    XXH64_reset(&state, seed);
    XXH64_update(&state, input, len);
    return XXH64_digest(&state);
#else
    XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;

#  if !defined(XXH_USE_UNALIGNED_ACCESS)
    if ((((size_t)input) & 7)==0)   /* Input is aligned, let's leverage the speed advantage */
    {
        if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
            return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
        else
            return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
    }
#  endif

    /* Unaligned (or unaligned-access-capable CPU) path. */
    if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
        return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
    else
        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
662 
663