/* xref: /mOS-networking-stack/core/src/util.c (revision e5df9dc1) */
/*
    Borrowed the XXH32 and XXH64 implementations from the xxHash project.

    The copyright notice is included just above the function definitions.

    TODO 1: We may want to implement our own version of xxHash for copyright reasons.
    TODO 2: We may want to gather all copyright notices into a single file.
*/
9 
10 
11 
12 #include <stdint.h>
13 #include <netinet/in.h>
14 #include <arpa/inet.h>
15 #include <string.h>
16 #include <ctype.h>
17 #include <mtcp_util.h>
18 #include <limits.h>
19 #include <stdio.h>
20 #include <errno.h>
21 #include <stdlib.h>
22 
23 /*-------------------------------------------------------------*/
24 extern int
25 FetchEndianType();
26 /*-------------------------------------------------------------*/
static void
BuildKeyCache(uint32_t *cache, int cache_len)
{
#ifndef NBBY
#define NBBY 8 /* number of bits per byte */
#endif

	/* Both DPDK and Netmap use this key for RSS hash calculation.
	 * Do not change a single bit of this key. */
	static const uint8_t key[] = {
		 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
		 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
		 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
		 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
		 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05
	};

	/* cache[i] is the 32-bit window of the key bit stream starting at
	 * bit offset i.  Seed the window with key bits 0..31, then slide it
	 * one bit per iteration, pulling in bit (32 + i) of the stream. */
	uint32_t window = ((uint32_t)key[0] << 24) | ((uint32_t)key[1] << 16) |
			  ((uint32_t)key[2] << 8)  |  (uint32_t)key[3];
	uint32_t bitpos;
	int i;

	for (i = 0, bitpos = 32; i < cache_len; i++, bitpos++) {
		uint32_t next_bit;

		cache[i] = window;
		/* bit `bitpos` of the key stream, MSB-first within each byte */
		next_bit = (key[bitpos / NBBY] >> ((NBBY - 1) - (bitpos % NBBY))) & 1;
		window = (window << 1) | next_bit;
	}
}
60 /*-------------------------------------------------------------*/
/*
 * Software computation of the NIC's RSS hash over the 4-tuple
 * (sip, dip, sp, dp): for every set bit of the input, XOR in the
 * corresponding 32-bit window of the key stream (see BuildKeyCache).
 * The key cache is built lazily on the first call and reused after
 * that (note: the lazy init is not thread-safe; callers appear to
 * rely on first use happening before concurrency — TODO confirm).
 */
uint32_t
GetRSSHash(in_addr_t sip, in_addr_t dip, in_port_t sp, in_port_t dp)
{
#define MSB32 0x80000000
#define MSB16 0x8000
#define KEY_CACHE_LEN 96

	static int cache_ready = 0;
	static uint32_t key_cache[KEY_CACHE_LEN] = {0};
	uint32_t hash = 0;
	int bit;

	if (!cache_ready) {
		BuildKeyCache(key_cache, KEY_CACHE_LEN);
		cache_ready = 1;
	}

	/* fields are consumed MSB-first: sip bits use windows 0..31,
	 * dip 32..63, sp 64..79, dp 80..95 */
	for (bit = 0; bit < 32; bit++, sip <<= 1)
		if (sip & MSB32)
			hash ^= key_cache[bit];
	for (bit = 0; bit < 32; bit++, dip <<= 1)
		if (dip & MSB32)
			hash ^= key_cache[32 + bit];
	for (bit = 0; bit < 16; bit++, sp <<= 1)
		if (sp & MSB16)
			hash ^= key_cache[64 + bit];
	for (bit = 0; bit < 16; bit++, dp <<= 1)
		if (dp & MSB16)
			hash ^= key_cache[80 + bit];

	return hash;
}
100 /*-------------------------------------------------------------------*/
101 /* RSS redirection table is in the little endian byte order (intel)  */
102 /*                                                                   */
103 /* idx: 0 1 2 3 | 4 5 6 7 | 8 9 10 11 | 12 13 14 15 | 16 17 18 19 ...*/
104 /* val: 3 2 1 0 | 7 6 5 4 | 11 10 9 8 | 15 14 13 12 | 19 18 17 16 ...*/
105 /* qid = val % num_queues */
106 /*-------------------------------------------------------------------*/
/*
 * Map a 4-tuple to a receive queue id: take the low 7 bits of the RSS
 * hash (the redirection-table index) and reduce modulo num_queues.
 * On little-endian tables (see the comment above), each group of four
 * table entries is stored byte-reversed, so the position within the
 * group must be mirrored first.
 */
int
GetRSSCPUCore(in_addr_t sip, in_addr_t dip,
	      in_port_t sp, in_port_t dp, int num_queues/*,
							  int endian_type*/)
{
	#define RSS_BIT_MASK 0x0000007F

	uint32_t masked = GetRSSHash(sip, dip, sp, dp) & RSS_BIT_MASK;

	if (FetchEndianType()) {
		/* Mirror the index inside its 4-entry group
		 * (0<->3, 1<->2).  Adding {+3,+1,-1,-3} selected by
		 * (masked & 3) — as the original table did — is exactly
		 * flipping the two low bits, i.e. XOR with 3. */
		masked ^= 0x3;
	}

	return (masked % num_queues);
}
125 /*-------------------------------------------------------------*/
/*
 * Convert the string nptr to an int in the given base, terminating the
 * process on any conversion error (overflow, no digits, or a value that
 * does not fit in an int).  Returns the parsed value.
 */
int
mystrtol(const char *nptr, int base)
{
	/* BUG FIX: rval must be long — it was int, so the LONG_MAX /
	 * LONG_MIN overflow checks below could never fire on LP64. */
	long rval;
	char *endptr;

	errno = 0;
	/* BUG FIX: honor the base argument; it was previously ignored
	 * and strtol was always called with base 10. */
	rval = strtol(nptr, &endptr, base);
	/* check for strtol errors (overflow/underflow or other failure) */
	if ((errno == ERANGE && (rval == LONG_MAX ||
				 rval == LONG_MIN))
	    || (errno != 0 && rval == 0)) {
		perror("strtol");
		exit(EXIT_FAILURE);
	}
	if (endptr == nptr) {
		/* no digits were consumed at all */
		fprintf(stderr, "Parsing strtol error!\n");
		exit(EXIT_FAILURE);
	}
	/* the caller receives an int; reject values that would be
	 * silently truncated */
	if (rval > INT_MAX || rval < INT_MIN) {
		fprintf(stderr, "mystrtol: value out of int range!\n");
		exit(EXIT_FAILURE);
	}

	return (int)rval;
}
148 /*---------------------------------------------------------------*/
/*
 * Tokenize str in place into argv[]/argc, honoring single and double
 * quotes: a quoted span, minus its quotes, is kept as one token.
 * str is modified — whitespace delimiters and closing quotes are
 * overwritten with '\0'.  At most max_argc tokens are stored and
 * argv[*argc] is always set to NULL, so argv[] must provide room for
 * max_argc + 1 pointers.  Returns 0.
 */
int
StrToArgs(char *str, int *argc, char **argv, int max_argc)
{
	uint8_t single_quotes = 0;
	uint8_t double_quotes = 0;
	uint8_t delim = 1;	/* nonzero while between tokens */

	int i;
	int len = strlen(str);

	*argc = 0;

	for (i = 0; i < len; i++) {

		if (str[i] == '\'') {
			if (single_quotes)
				str[i] = '\0';	/* closing quote terminates the token text */
			else
				i++;		/* token content starts after the opening quote */
			single_quotes = !single_quotes;
			goto __non_space;
		} else if (str[i] == '\"') {
			if (double_quotes)
				str[i] = '\0';
			else
				i++;
			double_quotes = !double_quotes;
			goto __non_space;
		}

		if (single_quotes || double_quotes)
			continue;

		/* BUG FIX: cast to unsigned char — passing a negative char
		 * to isspace() is undefined behavior */
		if (isspace((unsigned char)str[i])) {
			delim = 1;
			str[i] = '\0';
			continue;
		}
__non_space:
		if (delim == 1) {
			/* BUG FIX: bound-check BEFORE storing.  The old code
			 * stored first and checked `*argc > max_argc` after,
			 * which wrote argv[max_argc] and then put the NULL
			 * terminator at argv[max_argc + 1]. */
			if (*argc >= max_argc)
				break;
			delim = 0;
			argv[(*argc)++] = &str[i];
		}
	}

	argv[*argc] = NULL;

	return 0;
}
204 
205 /*
206 xxHash - Fast Hash algorithm
207 Copyright (C) 2012-2015, Yann Collet
208 
209 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
210 
211 Redistribution and use in source and binary forms, with or without
212 modification, are permitted provided that the following conditions are
213 met:
214 
215 * Redistributions of source code must retain the above copyright
216 notice, this list of conditions and the following disclaimer.
217 * Redistributions in binary form must reproduce the above
218 copyright notice, this list of conditions and the following disclaimer
219 in the documentation and/or other materials provided with the
220 distribution.
221 
222 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
223 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
224 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
225 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
226 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
227 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
228 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
229 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
230 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
231 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
232 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
233 
234 You can contact the author at :
235 - xxHash source repository : https://github.com/Cyan4973/xxHash
236 */
237 
238 
239 
240 
241 /**************************************
242 *  Tuning parameters
243 **************************************/
244 /* Unaligned memory access is automatically enabled for "common" CPU, such as x86.
245  * For others CPU, the compiler will be more cautious, and insert extra code to ensure aligned access is respected.
246  * If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance.
247  * You can also enable this parameter if you know your input data will always be aligned (boundaries of 4, for U32).
248  */
249 #if defined(__ARM_FEATURE_UNALIGNED) || defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
250 #  define XXH_USE_UNALIGNED_ACCESS 1
251 #endif
252 
253 /* XXH_ACCEPT_NULL_INPUT_POINTER :
254  * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer.
255  * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input.
256  * By default, this option is disabled. To enable it, uncomment below define :
257  */
258 /* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
259 
/* XXH_FORCE_NATIVE_FORMAT :
 * By default, the xxHash library provides endian-independent hash values, based on the little-endian convention.
 * Results are therefore identical for little-endian and big-endian CPUs.
 * This comes at a performance cost for big-endian CPUs, since some byte swapping is required to emulate the little-endian format.
 * Should endian-independence be of no importance for your application, you may set the #define below to 1.
 * It will improve speed for big-endian CPUs.
 * This option has no impact on little-endian CPUs.
 */
268 #define XXH_FORCE_NATIVE_FORMAT 0
269 
270 
271 /**************************************
272 *  Compiler Specific Options
273 ***************************************/
274 #ifdef _MSC_VER    /* Visual Studio */
275 #  pragma warning(disable : 4127)      /* disable: C4127: conditional expression is constant */
276 #  define FORCE_INLINE static __forceinline
277 #else
278 #  if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
279 #    ifdef __GNUC__
280 #      define FORCE_INLINE static inline __attribute__((always_inline))
281 #    else
282 #      define FORCE_INLINE static inline
283 #    endif
284 #  else
285 #    define FORCE_INLINE static
286 #  endif /* __STDC_VERSION__ */
287 #endif
288 
289 
290 /**************************************
291 *  Basic Types
292 ***************************************/
293 #if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
294 # include <stdint.h>
295   typedef uint8_t  BYTE;
296   typedef uint16_t U16;
297   typedef uint32_t U32;
298   typedef  int32_t S32;
299   typedef uint64_t U64;
300 #else
301   typedef unsigned char      BYTE;
302   typedef unsigned short     U16;
303   typedef unsigned int       U32;
304   typedef   signed int       S32;
305   typedef unsigned long long U64;
306 #endif
307 
308 static U32 XXH_read32(const void* memPtr)
309 {
310     U32 val32;
311     memcpy(&val32, memPtr, 4);
312     return val32;
313 }
314 
315 static U64 XXH_read64(const void* memPtr)
316 {
317     U64 val64;
318     memcpy(&val64, memPtr, 8);
319     return val64;
320 }
321 
322 
323 
324 /******************************************
325 *  Compiler-specific Functions and Macros
326 ******************************************/
327 #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
328 
329 /* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
330 #if defined(_MSC_VER)
331 #  define XXH_rotl32(x,r) _rotl(x,r)
332 #  define XXH_rotl64(x,r) _rotl64(x,r)
333 #else
334 #  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
335 #  define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
336 #endif
337 
338 #if defined(_MSC_VER)     /* Visual Studio */
339 #  define XXH_swap32 _byteswap_ulong
340 #  define XXH_swap64 _byteswap_uint64
341 #elif GCC_VERSION >= 403
342 #  define XXH_swap32 __builtin_bswap32
343 #  define XXH_swap64 __builtin_bswap64
344 #else
345 static U32 XXH_swap32 (U32 x)
346 {
347     return  ((x << 24) & 0xff000000 ) |
348             ((x <<  8) & 0x00ff0000 ) |
349             ((x >>  8) & 0x0000ff00 ) |
350             ((x >> 24) & 0x000000ff );
351 }
352 static U64 XXH_swap64 (U64 x)
353 {
354     return  ((x << 56) & 0xff00000000000000ULL) |
355             ((x << 40) & 0x00ff000000000000ULL) |
356             ((x << 24) & 0x0000ff0000000000ULL) |
357             ((x << 8)  & 0x000000ff00000000ULL) |
358             ((x >> 8)  & 0x00000000ff000000ULL) |
359             ((x >> 24) & 0x0000000000ff0000ULL) |
360             ((x >> 40) & 0x000000000000ff00ULL) |
361             ((x >> 56) & 0x00000000000000ffULL);
362 }
363 #endif
364 
365 
366 /***************************************
367 *  Architecture Macros
368 ***************************************/
369 typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
370 #ifndef XXH_CPU_LITTLE_ENDIAN   /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example using a compiler switch */
371 static const int one = 1;
372 #   define XXH_CPU_LITTLE_ENDIAN   (*(const char*)(&one))
373 #endif
374 
375 
376 /*****************************
377 *  Memory reads
378 *****************************/
379 typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
380 
381 FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
382 {
383     if (align==XXH_unaligned)
384         return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
385     else
386         return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
387 }
388 
389 FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
390 {
391     if (align==XXH_unaligned)
392         return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
393     else
394         return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
395 }
396 
397 /***************************************
398 *  Macros
399 ***************************************/
400 #define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(!!(c)) }; }    /* use only *after* variable declarations */
401 
402 
403 /***************************************
404 *  Constants
405 ***************************************/
406 #define PRIME32_1   2654435761U
407 #define PRIME32_2   2246822519U
408 #define PRIME32_3   3266489917U
409 #define PRIME32_4    668265263U
410 #define PRIME32_5    374761393U
411 
412 #define PRIME64_1 11400714785074694791ULL
413 #define PRIME64_2 14029467366897019727ULL
414 #define PRIME64_3  1609587929392839161ULL
415 #define PRIME64_4  9650029242287828579ULL
416 #define PRIME64_5  2870177450012600261ULL
417 
418 
419 /*****************************
420 *  Simple Hash Functions
421 *****************************/
/* Core XXH32 computation.  endian/align are compile-time constants at every
 * call site, so the compiler specializes this body per (endian, align) pair. */
FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    /* Treat NULL as a zero-length input; the fake non-zero pointer keeps
     * the `p+4<=bEnd` / `bEnd-16` pointer arithmetic below well-behaved. */
    if (p==NULL)
    {
        len=0;
        bEnd=p=(const BYTE*)(size_t)16;
    }
#endif

    if (len>=16)
    {
        /* Main loop: four independent accumulators each consume one
         * 32-bit word per round (16 bytes total per iteration). */
        const BYTE* const limit = bEnd - 16;
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
        U32 v2 = seed + PRIME32_2;
        U32 v3 = seed + 0;
        U32 v4 = seed - PRIME32_1;

        do
        {
            v1 += XXH_get32bits(p) * PRIME32_2;
            v1 = XXH_rotl32(v1, 13);
            v1 *= PRIME32_1;
            p+=4;
            v2 += XXH_get32bits(p) * PRIME32_2;
            v2 = XXH_rotl32(v2, 13);
            v2 *= PRIME32_1;
            p+=4;
            v3 += XXH_get32bits(p) * PRIME32_2;
            v3 = XXH_rotl32(v3, 13);
            v3 *= PRIME32_1;
            p+=4;
            v4 += XXH_get32bits(p) * PRIME32_2;
            v4 = XXH_rotl32(v4, 13);
            v4 *= PRIME32_1;
            p+=4;
        }
        while (p<=limit);

        /* Merge the four lanes into a single 32-bit state. */
        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    }
    else
    {
        /* Inputs shorter than 16 bytes skip the lane phase entirely. */
        h32  = seed + PRIME32_5;
    }

    h32 += (U32) len;

    /* Fold in any remaining full 32-bit words. */
    while (p+4<=bEnd)
    {
        h32 += XXH_get32bits(p) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;
        p+=4;
    }

    /* Fold in the final 0-3 bytes one at a time. */
    while (p<bEnd)
    {
        h32 += (*p) * PRIME32_5;
        h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
        p++;
    }

    /* Final avalanche: spread entropy across all bits of h32. */
    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}
497 
498 
499 unsigned XXH32 (const void* input, size_t len, unsigned seed)
500 {
501 #if 0
502     /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
503     XXH32_state_t state;
504     XXH32_reset(&state, seed);
505     XXH32_update(&state, input, len);
506     return XXH32_digest(&state);
507 #else
508     XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
509 
510 #  if !defined(XXH_USE_UNALIGNED_ACCESS)
511     if ((((size_t)input) & 3) == 0)   /* Input is 4-bytes aligned, leverage the speed benefit */
512     {
513         if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
514             return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
515         else
516             return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
517     }
518 #  endif
519 
520     if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
521         return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
522     else
523         return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
524 #endif
525 }
526 
/* Core XXH64 computation.  endian/align are compile-time constants at every
 * call site, so the compiler specializes this body per (endian, align) pair. */
FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U64 h64;
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    /* Treat NULL as a zero-length input; the fake non-zero pointer keeps
     * the `p+8<=bEnd` / `bEnd-32` pointer arithmetic below well-behaved. */
    if (p==NULL)
    {
        len=0;
        bEnd=p=(const BYTE*)(size_t)32;
    }
#endif

    if (len>=32)
    {
        /* Main loop: four independent accumulators each consume one
         * 64-bit word per round (32 bytes total per iteration). */
        const BYTE* const limit = bEnd - 32;
        U64 v1 = seed + PRIME64_1 + PRIME64_2;
        U64 v2 = seed + PRIME64_2;
        U64 v3 = seed + 0;
        U64 v4 = seed - PRIME64_1;

        do
        {
            v1 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v1 = XXH_rotl64(v1, 31);
            v1 *= PRIME64_1;
            v2 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v2 = XXH_rotl64(v2, 31);
            v2 *= PRIME64_1;
            v3 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v3 = XXH_rotl64(v3, 31);
            v3 *= PRIME64_1;
            v4 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v4 = XXH_rotl64(v4, 31);
            v4 *= PRIME64_1;
        }
        while (p<=limit);

        /* Merge the four lanes, then re-mix each lane into the merged
         * state (unlike XXH32, XXH64 folds every lane in a second time). */
        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);

        v1 *= PRIME64_2;
        v1 = XXH_rotl64(v1, 31);
        v1 *= PRIME64_1;
        h64 ^= v1;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v2 *= PRIME64_2;
        v2 = XXH_rotl64(v2, 31);
        v2 *= PRIME64_1;
        h64 ^= v2;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v3 *= PRIME64_2;
        v3 = XXH_rotl64(v3, 31);
        v3 *= PRIME64_1;
        h64 ^= v3;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v4 *= PRIME64_2;
        v4 = XXH_rotl64(v4, 31);
        v4 *= PRIME64_1;
        h64 ^= v4;
        h64 = h64 * PRIME64_1 + PRIME64_4;
    }
    else
    {
        /* Inputs shorter than 32 bytes skip the lane phase entirely. */
        h64  = seed + PRIME64_5;
    }

    h64 += (U64) len;

    /* Fold in any remaining full 64-bit words. */
    while (p+8<=bEnd)
    {
        U64 k1 = XXH_get64bits(p);
        k1 *= PRIME64_2;
        k1 = XXH_rotl64(k1,31);
        k1 *= PRIME64_1;
        h64 ^= k1;
        h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    /* Fold in one remaining 32-bit word, if present. */
    if (p+4<=bEnd)
    {
        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    /* Fold in the final 0-3 bytes one at a time. */
    while (p<bEnd)
    {
        h64 ^= (*p) * PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    /* Final avalanche: spread entropy across all bits of h64. */
    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}
637 
638 
639 unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
640 {
641 #if 0
642     /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
643     XXH64_state_t state;
644     XXH64_reset(&state, seed);
645     XXH64_update(&state, input, len);
646     return XXH64_digest(&state);
647 #else
648     XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
649 
650 #  if !defined(XXH_USE_UNALIGNED_ACCESS)
651     if ((((size_t)input) & 7)==0)   /* Input is aligned, let's leverage the speed advantage */
652     {
653         if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
654             return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
655         else
656             return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
657     }
658 #  endif
659 
660     if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
661         return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
662     else
663         return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
664 #endif
665 }
666 
667