xref: /mOS-networking-stack/core/src/util.c (revision dcdbbb98)
/*
    Borrowed the XXH32 and XXH64 implementations from the xxHash project.

    A copyright notice has been added just above the function definitions.

    TODO 1: We may want to implement our own version of xxHash for copyright reasons.
    TODO 2: We may want to gather all copyright notices into a single file.
*/
9 
10 
11 
12 #include <stdint.h>
13 #include <netinet/in.h>
14 #include <arpa/inet.h>
15 #include <string.h>
16 #include <ctype.h>
17 #include <mtcp_util.h>
18 #include <limits.h>
19 #include <stdio.h>
20 #include <errno.h>
21 #include <stdlib.h>
22 
23 /*-------------------------------------------------------------*/
static void
BuildKeyCache(uint32_t *cache, int cache_len)
{
#ifndef NBBY
#define NBBY 8 /* number of bits per byte */
#endif

	/* Both DPDK and Netmap use this key for hash calculation.
	 * Do not change any single bit of this key. */
	static const uint8_t key[] = {
		 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
		 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
		 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
		 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05,
		 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05
	};

	/* Seed a sliding 32-bit window with the first four key bytes. */
	uint32_t window = ((uint32_t)key[0] << 24) | ((uint32_t)key[1] << 16) |
			  ((uint32_t)key[2] << 8)  |  (uint32_t)key[3];
	uint32_t bitpos = 32;	/* index of the next key bit (MSB-first) */
	int i;

	/* cache[i] holds the 32 key bits starting at bit offset i.
	 * NOTE(review): key is 40 bytes (320 bits), so cache_len must be
	 * <= 288 to avoid reading past the key -- callers pass 96. */
	for (i = 0; i < cache_len; i++, bitpos++) {
		cache[i] = window;
		uint32_t next = (key[bitpos / NBBY] >>
				 (NBBY - 1 - (bitpos % NBBY))) & 1;
		window = (window << 1) | next;
	}
}
57 /*-------------------------------------------------------------*/
/*
 * GetRSSHash - compute the RSS hash of the flow 4-tuple (sip, dip, sp, dp)
 * by XOR-ing, for every set input bit, the 32 key bits that start at that
 * bit position (as pre-expanded by BuildKeyCache).
 *
 * NOTE(review): the lazy one-time cache initialization is not thread-safe;
 * confirm the first call happens before worker threads start.
 */
uint32_t
GetRSSHash(in_addr_t sip, in_addr_t dip, in_port_t sp, in_port_t dp)
{
#define MSB32 0x80000000
#define MSB16 0x8000
#define KEY_CACHE_LEN 96

	static uint32_t key_cache[KEY_CACHE_LEN] = {0};
	static int cache_built = 0;
	uint32_t hash = 0;
	int b;

	if (!cache_built) {
		BuildKeyCache(key_cache, KEY_CACHE_LEN);
		cache_built = 1;
	}

	/* Input bits are consumed MSB-first; cache slots are laid out as
	 * [0,32) sip, [32,64) dip, [64,80) sp, [80,96) dp. */
	for (b = 0; b < 32; b++, sip <<= 1)
		if (sip & MSB32)
			hash ^= key_cache[b];
	for (b = 0; b < 32; b++, dip <<= 1)
		if (dip & MSB32)
			hash ^= key_cache[32 + b];
	for (b = 0; b < 16; b++, sp <<= 1)
		if (sp & MSB16)
			hash ^= key_cache[64 + b];
	for (b = 0; b < 16; b++, dp <<= 1)
		if (dp & MSB16)
			hash ^= key_cache[80 + b];

	return hash;
}
97 /*-------------------------------------------------------------------*/
98 /* RSS redirection table is in the little endian byte order (intel)  */
99 /*                                                                   */
100 /* idx: 0 1 2 3 | 4 5 6 7 | 8 9 10 11 | 12 13 14 15 | 16 17 18 19 ...*/
101 /* val: 3 2 1 0 | 7 6 5 4 | 11 10 9 8 | 15 14 13 12 | 19 18 17 16 ...*/
102 /* qid = val % num_queues */
103 /*-------------------------------------------------------------------*/
/*
 * GetRSSCPUCore - map a flow 4-tuple to a receive queue / CPU core id.
 *
 * Returns GetRSSHash(...) masked to the 7-bit redirection-table index,
 * then reduced modulo num_queues. Returns 0 if num_queues is not positive
 * (avoids division-by-zero undefined behavior).
 */
int
GetRSSCPUCore(in_addr_t sip, in_addr_t dip,
			  in_port_t sp, in_port_t dp, int num_queues)
{
	#define RSS_BIT_MASK 0x0000007F

	uint32_t masked;

	if (num_queues <= 0)
		return 0;	/* guard: modulo by zero below would be UB */

	masked = GetRSSHash(sip, dip, sp, dp) & RSS_BIT_MASK;

#ifdef ENABLE_NETMAP
	/* Netmap's redirection table is little-endian per 4-entry group
	 * (see table comment above): swap idx within each group of 4.
	 * Signed offsets make the intent explicit; masked >= 2 whenever a
	 * negative offset is selected, so the result never underflows. */
	static const int32_t off[4] = {3, 1, -1, -3};
	masked += off[masked & 0x3];
#endif
	return (int)(masked % (uint32_t)num_queues);
}
119 /*-------------------------------------------------------------*/
/*
 * mystrtol - convert the string `nptr` to an int in the given `base`.
 *
 * Exits the process with an error message on conversion failure
 * (out-of-range value or no digits consumed), so callers may use the
 * return value unconditionally.
 */
int
mystrtol(const char *nptr, int base)
{
	long rval;	/* long, so the ERANGE sentinels below are meaningful */
	char *endptr;

	errno = 0;	/* reset so we only observe errors from this call */
	rval = strtol(nptr, &endptr, base);	/* honor the caller's base */
	/* check for strtol errors */
	if ((errno == ERANGE && (rval == LONG_MAX ||
				 rval == LONG_MIN))
	    || (errno != 0 && rval == 0)) {
		perror("strtol");
		exit(EXIT_FAILURE);
	}
	if (endptr == nptr) {
		fprintf(stderr, "Parsing strtol error!\n");
		exit(EXIT_FAILURE);
	}

	return (int)rval;
}
141 /*---------------------------------------------------------------*/
/*
 * StrToArgs - tokenize `str` in place into an argv-style vector.
 *
 * Whitespace separates tokens; single- or double-quoted spans form one
 * token with the quotes stripped. Delimiters and closing quotes are
 * overwritten with '\0', and argv[] entries point into `str`.
 *
 * argv must have room for max_argc pointers plus the NULL terminator
 * (max_argc + 1 slots total). On return *argc is the token count and
 * argv[*argc] is NULL. Always returns 0.
 */
int
StrToArgs(char *str, int *argc, char **argv, int max_argc)
{

	uint8_t single_quotes = 0;
	uint8_t double_quotes = 0;
	uint8_t delim = 1;	/* true while scanning between tokens */

	*argc = 0;

	int i;
	int len = (int)strlen(str);
	for (i = 0; i < len; i++) {

		if (str[i] == '\'') {
			if (single_quotes)
				str[i] = '\0';	/* closing quote ends the token text */
			else
				i++;		/* opening quote: token starts after it */
			single_quotes = !single_quotes;
			goto non_space;
		} else if (str[i] == '\"') {
			if (double_quotes)
				str[i] = '\0';
			else
				i++;
			double_quotes = !double_quotes;
			goto non_space;
		}

		/* Inside quotes, everything (including spaces) is literal. */
		if (single_quotes || double_quotes)
			continue;

		/* cast: isspace() requires unsigned char / EOF argument */
		if (isspace((unsigned char)str[i])) {
			delim = 1;
			str[i] = '\0';
			continue;
		}
non_space:
		if (delim == 1) {
			delim = 0;
			/* stop BEFORE overflowing argv; slot max_argc is
			 * reserved for the NULL terminator below */
			if (*argc >= max_argc)
				break;
			argv[(*argc)++] = &str[i];
		}
	}

	argv[*argc] = NULL;

	return 0;
}
197 
198 /*
199 xxHash - Fast Hash algorithm
200 Copyright (C) 2012-2015, Yann Collet
201 
202 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
203 
204 Redistribution and use in source and binary forms, with or without
205 modification, are permitted provided that the following conditions are
206 met:
207 
208 * Redistributions of source code must retain the above copyright
209 notice, this list of conditions and the following disclaimer.
210 * Redistributions in binary form must reproduce the above
211 copyright notice, this list of conditions and the following disclaimer
212 in the documentation and/or other materials provided with the
213 distribution.
214 
215 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
216 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
217 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
218 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
219 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
220 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
221 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
222 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
223 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
224 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
225 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
226 
227 You can contact the author at :
228 - xxHash source repository : https://github.com/Cyan4973/xxHash
229 */
230 
231 
232 
233 
234 /**************************************
235 *  Tuning parameters
236 **************************************/
/* Unaligned memory access is automatically enabled for "common" CPUs, such as x86.
 * For other CPUs, the compiler will be more cautious and insert extra code to ensure aligned access is respected.
 * If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance.
 * You can also enable this parameter if you know your input data will always be aligned (on boundaries of 4, for U32).
 */
242 #if defined(__ARM_FEATURE_UNALIGNED) || defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
243 #  define XXH_USE_UNALIGNED_ACCESS 1
244 #endif
245 
246 /* XXH_ACCEPT_NULL_INPUT_POINTER :
247  * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer.
248  * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input.
249  * By default, this option is disabled. To enable it, uncomment below define :
250  */
251 /* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
252 
/* XXH_FORCE_NATIVE_FORMAT :
 * By default, the xxHash library provides endian-independent hash values, based on the little-endian convention.
 * Results are therefore identical for little-endian and big-endian CPUs.
 * This comes at a performance cost for big-endian CPUs, since some byte swapping is required to emulate the little-endian format.
 * Should endian independence be of no importance to your application, you may set the #define below to 1.
 * It will improve speed for big-endian CPUs.
 * This option has no impact on little-endian CPUs.
 */
261 #define XXH_FORCE_NATIVE_FORMAT 0
262 
263 
264 /**************************************
265 *  Compiler Specific Options
266 ***************************************/
267 #ifdef _MSC_VER    /* Visual Studio */
268 #  pragma warning(disable : 4127)      /* disable: C4127: conditional expression is constant */
269 #  define FORCE_INLINE static __forceinline
270 #else
271 #  if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
272 #    ifdef __GNUC__
273 #      define FORCE_INLINE static inline __attribute__((always_inline))
274 #    else
275 #      define FORCE_INLINE static inline
276 #    endif
277 #  else
278 #    define FORCE_INLINE static
279 #  endif /* __STDC_VERSION__ */
280 #endif
281 
282 
283 /**************************************
284 *  Basic Types
285 ***************************************/
286 #if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
287 # include <stdint.h>
288   typedef uint8_t  BYTE;
289   typedef uint16_t U16;
290   typedef uint32_t U32;
291   typedef  int32_t S32;
292   typedef uint64_t U64;
293 #else
294   typedef unsigned char      BYTE;
295   typedef unsigned short     U16;
296   typedef unsigned int       U32;
297   typedef   signed int       S32;
298   typedef unsigned long long U64;
299 #endif
300 
301 static U32 XXH_read32(const void* memPtr)
302 {
303     U32 val32;
304     memcpy(&val32, memPtr, 4);
305     return val32;
306 }
307 
308 static U64 XXH_read64(const void* memPtr)
309 {
310     U64 val64;
311     memcpy(&val64, memPtr, 8);
312     return val64;
313 }
314 
315 
316 
317 /******************************************
318 *  Compiler-specific Functions and Macros
319 ******************************************/
320 #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
321 
322 /* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
323 #if defined(_MSC_VER)
324 #  define XXH_rotl32(x,r) _rotl(x,r)
325 #  define XXH_rotl64(x,r) _rotl64(x,r)
326 #else
327 #  define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
328 #  define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
329 #endif
330 
331 #if defined(_MSC_VER)     /* Visual Studio */
332 #  define XXH_swap32 _byteswap_ulong
333 #  define XXH_swap64 _byteswap_uint64
334 #elif GCC_VERSION >= 403
335 #  define XXH_swap32 __builtin_bswap32
336 #  define XXH_swap64 __builtin_bswap64
337 #else
338 static U32 XXH_swap32 (U32 x)
339 {
340     return  ((x << 24) & 0xff000000 ) |
341             ((x <<  8) & 0x00ff0000 ) |
342             ((x >>  8) & 0x0000ff00 ) |
343             ((x >> 24) & 0x000000ff );
344 }
345 static U64 XXH_swap64 (U64 x)
346 {
347     return  ((x << 56) & 0xff00000000000000ULL) |
348             ((x << 40) & 0x00ff000000000000ULL) |
349             ((x << 24) & 0x0000ff0000000000ULL) |
350             ((x << 8)  & 0x000000ff00000000ULL) |
351             ((x >> 8)  & 0x00000000ff000000ULL) |
352             ((x >> 24) & 0x0000000000ff0000ULL) |
353             ((x >> 40) & 0x000000000000ff00ULL) |
354             ((x >> 56) & 0x00000000000000ffULL);
355 }
356 #endif
357 
358 
359 /***************************************
360 *  Architecture Macros
361 ***************************************/
362 typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
363 #ifndef XXH_CPU_LITTLE_ENDIAN   /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example using a compiler switch */
364 static const int one = 1;
365 #   define XXH_CPU_LITTLE_ENDIAN   (*(const char*)(&one))
366 #endif
367 
368 
369 /*****************************
370 *  Memory reads
371 *****************************/
372 typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
373 
374 FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
375 {
376     if (align==XXH_unaligned)
377         return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
378     else
379         return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr);
380 }
381 
382 FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
383 {
384     if (align==XXH_unaligned)
385         return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
386     else
387         return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr);
388 }
389 
390 /***************************************
391 *  Macros
392 ***************************************/
393 #define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(!!(c)) }; }    /* use only *after* variable declarations */
394 
395 
396 /***************************************
397 *  Constants
398 ***************************************/
399 #define PRIME32_1   2654435761U
400 #define PRIME32_2   2246822519U
401 #define PRIME32_3   3266489917U
402 #define PRIME32_4    668265263U
403 #define PRIME32_5    374761393U
404 
405 #define PRIME64_1 11400714785074694791ULL
406 #define PRIME64_2 14029467366897019727ULL
407 #define PRIME64_3  1609587929392839161ULL
408 #define PRIME64_4  9650029242287828579ULL
409 #define PRIME64_5  2870177450012600261ULL
410 
411 
412 /*****************************
413 *  Simple Hash Functions
414 *****************************/
/*
 * XXH32_endian_align - core XXH32 computation over `len` bytes of `input`.
 *
 * endian : detected CPU endianness; big-endian hosts byte-swap each word
 *          so the result matches the canonical little-endian format.
 * align  : whether 32-bit loads may dereference the pointer directly.
 *
 * Both `endian` and `align` are compile-time constants at every call site,
 * so after inlining the branches in XXH_readLE32_align fold away.
 */
FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    /* Treat NULL input as a zero-length buffer (p==bEnd skips all loops). */
    if (p==NULL)
    {
        len=0;
        bEnd=p=(const BYTE*)(size_t)16;
    }
#endif

    if (len>=16)
    {
        /* Bulk phase: four independent accumulator lanes each consume one
         * 32-bit word per iteration (16 bytes per pass), then the lanes
         * are merged with fixed rotations. */
        const BYTE* const limit = bEnd - 16;
        U32 v1 = seed + PRIME32_1 + PRIME32_2;
        U32 v2 = seed + PRIME32_2;
        U32 v3 = seed + 0;
        U32 v4 = seed - PRIME32_1;

        do
        {
            v1 += XXH_get32bits(p) * PRIME32_2;
            v1 = XXH_rotl32(v1, 13);
            v1 *= PRIME32_1;
            p+=4;
            v2 += XXH_get32bits(p) * PRIME32_2;
            v2 = XXH_rotl32(v2, 13);
            v2 *= PRIME32_1;
            p+=4;
            v3 += XXH_get32bits(p) * PRIME32_2;
            v3 = XXH_rotl32(v3, 13);
            v3 *= PRIME32_1;
            p+=4;
            v4 += XXH_get32bits(p) * PRIME32_2;
            v4 = XXH_rotl32(v4, 13);
            v4 *= PRIME32_1;
            p+=4;
        }
        while (p<=limit);

        h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    }
    else
    {
        /* Short input (< 16 bytes): skip the lane phase entirely. */
        h32  = seed + PRIME32_5;
    }

    h32 += (U32) len;

    /* Consume the remaining whole 32-bit words... */
    while (p+4<=bEnd)
    {
        h32 += XXH_get32bits(p) * PRIME32_3;
        h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;
        p+=4;
    }

    /* ...then the trailing 0-3 bytes, one at a time. */
    while (p<bEnd)
    {
        h32 += (*p) * PRIME32_5;
        h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
        p++;
    }

    /* Final avalanche: mix the high bits down so every input bit affects
     * every output bit. */
    h32 ^= h32 >> 15;
    h32 *= PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= PRIME32_3;
    h32 ^= h32 >> 16;

    return h32;
}
490 
491 
492 unsigned XXH32 (const void* input, size_t len, unsigned seed)
493 {
494 #if 0
495     /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
496     XXH32_state_t state;
497     XXH32_reset(&state, seed);
498     XXH32_update(&state, input, len);
499     return XXH32_digest(&state);
500 #else
501     XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
502 
503 #  if !defined(XXH_USE_UNALIGNED_ACCESS)
504     if ((((size_t)input) & 3) == 0)   /* Input is 4-bytes aligned, leverage the speed benefit */
505     {
506         if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
507             return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
508         else
509             return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
510     }
511 #  endif
512 
513     if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
514         return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
515     else
516         return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
517 #endif
518 }
519 
/*
 * XXH64_endian_align - core XXH64 computation over `len` bytes of `input`.
 *
 * endian : detected CPU endianness; big-endian hosts byte-swap each word
 *          so the result matches the canonical little-endian format.
 * align  : whether 64-bit loads may dereference the pointer directly.
 *
 * Both `endian` and `align` are compile-time constants at every call site,
 * so after inlining the branches in XXH_readLE64_align fold away.
 */
FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
{
    const BYTE* p = (const BYTE*)input;
    const BYTE* bEnd = p + len;
    U64 h64;
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)

#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
    /* Treat NULL input as a zero-length buffer (p==bEnd skips all loops). */
    if (p==NULL)
    {
        len=0;
        bEnd=p=(const BYTE*)(size_t)32;
    }
#endif

    if (len>=32)
    {
        /* Bulk phase: four independent accumulator lanes each consume one
         * 64-bit word per iteration (32 bytes per pass). */
        const BYTE* const limit = bEnd - 32;
        U64 v1 = seed + PRIME64_1 + PRIME64_2;
        U64 v2 = seed + PRIME64_2;
        U64 v3 = seed + 0;
        U64 v4 = seed - PRIME64_1;

        do
        {
            v1 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v1 = XXH_rotl64(v1, 31);
            v1 *= PRIME64_1;
            v2 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v2 = XXH_rotl64(v2, 31);
            v2 *= PRIME64_1;
            v3 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v3 = XXH_rotl64(v3, 31);
            v3 *= PRIME64_1;
            v4 += XXH_get64bits(p) * PRIME64_2;
            p+=8;
            v4 = XXH_rotl64(v4, 31);
            v4 *= PRIME64_1;
        }
        while (p<=limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);

        /* Merge each lane into h64 (unlike XXH32, XXH64 re-mixes every
         * lane through a multiply-rotate round before folding it in). */
        v1 *= PRIME64_2;
        v1 = XXH_rotl64(v1, 31);
        v1 *= PRIME64_1;
        h64 ^= v1;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v2 *= PRIME64_2;
        v2 = XXH_rotl64(v2, 31);
        v2 *= PRIME64_1;
        h64 ^= v2;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v3 *= PRIME64_2;
        v3 = XXH_rotl64(v3, 31);
        v3 *= PRIME64_1;
        h64 ^= v3;
        h64 = h64 * PRIME64_1 + PRIME64_4;

        v4 *= PRIME64_2;
        v4 = XXH_rotl64(v4, 31);
        v4 *= PRIME64_1;
        h64 ^= v4;
        h64 = h64 * PRIME64_1 + PRIME64_4;
    }
    else
    {
        /* Short input (< 32 bytes): skip the lane phase entirely. */
        h64  = seed + PRIME64_5;
    }

    h64 += (U64) len;

    /* Consume the remaining whole 64-bit words... */
    while (p+8<=bEnd)
    {
        U64 k1 = XXH_get64bits(p);
        k1 *= PRIME64_2;
        k1 = XXH_rotl64(k1,31);
        k1 *= PRIME64_1;
        h64 ^= k1;
        h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
        p+=8;
    }

    /* ...then one 32-bit word if present... */
    if (p+4<=bEnd)
    {
        h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
        h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
        p+=4;
    }

    /* ...then the trailing 0-3 bytes, one at a time. */
    while (p<bEnd)
    {
        h64 ^= (*p) * PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * PRIME64_1;
        p++;
    }

    /* Final avalanche: mix the high bits down so every input bit affects
     * every output bit. */
    h64 ^= h64 >> 33;
    h64 *= PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= PRIME64_3;
    h64 ^= h64 >> 32;

    return h64;
}
630 
631 
632 unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
633 {
634 #if 0
635     /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
636     XXH64_state_t state;
637     XXH64_reset(&state, seed);
638     XXH64_update(&state, input, len);
639     return XXH64_digest(&state);
640 #else
641     XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
642 
643 #  if !defined(XXH_USE_UNALIGNED_ACCESS)
644     if ((((size_t)input) & 7)==0)   /* Input is aligned, let's leverage the speed advantage */
645     {
646         if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
647             return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
648         else
649             return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
650     }
651 #  endif
652 
653     if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
654         return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
655     else
656         return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
657 #endif
658 }
659 
660