/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (C) 2016 Gvozden Neskovic <[email protected]>.
 */

/*
 * USER API:
 *
 * Kernel fpu methods:
 *	kfpu_allowed()
 *	kfpu_begin()
 *	kfpu_end()
 *	kfpu_init()
 *	kfpu_fini()
 *
 * SIMD support:
 *
 * The following functions should be called to determine whether a CPU
 * feature is supported. All functions are usable in kernel and user space.
 * If a SIMD algorithm uses more than one instruction set, all relevant
 * feature test functions should be called.
 *
 * Supported features:
 *	zfs_sse_available()
 *	zfs_sse2_available()
 *	zfs_sse3_available()
 *	zfs_ssse3_available()
 *	zfs_sse4_1_available()
 *	zfs_sse4_2_available()
 *
 *	zfs_avx_available()
 *	zfs_avx2_available()
 *
 *	zfs_bmi1_available()
 *	zfs_bmi2_available()
 *
 *	zfs_avx512f_available()
 *	zfs_avx512cd_available()
 *	zfs_avx512er_available()
 *	zfs_avx512pf_available()
 *	zfs_avx512bw_available()
 *	zfs_avx512dq_available()
 *	zfs_avx512vl_available()
 *	zfs_avx512ifma_available()
 *	zfs_avx512vbmi_available()
 *
 * NOTE(AVX-512VL):	If using AVX-512 instructions with 128-bit registers,
 *			also add zfs_avx512vl_available() to the feature check.
 */
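
/*
 * Illustrative usage sketch (not an additional API defined by this header):
 * an algorithm using AVX2 would gate its SIMD code path on both the kernel
 * fpu methods and the relevant feature test, e.g.
 *
 *	if (kfpu_allowed() && zfs_avx2_available()) {
 *		kfpu_begin();
 *		// ... AVX2 implementation ...
 *		kfpu_end();
 *	}
 */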

#ifndef _LINUX_SIMD_X86_H
#define	_LINUX_SIMD_X86_H

/* only for __x86 */
#if defined(__x86)

#include <sys/types.h>
#include <asm/cpufeature.h>

/*
 * Disable the WARN_ON_FPU() macro to prevent additional dependencies
 * when providing the kfpu_* functions.  Relevant warnings are included
 * as appropriate and are unconditionally enabled.
 */
#if defined(CONFIG_X86_DEBUG_FPU) && !defined(KERNEL_EXPORTS_X86_FPU)
#undef CONFIG_X86_DEBUG_FPU
#endif

#if defined(HAVE_KERNEL_FPU_API_HEADER)
#include <asm/fpu/api.h>
#include <asm/fpu/internal.h>
#if defined(HAVE_KERNEL_FPU_XCR_HEADER)
#include <asm/fpu/xcr.h>
#endif
#else
#include <asm/i387.h>
#include <asm/xcr.h>
#endif

/*
 * The following cases are for kernels which export either the
 * kernel_fpu_* or __kernel_fpu_* functions.
 */
#if defined(KERNEL_EXPORTS_X86_FPU)

#define	kfpu_allowed()		1
#define	kfpu_init()		0
#define	kfpu_fini()		((void) 0)

#if defined(HAVE_UNDERSCORE_KERNEL_FPU)
#define	kfpu_begin()		\
{				\
	preempt_disable();	\
	__kernel_fpu_begin();	\
}
#define	kfpu_end()		\
{				\
	__kernel_fpu_end();	\
	preempt_enable();	\
}

#elif defined(HAVE_KERNEL_FPU)
#define	kfpu_begin()		kernel_fpu_begin()
#define	kfpu_end()		kernel_fpu_end()

#else
/*
 * This case is unreachable.  When KERNEL_EXPORTS_X86_FPU is defined then
 * either HAVE_UNDERSCORE_KERNEL_FPU or HAVE_KERNEL_FPU must be defined.
 */
#error "Unreachable kernel configuration"
#endif

#else /* defined(KERNEL_EXPORTS_X86_FPU) */

/*
 * When the kernel_fpu_* symbols are unavailable, provide our own
 * versions which allow the FPU to be used safely.
 */
#if defined(HAVE_KERNEL_FPU_INTERNAL) || defined(HAVE_KERNEL_FPU_XSAVE_INTERNAL)

#if defined(HAVE_KERNEL_FPU_XSAVE_INTERNAL)
/*
 * Some sanity checks.
 * HAVE_KERNEL_FPU_INTERNAL and HAVE_KERNEL_FPU_XSAVE_INTERNAL are
 * mutually exclusive.
 */
#if defined(HAVE_KERNEL_FPU_INTERNAL)
#error "HAVE_KERNEL_FPU_INTERNAL and HAVE_KERNEL_FPU_XSAVE_INTERNAL defined"
#endif
/*
 * For kernels >= 5.16 we have to use inline assembly with the XSAVE{,OPT,S}
 * instructions, so we need the toolchain to support at least XSAVE.
 */
#if !defined(HAVE_XSAVE)
#error "Toolchain needs to support the XSAVE assembler instruction"
#endif
#endif

#include <linux/mm.h>
#include <linux/slab.h>

extern union fpregs_state **zfs_kfpu_fpregs;

/*
 * Free the per-cpu FPU state storage allocated by kfpu_init().
 */
static inline void
kfpu_fini(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (zfs_kfpu_fpregs[cpu] != NULL) {
			free_pages((unsigned long)zfs_kfpu_fpregs[cpu],
			    get_order(sizeof (union fpregs_state)));
		}
	}

	kfree(zfs_kfpu_fpregs);
}

static inline int
kfpu_init(void)
{
	zfs_kfpu_fpregs = kzalloc(num_possible_cpus() *
	    sizeof (union fpregs_state *), GFP_KERNEL);
	if (zfs_kfpu_fpregs == NULL)
		return (-ENOMEM);

	/*
	 * The fxsave and xsave operations require 16-/64-byte alignment of
	 * the target memory. Since kmalloc() provides no alignment
	 * guarantee, use alloc_pages_node() instead.
	 */
	unsigned int order = get_order(sizeof (union fpregs_state));
	int cpu;

	for_each_possible_cpu(cpu) {
		struct page *page = alloc_pages_node(cpu_to_node(cpu),
		    GFP_KERNEL | __GFP_ZERO, order);
		if (page == NULL) {
			kfpu_fini();
			return (-ENOMEM);
		}

		zfs_kfpu_fpregs[cpu] = page_address(page);
	}

	return (0);
}
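
/*
 * Illustrative sketch of the expected caller (assumed, not defined here):
 * kfpu_init() is called once when the module loads and kfpu_fini() on
 * unload, so the per-cpu save areas exist before any kfpu_begin() call.
 *
 *	int error = kfpu_init();
 *	if (error)
 *		return (error);
 *	// ... kfpu_begin()/kfpu_end() bracket SIMD sections at runtime ...
 *	kfpu_fini();	// on module unload
 */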

#define	kfpu_allowed()		1
#if defined(HAVE_KERNEL_FPU_INTERNAL)
#define	ex_handler_fprestore	ex_handler_default
#endif

/*
 * FPU save and restore instructions.
 */
#define	__asm			__asm__ __volatile__
#define	kfpu_fxsave(addr)	__asm("fxsave %0" : "=m" (*(addr)))
#define	kfpu_fxsaveq(addr)	__asm("fxsaveq %0" : "=m" (*(addr)))
#define	kfpu_fnsave(addr)	__asm("fnsave %0; fwait" : "=m" (*(addr)))
#define	kfpu_fxrstor(addr)	__asm("fxrstor %0" : : "m" (*(addr)))
#define	kfpu_fxrstorq(addr)	__asm("fxrstorq %0" : : "m" (*(addr)))
#define	kfpu_frstor(addr)	__asm("frstor %0" : : "m" (*(addr)))
#define	kfpu_fxsr_clean(rval)	__asm("fnclex; emms; fildl %P[addr]" \
				    : : [addr] "m" (rval));

#if defined(HAVE_KERNEL_FPU_INTERNAL)
static inline void
kfpu_save_xsave(struct xregs_state *addr, uint64_t mask)
{
	uint32_t low, hi;
	int err;

	low = mask;
	hi = mask >> 32;
	XSTATE_XSAVE(addr, low, hi, err);
	WARN_ON_ONCE(err);
}
#endif /* defined(HAVE_KERNEL_FPU_INTERNAL) */

#if defined(HAVE_KERNEL_FPU_XSAVE_INTERNAL)
#define	kfpu_do_xsave(instruction, addr, mask)			\
{								\
	uint32_t low, hi;					\
								\
	low = mask;						\
	hi = (uint64_t)(mask) >> 32;				\
	__asm(instruction " %[dst]\n\t"				\
	    :							\
	    : [dst] "m" (*(addr)), "a" (low), "d" (hi)		\
	    : "memory");					\
}
#endif /* defined(HAVE_KERNEL_FPU_XSAVE_INTERNAL) */

static inline void
kfpu_save_fxsr(struct fxregs_state *addr)
{
	if (IS_ENABLED(CONFIG_X86_32))
		kfpu_fxsave(addr);
	else
		kfpu_fxsaveq(addr);
}

static inline void
kfpu_save_fsave(struct fregs_state *addr)
{
	kfpu_fnsave(addr);
}

#if defined(HAVE_KERNEL_FPU_INTERNAL)
static inline void
kfpu_begin(void)
{
	/*
	 * Preemption and interrupts must be disabled for the critical
	 * region where the FPU state is being modified.
	 */
	preempt_disable();
	local_irq_disable();

	/*
	 * The current FPU registers need to be preserved by kfpu_begin()
	 * and restored by kfpu_end().  They are stored in a dedicated
	 * per-cpu variable, not in the task struct; this allows any user
	 * FPU state to be correctly preserved and restored.
	 */
	union fpregs_state *state = zfs_kfpu_fpregs[smp_processor_id()];
	if (static_cpu_has(X86_FEATURE_XSAVE)) {
		kfpu_save_xsave(&state->xsave, ~0);
	} else if (static_cpu_has(X86_FEATURE_FXSR)) {
		kfpu_save_fxsr(&state->fxsave);
	} else {
		kfpu_save_fsave(&state->fsave);
	}
}
#endif /* defined(HAVE_KERNEL_FPU_INTERNAL) */

#if defined(HAVE_KERNEL_FPU_XSAVE_INTERNAL)
static inline void
kfpu_begin(void)
{
	/*
	 * Preemption and interrupts must be disabled for the critical
	 * region where the FPU state is being modified.
	 */
	preempt_disable();
	local_irq_disable();

	/*
	 * The current FPU registers need to be preserved by kfpu_begin()
	 * and restored by kfpu_end().  They are stored in a dedicated
	 * per-cpu variable, not in the task struct; this allows any user
	 * FPU state to be correctly preserved and restored.
	 */
	union fpregs_state *state = zfs_kfpu_fpregs[smp_processor_id()];
#if defined(HAVE_XSAVES)
	if (static_cpu_has(X86_FEATURE_XSAVES)) {
		kfpu_do_xsave("xsaves", &state->xsave, ~0);
		return;
	}
#endif
#if defined(HAVE_XSAVEOPT)
	if (static_cpu_has(X86_FEATURE_XSAVEOPT)) {
		kfpu_do_xsave("xsaveopt", &state->xsave, ~0);
		return;
	}
#endif
	if (static_cpu_has(X86_FEATURE_XSAVE)) {
		kfpu_do_xsave("xsave", &state->xsave, ~0);
	} else if (static_cpu_has(X86_FEATURE_FXSR)) {
		kfpu_save_fxsr(&state->fxsave);
	} else {
		kfpu_save_fsave(&state->fsave);
	}
}
#endif /* defined(HAVE_KERNEL_FPU_XSAVE_INTERNAL) */

#if defined(HAVE_KERNEL_FPU_INTERNAL)
static inline void
kfpu_restore_xsave(struct xregs_state *addr, uint64_t mask)
{
	uint32_t low, hi;

	low = mask;
	hi = mask >> 32;
	XSTATE_XRESTORE(addr, low, hi);
}
#endif /* defined(HAVE_KERNEL_FPU_INTERNAL) */

#if defined(HAVE_KERNEL_FPU_XSAVE_INTERNAL)
#define	kfpu_do_xrstor(instruction, addr, mask)			\
{								\
	uint32_t low, hi;					\
								\
	low = mask;						\
	hi = (uint64_t)(mask) >> 32;				\
	__asm(instruction " %[src]"				\
	    :							\
	    : [src] "m" (*(addr)), "a" (low), "d" (hi)		\
	    : "memory");					\
}
#endif /* defined(HAVE_KERNEL_FPU_XSAVE_INTERNAL) */

static inline void
kfpu_restore_fxsr(struct fxregs_state *addr)
{
	/*
	 * On AuthenticAMD K7 and K8 processors the fxrstor instruction only
	 * restores the x87 FOP, FIP, and FDP registers when an exception
	 * is pending.  Clean the x87 state to force the restore.
	 */
	if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK)))
		kfpu_fxsr_clean(addr);

	if (IS_ENABLED(CONFIG_X86_32)) {
		kfpu_fxrstor(addr);
	} else {
		kfpu_fxrstorq(addr);
	}
}

static inline void
kfpu_restore_fsave(struct fregs_state *addr)
{
	kfpu_frstor(addr);
}

#if defined(HAVE_KERNEL_FPU_INTERNAL)
static inline void
kfpu_end(void)
{
	union fpregs_state *state = zfs_kfpu_fpregs[smp_processor_id()];

	if (static_cpu_has(X86_FEATURE_XSAVE)) {
		kfpu_restore_xsave(&state->xsave, ~0);
	} else if (static_cpu_has(X86_FEATURE_FXSR)) {
		kfpu_restore_fxsr(&state->fxsave);
	} else {
		kfpu_restore_fsave(&state->fsave);
	}

	local_irq_enable();
	preempt_enable();
}
#endif /* defined(HAVE_KERNEL_FPU_INTERNAL) */

#if defined(HAVE_KERNEL_FPU_XSAVE_INTERNAL)
static inline void
kfpu_end(void)
{
	union fpregs_state *state = zfs_kfpu_fpregs[smp_processor_id()];
#if defined(HAVE_XSAVES)
	if (static_cpu_has(X86_FEATURE_XSAVES)) {
		kfpu_do_xrstor("xrstors", &state->xsave, ~0);
		goto out;
	}
#endif
	if (static_cpu_has(X86_FEATURE_XSAVE)) {
		kfpu_do_xrstor("xrstor", &state->xsave, ~0);
	} else if (static_cpu_has(X86_FEATURE_FXSR)) {
		kfpu_restore_fxsr(&state->fxsave);
	} else {
		kfpu_restore_fsave(&state->fsave);
	}
out:
	local_irq_enable();
	preempt_enable();
}
#endif /* defined(HAVE_KERNEL_FPU_XSAVE_INTERNAL) */

#else

/*
 * FPU support is unavailable.
 */
#define	kfpu_allowed()		0
#define	kfpu_begin()		do {} while (0)
#define	kfpu_end()		do {} while (0)
#define	kfpu_init()		0
#define	kfpu_fini()		((void) 0)

#endif /* defined(HAVE_KERNEL_FPU_INTERNAL || HAVE_KERNEL_FPU_XSAVE_INTERNAL) */
#endif /* defined(KERNEL_EXPORTS_X86_FPU) */

/*
 * Linux kernel provides an interface for CPU feature testing.
 */

/*
 * Detect register set support
 */
static inline boolean_t
__simd_state_enabled(const uint64_t state)
{
	boolean_t has_osxsave;
	uint64_t xcr0;

#if defined(X86_FEATURE_OSXSAVE)
	has_osxsave = !!boot_cpu_has(X86_FEATURE_OSXSAVE);
#else
	has_osxsave = B_FALSE;
#endif
	if (!has_osxsave)
		return (B_FALSE);

	xcr0 = xgetbv(0);
	return ((xcr0 & state) == state);
}

#define	_XSTATE_SSE_AVX		(0x2 | 0x4)
#define	_XSTATE_AVX512		(0xE0 | _XSTATE_SSE_AVX)
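
/*
 * The mask bits above correspond to XCR0 state components: 0x2 covers SSE
 * (the XMM registers), 0x4 covers AVX (the upper halves of the YMM
 * registers), and 0xE0 covers the opmask, ZMM_Hi256, and Hi16_ZMM state
 * used by AVX-512.
 */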

#define	__ymm_enabled() __simd_state_enabled(_XSTATE_SSE_AVX)
#define	__zmm_enabled() __simd_state_enabled(_XSTATE_AVX512)

/*
 * Check if SSE instruction set is available
 */
static inline boolean_t
zfs_sse_available(void)
{
	return (!!boot_cpu_has(X86_FEATURE_XMM));
}

/*
 * Check if SSE2 instruction set is available
 */
static inline boolean_t
zfs_sse2_available(void)
{
	return (!!boot_cpu_has(X86_FEATURE_XMM2));
}

/*
 * Check if SSE3 instruction set is available
 */
static inline boolean_t
zfs_sse3_available(void)
{
	return (!!boot_cpu_has(X86_FEATURE_XMM3));
}

/*
 * Check if SSSE3 instruction set is available
 */
static inline boolean_t
zfs_ssse3_available(void)
{
	return (!!boot_cpu_has(X86_FEATURE_SSSE3));
}

/*
 * Check if SSE4.1 instruction set is available
 */
static inline boolean_t
zfs_sse4_1_available(void)
{
	return (!!boot_cpu_has(X86_FEATURE_XMM4_1));
}

/*
 * Check if SSE4.2 instruction set is available
 */
static inline boolean_t
zfs_sse4_2_available(void)
{
	return (!!boot_cpu_has(X86_FEATURE_XMM4_2));
}

/*
 * Check if AVX instruction set is available
 */
static inline boolean_t
zfs_avx_available(void)
{
	return (boot_cpu_has(X86_FEATURE_AVX) && __ymm_enabled());
}

/*
 * Check if AVX2 instruction set is available
 */
static inline boolean_t
zfs_avx2_available(void)
{
	return (boot_cpu_has(X86_FEATURE_AVX2) && __ymm_enabled());
}

/*
 * Check if BMI1 instruction set is available
 */
static inline boolean_t
zfs_bmi1_available(void)
{
#if defined(X86_FEATURE_BMI1)
	return (!!boot_cpu_has(X86_FEATURE_BMI1));
#else
	return (B_FALSE);
#endif
}

/*
 * Check if BMI2 instruction set is available
 */
static inline boolean_t
zfs_bmi2_available(void)
{
#if defined(X86_FEATURE_BMI2)
	return (!!boot_cpu_has(X86_FEATURE_BMI2));
#else
	return (B_FALSE);
#endif
}

/*
 * Check if AES instruction set is available
 */
static inline boolean_t
zfs_aes_available(void)
{
#if defined(X86_FEATURE_AES)
	return (!!boot_cpu_has(X86_FEATURE_AES));
#else
	return (B_FALSE);
#endif
}

/*
 * Check if PCLMULQDQ instruction set is available
 */
static inline boolean_t
zfs_pclmulqdq_available(void)
{
#if defined(X86_FEATURE_PCLMULQDQ)
	return (!!boot_cpu_has(X86_FEATURE_PCLMULQDQ));
#else
	return (B_FALSE);
#endif
}

/*
 * Check if MOVBE instruction is available
 */
static inline boolean_t
zfs_movbe_available(void)
{
#if defined(X86_FEATURE_MOVBE)
	return (!!boot_cpu_has(X86_FEATURE_MOVBE));
#else
	return (B_FALSE);
#endif
}

/*
 * AVX-512 family of instruction sets:
 *
 * AVX512F	Foundation
 * AVX512CD	Conflict Detection Instructions
 * AVX512ER	Exponential and Reciprocal Instructions
 * AVX512PF	Prefetch Instructions
 *
 * AVX512BW	Byte and Word Instructions
 * AVX512DQ	Double-word and Quadword Instructions
 * AVX512VL	Vector Length Extensions
 *
 * AVX512IFMA	Integer Fused Multiply Add (Not supported by kernel 4.4)
 * AVX512VBMI	Vector Byte Manipulation Instructions
 */
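
/*
 * Illustrative sketch tying this to the AVX-512VL note above (not an API
 * defined by this header): code using AVX-512BW instructions on 128-bit
 * XMM registers would check every relevant feature, e.g.
 *
 *	if (zfs_avx512bw_available() && zfs_avx512vl_available()) {
 *		// AVX-512BW + VL code path
 *	}
 */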

/*
 * Check if AVX512F instruction set is available
 */
static inline boolean_t
zfs_avx512f_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512F)
	has_avx512 = !!boot_cpu_has(X86_FEATURE_AVX512F);
#endif
	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512CD instruction set is available
 */
static inline boolean_t
zfs_avx512cd_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512CD)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512CD);
#endif
	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512ER instruction set is available
 */
static inline boolean_t
zfs_avx512er_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512ER)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512ER);
#endif
	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512PF instruction set is available
 */
static inline boolean_t
zfs_avx512pf_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512PF)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512PF);
#endif
	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512BW instruction set is available
 */
static inline boolean_t
zfs_avx512bw_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512BW)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512BW);
#endif

	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512DQ instruction set is available
 */
static inline boolean_t
zfs_avx512dq_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512DQ)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512DQ);
#endif
	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512VL instruction set is available
 */
static inline boolean_t
zfs_avx512vl_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512VL)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512VL);
#endif
	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512IFMA instruction set is available
 */
static inline boolean_t
zfs_avx512ifma_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512IFMA)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512IFMA);
#endif
	return (has_avx512 && __zmm_enabled());
}

/*
 * Check if AVX512VBMI instruction set is available
 */
static inline boolean_t
zfs_avx512vbmi_available(void)
{
	boolean_t has_avx512 = B_FALSE;

#if defined(X86_FEATURE_AVX512VBMI)
	has_avx512 = boot_cpu_has(X86_FEATURE_AVX512F) &&
	    boot_cpu_has(X86_FEATURE_AVX512VBMI);
#endif
	return (has_avx512 && __zmm_enabled());
}

#endif /* defined(__x86) */

#endif /* _LINUX_SIMD_X86_H */