xref: /freebsd-12.1/sys/dev/sfxge/common/efsys.h (revision 8ef603d2)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2010-2016 Solarflare Communications Inc.
5  * All rights reserved.
6  *
7  * This software was developed in part by Philip Paeps under contract for
8  * Solarflare Communications, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions are met:
12  *
13  * 1. Redistributions of source code must retain the above copyright notice,
14  *    this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright notice,
16  *    this list of conditions and the following disclaimer in the documentation
17  *    and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
29  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  *
31  * The views and conclusions contained in the software and documentation are
32  * those of the authors and should not be interpreted as representing official
33  * policies, either expressed or implied, of the FreeBSD Project.
34  *
35  * $FreeBSD$
36  */
37 
38 #ifndef	_SYS_EFSYS_H
39 #define	_SYS_EFSYS_H
40 
41 #ifdef	__cplusplus
42 extern "C" {
43 #endif
44 
45 #include <sys/param.h>
46 #include <sys/bus.h>
47 #include <sys/endian.h>
48 #include <sys/lock.h>
49 #include <sys/malloc.h>
50 #include <sys/mbuf.h>
51 #include <sys/mutex.h>
52 #include <sys/rwlock.h>
53 #include <sys/sdt.h>
54 #include <sys/systm.h>
55 
56 #include <machine/bus.h>
57 #include <machine/endian.h>
58 
59 #define	EFSYS_HAS_UINT64 1
60 #if defined(__x86_64__)
61 #define	EFSYS_USE_UINT64 1
62 #else
63 #define	EFSYS_USE_UINT64 0
64 #endif
65 #define	EFSYS_HAS_SSE2_M128 0
66 #if _BYTE_ORDER == _BIG_ENDIAN
67 #define	EFSYS_IS_BIG_ENDIAN 1
68 #define	EFSYS_IS_LITTLE_ENDIAN 0
69 #elif _BYTE_ORDER == _LITTLE_ENDIAN
70 #define	EFSYS_IS_BIG_ENDIAN 0
71 #define	EFSYS_IS_LITTLE_ENDIAN 1
72 #endif
73 #include "efx_types.h"
74 
75 /* Common code requires this */
76 #if __FreeBSD_version < 800068
77 #define	memmove(d, s, l) bcopy(s, d, l)
78 #endif
79 
80 /* FreeBSD equivalents of Solaris things */
81 #ifndef _NOTE
82 #define	_NOTE(s)
83 #endif
84 
85 #ifndef B_FALSE
86 #define	B_FALSE	FALSE
87 #endif
88 #ifndef B_TRUE
89 #define	B_TRUE	TRUE
90 #endif
91 
92 #ifndef ISP2
93 #define	ISP2(x)			(((x) & ((x) - 1)) == 0)
94 #endif
95 
96 #if defined(__x86_64__) && __FreeBSD_version >= 1000000
97 
98 #define	SFXGE_USE_BUS_SPACE_8		1
99 
100 #if !defined(bus_space_read_stream_8)
101 
102 #define	bus_space_read_stream_8(t, h, o)				\
103 	bus_space_read_8((t), (h), (o))
104 
105 #define	bus_space_write_stream_8(t, h, o, v)				\
106 	bus_space_write_8((t), (h), (o), (v))
107 
108 #endif
109 
110 #endif
111 
112 #define	ENOTACTIVE EINVAL
113 
114 /* Memory type to use on FreeBSD */
115 MALLOC_DECLARE(M_SFXGE);
116 
117 /* Machine dependent prefetch wrappers */
118 #if defined(__i386__) || defined(__amd64__)
119 static __inline void
120 prefetch_read_many(void *addr)
121 {
122 
123 	__asm__(
124 	    "prefetcht0 (%0)"
125 	    :
126 	    : "r" (addr));
127 }
128 
129 static __inline void
130 prefetch_read_once(void *addr)
131 {
132 
133 	__asm__(
134 	    "prefetchnta (%0)"
135 	    :
136 	    : "r" (addr));
137 }
138 #elif defined(__sparc64__)
139 static __inline void
140 prefetch_read_many(void *addr)
141 {
142 
143 	__asm__(
144 	    "prefetch [%0], 0"
145 	    :
146 	    : "r" (addr));
147 }
148 
149 static __inline void
150 prefetch_read_once(void *addr)
151 {
152 
153 	__asm__(
154 	    "prefetch [%0], 1"
155 	    :
156 	    : "r" (addr));
157 }
158 #else
159 static __inline void
160 prefetch_read_many(void *addr)
161 {
162 
163 }
164 
165 static __inline void
166 prefetch_read_once(void *addr)
167 {
168 
169 }
170 #endif
171 
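/*
 * Usage sketch for the prefetch wrappers above: hint the next descriptor
 * while processing the current one.  The ring layout and the names
 * my_desc_t, my_ring_walk and MY_RING_SIZE are hypothetical, not part of
 * this header.
 */
#if 0
#define	MY_RING_SIZE	1024

typedef struct my_desc_s { uint64_t md_data[8]; } my_desc_t;

static void
my_ring_walk(my_desc_t *ring, unsigned int index, unsigned int count)
{
	while (count-- > 0) {
		/* The next descriptor will be read shortly. */
		prefetch_read_many(&ring[(index + 1) % MY_RING_SIZE]);

		/* ... process ring[index] here ... */
		index = (index + 1) % MY_RING_SIZE;
	}
}
#endif
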
172 #if defined(__i386__) || defined(__amd64__)
173 #include <vm/vm.h>
174 #include <vm/pmap.h>
175 #endif
176 static __inline void
177 sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map,
178 		    struct mbuf *m, bus_dma_segment_t *seg)
179 {
180 #if defined(__i386__) || defined(__amd64__)
181 	seg->ds_addr = pmap_kextract(mtod(m, vm_offset_t));
182 	seg->ds_len = m->m_len;
183 #else
184 	int nsegstmp;
185 
186 	bus_dmamap_load_mbuf_sg(tag, map, m, seg, &nsegstmp, 0);
187 #endif
188 }
189 
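/*
 * Usage sketch for sfxge_map_mbuf_fast(): fill a single DMA segment for an
 * mbuf.  On i386/amd64 the helper above translates the mbuf data pointer
 * directly with pmap_kextract(); elsewhere it falls back to
 * bus_dmamap_load_mbuf_sg().  The my_map_one() wrapper is hypothetical.
 */
#if 0
static void
my_map_one(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf *m)
{
	bus_dma_segment_t seg;

	sfxge_map_mbuf_fast(tag, map, m, &seg);
	/* seg.ds_addr and seg.ds_len now describe the mbuf data. */
}
#endif
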
190 /* Modifiers used for Windows builds */
191 #define	__in
192 #define	__in_opt
193 #define	__in_ecount(_n)
194 #define	__in_ecount_opt(_n)
195 #define	__in_bcount(_n)
196 #define	__in_bcount_opt(_n)
197 
198 #define	__out
199 #define	__out_opt
200 #define	__out_ecount(_n)
201 #define	__out_ecount_opt(_n)
202 #define	__out_bcount(_n)
203 #define	__out_bcount_opt(_n)
204 #define	__out_bcount_part(_n, _l)
205 #define	__out_bcount_part_opt(_n, _l)
206 
207 #define	__deref_out
208 
209 #define	__inout
210 #define	__inout_opt
211 #define	__inout_ecount(_n)
212 #define	__inout_ecount_opt(_n)
213 #define	__inout_bcount(_n)
214 #define	__inout_bcount_opt(_n)
215 #define	__inout_bcount_full_opt(_n)
216 
217 #define	__deref_out_bcount_opt(n)
218 
219 #define	__checkReturn
220 #define	__success(_x)
221 
222 #define	__drv_when(_p, _c)
223 
224 /* Code inclusion options */
225 
226 
227 #define	EFSYS_OPT_NAMES 1
228 
229 #define	EFSYS_OPT_SIENA 1
230 #define	EFSYS_OPT_HUNTINGTON 1
231 #define	EFSYS_OPT_MEDFORD 1
232 #ifdef DEBUG
233 #define	EFSYS_OPT_CHECK_REG 1
234 #else
235 #define	EFSYS_OPT_CHECK_REG 0
236 #endif
237 
238 #define	EFSYS_OPT_MCDI 1
239 #define	EFSYS_OPT_MCDI_LOGGING 0
240 #define	EFSYS_OPT_MCDI_PROXY_AUTH 0
241 
242 #define	EFSYS_OPT_MAC_STATS 1
243 
244 #define	EFSYS_OPT_LOOPBACK 0
245 
246 #define	EFSYS_OPT_MON_MCDI 0
247 #define	EFSYS_OPT_MON_STATS 0
248 
249 #define	EFSYS_OPT_PHY_STATS 1
250 #define	EFSYS_OPT_BIST 1
251 #define	EFSYS_OPT_PHY_LED_CONTROL 1
252 #define	EFSYS_OPT_PHY_FLAGS 0
253 
254 #define	EFSYS_OPT_VPD 1
255 #define	EFSYS_OPT_NVRAM 1
256 #define	EFSYS_OPT_BOOTCFG 0
257 
258 #define	EFSYS_OPT_DIAG 0
259 #define	EFSYS_OPT_RX_SCALE 1
260 #define	EFSYS_OPT_QSTATS 1
261 #define	EFSYS_OPT_FILTER 1
262 #define	EFSYS_OPT_RX_SCATTER 0
263 
264 #define	EFSYS_OPT_EV_PREFETCH 0
265 
266 #define	EFSYS_OPT_DECODE_INTR_FATAL 1
267 
268 #define	EFSYS_OPT_LICENSING 0
269 
270 #define	EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0
271 
272 /* ID */
273 
274 typedef struct __efsys_identifier_s	efsys_identifier_t;
275 
276 /* PROBE */
277 
278 #ifndef DTRACE_PROBE
279 
280 #define	EFSYS_PROBE(_name)
281 
282 #define	EFSYS_PROBE1(_name, _type1, _arg1)
283 
284 #define	EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)
285 
286 #define	EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
287 	    _type3, _arg3)
288 
289 #define	EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
290 	    _type3, _arg3, _type4, _arg4)
291 
292 #define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
293 	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
294 
295 #define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
296 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
297 	    _type6, _arg6)
298 
299 #define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
300 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
301 	    _type6, _arg6, _type7, _arg7)
302 
303 #else /* DTRACE_PROBE */
304 
305 #define	EFSYS_PROBE(_name)						\
306 	DTRACE_PROBE(_name)
307 
308 #define	EFSYS_PROBE1(_name, _type1, _arg1)				\
309 	DTRACE_PROBE1(_name, _type1, _arg1)
310 
311 #define	EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)		\
312 	DTRACE_PROBE2(_name, _type1, _arg1, _type2, _arg2)
313 
314 #define	EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
315 	    _type3, _arg3)						\
316 	DTRACE_PROBE3(_name, _type1, _arg1, _type2, _arg2,		\
317 	    _type3, _arg3)
318 
319 #define	EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
320 	    _type3, _arg3, _type4, _arg4)				\
321 	DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
322 	    _type3, _arg3, _type4, _arg4)
323 
324 #ifdef DTRACE_PROBE5
325 #define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
326 	    _type3, _arg3, _type4, _arg4, _type5, _arg5)		\
327 	DTRACE_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
328 	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
329 #else
330 #define	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
331 	    _type3, _arg3, _type4, _arg4, _type5, _arg5)		\
332 	DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2,		\
333 	    _type3, _arg3, _type4, _arg4)
334 #endif
335 
336 #ifdef DTRACE_PROBE6
337 #define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
338 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
339 	    _type6, _arg6)						\
340 	DTRACE_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
341 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
342 	    _type6, _arg6)
343 #else
344 #define	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
345 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
346 	    _type6, _arg6)						\
347 	EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2,		\
348 	    _type3, _arg3, _type4, _arg4, _type5, _arg5)
349 #endif
350 
351 #ifdef DTRACE_PROBE7
352 #define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
353 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
354 	    _type6, _arg6, _type7, _arg7)				\
355 	DTRACE_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
356 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
357 	    _type6, _arg6, _type7, _arg7)
358 #else
359 #define	EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2,		\
360 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
361 	    _type6, _arg6, _type7, _arg7)				\
362 	EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2,		\
363 	    _type3, _arg3, _type4, _arg4, _type5, _arg5,		\
364 	    _type6, _arg6)
365 #endif
366 
367 #endif /* DTRACE_PROBE */
368 
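/*
 * Usage sketch for the probe macros: EFSYS_PROBEn() expands to the matching
 * DTRACE_PROBEn() when SDT probes are available and to nothing otherwise,
 * so callers can trace unconditionally.  The probe name, arguments and
 * my_trace_example() below are hypothetical.
 */
#if 0
static void
my_trace_example(unsigned int label, uint32_t flags)
{
	EFSYS_PROBE2(my_event, unsigned int, label, uint32_t, flags);
}
#endif
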
369 /* DMA */
370 
371 typedef uint64_t		efsys_dma_addr_t;
372 
373 typedef struct efsys_mem_s {
374 	bus_dma_tag_t		esm_tag;
375 	bus_dmamap_t		esm_map;
376 	caddr_t			esm_base;
377 	efsys_dma_addr_t	esm_addr;
378 } efsys_mem_t;
379 
380 
381 #define	EFSYS_MEM_ZERO(_esmp, _size)					\
382 	do {								\
383 		(void) memset((_esmp)->esm_base, 0, (_size));		\
384 									\
385 	_NOTE(CONSTANTCONDITION)					\
386 	} while (B_FALSE)
387 
388 #define	EFSYS_MEM_READD(_esmp, _offset, _edp)				\
389 	do {								\
390 		uint32_t *addr;						\
391 									\
392 		_NOTE(CONSTANTCONDITION)				\
393 		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
394 		    sizeof (efx_dword_t)),				\
395 		    ("not power of 2 aligned"));			\
396 									\
397 		addr = (void *)((_esmp)->esm_base + (_offset));		\
398 									\
399 		(_edp)->ed_u32[0] = *addr;				\
400 									\
401 		EFSYS_PROBE2(mem_readd, unsigned int, (_offset),	\
402 		    uint32_t, (_edp)->ed_u32[0]);			\
403 									\
404 	_NOTE(CONSTANTCONDITION)					\
405 	} while (B_FALSE)
406 
407 #if defined(__x86_64__)
408 #define	EFSYS_MEM_READQ(_esmp, _offset, _eqp)				\
409 	do {								\
410 		uint64_t *addr;						\
411 									\
412 		_NOTE(CONSTANTCONDITION)				\
413 		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
414 		    sizeof (efx_qword_t)),				\
415 		    ("not power of 2 aligned"));			\
416 									\
417 		addr = (void *)((_esmp)->esm_base + (_offset));		\
418 									\
419 		(_eqp)->eq_u64[0] = *addr;				\
420 									\
421 		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
422 		    uint32_t, (_eqp)->eq_u32[1],			\
423 		    uint32_t, (_eqp)->eq_u32[0]);			\
424 									\
425 	_NOTE(CONSTANTCONDITION)					\
426 	} while (B_FALSE)
427 #else
428 #define	EFSYS_MEM_READQ(_esmp, _offset, _eqp)				\
429 	do {								\
430 		uint32_t *addr;						\
431 									\
432 		_NOTE(CONSTANTCONDITION)				\
433 		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
434 		    sizeof (efx_qword_t)),				\
435 		    ("not power of 2 aligned"));			\
436 									\
437 		addr = (void *)((_esmp)->esm_base + (_offset));		\
438 									\
439 		(_eqp)->eq_u32[0] = *addr++;				\
440 		(_eqp)->eq_u32[1] = *addr;				\
441 									\
442 		EFSYS_PROBE3(mem_readq, unsigned int, (_offset),	\
443 		    uint32_t, (_eqp)->eq_u32[1],			\
444 		    uint32_t, (_eqp)->eq_u32[0]);			\
445 									\
446 	_NOTE(CONSTANTCONDITION)					\
447 	} while (B_FALSE)
448 #endif
449 
450 #if defined(__x86_64__)
451 #define	EFSYS_MEM_READO(_esmp, _offset, _eop)				\
452 	do {								\
453 		uint64_t *addr;						\
454 									\
455 		_NOTE(CONSTANTCONDITION)				\
456 		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
457 		    sizeof (efx_oword_t)),				\
458 		    ("not power of 2 aligned"));			\
459 									\
460 		addr = (void *)((_esmp)->esm_base + (_offset));		\
461 									\
462 		(_eop)->eo_u64[0] = *addr++;				\
463 		(_eop)->eo_u64[1] = *addr;				\
464 									\
465 		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
466 		    uint32_t, (_eop)->eo_u32[3],			\
467 		    uint32_t, (_eop)->eo_u32[2],			\
468 		    uint32_t, (_eop)->eo_u32[1],			\
469 		    uint32_t, (_eop)->eo_u32[0]);			\
470 									\
471 	_NOTE(CONSTANTCONDITION)					\
472 	} while (B_FALSE)
473 #else
474 #define	EFSYS_MEM_READO(_esmp, _offset, _eop)				\
475 	do {								\
476 		uint32_t *addr;						\
477 									\
478 		_NOTE(CONSTANTCONDITION)				\
479 		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
480 		    sizeof (efx_oword_t)),				\
481 		    ("not power of 2 aligned"));			\
482 									\
483 		addr = (void *)((_esmp)->esm_base + (_offset));		\
484 									\
485 		(_eop)->eo_u32[0] = *addr++;				\
486 		(_eop)->eo_u32[1] = *addr++;				\
487 		(_eop)->eo_u32[2] = *addr++;				\
488 		(_eop)->eo_u32[3] = *addr;				\
489 									\
490 		EFSYS_PROBE5(mem_reado, unsigned int, (_offset),	\
491 		    uint32_t, (_eop)->eo_u32[3],			\
492 		    uint32_t, (_eop)->eo_u32[2],			\
493 		    uint32_t, (_eop)->eo_u32[1],			\
494 		    uint32_t, (_eop)->eo_u32[0]);			\
495 									\
496 	_NOTE(CONSTANTCONDITION)					\
497 	} while (B_FALSE)
498 #endif
499 
500 #define	EFSYS_MEM_WRITED(_esmp, _offset, _edp)				\
501 	do {								\
502 		uint32_t *addr;						\
503 									\
504 		_NOTE(CONSTANTCONDITION)				\
505 		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
506 		    sizeof (efx_dword_t)),				\
507 		    ("not power of 2 aligned"));			\
508 									\
509 		EFSYS_PROBE2(mem_writed, unsigned int, (_offset),	\
510 		    uint32_t, (_edp)->ed_u32[0]);			\
511 									\
512 		addr = (void *)((_esmp)->esm_base + (_offset));		\
513 									\
514 		*addr = (_edp)->ed_u32[0];				\
515 									\
516 	_NOTE(CONSTANTCONDITION)					\
517 	} while (B_FALSE)
518 
519 #if defined(__x86_64__)
520 #define	EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)				\
521 	do {								\
522 		uint64_t *addr;						\
523 									\
524 		_NOTE(CONSTANTCONDITION)				\
525 		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
526 		    sizeof (efx_qword_t)),				\
527 		    ("not power of 2 aligned"));			\
528 									\
529 		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
530 		    uint32_t, (_eqp)->eq_u32[1],			\
531 		    uint32_t, (_eqp)->eq_u32[0]);			\
532 									\
533 		addr = (void *)((_esmp)->esm_base + (_offset));		\
534 									\
535 		*addr   = (_eqp)->eq_u64[0];				\
536 									\
537 	_NOTE(CONSTANTCONDITION)					\
538 	} while (B_FALSE)
539 
540 #else
541 #define	EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp)				\
542 	do {								\
543 		uint32_t *addr;						\
544 									\
545 		_NOTE(CONSTANTCONDITION)				\
546 		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
547 		    sizeof (efx_qword_t)),				\
548 		    ("not power of 2 aligned"));			\
549 									\
550 		EFSYS_PROBE3(mem_writeq, unsigned int, (_offset),	\
551 		    uint32_t, (_eqp)->eq_u32[1],			\
552 		    uint32_t, (_eqp)->eq_u32[0]);			\
553 									\
554 		addr = (void *)((_esmp)->esm_base + (_offset));		\
555 									\
556 		*addr++ = (_eqp)->eq_u32[0];				\
557 		*addr   = (_eqp)->eq_u32[1];				\
558 									\
559 	_NOTE(CONSTANTCONDITION)					\
560 	} while (B_FALSE)
561 #endif
562 
563 #if defined(__x86_64__)
564 #define	EFSYS_MEM_WRITEO(_esmp, _offset, _eop)				\
565 	do {								\
566 		uint64_t *addr;						\
567 									\
568 		_NOTE(CONSTANTCONDITION)				\
569 		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
570 		    sizeof (efx_oword_t)),				\
571 		    ("not power of 2 aligned"));			\
572 									\
573 		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
574 		    uint32_t, (_eop)->eo_u32[3],			\
575 		    uint32_t, (_eop)->eo_u32[2],			\
576 		    uint32_t, (_eop)->eo_u32[1],			\
577 		    uint32_t, (_eop)->eo_u32[0]);			\
578 									\
579 		addr = (void *)((_esmp)->esm_base + (_offset));		\
580 									\
581 		*addr++ = (_eop)->eo_u64[0];				\
582 		*addr   = (_eop)->eo_u64[1];				\
583 									\
584 	_NOTE(CONSTANTCONDITION)					\
585 	} while (B_FALSE)
586 #else
587 #define	EFSYS_MEM_WRITEO(_esmp, _offset, _eop)				\
588 	do {								\
589 		uint32_t *addr;						\
590 									\
591 		_NOTE(CONSTANTCONDITION)				\
592 		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
593 		    sizeof (efx_oword_t)),				\
594 		    ("not power of 2 aligned"));			\
595 									\
596 		EFSYS_PROBE5(mem_writeo, unsigned int, (_offset),	\
597 		    uint32_t, (_eop)->eo_u32[3],			\
598 		    uint32_t, (_eop)->eo_u32[2],			\
599 		    uint32_t, (_eop)->eo_u32[1],			\
600 		    uint32_t, (_eop)->eo_u32[0]);			\
601 									\
602 		addr = (void *)((_esmp)->esm_base + (_offset));		\
603 									\
604 		*addr++ = (_eop)->eo_u32[0];				\
605 		*addr++ = (_eop)->eo_u32[1];				\
606 		*addr++ = (_eop)->eo_u32[2];				\
607 		*addr   = (_eop)->eo_u32[3];				\
608 									\
609 	_NOTE(CONSTANTCONDITION)					\
610 	} while (B_FALSE)
611 #endif
612 
613 #define	EFSYS_MEM_ADDR(_esmp)						\
614 	((_esmp)->esm_addr)
615 
616 #define	EFSYS_MEM_IS_NULL(_esmp)					\
617 	((_esmp)->esm_base == NULL)
618 
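/*
 * Usage sketch for the EFSYS_MEM_* accessors: write a dword into DMA memory
 * and read it back.  The efsys_mem_t is assumed to have been allocated and
 * loaded elsewhere; EFX_POPULATE_DWORD_1()/EFX_DWORD_0 come from the common
 * efx_types.h, and my_mem_example() is hypothetical.
 */
#if 0
static void
my_mem_example(efsys_mem_t *esmp)
{
	efx_dword_t dword;

	EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xdeadbeef);
	EFSYS_MEM_WRITED(esmp, 0, &dword);
	EFSYS_MEM_READD(esmp, 0, &dword);
}
#endif
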
619 /* BAR */
620 
621 #define	SFXGE_LOCK_NAME_MAX	16
622 
623 typedef struct efsys_bar_s {
624 	struct mtx		esb_lock;
625 	char			esb_lock_name[SFXGE_LOCK_NAME_MAX];
626 	bus_space_tag_t		esb_tag;
627 	bus_space_handle_t	esb_handle;
628 	int			esb_rid;
629 	struct resource		*esb_res;
630 } efsys_bar_t;
631 
632 #define	SFXGE_BAR_LOCK_INIT(_esbp, _ifname)				\
633 	do {								\
634 		snprintf((_esbp)->esb_lock_name,			\
635 			 sizeof((_esbp)->esb_lock_name),		\
636 			 "%s:bar", (_ifname));				\
637 		mtx_init(&(_esbp)->esb_lock, (_esbp)->esb_lock_name,	\
638 			 NULL, MTX_DEF);				\
639 	_NOTE(CONSTANTCONDITION)					\
640 	} while (B_FALSE)
641 #define	SFXGE_BAR_LOCK_DESTROY(_esbp)					\
642 	mtx_destroy(&(_esbp)->esb_lock)
643 #define	SFXGE_BAR_LOCK(_esbp)						\
644 	mtx_lock(&(_esbp)->esb_lock)
645 #define	SFXGE_BAR_UNLOCK(_esbp)						\
646 	mtx_unlock(&(_esbp)->esb_lock)
647 
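/*
 * Usage sketch for the BAR helpers: initialize the BAR lock and perform a
 * locked register read.  The esb_tag/esb_handle/esb_res fields are assumed
 * to have been filled in by the attach code; my_bar_example() and the
 * register offset are hypothetical.
 */
#if 0
static void
my_bar_example(efsys_bar_t *esbp, const char *ifname)
{
	efx_dword_t dword;

	SFXGE_BAR_LOCK_INIT(esbp, ifname);
	EFSYS_BAR_READD(esbp, 0x0, &dword, B_TRUE);
	SFXGE_BAR_LOCK_DESTROY(esbp);
}
#endif
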
648 #define	EFSYS_BAR_READD(_esbp, _offset, _edp, _lock)			\
649 	do {								\
650 		_NOTE(CONSTANTCONDITION)				\
651 		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
652 		    sizeof (efx_dword_t)),				\
653 		    ("not power of 2 aligned"));			\
654 									\
655 		_NOTE(CONSTANTCONDITION)				\
656 		if (_lock)						\
657 			SFXGE_BAR_LOCK(_esbp);				\
658 									\
659 		(_edp)->ed_u32[0] = bus_space_read_stream_4(		\
660 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
661 		    (_offset));						\
662 									\
663 		EFSYS_PROBE2(bar_readd, unsigned int, (_offset),	\
664 		    uint32_t, (_edp)->ed_u32[0]);			\
665 									\
666 		_NOTE(CONSTANTCONDITION)				\
667 		if (_lock)						\
668 			SFXGE_BAR_UNLOCK(_esbp);			\
669 	_NOTE(CONSTANTCONDITION)					\
670 	} while (B_FALSE)
671 
672 #if defined(SFXGE_USE_BUS_SPACE_8)
673 #define	EFSYS_BAR_READQ(_esbp, _offset, _eqp)				\
674 	do {								\
675 		_NOTE(CONSTANTCONDITION)				\
676 		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
677 		    sizeof (efx_qword_t)),				\
678 		    ("not power of 2 aligned"));			\
679 									\
680 		SFXGE_BAR_LOCK(_esbp);					\
681 									\
682 		(_eqp)->eq_u64[0] = bus_space_read_stream_8(		\
683 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
684 		    (_offset));						\
685 									\
686 		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
687 		    uint32_t, (_eqp)->eq_u32[1],			\
688 		    uint32_t, (_eqp)->eq_u32[0]);			\
689 									\
690 		SFXGE_BAR_UNLOCK(_esbp);				\
691 	_NOTE(CONSTANTCONDITION)					\
692 	} while (B_FALSE)
693 
694 #define	EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)			\
695 	do {								\
696 		_NOTE(CONSTANTCONDITION)				\
697 		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
698 		    sizeof (efx_oword_t)),				\
699 		    ("not power of 2 aligned"));			\
700 									\
701 		_NOTE(CONSTANTCONDITION)				\
702 		if (_lock)						\
703 			SFXGE_BAR_LOCK(_esbp);				\
704 									\
705 		(_eop)->eo_u64[0] = bus_space_read_stream_8(		\
706 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
707 		    (_offset));						\
708 		(_eop)->eo_u64[1] = bus_space_read_stream_8(		\
709 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
710 		    (_offset) + 8);					\
711 									\
712 		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
713 		    uint32_t, (_eop)->eo_u32[3],			\
714 		    uint32_t, (_eop)->eo_u32[2],			\
715 		    uint32_t, (_eop)->eo_u32[1],			\
716 		    uint32_t, (_eop)->eo_u32[0]);			\
717 									\
718 		_NOTE(CONSTANTCONDITION)				\
719 		if (_lock)						\
720 			SFXGE_BAR_UNLOCK(_esbp);			\
721 	_NOTE(CONSTANTCONDITION)					\
722 	} while (B_FALSE)
723 
724 #else
725 #define	EFSYS_BAR_READQ(_esbp, _offset, _eqp)				\
726 	do {								\
727 		_NOTE(CONSTANTCONDITION)				\
728 		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
729 		    sizeof (efx_qword_t)),				\
730 		    ("not power of 2 aligned"));			\
731 									\
732 		SFXGE_BAR_LOCK(_esbp);					\
733 									\
734 		(_eqp)->eq_u32[0] = bus_space_read_stream_4(		\
735 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
736 		    (_offset));						\
737 		(_eqp)->eq_u32[1] = bus_space_read_stream_4(		\
738 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
739 		    (_offset) + 4);					\
740 									\
741 		EFSYS_PROBE3(bar_readq, unsigned int, (_offset),	\
742 		    uint32_t, (_eqp)->eq_u32[1],			\
743 		    uint32_t, (_eqp)->eq_u32[0]);			\
744 									\
745 		SFXGE_BAR_UNLOCK(_esbp);				\
746 	_NOTE(CONSTANTCONDITION)					\
747 	} while (B_FALSE)
748 
749 #define	EFSYS_BAR_READO(_esbp, _offset, _eop, _lock)			\
750 	do {								\
751 		_NOTE(CONSTANTCONDITION)				\
752 		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
753 		    sizeof (efx_oword_t)),				\
754 		    ("not power of 2 aligned"));			\
755 									\
756 		_NOTE(CONSTANTCONDITION)				\
757 		if (_lock)						\
758 			SFXGE_BAR_LOCK(_esbp);				\
759 									\
760 		(_eop)->eo_u32[0] = bus_space_read_stream_4(		\
761 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
762 		    (_offset));						\
763 		(_eop)->eo_u32[1] = bus_space_read_stream_4(		\
764 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
765 		    (_offset) + 4);					\
766 		(_eop)->eo_u32[2] = bus_space_read_stream_4(		\
767 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
768 		    (_offset) + 8);					\
769 		(_eop)->eo_u32[3] = bus_space_read_stream_4(		\
770 		    (_esbp)->esb_tag, (_esbp)->esb_handle,		\
771 		    (_offset) + 12);					\
772 									\
773 		EFSYS_PROBE5(bar_reado, unsigned int, (_offset),	\
774 		    uint32_t, (_eop)->eo_u32[3],			\
775 		    uint32_t, (_eop)->eo_u32[2],			\
776 		    uint32_t, (_eop)->eo_u32[1],			\
777 		    uint32_t, (_eop)->eo_u32[0]);			\
778 									\
779 		_NOTE(CONSTANTCONDITION)				\
780 		if (_lock)						\
781 			SFXGE_BAR_UNLOCK(_esbp);			\
782 	_NOTE(CONSTANTCONDITION)					\
783 	} while (B_FALSE)
784 #endif
785 
786 #define	EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock)			\
787 	do {								\
788 		_NOTE(CONSTANTCONDITION)				\
789 		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
790 		    sizeof (efx_dword_t)),				\
791 		    ("not power of 2 aligned"));			\
792 									\
793 		_NOTE(CONSTANTCONDITION)				\
794 		if (_lock)						\
795 			SFXGE_BAR_LOCK(_esbp);				\
796 									\
797 		EFSYS_PROBE2(bar_writed, unsigned int, (_offset),	\
798 		    uint32_t, (_edp)->ed_u32[0]);			\
799 									\
 800 		/*							\
 801 		 * Make sure that previous writes to the dword have	\
 802 		 * completed; this should be cheaper than a barrier	\
 803 		 * just after the write below.				\
 804 		 */							\
805 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
806 		    (_offset), sizeof (efx_dword_t),			\
807 		    BUS_SPACE_BARRIER_WRITE);				\
808 		bus_space_write_stream_4((_esbp)->esb_tag,		\
809 		    (_esbp)->esb_handle,				\
810 		    (_offset), (_edp)->ed_u32[0]);			\
811 									\
812 		_NOTE(CONSTANTCONDITION)				\
813 		if (_lock)						\
814 			SFXGE_BAR_UNLOCK(_esbp);			\
815 	_NOTE(CONSTANTCONDITION)					\
816 	} while (B_FALSE)
817 
818 #if defined(SFXGE_USE_BUS_SPACE_8)
819 #define	EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)				\
820 	do {								\
821 		_NOTE(CONSTANTCONDITION)				\
822 		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
823 		    sizeof (efx_qword_t)),				\
824 		    ("not power of 2 aligned"));			\
825 									\
826 		SFXGE_BAR_LOCK(_esbp);					\
827 									\
828 		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
829 		    uint32_t, (_eqp)->eq_u32[1],			\
830 		    uint32_t, (_eqp)->eq_u32[0]);			\
831 									\
 832 		/*							\
 833 		 * Make sure that previous writes to the qword have	\
 834 		 * completed; this should be cheaper than a barrier	\
 835 		 * just after the write below.				\
 836 		 */							\
837 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
838 		    (_offset), sizeof (efx_qword_t),			\
839 		    BUS_SPACE_BARRIER_WRITE);				\
840 		bus_space_write_stream_8((_esbp)->esb_tag,		\
841 		    (_esbp)->esb_handle,				\
842 		    (_offset), (_eqp)->eq_u64[0]);			\
843 									\
844 		SFXGE_BAR_UNLOCK(_esbp);				\
845 	_NOTE(CONSTANTCONDITION)					\
846 	} while (B_FALSE)
847 #else
848 #define	EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp)				\
849 	do {								\
850 		_NOTE(CONSTANTCONDITION)				\
851 		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
852 		    sizeof (efx_qword_t)),				\
853 		    ("not power of 2 aligned"));			\
854 									\
855 		SFXGE_BAR_LOCK(_esbp);					\
856 									\
857 		EFSYS_PROBE3(bar_writeq, unsigned int, (_offset),	\
858 		    uint32_t, (_eqp)->eq_u32[1],			\
859 		    uint32_t, (_eqp)->eq_u32[0]);			\
860 									\
 861 		/*							\
 862 		 * Make sure that previous writes to the qword have	\
 863 		 * completed; this should be cheaper than a barrier	\
 864 		 * just after the last write below.			\
 865 		 */							\
866 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
867 		    (_offset), sizeof (efx_qword_t),			\
868 		    BUS_SPACE_BARRIER_WRITE);				\
869 		bus_space_write_stream_4((_esbp)->esb_tag,		\
870 		    (_esbp)->esb_handle,				\
871 		    (_offset), (_eqp)->eq_u32[0]);			\
 872 		/*							\
 873 		 * The last dword must be written last, so barrier	\
 874 		 * the entire qword so that the writes above and	\
 875 		 * below are not reordered.				\
 876 		 */							\
877 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
878 		    (_offset), sizeof (efx_qword_t),			\
879 		    BUS_SPACE_BARRIER_WRITE);				\
880 		bus_space_write_stream_4((_esbp)->esb_tag,		\
881 		    (_esbp)->esb_handle,				\
882 		    (_offset) + 4, (_eqp)->eq_u32[1]);			\
883 									\
884 		SFXGE_BAR_UNLOCK(_esbp);				\
885 	_NOTE(CONSTANTCONDITION)					\
886 	} while (B_FALSE)
887 #endif
888 
889 /*
 890  * Guarantees 64-bit aligned 64-bit writes to a write-combined BAR mapping
 891  * (required by the PIO hardware).
892  */
893 #define	EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp)			\
894 	do {								\
895 		_NOTE(CONSTANTCONDITION)				\
896 		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
897 		    sizeof (efx_qword_t)),				\
898 		    ("not power of 2 aligned"));			\
899 									\
900 		(void) (_esbp);						\
901 									\
902 		/* FIXME: Perform a 64-bit write */			\
903 		KASSERT(0, ("not implemented"));			\
904 									\
905 	_NOTE(CONSTANTCONDITION)					\
906 	} while (B_FALSE)
907 
908 #if defined(SFXGE_USE_BUS_SPACE_8)
909 #define	EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)			\
910 	do {								\
911 		_NOTE(CONSTANTCONDITION)				\
912 		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
913 		    sizeof (efx_oword_t)),				\
914 		    ("not power of 2 aligned"));			\
915 									\
916 		_NOTE(CONSTANTCONDITION)				\
917 		if (_lock)						\
918 			SFXGE_BAR_LOCK(_esbp);				\
919 									\
920 		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
921 		    uint32_t, (_eop)->eo_u32[3],			\
922 		    uint32_t, (_eop)->eo_u32[2],			\
923 		    uint32_t, (_eop)->eo_u32[1],			\
924 		    uint32_t, (_eop)->eo_u32[0]);			\
925 									\
 926 		/*							\
 927 		 * Make sure that previous writes to the oword have	\
 928 		 * completed; this should be cheaper than a barrier	\
 929 		 * just after the last write below.			\
 930 		 */							\
931 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
932 		    (_offset), sizeof (efx_oword_t),			\
933 		    BUS_SPACE_BARRIER_WRITE);				\
934 		bus_space_write_stream_8((_esbp)->esb_tag,		\
935 		    (_esbp)->esb_handle,				\
936 		    (_offset), (_eop)->eo_u64[0]);			\
 937 		/*							\
 938 		 * The last qword must be written last, so barrier	\
 939 		 * the entire oword so that the writes above and	\
 940 		 * below are not reordered.				\
 941 		 */							\
942 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
943 		    (_offset), sizeof (efx_oword_t),			\
944 		    BUS_SPACE_BARRIER_WRITE);				\
945 		bus_space_write_stream_8((_esbp)->esb_tag,		\
946 		    (_esbp)->esb_handle,				\
947 		    (_offset) + 8, (_eop)->eo_u64[1]);			\
948 									\
949 		_NOTE(CONSTANTCONDITION)				\
950 		if (_lock)						\
951 			SFXGE_BAR_UNLOCK(_esbp);			\
952 	_NOTE(CONSTANTCONDITION)					\
953 	} while (B_FALSE)
954 
955 #else
956 #define	EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock)			\
957 	do {								\
958 		_NOTE(CONSTANTCONDITION)				\
959 		KASSERT(EFX_IS_P2ALIGNED(size_t, _offset,		\
960 		    sizeof (efx_oword_t)),				\
961 		    ("not power of 2 aligned"));			\
962 									\
963 		_NOTE(CONSTANTCONDITION)				\
964 		if (_lock)						\
965 			SFXGE_BAR_LOCK(_esbp);				\
966 									\
967 		EFSYS_PROBE5(bar_writeo, unsigned int, (_offset),	\
968 		    uint32_t, (_eop)->eo_u32[3],			\
969 		    uint32_t, (_eop)->eo_u32[2],			\
970 		    uint32_t, (_eop)->eo_u32[1],			\
971 		    uint32_t, (_eop)->eo_u32[0]);			\
972 									\
 973 		/*							\
 974 		 * Make sure that previous writes to the oword have	\
 975 		 * completed; this should be cheaper than a barrier	\
 976 		 * just after the last write below.			\
 977 		 */							\
978 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
979 		    (_offset), sizeof (efx_oword_t),			\
980 		    BUS_SPACE_BARRIER_WRITE);				\
981 		bus_space_write_stream_4((_esbp)->esb_tag,		\
982 		    (_esbp)->esb_handle,				\
983 		    (_offset), (_eop)->eo_u32[0]);			\
984 		bus_space_write_stream_4((_esbp)->esb_tag,		\
985 		    (_esbp)->esb_handle,				\
986 		    (_offset) + 4, (_eop)->eo_u32[1]);			\
987 		bus_space_write_stream_4((_esbp)->esb_tag,		\
988 		    (_esbp)->esb_handle,				\
989 		    (_offset) + 8, (_eop)->eo_u32[2]);			\
 990 		/*							\
 991 		 * The last dword must be written last, so barrier	\
 992 		 * the entire oword so that the writes above and	\
 993 		 * below are not reordered.				\
 994 		 */							\
995 		bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
996 		    (_offset), sizeof (efx_oword_t),			\
997 		    BUS_SPACE_BARRIER_WRITE);				\
998 		bus_space_write_stream_4((_esbp)->esb_tag,		\
999 		    (_esbp)->esb_handle,				\
1000 		    (_offset) + 12, (_eop)->eo_u32[3]);			\
1001 									\
1002 		_NOTE(CONSTANTCONDITION)				\
1003 		if (_lock)						\
1004 			SFXGE_BAR_UNLOCK(_esbp);			\
1005 	_NOTE(CONSTANTCONDITION)					\
1006 	} while (B_FALSE)
1007 #endif
1008 
1009 /* Use the standard octo-word write for doorbell writes */
1010 #define	EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop)			\
1011 	do {								\
1012 		EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE);	\
1013 	_NOTE(CONSTANTCONDITION)					\
1014 	} while (B_FALSE)
1015 
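/*
 * Usage sketch for a doorbell write: EFSYS_BAR_DOORBELL_WRITEO() above is
 * EFSYS_BAR_WRITEO() without taking the BAR lock.  EFX_POPULATE_OWORD_1()
 * and EFX_DWORD_0 come from efx_types.h; my_doorbell_example() and the
 * register offset are hypothetical.
 */
#if 0
static void
my_doorbell_example(efsys_bar_t *esbp, unsigned int wptr)
{
	efx_oword_t oword;

	EFX_POPULATE_OWORD_1(oword, EFX_DWORD_0, wptr);
	EFSYS_BAR_DOORBELL_WRITEO(esbp, 0x420, &oword);
}
#endif
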
1016 /* SPIN */
1017 
1018 #define	EFSYS_SPIN(_us)							\
1019 	do {								\
1020 		DELAY(_us);						\
1021 	_NOTE(CONSTANTCONDITION)					\
1022 	} while (B_FALSE)
1023 
1024 #define	EFSYS_SLEEP	EFSYS_SPIN
1025 
1026 /* BARRIERS */
1027 
1028 #define	EFSYS_MEM_READ_BARRIER()	rmb()
1029 #define	EFSYS_PIO_WRITE_BARRIER()
1030 
1031 /* DMA SYNC */
1032 #define	EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size)		\
1033 	do {								\
1034 		bus_dmamap_sync((_esmp)->esm_tag,			\
1035 		    (_esmp)->esm_map,					\
1036 		    BUS_DMASYNC_POSTREAD);				\
1037 	_NOTE(CONSTANTCONDITION)					\
1038 	} while (B_FALSE)
1039 
1040 #define	EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size)		\
1041 	do {								\
1042 		bus_dmamap_sync((_esmp)->esm_tag,			\
1043 		    (_esmp)->esm_map,					\
1044 		    BUS_DMASYNC_PREWRITE);				\
1045 	_NOTE(CONSTANTCONDITION)					\
1046 	} while (B_FALSE)
1047 
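/*
 * Usage sketch for the DMA sync macros: bracket CPU accesses to shared DMA
 * memory.  Note that this implementation syncs the whole map and ignores
 * the offset and size arguments.  my_dma_sync_example() is hypothetical.
 */
#if 0
static void
my_dma_sync_example(efsys_mem_t *esmp)
{
	efx_qword_t event;

	/* Before the CPU reads data the device may have written. */
	EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, sizeof (event));
	EFSYS_MEM_READQ(esmp, 0, &event);

	/* After the CPU writes data the device will read. */
	EFSYS_MEM_WRITEQ(esmp, 0, &event);
	EFSYS_DMA_SYNC_FOR_DEVICE(esmp, 0, sizeof (event));
}
#endif
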
1048 /* TIMESTAMP */
1049 
1050 typedef	clock_t	efsys_timestamp_t;
1051 
1052 #define	EFSYS_TIMESTAMP(_usp)						\
1053 	do {								\
1054 		clock_t now;						\
1055 									\
1056 		now = ticks;						\
1057 		*(_usp) = now * hz / 1000000;				\
1058 	_NOTE(CONSTANTCONDITION)					\
1059 	} while (B_FALSE)
1060 
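/*
 * Usage sketch for EFSYS_TIMESTAMP()/EFSYS_SPIN(): a bounded polling loop.
 * my_poll_example() and its parameters are hypothetical, and the timeout is
 * expressed in whatever units EFSYS_TIMESTAMP() yields.
 */
#if 0
static int
my_poll_example(volatile int *donep, efsys_timestamp_t timeout)
{
	efsys_timestamp_t start, now;

	EFSYS_TIMESTAMP(&start);
	do {
		if (*donep)
			return (0);
		EFSYS_SPIN(10);
		EFSYS_TIMESTAMP(&now);
	} while (now - start < timeout);

	return (ETIMEDOUT);
}
#endif
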
1061 /* KMEM */
1062 
1063 #define	EFSYS_KMEM_ALLOC(_esip, _size, _p)				\
1064 	do {								\
1065 		(_esip) = (_esip);					\
1066 		/*							\
1067 		 * The macro is used in non-sleepable contexts, for	\
1068 		 * example, while holding a mutex.			\
1069 		 */							\
1070 		(_p) = malloc((_size), M_SFXGE, M_NOWAIT|M_ZERO);	\
1071 	_NOTE(CONSTANTCONDITION)					\
1072 	} while (B_FALSE)
1073 
1074 #define	EFSYS_KMEM_FREE(_esip, _size, _p)				\
1075 	do {								\
1076 		(void) (_esip);						\
1077 		(void) (_size);						\
1078 		free((_p), M_SFXGE);					\
1079 	_NOTE(CONSTANTCONDITION)					\
1080 	} while (B_FALSE)
1081 
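/*
 * Usage sketch for the KMEM macros: because the allocation uses M_NOWAIT it
 * can fail, so the result must be checked for NULL.  struct my_state and
 * my_kmem_example() are hypothetical.
 */
#if 0
struct my_state { int ms_refcount; };

static void
my_kmem_example(efsys_identifier_t *esip)
{
	struct my_state *sp;

	EFSYS_KMEM_ALLOC(esip, sizeof (*sp), sp);
	if (sp == NULL)
		return;

	/* ... use sp ... */

	EFSYS_KMEM_FREE(esip, sizeof (*sp), sp);
}
#endif
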
1082 /* LOCK */
1083 
1084 typedef struct efsys_lock_s {
1085 	struct mtx	lock;
1086 	char		lock_name[SFXGE_LOCK_NAME_MAX];
1087 } efsys_lock_t;
1088 
1089 #define	SFXGE_EFSYS_LOCK_INIT(_eslp, _ifname, _label)			\
1090 	do {								\
1091 		efsys_lock_t *__eslp = (_eslp);				\
1092 									\
1093 		snprintf((__eslp)->lock_name,				\
1094 			 sizeof((__eslp)->lock_name),			\
1095 			 "%s:%s", (_ifname), (_label));			\
1096 		mtx_init(&(__eslp)->lock, (__eslp)->lock_name,		\
1097 			 NULL, MTX_DEF);				\
1098 	} while (B_FALSE)
1099 #define	SFXGE_EFSYS_LOCK_DESTROY(_eslp)					\
1100 	mtx_destroy(&(_eslp)->lock)
1101 #define	SFXGE_EFSYS_LOCK(_eslp)						\
1102 	mtx_lock(&(_eslp)->lock)
1103 #define	SFXGE_EFSYS_UNLOCK(_eslp)					\
1104 	mtx_unlock(&(_eslp)->lock)
1105 #define	SFXGE_EFSYS_LOCK_ASSERT_OWNED(_eslp)				\
1106 	mtx_assert(&(_eslp)->lock, MA_OWNED)
1107 
1108 typedef int efsys_lock_state_t;
1109 
1110 #define	EFSYS_LOCK_MAGIC	0x000010c4
1111 
1112 #define	EFSYS_LOCK(_lockp, _state)					\
1113 	do {								\
1114 		SFXGE_EFSYS_LOCK(_lockp);				\
1115 		(_state) = EFSYS_LOCK_MAGIC;				\
1116 	_NOTE(CONSTANTCONDITION)					\
1117 	} while (B_FALSE)
1118 
1119 #define	EFSYS_UNLOCK(_lockp, _state)					\
1120 	do {								\
1121 		if ((_state) != EFSYS_LOCK_MAGIC)			\
1122 			KASSERT(B_FALSE, ("not locked"));		\
1123 		SFXGE_EFSYS_UNLOCK(_lockp);				\
1124 	_NOTE(CONSTANTCONDITION)					\
1125 	} while (B_FALSE)
1126 
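/*
 * Usage sketch for the lock macros: the _state argument is set to
 * EFSYS_LOCK_MAGIC by EFSYS_LOCK() and checked again by EFSYS_UNLOCK().
 * my_lock_example() is hypothetical.
 */
#if 0
static void
my_lock_example(efsys_lock_t *lockp)
{
	efsys_lock_state_t state;

	EFSYS_LOCK(lockp, state);
	/* ... access data protected by the lock ... */
	EFSYS_UNLOCK(lockp, state);
}
#endif
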
1127 /* STAT */
1128 
1129 typedef uint64_t		efsys_stat_t;
1130 
1131 #define	EFSYS_STAT_INCR(_knp, _delta) 					\
1132 	do {								\
1133 		*(_knp) += (_delta);					\
1134 	_NOTE(CONSTANTCONDITION)					\
1135 	} while (B_FALSE)
1136 
1137 #define	EFSYS_STAT_DECR(_knp, _delta) 					\
1138 	do {								\
1139 		*(_knp) -= (_delta);					\
1140 	_NOTE(CONSTANTCONDITION)					\
1141 	} while (B_FALSE)
1142 
1143 #define	EFSYS_STAT_SET(_knp, _val)					\
1144 	do {								\
1145 		*(_knp) = (_val);					\
1146 	_NOTE(CONSTANTCONDITION)					\
1147 	} while (B_FALSE)
1148 
1149 #define	EFSYS_STAT_SET_QWORD(_knp, _valp)				\
1150 	do {								\
1151 		*(_knp) = le64toh((_valp)->eq_u64[0]);			\
1152 	_NOTE(CONSTANTCONDITION)					\
1153 	} while (B_FALSE)
1154 
1155 #define	EFSYS_STAT_SET_DWORD(_knp, _valp)				\
1156 	do {								\
1157 		*(_knp) = le32toh((_valp)->ed_u32[0]);			\
1158 	_NOTE(CONSTANTCONDITION)					\
1159 	} while (B_FALSE)
1160 
1161 #define	EFSYS_STAT_INCR_QWORD(_knp, _valp)				\
1162 	do {								\
1163 		*(_knp) += le64toh((_valp)->eq_u64[0]);			\
1164 	_NOTE(CONSTANTCONDITION)					\
1165 	} while (B_FALSE)
1166 
1167 #define	EFSYS_STAT_SUBR_QWORD(_knp, _valp)				\
1168 	do {								\
1169 		*(_knp) -= le64toh((_valp)->eq_u64[0]);			\
1170 	_NOTE(CONSTANTCONDITION)					\
1171 	} while (B_FALSE)
1172 
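/*
 * Usage sketch for the stat macros: fold a little-endian qword (for example
 * from a MAC stats DMA buffer) into an efsys_stat_t counter.
 * my_stat_example() is hypothetical.
 */
#if 0
static void
my_stat_example(efsys_stat_t *statp, const efx_qword_t *valp)
{
	/* Byte-swaps on big-endian hosts via le64toh(). */
	EFSYS_STAT_INCR_QWORD(statp, valp);
}
#endif
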
1173 /* ERR */
1174 
1175 extern void	sfxge_err(efsys_identifier_t *, unsigned int,
1176 		    uint32_t, uint32_t);
1177 
1178 #if EFSYS_OPT_DECODE_INTR_FATAL
1179 #define	EFSYS_ERR(_esip, _code, _dword0, _dword1)			\
1180 	do {								\
1181 		sfxge_err((_esip), (_code), (_dword0), (_dword1));	\
1182 	_NOTE(CONSTANTCONDITION)					\
1183 	} while (B_FALSE)
1184 #endif
1185 
1186 /* ASSERT */
1187 
1188 #define	EFSYS_ASSERT(_exp) do {						\
1189 	if (!(_exp))							\
1190 		panic("%s", #_exp);					\
1191 	} while (0)
1192 
1193 #define	EFSYS_ASSERT3(_x, _op, _y, _t) do {				\
1194 	const _t __x = (_t)(_x);					\
1195 	const _t __y = (_t)(_y);					\
1196 	if (!(__x _op __y))						\
1197 		panic("assertion failed at %s:%u", __FILE__, __LINE__);	\
1198 	} while(0)
1199 
1200 #define	EFSYS_ASSERT3U(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uint64_t)
1201 #define	EFSYS_ASSERT3S(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, int64_t)
1202 #define	EFSYS_ASSERT3P(_x, _op, _y)	EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
1203 
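/*
 * Usage sketch for the assertion macros: EFSYS_ASSERT3U/3S/3P compare after
 * casting both operands to a common type, and all of them panic
 * unconditionally (there is no DEBUG guard in this header).
 * my_assert_example() is hypothetical.
 */
#if 0
static void
my_assert_example(unsigned int index, unsigned int nentries, void *base)
{
	EFSYS_ASSERT3U(index, <, nentries);	/* unsigned comparison */
	EFSYS_ASSERT3P(base, !=, NULL);		/* pointer comparison */
}
#endif
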
1204 /* ROTATE */
1205 
1206 #define	EFSYS_HAS_ROTL_DWORD 0
1207 
1208 #ifdef	__cplusplus
1209 }
1210 #endif
1211 
1212 #endif	/* _SYS_EFSYS_H */
1213