/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _LINUX_IO_H_
#define _LINUX_IO_H_

#include <machine/vm.h>
#include <sys/endian.h>
#include <sys/types.h>

#include <linux/compiler.h>
#include <linux/types.h>

/*
 * XXX This is all x86 specific. It should be bus space access.
 */

/* Access MMIO registers atomically without barriers and byte swapping. */

static inline uint8_t
__raw_readb(const volatile void *addr)
{
	return (*(const volatile uint8_t *)addr);
}
#define __raw_readb(addr) __raw_readb(addr)

static inline void
__raw_writeb(uint8_t v, volatile void *addr)
{
	*(volatile uint8_t *)addr = v;
}
#define __raw_writeb(v, addr) __raw_writeb(v, addr)

static inline uint16_t
__raw_readw(const volatile void *addr)
{
	return (*(const volatile uint16_t *)addr);
}
#define __raw_readw(addr) __raw_readw(addr)

static inline void
__raw_writew(uint16_t v, volatile void *addr)
{
	*(volatile uint16_t *)addr = v;
}
#define __raw_writew(v, addr) __raw_writew(v, addr)

static inline uint32_t
__raw_readl(const volatile void *addr)
{
	return (*(const volatile uint32_t *)addr);
}
#define __raw_readl(addr) __raw_readl(addr)

static inline void
__raw_writel(uint32_t v, volatile void *addr)
{
	*(volatile uint32_t *)addr = v;
}
#define __raw_writel(v, addr) __raw_writel(v, addr)

#ifdef __LP64__
static inline uint64_t
__raw_readq(const volatile void *addr)
{
	return (*(const volatile uint64_t *)addr);
}
#define __raw_readq(addr) __raw_readq(addr)

static inline void
__raw_writeq(uint64_t v, volatile void *addr)
{
	*(volatile uint64_t *)addr = v;
}
#define __raw_writeq(v, addr) __raw_writeq(v, addr)
#endif

#define mmiowb() barrier()
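
/*
 * Example (illustrative only; the doorbell page pointer and the 0x40 offset
 * are hypothetical): the __raw_*() accessors perform a single volatile load
 * or store with no compiler barrier and no byte swapping, so they suit tight
 * copy loops or paths where ordering is enforced separately.
 *
 *	static inline void
 *	ring_doorbell_raw(void *db_page, uint32_t value)
 *	{
 *		__raw_writel(value, (char *)db_page + 0x40);
 *		mmiowb();
 *	}
 */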

/* Access little-endian MMIO registers atomically with memory barriers. */

#undef readb
static inline uint8_t
readb(const volatile void *addr)
{
	uint8_t v;

	__compiler_membar();
	v = *(const volatile uint8_t *)addr;
	__compiler_membar();
	return (v);
}
#define readb(addr) readb(addr)

#undef writeb
static inline void
writeb(uint8_t v, volatile void *addr)
{
	__compiler_membar();
	*(volatile uint8_t *)addr = v;
	__compiler_membar();
}
#define writeb(v, addr) writeb(v, addr)

#undef readw
static inline uint16_t
readw(const volatile void *addr)
{
	uint16_t v;

	__compiler_membar();
	v = *(const volatile uint16_t *)addr;
	__compiler_membar();
	return (v);
}
#define readw(addr) readw(addr)

#undef writew
static inline void
writew(uint16_t v, volatile void *addr)
{
	__compiler_membar();
	*(volatile uint16_t *)addr = v;
	__compiler_membar();
}
#define writew(v, addr) writew(v, addr)

#undef readl
static inline uint32_t
readl(const volatile void *addr)
{
	uint32_t v;

	__compiler_membar();
	v = *(const volatile uint32_t *)addr;
	__compiler_membar();
	return (v);
}
#define readl(addr) readl(addr)

#undef writel
static inline void
writel(uint32_t v, volatile void *addr)
{
	__compiler_membar();
	*(volatile uint32_t *)addr = v;
	__compiler_membar();
}
#define writel(v, addr) writel(v, addr)

#undef readq
#undef writeq
#ifdef __LP64__
static inline uint64_t
readq(const volatile void *addr)
{
	uint64_t v;

	__compiler_membar();
	v = *(const volatile uint64_t *)addr;
	__compiler_membar();
	return (v);
}
#define readq(addr) readq(addr)

static inline void
writeq(uint64_t v, volatile void *addr)
{
	__compiler_membar();
	*(volatile uint64_t *)addr = v;
	__compiler_membar();
}
#define writeq(v, addr) writeq(v, addr)
#endif
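
/*
 * Example (illustrative only; "regs" and the 0x10/0x14 offsets are
 * hypothetical): readl()/writel() bracket each access with compiler
 * barriers, which is the usual choice for ordinary register I/O on a
 * mapping obtained from ioremap().
 *
 *	static inline uint32_t
 *	chip_ack_status(void *regs)
 *	{
 *		uint32_t status;
 *
 *		status = readl((char *)regs + 0x10);
 *		writel(status, (char *)regs + 0x14);
 *		return (status);
 *	}
 */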

/* Access little-endian MMIO registers atomically without memory barriers. */

#undef readb_relaxed
static inline uint8_t
readb_relaxed(const volatile void *addr)
{
	return (*(const volatile uint8_t *)addr);
}
#define readb_relaxed(addr) readb_relaxed(addr)

#undef writeb_relaxed
static inline void
writeb_relaxed(uint8_t v, volatile void *addr)
{
	*(volatile uint8_t *)addr = v;
}
#define writeb_relaxed(v, addr) writeb_relaxed(v, addr)

#undef readw_relaxed
static inline uint16_t
readw_relaxed(const volatile void *addr)
{
	return (*(const volatile uint16_t *)addr);
}
#define readw_relaxed(addr) readw_relaxed(addr)

#undef writew_relaxed
static inline void
writew_relaxed(uint16_t v, volatile void *addr)
{
	*(volatile uint16_t *)addr = v;
}
#define writew_relaxed(v, addr) writew_relaxed(v, addr)

#undef readl_relaxed
static inline uint32_t
readl_relaxed(const volatile void *addr)
{
	return (*(const volatile uint32_t *)addr);
}
#define readl_relaxed(addr) readl_relaxed(addr)

#undef writel_relaxed
static inline void
writel_relaxed(uint32_t v, volatile void *addr)
{
	*(volatile uint32_t *)addr = v;
}
#define writel_relaxed(v, addr) writel_relaxed(v, addr)

#undef readq_relaxed
#undef writeq_relaxed
#ifdef __LP64__
static inline uint64_t
readq_relaxed(const volatile void *addr)
{
	return (*(const volatile uint64_t *)addr);
}
#define readq_relaxed(addr) readq_relaxed(addr)

static inline void
writeq_relaxed(uint64_t v, volatile void *addr)
{
	*(volatile uint64_t *)addr = v;
}
#define writeq_relaxed(v, addr) writeq_relaxed(v, addr)
#endif
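
/*
 * Example (illustrative only; names and offsets are hypothetical): the
 * *_relaxed() variants omit the compiler barriers, which can help when
 * reading many registers in a row and ordering against other memory
 * accesses is not required.
 *
 *	static inline void
 *	copy_stats_regs(void *regs, uint32_t *buf, int n)
 *	{
 *		int i;
 *
 *		for (i = 0; i < n; i++)
 *			buf[i] = readl_relaxed((char *)regs + 4 * i);
 *	}
 */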

/* XXX On Linux ioread and iowrite handle both MMIO and port IO. */

#undef ioread8
static inline uint8_t
ioread8(const volatile void *addr)
{
	return (readb(addr));
}
#define ioread8(addr) ioread8(addr)

#undef ioread16
static inline uint16_t
ioread16(const volatile void *addr)
{
	return (readw(addr));
}
#define ioread16(addr) ioread16(addr)

#undef ioread16be
static inline uint16_t
ioread16be(const volatile void *addr)
{
	return (bswap16(readw(addr)));
}
#define ioread16be(addr) ioread16be(addr)

#undef ioread32
static inline uint32_t
ioread32(const volatile void *addr)
{
	return (readl(addr));
}
#define ioread32(addr) ioread32(addr)

#undef ioread32be
static inline uint32_t
ioread32be(const volatile void *addr)
{
	return (bswap32(readl(addr)));
}
#define ioread32be(addr) ioread32be(addr)

#undef iowrite8
static inline void
iowrite8(uint8_t v, volatile void *addr)
{
	writeb(v, addr);
}
#define iowrite8(v, addr) iowrite8(v, addr)

#undef iowrite16
static inline void
iowrite16(uint16_t v, volatile void *addr)
{
	writew(v, addr);
}
#define iowrite16(v, addr) iowrite16(v, addr)

#undef iowrite32
static inline void
iowrite32(uint32_t v, volatile void *addr)
{
	writel(v, addr);
}
#define iowrite32(v, addr) iowrite32(v, addr)

#undef iowrite32be
static inline void
iowrite32be(uint32_t v, volatile void *addr)
{
	writel(bswap32(v), addr);
}
#define iowrite32be(v, addr) iowrite32be(v, addr)
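
/*
 * Example (illustrative only; the 0x20 offset is hypothetical): the
 * ioread*be()/iowrite*be() helpers byte-swap the value, so a register the
 * device documents as big-endian comes back in host order on a
 * little-endian machine.
 *
 *	static inline uint32_t
 *	read_be_counter(void *regs)
 *	{
 *		return (ioread32be((char *)regs + 0x20));
 *	}
 */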

#if defined(__i386__) || defined(__amd64__)
static inline void
_outb(u_char data, u_int port)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}
#endif

#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__)
void *_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr);
#else
#define _ioremap_attr(...) NULL
#endif

#ifdef VM_MEMATTR_DEVICE
#define ioremap_nocache(addr, size) \
	_ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#define ioremap_wt(addr, size) \
	_ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#define ioremap(addr, size) \
	_ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#else
#define ioremap_nocache(addr, size) \
	_ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
#define ioremap_wt(addr, size) \
	_ioremap_attr((addr), (size), VM_MEMATTR_WRITE_THROUGH)
#define ioremap(addr, size) \
	_ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
#endif
#define ioremap_wc(addr, size) \
	_ioremap_attr((addr), (size), VM_MEMATTR_WRITE_COMBINING)
#define ioremap_wb(addr, size) \
	_ioremap_attr((addr), (size), VM_MEMATTR_WRITE_BACK)
void iounmap(void *addr);
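
/*
 * Example (illustrative only; the physical address, size and register
 * offset are hypothetical): a driver maps a device region with ioremap(),
 * accesses it with the readl()/writel() helpers above, and releases the
 * mapping with iounmap().
 *
 *	void *regs;
 *
 *	regs = ioremap(0xf0000000UL, 0x1000);
 *	if (regs != NULL) {
 *		writel(1, (char *)regs + 0x00);
 *		iounmap(regs);
 *	}
 */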

#define memset_io(a, b, c) memset((a), (b), (c))
#define memcpy_fromio(a, b, c) memcpy((a), (b), (c))
#define memcpy_toio(a, b, c) memcpy((a), (b), (c))

static inline void
__iowrite32_copy(void *to, void *from, size_t count)
{
	uint32_t *src;
	uint32_t *dst;
	int i;

	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
		__raw_writel(*src, dst);
}

static inline void
__iowrite64_copy(void *to, void *from, size_t count)
{
#ifdef __LP64__
	uint64_t *src;
	uint64_t *dst;
	int i;

	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
		__raw_writeq(*src, dst);
#else
	__iowrite32_copy(to, from, count * 2);
#endif
}
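
/*
 * Example (illustrative only; "wc_map", the 0x800 offset and the descriptor
 * layout are hypothetical): __iowrite64_copy() copies "count" 64-bit words
 * (pairs of 32-bit words on 32-bit platforms) to MMIO without barriers,
 * e.g. to post a descriptor through a write-combining mapping.
 *
 *	uint64_t desc[8] = { 0 };
 *
 *	__iowrite64_copy((char *)wc_map + 0x800, desc, 8);
 */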

enum {
	MEMREMAP_WB = 1 << 0,
	MEMREMAP_WT = 1 << 1,
	MEMREMAP_WC = 1 << 2,
};

static inline void *
memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	void *addr = NULL;

	if ((flags & MEMREMAP_WB) &&
	    (addr = ioremap_wb(offset, size)) != NULL)
		goto done;
	if ((flags & MEMREMAP_WT) &&
	    (addr = ioremap_wt(offset, size)) != NULL)
		goto done;
	if ((flags & MEMREMAP_WC) &&
	    (addr = ioremap_wc(offset, size)) != NULL)
		goto done;
done:
	return (addr);
}

static inline void
memunmap(void *addr)
{
	/* XXX May need to check if this is RAM */
	iounmap(addr);
}
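
/*
 * Example (illustrative only; the physical address and size are
 * hypothetical): memremap() tries the requested cache attributes in order
 * (write-back, then write-through, then write-combining) and returns the
 * first mapping that succeeds, or NULL; memunmap() releases it.
 *
 *	void *p;
 *
 *	p = memremap(0x80000000UL, 0x1000, MEMREMAP_WB | MEMREMAP_WC);
 *	if (p != NULL)
 *		memunmap(p);
 */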

#endif /* _LINUX_IO_H_ */