/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * All rights reserved.
 * Copyright 2019-2020 NXP
 *
 */

#ifndef __COMPAT_H
#define __COMPAT_H

#include <sched.h>
#include <stdint.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <linux/types.h>
#include <stdbool.h>
#include <ctype.h>
#include <malloc.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <limits.h>
#include <assert.h>
#include <dirent.h>
#include <inttypes.h>
#include <rte_byteorder.h>
#include <rte_atomic.h>
#include <rte_spinlock.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_malloc.h>

/* The following definitions are primarily to allow the single-source driver
 * interfaces to be included by arbitrary program code. I.e. for interfaces
 * that are also available in kernel space, these definitions provide
 * compatibility with the attributes and types used in those interfaces.
 */

/* Required compiler attributes */
#ifndef __maybe_unused
#define __maybe_unused __rte_unused
#endif
#ifndef __always_unused
#define __always_unused __rte_unused
#endif
#ifndef __packed
#define __packed __rte_packed
#endif
#ifndef noinline
#define noinline __rte_noinline
#endif
#define L1_CACHE_BYTES 64
#define ____cacheline_aligned __rte_aligned(L1_CACHE_BYTES)
#define __stringify_1(x) #x
#define __stringify(x) __stringify_1(x)

#ifdef ARRAY_SIZE
#undef ARRAY_SIZE
#endif
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Debugging */
#define prflush(fmt, args...) \
	do { \
		printf(fmt, ##args); \
		fflush(stdout); \
	} while (0)
#ifndef pr_crit
#define pr_crit(fmt, args...)	prflush("CRIT:" fmt, ##args)
#endif
#ifndef pr_err
#define pr_err(fmt, args...)	prflush("ERR:" fmt, ##args)
#endif
#ifndef pr_warn
#define pr_warn(fmt, args...)	prflush("WARN:" fmt, ##args)
#endif
#ifndef pr_info
#define pr_info(fmt, args...)	prflush(fmt, ##args)
#endif
#ifndef pr_debug
#ifdef RTE_LIBRTE_DPAA_DEBUG_BUS
#define pr_debug(fmt, args...)	printf(fmt, ##args)
#else
/* Expand to a do-while rather than bare "{}" so pr_debug() remains safe as
 * the body of an unbraced if/else.
 */
#define pr_debug(fmt, args...)	do { } while (0)
#endif
#endif
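
/*
 * Usage sketch for the logging wrappers above (illustrative only; the format
 * strings and the fqid/state values are made up for the example):
 *
 *	pr_err("portal init failed, err=%d\n", ret);
 *	pr_debug("fq %u: state %u\n", fqid, state);
 *
 * Each pr_* level expands to prflush(), which printf()s and then flushes
 * stdout so messages are not lost if the process aborts; pr_debug() compiles
 * away unless RTE_LIBRTE_DPAA_DEBUG_BUS is defined.
 */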

#define DPAA_BUG_ON(x) RTE_ASSERT(x)

/* Required types */
typedef uint8_t		u8;
typedef uint16_t	u16;
typedef uint32_t	u32;
typedef uint64_t	u64;
typedef uint64_t	dma_addr_t;
typedef cpu_set_t	cpumask_t;
typedef uint32_t	phandle;
typedef uint32_t	gfp_t;
typedef uint32_t	irqreturn_t;

#define ETHER_ADDR_LEN	6

#define IRQ_HANDLED	0
#define request_irq	qbman_request_irq
#define free_irq	qbman_free_irq

#define __iomem
#define GFP_KERNEL	0
#define __raw_readb(p)	(*(const volatile unsigned char *)(p))
#define __raw_readl(p)	(*(const volatile unsigned int *)(p))
#define __raw_writel(v, p) {*(volatile unsigned int *)(p) = (v); }

/* To be used as an upper limit only */
#define NR_CPUS		64

/* Waitqueue stuff */
typedef struct { }	wait_queue_head_t;
#define DECLARE_WAIT_QUEUE_HEAD(x) int dummy_##x __always_unused
#define wake_up(x)	do { } while (0)
/* I/O operations */
static inline u32 in_be32(volatile void *__p)
{
	volatile u32 *p = __p;
	return rte_be_to_cpu_32(*p);
}

static inline void out_be32(volatile void *__p, u32 val)
{
	volatile u32 *p = __p;
	*p = rte_cpu_to_be_32(val);
}
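
/*
 * Illustrative read-modify-write of a big-endian device register using the
 * accessors above (the register offset and bit mask are hypothetical):
 *
 *	volatile void *reg = base + 0x10;
 *	u32 v = in_be32(reg);
 *	out_be32(reg, v | 0x1);
 *
 * in_be32()/out_be32() perform the big-endian conversion internally, so
 * callers work with CPU-endian values throughout.
 */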

#define hwsync() rte_rmb()
#define lwsync() rte_wmb()

#define dcbt_ro(p) __builtin_prefetch(p, 0)
#define dcbt_rw(p) __builtin_prefetch(p, 1)

#if defined(RTE_ARCH_ARM)
#if defined(RTE_ARCH_64)
#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
#define dcbz_64(p) dcbz(p)
#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
#define dcbf_64(p) dcbf(p)
#define dccivac(p) { asm volatile("dc civac, %0" : : "r"(p) : "memory"); }

#define dcbit_ro(p) \
	do { \
		dccivac(p); \
		asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p)); \
	} while (0)

#else /* RTE_ARCH_32 */
#define dcbz(p) memset((p), 0, 32)
#define dcbz_64(p) memset((p), 0, 64)
#define dcbf(p) RTE_SET_USED(p)
#define dcbf_64(p) dcbf(p)
#define dccivac(p) RTE_SET_USED(p)
#define dcbit_ro(p) RTE_SET_USED(p)
#endif

#else
#define dcbz(p) RTE_SET_USED(p)
#define dcbz_64(p) dcbz(p)
#define dcbf(p) RTE_SET_USED(p)
#define dcbf_64(p) dcbf(p)
#define dccivac(p) RTE_SET_USED(p)
#define dcbit_ro(p) RTE_SET_USED(p)
#endif
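
/*
 * Note on the helpers above: dcbz()/dcbz_64() zero a cacheline, dcbf() cleans
 * (flushes) one, dccivac() cleans and invalidates, and dcbit_ro() invalidates
 * a line and prefetches the following one for read. For example, a driver
 * preparing a 64-byte hardware descriptor might do (desc is hypothetical):
 *
 *	dcbz_64(desc);		// zero the line without fetching it first
 *	...fill in desc...
 *	dcbf_64(desc);		// push it out toward the device
 *
 * On architectures without these primitives the macros intentionally compile
 * to no-ops via RTE_SET_USED().
 */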

#define barrier() { asm volatile ("" : : : "memory"); }
#define cpu_relax barrier

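/*
 * mfatb() reads the ARM generic timer (cntvct_el0) as a timebase. The counter
 * is read twice, and re-read until two consecutive reads agree (bounded by a
 * timeout), to guard against unstable counter reads; the result is scaled by
 * 64, which presumably matches the timebase-to-CPU-cycle ratio assumed
 * elsewhere in this codebase.
 */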
#if defined(RTE_ARCH_ARM64)
static inline uint64_t mfatb(void)
{
	uint64_t ret, ret_new, timeout = 200;

	asm volatile ("mrs %0, cntvct_el0" : "=r" (ret));
	asm volatile ("mrs %0, cntvct_el0" : "=r" (ret_new));
	while (ret != ret_new && timeout--) {
		ret = ret_new;
		asm volatile ("mrs %0, cntvct_el0" : "=r" (ret_new));
	}
	DPAA_BUG_ON(!timeout && (ret != ret_new));
	return ret * 64;
}
#else

#define mfatb rte_rdtsc

#endif

/* Spin for a few cycles without bothering the bus */
static inline void cpu_spin(int cycles)
{
	uint64_t now = mfatb();

	while (mfatb() < (now + cycles))
		;
}
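
/*
 * Example (illustrative): back off briefly between polls of a hardware
 * completion flag instead of hammering the bus:
 *
 *	while (!poll_done(portal))	// poll_done() is hypothetical
 *		cpu_spin(200);
 */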

/* Qman/Bman API inlines and macros */
#ifdef lower_32_bits
#undef lower_32_bits
#endif
#define lower_32_bits(x) ((u32)(x))

#ifdef upper_32_bits
#undef upper_32_bits
#endif
#define upper_32_bits(x) ((u32)(((x) >> 16) >> 16))
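
/*
 * upper_32_bits() shifts in two 16-bit steps so the macro stays well defined
 * even when x is only 32 bits wide (a single ">> 32" on a 32-bit value is
 * undefined behaviour in C). Typical use is splitting a DMA address across
 * two 32-bit registers, e.g. (reg_hi/reg_lo/addr are hypothetical):
 *
 *	out_be32(reg_hi, upper_32_bits(addr));
 *	out_be32(reg_lo, lower_32_bits(addr));
 */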

/*
 * Swap bytes of a 48-bit value.
 */
static inline uint64_t
__bswap_48(uint64_t x)
{
	return  ((x & 0x0000000000ffULL) << 40) |
		((x & 0x00000000ff00ULL) << 24) |
		((x & 0x000000ff0000ULL) <<  8) |
		((x & 0x0000ff000000ULL) >>  8) |
		((x & 0x00ff00000000ULL) >> 24) |
		((x & 0xff0000000000ULL) >> 40);
}

/*
 * Swap bytes of a 40-bit value.
 */
static inline uint64_t
__bswap_40(uint64_t x)
{
	return  ((x & 0x00000000ffULL) << 32) |
		((x & 0x000000ff00ULL) << 16) |
		((x & 0x0000ff0000ULL)) |
		((x & 0x00ff000000ULL) >> 16) |
		((x & 0xff00000000ULL) >> 32);
}

/*
 * Swap bytes of a 24-bit value.
 */
static inline uint32_t
__bswap_24(uint32_t x)
{
	return  ((x & 0x0000ffULL) << 16) |
		((x & 0x00ff00ULL)) |
		((x & 0xff0000ULL) >> 16);
}
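
/*
 * Worked example: __bswap_24(0x123456) keeps the middle byte in place and
 * exchanges the outer two, yielding 0x563412. The 40- and 48-bit variants
 * follow the same pattern over 5 and 6 bytes respectively.
 */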

#define be64_to_cpu(x) rte_be_to_cpu_64(x)
#define be32_to_cpu(x) rte_be_to_cpu_32(x)
#define be16_to_cpu(x) rte_be_to_cpu_16(x)

#define cpu_to_be64(x) rte_cpu_to_be_64(x)
#if !defined(cpu_to_be32)
#define cpu_to_be32(x) rte_cpu_to_be_32(x)
#endif
#define cpu_to_be16(x) rte_cpu_to_be_16(x)

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

#define cpu_to_be48(x) __bswap_48(x)
#define be48_to_cpu(x) __bswap_48(x)

#define cpu_to_be40(x) __bswap_40(x)
#define be40_to_cpu(x) __bswap_40(x)

#define cpu_to_be24(x) __bswap_24(x)
#define be24_to_cpu(x) __bswap_24(x)

#else /* RTE_BIG_ENDIAN */

#define cpu_to_be48(x) (x)
#define be48_to_cpu(x) (x)

#define cpu_to_be40(x) (x)
#define be40_to_cpu(x) (x)

#define cpu_to_be24(x) (x)
#define be24_to_cpu(x) (x)

#endif /* RTE_BIG_ENDIAN */
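
/*
 * The odd-width conversions mirror the usual cpu_to_beXX()/beXX_to_cpu()
 * pattern: they byte-swap on little-endian hosts and are identity macros on
 * big-endian ones. They suit hardware descriptors that pack odd-width fields
 * such as 40-bit addresses, e.g. (fd and phys_addr are hypothetical):
 *
 *	fd->addr = cpu_to_be40(phys_addr);
 */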

/* memcpy() replacements - when copying aligned words or shorts, and the
 * alignments are known in advance, avoid the generic memcpy()
 */
#define CONFIG_TRY_BETTER_MEMCPY

#ifdef CONFIG_TRY_BETTER_MEMCPY
static inline void copy_words(void *dest, const void *src, size_t sz)
{
	u32 *__dest = dest;
	const u32 *__src = src;
	size_t __sz = sz >> 2;

	DPAA_BUG_ON((unsigned long)dest & 0x3);
	DPAA_BUG_ON((unsigned long)src & 0x3);
	DPAA_BUG_ON(sz & 0x3);
	while (__sz--)
		*(__dest++) = *(__src++);
}

static inline void copy_shorts(void *dest, const void *src, size_t sz)
{
	u16 *__dest = dest;
	const u16 *__src = src;
	size_t __sz = sz >> 1;

	DPAA_BUG_ON((unsigned long)dest & 0x1);
	DPAA_BUG_ON((unsigned long)src & 0x1);
	DPAA_BUG_ON(sz & 0x1);
	while (__sz--)
		*(__dest++) = *(__src++);
}

static inline void copy_bytes(void *dest, const void *src, size_t sz)
{
	u8 *__dest = dest;
	const u8 *__src = src;

	while (sz--)
		*(__dest++) = *(__src++);
}
#else
#define copy_words memcpy
#define copy_shorts memcpy
#define copy_bytes memcpy
#endif
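
/*
 * Callers must honour the alignment contract: copy_words() requires 4-byte
 * aligned pointers and a size that is a multiple of 4, copy_shorts() the
 * 2-byte equivalent (both enforced via DPAA_BUG_ON in debug builds). A
 * sketch, with src64/dst64 as hypothetical 4-byte-aligned buffers:
 *
 *	copy_words(dst64, src64, 64);	// 16 word-sized copies
 *	copy_bytes(tail, src, 3);	// any size and alignment
 */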

/* Allocator stuff */
#define kmalloc(sz, t)	rte_malloc(NULL, sz, 0)
#define kzalloc(sz, t)	rte_zmalloc(NULL, sz, 0)
#define vmalloc(sz)	rte_malloc(NULL, sz, 0)
#define kfree(p)	rte_free(p)

static inline unsigned long get_zeroed_page(gfp_t __foo __rte_unused)
{
	void *p;

	if (posix_memalign(&p, 4096, 4096))
		return 0;
	memset(p, 0, 4096);
	return (unsigned long)p;
}
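
/*
 * Note the asymmetry: the kmalloc()/kfree() wrappers are backed by
 * rte_malloc()/rte_free() (the gfp_t argument is ignored), while
 * get_zeroed_page() allocates via posix_memalign() and must therefore be
 * released with plain free(), not kfree().
 */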

/* Spinlock stuff */
#define spinlock_t		rte_spinlock_t
#define __SPIN_LOCK_UNLOCKED(x)	RTE_SPINLOCK_INITIALIZER
#define DEFINE_SPINLOCK(x)	spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
#define spin_lock_init(x)	rte_spinlock_init(x)
#define spin_lock_destroy(x)
#define spin_lock(x)		rte_spinlock_lock(x)
#define spin_unlock(x)		rte_spinlock_unlock(x)
#define spin_lock_irq(x)	spin_lock(x)
#define spin_unlock_irq(x)	spin_unlock(x)
#define spin_lock_irqsave(x, f) spin_lock_irq(x)
#define spin_unlock_irqrestore(x, f) spin_unlock_irq(x)
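
/*
 * In userspace there are no interrupts to mask, so the _irq/_irqsave variants
 * collapse to plain lock/unlock and the flags argument is unused. A minimal
 * usage sketch (the lock name is hypothetical):
 *
 *	DEFINE_SPINLOCK(pool_lock);
 *	spin_lock(&pool_lock);
 *	...critical section...
 *	spin_unlock(&pool_lock);
 */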

#define atomic_t			rte_atomic32_t
#define atomic_read(v)			rte_atomic32_read(v)
#define atomic_set(v, i)		rte_atomic32_set(v, i)

#define atomic_inc(v)			rte_atomic32_add(v, 1)
#define atomic_dec(v)			rte_atomic32_sub(v, 1)

#define atomic_inc_and_test(v)		rte_atomic32_inc_and_test(v)
#define atomic_dec_and_test(v)		rte_atomic32_dec_and_test(v)

#define atomic_inc_return(v)		rte_atomic32_add_return(v, 1)
#define atomic_dec_return(v)		rte_atomic32_sub_return(v, 1)
#define atomic_sub_and_test(i, v)	(rte_atomic32_sub_return(v, i) == 0)
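
/*
 * These map the kernel's 32-bit atomic API onto DPDK's rte_atomic32_*
 * helpers, which is enough for the usual reference-count pattern
 * (illustrative, with a hypothetical object):
 *
 *	atomic_inc(&obj->refcnt);		// take a reference
 *	if (atomic_dec_and_test(&obj->refcnt))	// drop it; true at zero
 *		kfree(obj);
 */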

/* Interface name length */
#define IF_NAME_MAX_LEN 16

#endif /* __COMPAT_H */