/***********************license start***************
 * Copyright (c) 2003-2010 Cavium Inc. ([email protected]). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Inc. nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export control
 * laws, including the U.S. Export Administration Act and its associated
 * regulations, and may be subject to export or import regulations in other
 * countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/


/**
 * @file
 *
 * Interface to the hardware Fetch and Add Unit.
 *
 * <hr>$Revision: 70030 $<hr>
 */

49 #ifndef __CVMX_FAU_H__
50 #define __CVMX_FAU_H__
51
52 #ifndef CVMX_DONT_INCLUDE_CONFIG
53 #include "cvmx-config.h"
54 #else
55 typedef int cvmx_fau_reg_64_t;
56 typedef int cvmx_fau_reg_32_t;
57 typedef int cvmx_fau_reg_16_t;
58 typedef int cvmx_fau_reg_8_t;
59 #endif
60
61 #ifdef __cplusplus
62 extern "C" {
63 #endif
64
/*
 * Octeon Fetch and Add Unit (FAU)
 */

/* Base I/O address of the FAU (major DID 0x1e) */
#define CVMX_FAU_LOAD_IO_ADDRESS    cvmx_build_io_address(0x1e, 0)
/* Bit positions (msb,lsb) of the fields in a FAU command address,
   consumed by cvmx_build_bits() */
#define CVMX_FAU_BITS_SCRADDR       63,56
#define CVMX_FAU_BITS_LEN           55,48
#define CVMX_FAU_BITS_INEVAL        35,14
#define CVMX_FAU_BITS_TAGWAIT       13,13
#define CVMX_FAU_BITS_NOADD         13,13
#define CVMX_FAU_BITS_SIZE          12,11
#define CVMX_FAU_BITS_REGISTER      10,0


/* Operand size selector encoded into the SIZE field of a FAU command */
typedef enum {
    CVMX_FAU_OP_SIZE_8  = 0,
    CVMX_FAU_OP_SIZE_16 = 1,
    CVMX_FAU_OP_SIZE_32 = 2,
    CVMX_FAU_OP_SIZE_64 = 3
} cvmx_fau_op_size_t;

/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct
{
    uint64_t error : 1;  /* Set when the tag-switch wait timed out */
    int64_t value : 63;  /* Register value before the update */
} cvmx_fau_tagwait64_t;

/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct
{
    uint64_t error : 1;  /* Set when the tag-switch wait timed out */
    int32_t value : 31;  /* Register value before the update */
} cvmx_fau_tagwait32_t;

/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct
{
    uint64_t error : 1;  /* Set when the tag-switch wait timed out */
    int16_t value : 15;  /* Register value before the update */
} cvmx_fau_tagwait16_t;

/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct
{
    uint64_t error : 1;  /* Set when the tag-switch wait timed out */
    int8_t value : 7;    /* Register value before the update */
} cvmx_fau_tagwait8_t;

/**
 * Asynchronous tagwait return definition. If a timeout occurs,
 * the error bit will be set. Otherwise the value of the
 * register before the update will be returned.
 */
typedef union {
    uint64_t u64;
    struct {
        uint64_t invalid: 1;
        uint64_t data   :63; /* unpredictable if invalid is set */
    } s;
} cvmx_fau_async_tagwait_result_t;

143 /**
144 * @INTERNAL
145 * Builds a store I/O address for writing to the FAU
146 *
147 * @param noadd 0 = Store value is atomically added to the current value
148 * 1 = Store value is atomically written over the current value
149 * @param reg FAU atomic register to access. 0 <= reg < 2048.
150 * - Step by 2 for 16 bit access.
151 * - Step by 4 for 32 bit access.
152 * - Step by 8 for 64 bit access.
153 * @return Address to store for atomic update
154 */
__cvmx_fau_store_address(uint64_t noadd,uint64_t reg)155 static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg)
156 {
157 return (CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
158 cvmx_build_bits(CVMX_FAU_BITS_NOADD, noadd) |
159 cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg));
160 }
161
162 /**
163 * @INTERNAL
164 * Builds a I/O address for accessing the FAU
165 *
166 * @param tagwait Should the atomic add wait for the current tag switch
167 * operation to complete.
168 * - 0 = Don't wait
169 * - 1 = Wait for tag switch to complete
170 * @param reg FAU atomic register to access. 0 <= reg < 2048.
171 * - Step by 2 for 16 bit access.
172 * - Step by 4 for 32 bit access.
173 * - Step by 8 for 64 bit access.
174 * @param value Signed value to add.
175 * Note: When performing 32 and 64 bit access, only the low
176 * 22 bits are available.
177 * @return Address to read from for atomic update
178 */
__cvmx_fau_atomic_address(uint64_t tagwait,uint64_t reg,int64_t value)179 static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg, int64_t value)
180 {
181 return (CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
182 cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
183 cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
184 cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg));
185 }
186
187 /**
188 * Perform an atomic 64 bit add
189 *
190 * @param reg FAU atomic register to access. 0 <= reg < 2048.
191 * - Step by 8 for 64 bit access.
192 * @param value Signed value to add.
193 * Note: Only the low 22 bits are available.
194 * @return Value of the register before the update
195 */
cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg,int64_t value)196 static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
197 {
198 return cvmx_read64_int64(__cvmx_fau_atomic_address(0, reg, value));
199 }
200
201 /**
202 * Perform an atomic 32 bit add
203 *
204 * @param reg FAU atomic register to access. 0 <= reg < 2048.
205 * - Step by 4 for 32 bit access.
206 * @param value Signed value to add.
207 * Note: Only the low 22 bits are available.
208 * @return Value of the register before the update
209 */
cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,int32_t value)210 static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
211 {
212 return cvmx_read64_int32(__cvmx_fau_atomic_address(0, reg, value));
213 }
214
215 /**
216 * Perform an atomic 16 bit add
217 *
218 * @param reg FAU atomic register to access. 0 <= reg < 2048.
219 * - Step by 2 for 16 bit access.
220 * @param value Signed value to add.
221 * @return Value of the register before the update
222 */
cvmx_fau_fetch_and_add16(cvmx_fau_reg_16_t reg,int16_t value)223 static inline int16_t cvmx_fau_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
224 {
225 return cvmx_read64_int16(__cvmx_fau_atomic_address(0, reg, value));
226 }
227
228 /**
229 * Perform an atomic 8 bit add
230 *
231 * @param reg FAU atomic register to access. 0 <= reg < 2048.
232 * @param value Signed value to add.
233 * @return Value of the register before the update
234 */
cvmx_fau_fetch_and_add8(cvmx_fau_reg_8_t reg,int8_t value)235 static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
236 {
237 return cvmx_read64_int8(__cvmx_fau_atomic_address(0, reg, value));
238 }
239
240 /**
241 * Perform an atomic 64 bit add after the current tag switch
242 * completes
243 *
244 * @param reg FAU atomic register to access. 0 <= reg < 2048.
245 * - Step by 8 for 64 bit access.
246 * @param value Signed value to add.
247 * Note: Only the low 22 bits are available.
248 * @return If a timeout occurs, the error bit will be set. Otherwise
249 * the value of the register before the update will be
250 * returned
251 */
cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg,int64_t value)252 static inline cvmx_fau_tagwait64_t cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
253 {
254 union
255 {
256 uint64_t i64;
257 cvmx_fau_tagwait64_t t;
258 } result;
259 result.i64 = cvmx_read64_int64(__cvmx_fau_atomic_address(1, reg, value));
260 return result.t;
261 }
262
263 /**
264 * Perform an atomic 32 bit add after the current tag switch
265 * completes
266 *
267 * @param reg FAU atomic register to access. 0 <= reg < 2048.
268 * - Step by 4 for 32 bit access.
269 * @param value Signed value to add.
270 * Note: Only the low 22 bits are available.
271 * @return If a timeout occurs, the error bit will be set. Otherwise
272 * the value of the register before the update will be
273 * returned
274 */
cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg,int32_t value)275 static inline cvmx_fau_tagwait32_t cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
276 {
277 union
278 {
279 uint64_t i32;
280 cvmx_fau_tagwait32_t t;
281 } result;
282 result.i32 = cvmx_read64_int32(__cvmx_fau_atomic_address(1, reg, value));
283 return result.t;
284 }
285
286 /**
287 * Perform an atomic 16 bit add after the current tag switch
288 * completes
289 *
290 * @param reg FAU atomic register to access. 0 <= reg < 2048.
291 * - Step by 2 for 16 bit access.
292 * @param value Signed value to add.
293 * @return If a timeout occurs, the error bit will be set. Otherwise
294 * the value of the register before the update will be
295 * returned
296 */
cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg,int16_t value)297 static inline cvmx_fau_tagwait16_t cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
298 {
299 union
300 {
301 uint64_t i16;
302 cvmx_fau_tagwait16_t t;
303 } result;
304 result.i16 = cvmx_read64_int16(__cvmx_fau_atomic_address(1, reg, value));
305 return result.t;
306 }
307
308 /**
309 * Perform an atomic 8 bit add after the current tag switch
310 * completes
311 *
312 * @param reg FAU atomic register to access. 0 <= reg < 2048.
313 * @param value Signed value to add.
314 * @return If a timeout occurs, the error bit will be set. Otherwise
315 * the value of the register before the update will be
316 * returned
317 */
cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg,int8_t value)318 static inline cvmx_fau_tagwait8_t cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
319 {
320 union
321 {
322 uint64_t i8;
323 cvmx_fau_tagwait8_t t;
324 } result;
325 result.i8 = cvmx_read64_int8(__cvmx_fau_atomic_address(1, reg, value));
326 return result.t;
327 }
328
329 /**
330 * @INTERNAL
331 * Builds I/O data for async operations
332 *
333 * @param scraddr Scratch pad byte addres to write to. Must be 8 byte aligned
334 * @param value Signed value to add.
335 * Note: When performing 32 and 64 bit access, only the low
336 * 22 bits are available.
337 * @param tagwait Should the atomic add wait for the current tag switch
338 * operation to complete.
339 * - 0 = Don't wait
340 * - 1 = Wait for tag switch to complete
341 * @param size The size of the operation:
342 * - CVMX_FAU_OP_SIZE_8 (0) = 8 bits
343 * - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
344 * - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
345 * - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
346 * @param reg FAU atomic register to access. 0 <= reg < 2048.
347 * - Step by 2 for 16 bit access.
348 * - Step by 4 for 32 bit access.
349 * - Step by 8 for 64 bit access.
350 * @return Data to write using cvmx_send_single
351 */
__cvmx_fau_iobdma_data(uint64_t scraddr,int64_t value,uint64_t tagwait,cvmx_fau_op_size_t size,uint64_t reg)352 static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value, uint64_t tagwait,
353 cvmx_fau_op_size_t size, uint64_t reg)
354 {
355 return (CVMX_FAU_LOAD_IO_ADDRESS |
356 cvmx_build_bits(CVMX_FAU_BITS_SCRADDR, scraddr>>3) |
357 cvmx_build_bits(CVMX_FAU_BITS_LEN, 1) |
358 cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
359 cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
360 cvmx_build_bits(CVMX_FAU_BITS_SIZE, size) |
361 cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg));
362 }
363
364 /**
365 * Perform an async atomic 64 bit add. The old value is
366 * placed in the scratch memory at byte address scraddr.
367 *
368 * @param scraddr Scratch memory byte address to put response in.
369 * Must be 8 byte aligned.
370 * @param reg FAU atomic register to access. 0 <= reg < 2048.
371 * - Step by 8 for 64 bit access.
372 * @param value Signed value to add.
373 * Note: Only the low 22 bits are available.
374 * @return Placed in the scratch pad register
375 */
cvmx_fau_async_fetch_and_add64(uint64_t scraddr,cvmx_fau_reg_64_t reg,int64_t value)376 static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr, cvmx_fau_reg_64_t reg, int64_t value)
377 {
378 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_64, reg));
379 }
380
381 /**
382 * Perform an async atomic 32 bit add. The old value is
383 * placed in the scratch memory at byte address scraddr.
384 *
385 * @param scraddr Scratch memory byte address to put response in.
386 * Must be 8 byte aligned.
387 * @param reg FAU atomic register to access. 0 <= reg < 2048.
388 * - Step by 4 for 32 bit access.
389 * @param value Signed value to add.
390 * Note: Only the low 22 bits are available.
391 * @return Placed in the scratch pad register
392 */
cvmx_fau_async_fetch_and_add32(uint64_t scraddr,cvmx_fau_reg_32_t reg,int32_t value)393 static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr, cvmx_fau_reg_32_t reg, int32_t value)
394 {
395 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_32, reg));
396 }
397
398 /**
399 * Perform an async atomic 16 bit add. The old value is
400 * placed in the scratch memory at byte address scraddr.
401 *
402 * @param scraddr Scratch memory byte address to put response in.
403 * Must be 8 byte aligned.
404 * @param reg FAU atomic register to access. 0 <= reg < 2048.
405 * - Step by 2 for 16 bit access.
406 * @param value Signed value to add.
407 * @return Placed in the scratch pad register
408 */
cvmx_fau_async_fetch_and_add16(uint64_t scraddr,cvmx_fau_reg_16_t reg,int16_t value)409 static inline void cvmx_fau_async_fetch_and_add16(uint64_t scraddr, cvmx_fau_reg_16_t reg, int16_t value)
410 {
411 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_16, reg));
412 }
413
414 /**
415 * Perform an async atomic 8 bit add. The old value is
416 * placed in the scratch memory at byte address scraddr.
417 *
418 * @param scraddr Scratch memory byte address to put response in.
419 * Must be 8 byte aligned.
420 * @param reg FAU atomic register to access. 0 <= reg < 2048.
421 * @param value Signed value to add.
422 * @return Placed in the scratch pad register
423 */
cvmx_fau_async_fetch_and_add8(uint64_t scraddr,cvmx_fau_reg_8_t reg,int8_t value)424 static inline void cvmx_fau_async_fetch_and_add8(uint64_t scraddr, cvmx_fau_reg_8_t reg, int8_t value)
425 {
426 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_8, reg));
427 }
428
429 /**
430 * Perform an async atomic 64 bit add after the current tag
431 * switch completes.
432 *
433 * @param scraddr Scratch memory byte address to put response in.
434 * Must be 8 byte aligned.
435 * If a timeout occurs, the error bit (63) will be set. Otherwise
436 * the value of the register before the update will be
437 * returned
438 * @param reg FAU atomic register to access. 0 <= reg < 2048.
439 * - Step by 8 for 64 bit access.
440 * @param value Signed value to add.
441 * Note: Only the low 22 bits are available.
442 * @return Placed in the scratch pad register
443 */
cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr,cvmx_fau_reg_64_t reg,int64_t value)444 static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr, cvmx_fau_reg_64_t reg, int64_t value)
445 {
446 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_64, reg));
447 }
448
449 /**
450 * Perform an async atomic 32 bit add after the current tag
451 * switch completes.
452 *
453 * @param scraddr Scratch memory byte address to put response in.
454 * Must be 8 byte aligned.
455 * If a timeout occurs, the error bit (63) will be set. Otherwise
456 * the value of the register before the update will be
457 * returned
458 * @param reg FAU atomic register to access. 0 <= reg < 2048.
459 * - Step by 4 for 32 bit access.
460 * @param value Signed value to add.
461 * Note: Only the low 22 bits are available.
462 * @return Placed in the scratch pad register
463 */
cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr,cvmx_fau_reg_32_t reg,int32_t value)464 static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr, cvmx_fau_reg_32_t reg, int32_t value)
465 {
466 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_32, reg));
467 }
468
469 /**
470 * Perform an async atomic 16 bit add after the current tag
471 * switch completes.
472 *
473 * @param scraddr Scratch memory byte address to put response in.
474 * Must be 8 byte aligned.
475 * If a timeout occurs, the error bit (63) will be set. Otherwise
476 * the value of the register before the update will be
477 * returned
478 * @param reg FAU atomic register to access. 0 <= reg < 2048.
479 * - Step by 2 for 16 bit access.
480 * @param value Signed value to add.
481 * @return Placed in the scratch pad register
482 */
cvmx_fau_async_tagwait_fetch_and_add16(uint64_t scraddr,cvmx_fau_reg_16_t reg,int16_t value)483 static inline void cvmx_fau_async_tagwait_fetch_and_add16(uint64_t scraddr, cvmx_fau_reg_16_t reg, int16_t value)
484 {
485 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_16, reg));
486 }
487
488 /**
489 * Perform an async atomic 8 bit add after the current tag
490 * switch completes.
491 *
492 * @param scraddr Scratch memory byte address to put response in.
493 * Must be 8 byte aligned.
494 * If a timeout occurs, the error bit (63) will be set. Otherwise
495 * the value of the register before the update will be
496 * returned
497 * @param reg FAU atomic register to access. 0 <= reg < 2048.
498 * @param value Signed value to add.
499 * @return Placed in the scratch pad register
500 */
cvmx_fau_async_tagwait_fetch_and_add8(uint64_t scraddr,cvmx_fau_reg_8_t reg,int8_t value)501 static inline void cvmx_fau_async_tagwait_fetch_and_add8(uint64_t scraddr, cvmx_fau_reg_8_t reg, int8_t value)
502 {
503 cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_8, reg));
504 }
505
506 /**
507 * Perform an atomic 64 bit add
508 *
509 * @param reg FAU atomic register to access. 0 <= reg < 2048.
510 * - Step by 8 for 64 bit access.
511 * @param value Signed value to add.
512 */
cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg,int64_t value)513 static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
514 {
515 cvmx_write64_int64(__cvmx_fau_store_address(0, reg), value);
516 }
517
518 /**
519 * Perform an atomic 32 bit add
520 *
521 * @param reg FAU atomic register to access. 0 <= reg < 2048.
522 * - Step by 4 for 32 bit access.
523 * @param value Signed value to add.
524 */
cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg,int32_t value)525 static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
526 {
527 cvmx_write64_int32(__cvmx_fau_store_address(0, reg), value);
528 }
529
530 /**
531 * Perform an atomic 16 bit add
532 *
533 * @param reg FAU atomic register to access. 0 <= reg < 2048.
534 * - Step by 2 for 16 bit access.
535 * @param value Signed value to add.
536 */
cvmx_fau_atomic_add16(cvmx_fau_reg_16_t reg,int16_t value)537 static inline void cvmx_fau_atomic_add16(cvmx_fau_reg_16_t reg, int16_t value)
538 {
539 cvmx_write64_int16(__cvmx_fau_store_address(0, reg), value);
540 }
541
542 /**
543 * Perform an atomic 8 bit add
544 *
545 * @param reg FAU atomic register to access. 0 <= reg < 2048.
546 * @param value Signed value to add.
547 */
cvmx_fau_atomic_add8(cvmx_fau_reg_8_t reg,int8_t value)548 static inline void cvmx_fau_atomic_add8(cvmx_fau_reg_8_t reg, int8_t value)
549 {
550 cvmx_write64_int8(__cvmx_fau_store_address(0, reg), value);
551 }
552
553 /**
554 * Perform an atomic 64 bit write
555 *
556 * @param reg FAU atomic register to access. 0 <= reg < 2048.
557 * - Step by 8 for 64 bit access.
558 * @param value Signed value to write.
559 */
cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg,int64_t value)560 static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
561 {
562 cvmx_write64_int64(__cvmx_fau_store_address(1, reg), value);
563 }
564
565 /**
566 * Perform an atomic 32 bit write
567 *
568 * @param reg FAU atomic register to access. 0 <= reg < 2048.
569 * - Step by 4 for 32 bit access.
570 * @param value Signed value to write.
571 */
cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg,int32_t value)572 static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
573 {
574 cvmx_write64_int32(__cvmx_fau_store_address(1, reg), value);
575 }
576
577 /**
578 * Perform an atomic 16 bit write
579 *
580 * @param reg FAU atomic register to access. 0 <= reg < 2048.
581 * - Step by 2 for 16 bit access.
582 * @param value Signed value to write.
583 */
cvmx_fau_atomic_write16(cvmx_fau_reg_16_t reg,int16_t value)584 static inline void cvmx_fau_atomic_write16(cvmx_fau_reg_16_t reg, int16_t value)
585 {
586 cvmx_write64_int16(__cvmx_fau_store_address(1, reg), value);
587 }
588
589 /**
590 * Perform an atomic 8 bit write
591 *
592 * @param reg FAU atomic register to access. 0 <= reg < 2048.
593 * @param value Signed value to write.
594 */
cvmx_fau_atomic_write8(cvmx_fau_reg_8_t reg,int8_t value)595 static inline void cvmx_fau_atomic_write8(cvmx_fau_reg_8_t reg, int8_t value)
596 {
597 cvmx_write64_int8(__cvmx_fau_store_address(1, reg), value);
598 }
599
600 #ifdef __cplusplus
601 }
602 #endif
603
604 #endif /* __CVMX_FAU_H__ */
605