1 /***********************license start***************
2 * Copyright (c) 2003-2010 Cavium Inc. ([email protected]). All rights
3 * reserved.
4 *
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 *
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
17
18 * * Neither the name of Cavium Inc. nor the names of
19 * its contributors may be used to endorse or promote products
20 * derived from this software without specific prior written
21 * permission.
22
23 * This Software, including technical data, may be subject to U.S. export control
24 * laws, including the U.S. Export Administration Act and its associated
25 * regulations, and may be subject to export or import regulations in other
26 * countries.
27
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
39
40
41 /**
42 * @file
43 *
44 * Interface to the hardware Free Pool Allocator.
45 *
46 * <hr>$Revision: 70030 $<hr>
47 *
48 */
49
50 #ifndef __CVMX_FPA_H__
51 #define __CVMX_FPA_H__
52
53 #include "cvmx-scratch.h"
54
55 #ifdef CVMX_BUILD_FOR_LINUX_KERNEL
56 #include "cvmx-fpa-defs.h"
57 #endif
58
59 #ifdef __cplusplus
60 extern "C" {
61 #endif
62
63 #define CVMX_FPA_NUM_POOLS 8
64 #define CVMX_FPA_MIN_BLOCK_SIZE 128
65 #define CVMX_FPA_ALIGNMENT 128
66
67 /**
68 * Structure describing the data format used for stores to the FPA.
69 */
70 typedef union
71 {
72 uint64_t u64;
73 struct {
74 uint64_t scraddr : 8; /**< the (64-bit word) location in scratchpad to write to (if len != 0) */
75 uint64_t len : 8; /**< the number of words in the response (0 => no response) */
76 uint64_t did : 8; /**< the ID of the device on the non-coherent bus */
77 uint64_t addr :40; /**< the address that will appear in the first tick on the NCB bus */
78 } s;
79 } cvmx_fpa_iobdma_data_t;
80
81 /**
82 * Structure describing the current state of a FPA pool.
83 */
84 typedef struct
85 {
86 const char *name; /**< Name it was created under */
87 uint64_t size; /**< Size of each block */
88 void * base; /**< The base memory address of whole block */
89 uint64_t starting_element_count; /**< The number of elements in the pool at creation */
90 } cvmx_fpa_pool_info_t;
91
92 /**
93 * Current state of all the pools. Use access functions
94 * instead of using it directly.
95 */
96 extern cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];
97
98 /* CSR typedefs have been moved to cvmx-fpa-defs.h */
99
100 /**
101 * Return the name of the pool
102 *
103 * @param pool Pool to get the name of
104 * @return The name
105 */
cvmx_fpa_get_name(uint64_t pool)106 static inline const char *cvmx_fpa_get_name(uint64_t pool)
107 {
108 return cvmx_fpa_pool_info[pool].name;
109 }
110
111 /**
112 * Return the base of the pool
113 *
114 * @param pool Pool to get the base of
115 * @return The base
116 */
cvmx_fpa_get_base(uint64_t pool)117 static inline void *cvmx_fpa_get_base(uint64_t pool)
118 {
119 return cvmx_fpa_pool_info[pool].base;
120 }
121
122 /**
123 * Check if a pointer belongs to an FPA pool. Return non-zero
124 * if the supplied pointer is inside the memory controlled by
125 * an FPA pool.
126 *
127 * @param pool Pool to check
128 * @param ptr Pointer to check
129 * @return Non-zero if pointer is in the pool. Zero if not
130 */
cvmx_fpa_is_member(uint64_t pool,void * ptr)131 static inline int cvmx_fpa_is_member(uint64_t pool, void *ptr)
132 {
133 return ((ptr >= cvmx_fpa_pool_info[pool].base) &&
134 ((char*)ptr < ((char*)(cvmx_fpa_pool_info[pool].base)) + cvmx_fpa_pool_info[pool].size * cvmx_fpa_pool_info[pool].starting_element_count));
135 }
136
137 /**
138 * Enable the FPA for use. Must be performed after any CSR
139 * configuration but before any other FPA functions.
140 */
cvmx_fpa_enable(void)141 static inline void cvmx_fpa_enable(void)
142 {
143 cvmx_fpa_ctl_status_t status;
144
145 status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
146 if (status.s.enb)
147 {
148 /*
149 * CN68XXP1 should not reset the FPA (doing so may break the
150 * SSO, so we may end up enabling it more than once. Just
151 * return and don't spew messages.
152 */
153 return;
154 }
155
156 status.u64 = 0;
157 status.s.enb = 1;
158 cvmx_write_csr(CVMX_FPA_CTL_STATUS, status.u64);
159 }
160
161 /**
162 * Reset FPA to disable. Make sure buffers from all FPA pools are freed
163 * before disabling FPA.
164 */
cvmx_fpa_disable(void)165 static inline void cvmx_fpa_disable(void)
166 {
167 cvmx_fpa_ctl_status_t status;
168
169 status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
170 status.s.reset = 1;
171 cvmx_write_csr(CVMX_FPA_CTL_STATUS, status.u64);
172 }
173
174 /**
175 * Get a new block from the FPA
176 *
177 * @param pool Pool to get the block from
178 * @return Pointer to the block or NULL on failure
179 */
cvmx_fpa_alloc(uint64_t pool)180 static inline void *cvmx_fpa_alloc(uint64_t pool)
181 {
182 uint64_t address;
183
184 for (;;) {
185 address = cvmx_read_csr(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA,pool)));
186 if (cvmx_likely(address)) {
187 return cvmx_phys_to_ptr(address);
188 } else {
189 /* If pointers are available, continuously retry. */
190 if (cvmx_read_csr(CVMX_FPA_QUEX_AVAILABLE(pool)) > 0)
191 cvmx_wait(50);
192 else
193 return NULL;
194 }
195 }
196 }
197
198 /**
199 * Asynchronously get a new block from the FPA
200 *
201 * The result of cvmx_fpa_async_alloc() may be retrieved using
202 * cvmx_fpa_async_alloc_finish().
203 *
204 * @param scr_addr Local scratch address to put response in. This is a byte address,
205 * but must be 8 byte aligned.
206 * @param pool Pool to get the block from
207 */
cvmx_fpa_async_alloc(uint64_t scr_addr,uint64_t pool)208 static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
209 {
210 cvmx_fpa_iobdma_data_t data;
211
212 /* Hardware only uses 64 bit aligned locations, so convert from byte address
213 ** to 64-bit index
214 */
215 data.s.scraddr = scr_addr >> 3;
216 data.s.len = 1;
217 data.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA,pool);
218 data.s.addr = 0;
219 cvmx_send_single(data.u64);
220 }
221
222 /**
223 * Retrieve the result of cvmx_fpa_async_alloc
224 *
225 * @param scr_addr The Local scratch address. Must be the same value
226 * passed to cvmx_fpa_async_alloc().
227 *
228 * @param pool Pool the block came from. Must be the same value
229 * passed to cvmx_fpa_async_alloc.
230 *
231 * @return Pointer to the block or NULL on failure
232 */
cvmx_fpa_async_alloc_finish(uint64_t scr_addr,uint64_t pool)233 static inline void *cvmx_fpa_async_alloc_finish(uint64_t scr_addr, uint64_t pool)
234 {
235 uint64_t address;
236
237 CVMX_SYNCIOBDMA;
238
239 address = cvmx_scratch_read64(scr_addr);
240 if (cvmx_likely(address))
241 return cvmx_phys_to_ptr(address);
242 else
243 return cvmx_fpa_alloc(pool);
244 }
245
246 /**
247 * Free a block allocated with a FPA pool.
248 * Does NOT provide memory ordering in cases where the memory block was modified by the core.
249 *
250 * @param ptr Block to free
251 * @param pool Pool to put it in
252 * @param num_cache_lines
253 * Cache lines to invalidate
254 */
cvmx_fpa_free_nosync(void * ptr,uint64_t pool,uint64_t num_cache_lines)255 static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool, uint64_t num_cache_lines)
256 {
257 cvmx_addr_t newptr;
258 newptr.u64 = cvmx_ptr_to_phys(ptr);
259 newptr.sfilldidspace.didspace = CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA,pool));
260 asm volatile ("" : : : "memory"); /* Prevent GCC from reordering around free */
261 /* value written is number of cache lines not written back */
262 cvmx_write_io(newptr.u64, num_cache_lines);
263 }
264
265 /**
266 * Free a block allocated with a FPA pool. Provides required memory
267 * ordering in cases where memory block was modified by core.
268 *
269 * @param ptr Block to free
270 * @param pool Pool to put it in
271 * @param num_cache_lines
272 * Cache lines to invalidate
273 */
cvmx_fpa_free(void * ptr,uint64_t pool,uint64_t num_cache_lines)274 static inline void cvmx_fpa_free(void *ptr, uint64_t pool, uint64_t num_cache_lines)
275 {
276 cvmx_addr_t newptr;
277 newptr.u64 = cvmx_ptr_to_phys(ptr);
278 newptr.sfilldidspace.didspace = CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA,pool));
279 /* Make sure that any previous writes to memory go out before we free this buffer.
280 ** This also serves as a barrier to prevent GCC from reordering operations to after
281 ** the free. */
282 CVMX_SYNCWS;
283 /* value written is number of cache lines not written back */
284 cvmx_write_io(newptr.u64, num_cache_lines);
285 }
286
287 /**
288 * Setup a FPA pool to control a new block of memory.
289 * This can only be called once per pool. Make sure proper
290 * locking enforces this.
291 *
292 * @param pool Pool to initialize
293 * 0 <= pool < 8
294 * @param name Constant character string to name this pool.
295 * String is not copied.
296 * @param buffer Pointer to the block of memory to use. This must be
297 * accessable by all processors and external hardware.
298 * @param block_size Size for each block controlled by the FPA
299 * @param num_blocks Number of blocks
300 *
301 * @return 0 on Success,
302 * -1 on failure
303 */
304 extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
305 uint64_t block_size, uint64_t num_blocks);
306
307 /**
308 * Shutdown a Memory pool and validate that it had all of
309 * the buffers originally placed in it. This should only be
310 * called by one processor after all hardware has finished
311 * using the pool. Most like you will want to have called
312 * cvmx_helper_shutdown_packet_io_global() before this
313 * function to make sure all FPA buffers are out of the packet
314 * IO hardware.
315 *
316 * @param pool Pool to shutdown
317 *
 * @return Zero on success
 *         - Positive is count of missing buffers
 *         - Negative is too many buffers or corrupted pointers
 *           (note: the return type is uint64_t, so a "negative" result
 *           is represented as a very large unsigned value)
321 */
322 extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);
323
324 /**
325 * Get the size of blocks controlled by the pool
326 * This is resolved to a constant at compile time.
327 *
328 * @param pool Pool to access
329 * @return Size of the block in bytes
330 */
331 uint64_t cvmx_fpa_get_block_size(uint64_t pool);
332
333 #ifdef __cplusplus
334 }
335 #endif
336
#endif /* __CVMX_FPA_H__ */
338