/***********************license start***************
 * Copyright (c) 2003-2010 Cavium Inc. ([email protected]). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Inc. nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export control
 * laws, including the U.S. Export Administration Act and its associated
 * regulations, and may be subject to export or import regulations in other
 * countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/


/**
 * @file
 * Simple allocate-only memory allocator. Used to allocate memory at application
 * start time.
 *
 * <hr>$Revision: 70030 $<hr>
 *
 */
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <linux/module.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-bootmem.h>
#else
#if !defined(__FreeBSD__) || !defined(_KERNEL)
#include "executive-config.h"
#endif
#include "cvmx.h"
#include "cvmx-bootmem.h"
#endif
typedef uint32_t cvmx_spinlock_t;


//#define DEBUG

#define ULL unsigned long long
#undef MAX
#define MAX(a, b) (((a) > (b)) ? (a) : (b))

#undef MIN
#define MIN(a, b) (((a) < (b)) ? (a) : (b))

/* Round "addr" up to the next boundary described by "align". Note that "align"
** is an alignment mask (e.g. ~(alignment - 1)), not an alignment in bytes. */
#define ALIGN_ADDR_UP(addr, align) (((addr) + (~(align))) & (align))

/**
 * This is the physical location of a cvmx_bootmem_desc_t
 * structure in Octeon's memory. Note that due to addressing
 * limits or the runtime environment it might not be possible to
 * create a C pointer to this structure.
 */
static CVMX_SHARED uint64_t cvmx_bootmem_desc_addr = 0;

/**
 * This macro returns the size of a member of a structure.
 * Logically it is the same as "sizeof(s::field)" in C++, but
 * C lacks the "::" operator.
 */
#define SIZEOF_FIELD(s, field) sizeof(((s*)NULL)->field)

/**
 * This macro returns a member of the cvmx_bootmem_desc_t
 * structure. These members can't be directly addressed as
 * they might be in memory not directly reachable. In the case
 * where bootmem is compiled with LINUX_HOST, the structure
 * itself might be located on a remote Octeon. The argument
 * "field" is the member name of the cvmx_bootmem_desc_t to read.
 * Regardless of the type of the field, the return type is always
 * a uint64_t.
 */
#define CVMX_BOOTMEM_DESC_GET_FIELD(field) \
    __cvmx_bootmem_desc_get(cvmx_bootmem_desc_addr, \
        offsetof(cvmx_bootmem_desc_t, field), \
        SIZEOF_FIELD(cvmx_bootmem_desc_t, field))

/**
 * This macro writes a member of the cvmx_bootmem_desc_t
 * structure. These members can't be directly addressed as
 * they might be in memory not directly reachable. In the case
 * where bootmem is compiled with LINUX_HOST, the structure
 * itself might be located on a remote Octeon. The argument
 * "field" is the member name of the cvmx_bootmem_desc_t to write.
 */
#define CVMX_BOOTMEM_DESC_SET_FIELD(field, value) \
    __cvmx_bootmem_desc_set(cvmx_bootmem_desc_addr, \
        offsetof(cvmx_bootmem_desc_t, field), \
        SIZEOF_FIELD(cvmx_bootmem_desc_t, field), value)
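
/*
 * Usage sketch (illustrative only, not part of the original source): the field
 * accessor macros read and write descriptor members by physical address, so
 * they work even when no C pointer to the descriptor exists. For example
 * (some_physical_address is a hypothetical value chosen by the caller):
 *
 *     uint64_t head = CVMX_BOOTMEM_DESC_GET_FIELD(head_addr);
 *     CVMX_BOOTMEM_DESC_SET_FIELD(app_data_addr, some_physical_address);
 *
 * Both macros promote every field to/from uint64_t regardless of its declared
 * width in cvmx_bootmem_desc_t.
 */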

/**
 * This macro returns a member of the
 * cvmx_bootmem_named_block_desc_t structure. These members can't
 * be directly addressed as they might be in memory not directly
 * reachable. In the case where bootmem is compiled with
 * LINUX_HOST, the structure itself might be located on a remote
 * Octeon. The argument "field" is the member name of the
 * cvmx_bootmem_named_block_desc_t to read. Regardless of the type
 * of the field, the return type is always a uint64_t. The "addr"
 * parameter is the physical address of the structure.
 */
#define CVMX_BOOTMEM_NAMED_GET_FIELD(addr, field) \
    __cvmx_bootmem_desc_get(addr, \
        offsetof(cvmx_bootmem_named_block_desc_t, field), \
        SIZEOF_FIELD(cvmx_bootmem_named_block_desc_t, field))

/**
 * This macro writes a member of the cvmx_bootmem_named_block_desc_t
 * structure. These members can't be directly addressed as
 * they might be in memory not directly reachable. In the case
 * where bootmem is compiled with LINUX_HOST, the structure
 * itself might be located on a remote Octeon. The argument
 * "field" is the member name of the
 * cvmx_bootmem_named_block_desc_t to write. The "addr" parameter
 * is the physical address of the structure.
 */
#define CVMX_BOOTMEM_NAMED_SET_FIELD(addr, field, value) \
    __cvmx_bootmem_desc_set(addr, \
        offsetof(cvmx_bootmem_named_block_desc_t, field), \
        SIZEOF_FIELD(cvmx_bootmem_named_block_desc_t, field), value)

/**
 * This function is the implementation of the get macros defined
 * for individual structure members. The arguments are generated
 * by the macros in order to read only the needed memory.
 *
 * @param base   64bit physical address of the complete structure
 * @param offset Offset from the beginning of the structure to the member being
 *               accessed.
 * @param size   Size of the structure member.
 *
 * @return Value of the structure member promoted into a uint64_t.
 */
static inline uint64_t __cvmx_bootmem_desc_get(uint64_t base, int offset, int size)
{
    /* Access the member directly through a 64 bit XKPHYS address */
    base = (1ull << 63) | (base + offset);
    switch (size)
    {
        case 4:
            return cvmx_read64_uint32(base);
        case 8:
            return cvmx_read64_uint64(base);
        default:
            return 0;
    }
}

/**
 * This function is the implementation of the set macros defined
 * for individual structure members. The arguments are generated
 * by the macros in order to write only the needed memory.
 *
 * @param base   64bit physical address of the complete structure
 * @param offset Offset from the beginning of the structure to the member being
 *               accessed.
 * @param size   Size of the structure member.
 * @param value  Value to write into the structure
 */
static inline void __cvmx_bootmem_desc_set(uint64_t base, int offset, int size, uint64_t value)
{
    base = (1ull << 63) | (base + offset);
    switch (size)
    {
        case 4:
            cvmx_write64_uint32(base, value);
            break;
        case 8:
            cvmx_write64_uint64(base, value);
            break;
        default:
            break;
    }
}

/**
 * This function retrieves the string name of a named block. It is
 * more complicated than a simple memcpy() since the named block
 * descriptor may not be directly accessible.
 *
 * @param addr Physical address of the named block descriptor
 * @param str  String to receive the named block string name. The buffer
 *             must hold at least len+1 bytes, as a terminating zero is
 *             always written at str[len].
 * @param len  Length of the name, which must match the length
 *             stored in the bootmem descriptor.
 */
static void CVMX_BOOTMEM_NAMED_GET_NAME(uint64_t addr, char *str, int len)
{
#ifndef CVMX_BUILD_FOR_LINUX_HOST
    int l = len;
    char *ptr = str;
    addr |= (1ull << 63);
    addr += offsetof(cvmx_bootmem_named_block_desc_t, name);
    while (l--)
        *ptr++ = cvmx_read64_uint8(addr++);
    str[len] = 0;
#else
    extern void octeon_remote_read_mem(void *buffer, uint64_t physical_address, int length);
    addr += offsetof(cvmx_bootmem_named_block_desc_t, name);
    octeon_remote_read_mem(str, addr, len);
    str[len] = 0;
#endif
}

/**
 * This function stores the string name of a named block. It is
 * more complicated than a simple memcpy() since the named block
 * descriptor may not be directly accessible.
 *
 * @param addr Physical address of the named block descriptor
 * @param str  String to store into the named block string name
 * @param len  Length of the name field, which must match the length
 *             stored in the bootmem descriptor. The stored name is
 *             always zero terminated within this length.
 */
static void CVMX_BOOTMEM_NAMED_SET_NAME(uint64_t addr, const char *str, int len)
{
#ifndef CVMX_BUILD_FOR_LINUX_HOST
    int l = len;
    addr |= (1ull << 63);
    addr += offsetof(cvmx_bootmem_named_block_desc_t, name);
    while (l--)
    {
        if (l && *str)
            cvmx_write64_uint8(addr++, *str++);
        else
            cvmx_write64_uint8(addr++, 0);  /* zero pad once the source string ends */
    }
#else
    extern void octeon_remote_write_mem(uint64_t physical_address, const void *buffer, int length);
    char zero = 0;
    addr += offsetof(cvmx_bootmem_named_block_desc_t, name);
    octeon_remote_write_mem(addr, str, len-1);
    octeon_remote_write_mem(addr+len-1, &zero, 1);
#endif
}

/* See header file for descriptions of functions */

/* Wrapper functions are provided for reading/writing the size and next block
** values as these may not be directly addressable (in 32 bit applications, for instance.)
*/
/* Offsets of data elements in bootmem list, must match cvmx_bootmem_block_header_t */
#define NEXT_OFFSET 0
#define SIZE_OFFSET 8
static void cvmx_bootmem_phy_set_size(uint64_t addr, uint64_t size)
{
    cvmx_write64_uint64((addr + SIZE_OFFSET) | (1ull << 63), size);
}
static void cvmx_bootmem_phy_set_next(uint64_t addr, uint64_t next)
{
    cvmx_write64_uint64((addr + NEXT_OFFSET) | (1ull << 63), next);
}
static uint64_t cvmx_bootmem_phy_get_size(uint64_t addr)
{
    return(cvmx_read64_uint64((addr + SIZE_OFFSET) | (1ull << 63)));
}
static uint64_t cvmx_bootmem_phy_get_next(uint64_t addr)
{
    return(cvmx_read64_uint64((addr + NEXT_OFFSET) | (1ull << 63)));
}
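
/*
 * Illustrative sketch (an assumption for clarity, not part of the original
 * source): the two offsets above describe a header stored in the first 16
 * bytes of every free block, equivalent to something like:
 *
 *     struct free_block_header {
 *         uint64_t next_block_addr;  // physical address of next free block, 0 at end of list
 *         uint64_t size;             // size of this free block in bytes
 *     };
 *
 * The wrappers access these fields through XKPHYS addresses so that the free
 * list can be walked even from 32 bit applications.
 */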

/**
 * Check the version information on the bootmem descriptor
 *
 * @param exact_match
 *               Exact major version to check against. A zero means
 *               check that the version supports named blocks.
 *
 * @return Zero if the version is correct. Negative if the version is
 *         incorrect. Failures also cause a message to be displayed.
 */
static int __cvmx_bootmem_check_version(int exact_match)
{
    int major_version;
#ifdef CVMX_BUILD_FOR_LINUX_HOST
    if (!cvmx_bootmem_desc_addr)
        cvmx_bootmem_desc_addr = cvmx_read64_uint64(0x48100);
#endif
    major_version = CVMX_BOOTMEM_DESC_GET_FIELD(major_version);
    if ((major_version > 3) || (exact_match && major_version != exact_match))
    {
        cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: %d.%d at addr: 0x%llx\n",
                     major_version, (int)CVMX_BOOTMEM_DESC_GET_FIELD(minor_version),
                     (ULL)cvmx_bootmem_desc_addr);
        return -1;
    }
    else
        return 0;
}

/**
 * Get the low level bootmem descriptor lock. If no locking
 * is specified in the flags, then nothing is done.
 *
 * @param flags CVMX_BOOTMEM_FLAG_NO_LOCKING means this function should do
 *              nothing. This is used to support nested bootmem calls.
 */
static inline void __cvmx_bootmem_lock(uint32_t flags)
{
    if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
    {
#ifndef CVMX_BUILD_FOR_LINUX_HOST
        /* Unfortunately we can't use the normal cvmx-spinlock code as the
           memory for the bootmem descriptor may not be accessible by a C
           pointer. We use a 64bit XKPHYS address to access the memory
           directly */
        uint64_t lock_addr = (1ull << 63) | (cvmx_bootmem_desc_addr + offsetof(cvmx_bootmem_desc_t, lock));
        unsigned int tmp;

        __asm__ __volatile__(
            ".set noreorder            \n"
            "1: ll   %[tmp], 0(%[addr])\n"
            "   bnez %[tmp], 1b        \n"
            "   li   %[tmp], 1         \n"
            "   sc   %[tmp], 0(%[addr])\n"
            "   beqz %[tmp], 1b        \n"
            "   nop                    \n"
            ".set reorder              \n"
            : [tmp] "=&r" (tmp)
            : [addr] "r" (lock_addr)
            : "memory");
#endif
    }
}

/**
 * Release the low level bootmem descriptor lock. If no locking
 * is specified in the flags, then nothing is done.
 *
 * @param flags CVMX_BOOTMEM_FLAG_NO_LOCKING means this function should do
 *              nothing. This is used to support nested bootmem calls.
 */
static inline void __cvmx_bootmem_unlock(uint32_t flags)
{
    if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
    {
#ifndef CVMX_BUILD_FOR_LINUX_HOST
        /* Unfortunately we can't use the normal cvmx-spinlock code as the
           memory for the bootmem descriptor may not be accessible by a C
           pointer. We use a 64bit XKPHYS address to access the memory
           directly */
        uint64_t lock_addr = (1ull << 63) | (cvmx_bootmem_desc_addr + offsetof(cvmx_bootmem_desc_t, lock));

        CVMX_SYNCW;
        __asm__ __volatile__("sw $0, 0(%[addr])\n"
                             :: [addr] "r" (lock_addr)
                             : "memory");
        CVMX_SYNCW;
#endif
    }
}

/* Some of the cvmx-bootmem functions dealing with C pointers are not supported
   when we are compiling for CVMX_BUILD_FOR_LINUX_HOST. This ifndef removes
   these functions when they aren't needed */
#ifndef CVMX_BUILD_FOR_LINUX_HOST
/* This function takes an address range and adjusts it as necessary to
** match the ABI that is currently being used. This is required to ensure
** that bootmem_alloc* functions only return valid pointers for 32 bit ABIs */
static int __cvmx_validate_mem_range(uint64_t *min_addr_ptr, uint64_t *max_addr_ptr)
{

#if defined(__linux__) && defined(CVMX_ABI_N32)
    {
        extern uint64_t linux_mem32_min;
        extern uint64_t linux_mem32_max;
        /* For 32 bit Linux apps, we need to restrict the allocations to the range
        ** of memory configured for access from userspace. Also, we need to add mappings
        ** for the data structures that we access. */

        /* Narrow range requests to be bounded by the 32 bit limits. octeon_phy_mem_block_alloc()
        ** will reject inconsistent req_size/range requests, so we don't repeat those checks here.
        ** If max unspecified, set to 32 bit maximum. */
        *min_addr_ptr = MIN(MAX(*min_addr_ptr, linux_mem32_min), linux_mem32_max);
        if (!*max_addr_ptr)
            *max_addr_ptr = linux_mem32_max;
        else
            *max_addr_ptr = MAX(MIN(*max_addr_ptr, linux_mem32_max), linux_mem32_min);
    }
#elif defined(CVMX_ABI_N32)
    {
        uint32_t max_phys = 0x0FFFFFFF;  /* Max physical address when 1-1 mappings not used */
#if CVMX_USE_1_TO_1_TLB_MAPPINGS
        max_phys = 0x7FFFFFFF;
#endif
        /* We are running a standalone simple executive, so we need to limit the range
        ** that we allocate from */

        /* Narrow range requests to be bounded by the 32 bit limits. octeon_phy_mem_block_alloc()
        ** will reject inconsistent req_size/range requests, so we don't repeat those checks here.
        ** If max unspecified, set to 32 bit maximum. */
        *min_addr_ptr = MIN(MAX(*min_addr_ptr, 0x0), max_phys);
        if (!*max_addr_ptr)
            *max_addr_ptr = max_phys;
        else
            *max_addr_ptr = MAX(MIN(*max_addr_ptr, max_phys), 0x0);
    }
#endif

    return 0;
}


void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment, uint64_t min_addr, uint64_t max_addr)
{
    int64_t address;

    __cvmx_validate_mem_range(&min_addr, &max_addr);
    address = cvmx_bootmem_phy_alloc(size, min_addr, max_addr, alignment, 0);

    if (address > 0)
        return cvmx_phys_to_ptr(address);
    else
        return NULL;
}
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(cvmx_bootmem_alloc_range);
#endif

void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address, uint64_t alignment)
{
    return cvmx_bootmem_alloc_range(size, alignment, address, address + size);
}


void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment)
{
    return cvmx_bootmem_alloc_range(size, alignment, 0, 0);
}
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(cvmx_bootmem_alloc);
#endif
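
/*
 * Usage sketch for the pointer based allocators (illustrative only; the
 * variable names and sizes are made-up examples, not part of the original
 * source):
 *
 *     // 1 MB scratch buffer, 128 byte aligned, anywhere bootmem allows
 *     void *scratch = cvmx_bootmem_alloc(1 << 20, 128);
 *
 *     // Same size, but restricted to the first 256 MB of physical memory
 *     void *low_buf = cvmx_bootmem_alloc_range(1 << 20, 128, 0, 0x10000000);
 *
 *     if (!scratch || !low_buf)
 *         cvmx_dprintf("bootmem allocation failed\n");
 *
 * This is an allocate-only interface: of the public functions in this file,
 * only named blocks can be freed later, so anonymous allocations like these
 * are intended for application start time.
 */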
461
cvmx_bootmem_alloc_named_range_once(uint64_t size,uint64_t min_addr,uint64_t max_addr,uint64_t align,const char * name,void (* init)(void *))462 void *cvmx_bootmem_alloc_named_range_once(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, const char *name, void (*init)(void*))
463 {
464 int64_t addr;
465 void *ptr;
466 uint64_t named_block_desc_addr;
467
468 __cvmx_bootmem_lock(0);
469
470 __cvmx_validate_mem_range(&min_addr, &max_addr);
471 named_block_desc_addr = cvmx_bootmem_phy_named_block_find(name, CVMX_BOOTMEM_FLAG_NO_LOCKING);
472
473 if (named_block_desc_addr)
474 {
475 addr = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_desc_addr, base_addr);
476 __cvmx_bootmem_unlock(0);
477 return cvmx_phys_to_ptr(addr);
478 }
479
480 addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr, align, name, CVMX_BOOTMEM_FLAG_NO_LOCKING);
481
482 if (addr < 0)
483 {
484 __cvmx_bootmem_unlock(0);
485 return NULL;
486 }
487 ptr = cvmx_phys_to_ptr(addr);
488 init(ptr);
489 __cvmx_bootmem_unlock(0);
490 return ptr;
491 }
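
/*
 * Usage sketch (illustrative only; the block name, size and init function are
 * made-up examples): the "once" variant is useful when several callers race to
 * create the same shared block. The first caller allocates the block and runs
 * the init callback under the bootmem lock; later callers simply get a pointer
 * to the existing block.
 *
 *     static void my_table_init(void *ptr)
 *     {
 *         memset(ptr, 0, 4096);
 *     }
 *
 *     void *table = cvmx_bootmem_alloc_named_range_once(4096, 0, 0, 128,
 *                                                       "my_shared_table",
 *                                                       my_table_init);
 */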

static void *cvmx_bootmem_alloc_named_range_flags(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, const char *name, uint32_t flags)
{
    int64_t addr;

    __cvmx_validate_mem_range(&min_addr, &max_addr);
    addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr, align, name, flags);
    if (addr >= 0)
        return cvmx_phys_to_ptr(addr);
    else
        return NULL;

}

void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, const char *name)
{
    return cvmx_bootmem_alloc_named_range_flags(size, min_addr, max_addr, align, name, 0);
}

void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address, const char *name)
{
    return(cvmx_bootmem_alloc_named_range(size, address, address + size, 0, name));
}

void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, const char *name)
{
    return(cvmx_bootmem_alloc_named_range(size, 0, 0, alignment, name));
}

void *cvmx_bootmem_alloc_named_flags(uint64_t size, uint64_t alignment, const char *name, uint32_t flags)
{
    return cvmx_bootmem_alloc_named_range_flags(size, 0, 0, alignment, name, flags);
}

int cvmx_bootmem_free_named(const char *name)
{
    return(cvmx_bootmem_phy_named_block_free(name, 0));
}
#endif
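
/*
 * Named-block lifecycle sketch (illustrative only; the block name and size are
 * arbitrary examples): named blocks are the only allocations in this file that
 * can be looked up or freed later, which makes them the usual way to share
 * memory between cooperating applications.
 *
 *     void *buf = cvmx_bootmem_alloc_named(0x100000, 128, "packet_pool");
 *
 *     // In another application, or at a later time:
 *     const cvmx_bootmem_named_block_desc_t *blk =
 *         cvmx_bootmem_find_named_block("packet_pool");
 *     if (blk)
 *         cvmx_dprintf("packet_pool at 0x%llx, size 0x%llx\n",
 *                      (ULL)blk->base_addr, (ULL)blk->size);
 *
 *     // When the block is no longer needed:
 *     cvmx_bootmem_free_named("packet_pool");
 */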

const cvmx_bootmem_named_block_desc_t *cvmx_bootmem_find_named_block(const char *name)
{
    /* FIXME: Returning a single static object is probably a bad thing */
    static cvmx_bootmem_named_block_desc_t desc;
    uint64_t named_addr = cvmx_bootmem_phy_named_block_find(name, 0);
    if (named_addr)
    {
        desc.base_addr = CVMX_BOOTMEM_NAMED_GET_FIELD(named_addr, base_addr);
        desc.size = CVMX_BOOTMEM_NAMED_GET_FIELD(named_addr, size);
        strncpy(desc.name, name, sizeof(desc.name));
        desc.name[sizeof(desc.name)-1] = 0;
        return &desc;
    }
    else
        return NULL;
}

void cvmx_bootmem_print_named(void)
{
    cvmx_bootmem_phy_named_block_print();
}

int cvmx_bootmem_init(uint64_t mem_desc_addr)
{
    /* Verify that the size of cvmx_spinlock_t meets our assumptions */
    if (sizeof(cvmx_spinlock_t) != 4)
    {
        cvmx_dprintf("ERROR: Unexpected size of cvmx_spinlock_t\n");
        return(-1);
    }
    if (!cvmx_bootmem_desc_addr)
        cvmx_bootmem_desc_addr = mem_desc_addr;
    return(0);
}


uint64_t cvmx_bootmem_available_mem(uint64_t min_block_size)
{
    return(cvmx_bootmem_phy_available_mem(min_block_size));
}



/*********************************************************************
** The cvmx_bootmem_phy* functions below return 64 bit physical addresses,
** and expose more features than the cvmx_bootmem functions above. These are
** required for full memory space access in 32 bit applications, as well as for
** using some advanced features.
** Most applications should not need to use these.
**
**/


int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min, uint64_t address_max, uint64_t alignment, uint32_t flags)
{

    uint64_t head_addr;
    uint64_t ent_addr;
    uint64_t prev_addr = 0;  /* points to previous list entry; zero when the current entry is the head of the list */
    uint64_t new_ent_addr = 0;
    uint64_t desired_min_addr;
    uint64_t alignment_mask = ~(alignment - 1);

#ifdef DEBUG
    cvmx_dprintf("cvmx_bootmem_phy_alloc: req_size: 0x%llx, min_addr: 0x%llx, max_addr: 0x%llx, align: 0x%llx\n",
                 (ULL)req_size, (ULL)address_min, (ULL)address_max, (ULL)alignment);
#endif

    if (__cvmx_bootmem_check_version(0))
        goto error_out;

    /* Do a variety of checks to validate the arguments. The allocator code will later assume
    ** that these checks have been made. We validate that the requested constraints are not
    ** self-contradictory before we look through the list of available memory
    */

    /* 0 is not a valid req_size for this allocator */
    if (!req_size)
        goto error_out;

    /* Round req_size up to a multiple of the minimum alignment bytes */
    req_size = (req_size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) & ~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);


    /* Enforce minimum alignment (this also keeps the minimum free block
    ** size the same as the alignment size) */
    if (alignment < CVMX_BOOTMEM_ALIGNMENT_SIZE)
    {
        alignment = CVMX_BOOTMEM_ALIGNMENT_SIZE;
    }
    alignment_mask = ~(alignment - 1);

    /* Adjust address minimum based on requested alignment (round up to meet alignment). Do this here so we can
    ** reject impossible requests up front. (NOP for address_min == 0) */
    if (alignment)
        address_min = (address_min + (alignment - 1)) & ~(alignment - 1);

    /* Convert a non-zero address_min and zero address_max into the special case of a range that specifies an exact
    ** memory block to allocate. Do this before other checks and adjustments so that this transformation will be validated */
    if (address_min && !address_max)
        address_max = address_min + req_size;
    else if (!address_min && !address_max)
        address_max = ~0ull;  /* If no limits given, use max limits */

    /* Reject inconsistent args. We have adjusted these, so this may fail due to our internal changes
    ** even if this check would pass for the values the user supplied. */
    if (req_size > address_max - address_min)
        goto error_out;

    /* Walk through the list entries - first fit found is returned */

    __cvmx_bootmem_lock(flags);
    head_addr = CVMX_BOOTMEM_DESC_GET_FIELD(head_addr);
    ent_addr = head_addr;
    while (ent_addr)
    {
        uint64_t usable_base, usable_max;
        uint64_t ent_size = cvmx_bootmem_phy_get_size(ent_addr);

        if (cvmx_bootmem_phy_get_next(ent_addr) && ent_addr > cvmx_bootmem_phy_get_next(ent_addr))
        {
            cvmx_dprintf("Internal bootmem_alloc() error: ent: 0x%llx, next: 0x%llx\n",
                         (ULL)ent_addr, (ULL)cvmx_bootmem_phy_get_next(ent_addr));
            goto error_out;
        }

        /* Determine if this is an entry that can satisfy the request */
        /* Check to make sure entry is large enough to satisfy request */
        usable_base = ALIGN_ADDR_UP(MAX(address_min, ent_addr), alignment_mask);
        usable_max = MIN(address_max, ent_addr + ent_size);
        /* We should be able to allocate block at address usable_base */

        desired_min_addr = usable_base;

        /* Determine if request can be satisfied from the current entry */
        if ((((ent_addr + ent_size) > usable_base && ent_addr < address_max))
            && req_size <= usable_max - usable_base)
        {
            /* We have found an entry that has room to satisfy the request, so allocate it from this entry */

            /* If CVMX_BOOTMEM_FLAG_END_ALLOC is set, then allocate from the end of this block
            ** rather than the beginning */
            if (flags & CVMX_BOOTMEM_FLAG_END_ALLOC)
            {
                desired_min_addr = usable_max - req_size;
                /* Align desired address down to required alignment */
                desired_min_addr &= alignment_mask;
            }

            /* Match at start of entry */
            if (desired_min_addr == ent_addr)
            {
                if (req_size < ent_size)
                {
                    /* big enough to create a new block from top portion of block */
                    new_ent_addr = ent_addr + req_size;
                    cvmx_bootmem_phy_set_next(new_ent_addr, cvmx_bootmem_phy_get_next(ent_addr));
                    cvmx_bootmem_phy_set_size(new_ent_addr, ent_size - req_size);

                    /* Adjust next pointer as following code uses this */
                    cvmx_bootmem_phy_set_next(ent_addr, new_ent_addr);
                }

                /* adjust prev ptr or head to remove this entry from list */
                if (prev_addr)
                {
                    cvmx_bootmem_phy_set_next(prev_addr, cvmx_bootmem_phy_get_next(ent_addr));
                }
                else
                {
                    /* head of list being returned, so update head ptr */
                    CVMX_BOOTMEM_DESC_SET_FIELD(head_addr, cvmx_bootmem_phy_get_next(ent_addr));
                }
                __cvmx_bootmem_unlock(flags);
                return(desired_min_addr);
            }


            /* block returned doesn't start at beginning of entry, so we know
            ** that we will be splitting a block off the front of this one. Create a new block
            ** from the beginning, add to list, and go to top of loop again.
            **
            ** create new block from high portion of block, so that top block
            ** starts at desired addr
            **/
            new_ent_addr = desired_min_addr;
            cvmx_bootmem_phy_set_next(new_ent_addr, cvmx_bootmem_phy_get_next(ent_addr));
            cvmx_bootmem_phy_set_size(new_ent_addr, cvmx_bootmem_phy_get_size(ent_addr) - (desired_min_addr - ent_addr));
            cvmx_bootmem_phy_set_size(ent_addr, desired_min_addr - ent_addr);
            cvmx_bootmem_phy_set_next(ent_addr, new_ent_addr);
            /* Loop again to handle actual alloc from new block */
        }

        prev_addr = ent_addr;
        ent_addr = cvmx_bootmem_phy_get_next(ent_addr);
    }
error_out:
    /* We didn't find anything, so return error */
    __cvmx_bootmem_unlock(flags);
    return(-1);
}
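
/*
 * Usage sketch for the physical allocator (illustrative only; the sizes,
 * addresses and flags are arbitrary examples): cvmx_bootmem_phy_alloc()
 * returns a 64 bit physical address rather than a pointer, so it works even
 * for memory that a 32 bit application cannot map directly.
 *
 *     // 64 KB, 4 KB aligned, taken from the end of the first 256 MB
 *     int64_t phys = cvmx_bootmem_phy_alloc(0x10000, 0, 0x10000000, 0x1000,
 *                                           CVMX_BOOTMEM_FLAG_END_ALLOC);
 *     if (phys < 0)
 *         cvmx_dprintf("physical bootmem allocation failed\n");
 *     else
 *         __cvmx_bootmem_phy_free(phys, 0x10000, 0);  // return it to the free list
 */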


int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
{
    uint64_t cur_addr;
    uint64_t prev_addr = 0;  /* zero is invalid */
    int retval = 0;

#ifdef DEBUG
    cvmx_dprintf("__cvmx_bootmem_phy_free addr: 0x%llx, size: 0x%llx\n", (ULL)phy_addr, (ULL)size);
#endif
    if (__cvmx_bootmem_check_version(0))
        return(0);

    /* 0 is not a valid size for this allocator */
    if (!size)
        return(0);


    __cvmx_bootmem_lock(flags);
    cur_addr = CVMX_BOOTMEM_DESC_GET_FIELD(head_addr);
    if (cur_addr == 0 || phy_addr < cur_addr)
    {
        /* add at front of list - special case with changing head ptr */
        if (cur_addr && phy_addr + size > cur_addr)
            goto bootmem_free_done;  /* error, overlapping section */
        else if (phy_addr + size == cur_addr)
        {
            /* Add to front of existing first block */
            cvmx_bootmem_phy_set_next(phy_addr, cvmx_bootmem_phy_get_next(cur_addr));
            cvmx_bootmem_phy_set_size(phy_addr, cvmx_bootmem_phy_get_size(cur_addr) + size);
            CVMX_BOOTMEM_DESC_SET_FIELD(head_addr, phy_addr);

        }
        else
        {
            /* New block before first block */
            cvmx_bootmem_phy_set_next(phy_addr, cur_addr);  /* OK if cur_addr is 0 */
            cvmx_bootmem_phy_set_size(phy_addr, size);
            CVMX_BOOTMEM_DESC_SET_FIELD(head_addr, phy_addr);
        }
        retval = 1;
        goto bootmem_free_done;
    }

    /* Find place in list to add block */
    while (cur_addr && phy_addr > cur_addr)
    {
        prev_addr = cur_addr;
        cur_addr = cvmx_bootmem_phy_get_next(cur_addr);
    }

    if (!cur_addr)
    {
        /* We have reached the end of the list, add on to end, checking
        ** to see if we need to combine with last block
        **/
        if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) == phy_addr)
        {
            cvmx_bootmem_phy_set_size(prev_addr, cvmx_bootmem_phy_get_size(prev_addr) + size);
        }
        else
        {
            cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
            cvmx_bootmem_phy_set_size(phy_addr, size);
            cvmx_bootmem_phy_set_next(phy_addr, 0);
        }
        retval = 1;
        goto bootmem_free_done;
    }
    else
    {
        /* insert between prev and cur nodes, checking for merge with either/both */

        if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) == phy_addr)
        {
            /* Merge with previous */
            cvmx_bootmem_phy_set_size(prev_addr, cvmx_bootmem_phy_get_size(prev_addr) + size);
            if (phy_addr + size == cur_addr)
            {
                /* Also merge with current */
                cvmx_bootmem_phy_set_size(prev_addr, cvmx_bootmem_phy_get_size(cur_addr) + cvmx_bootmem_phy_get_size(prev_addr));
                cvmx_bootmem_phy_set_next(prev_addr, cvmx_bootmem_phy_get_next(cur_addr));
            }
            retval = 1;
            goto bootmem_free_done;
        }
        else if (phy_addr + size == cur_addr)
        {
            /* Merge with current */
            cvmx_bootmem_phy_set_size(phy_addr, cvmx_bootmem_phy_get_size(cur_addr) + size);
            cvmx_bootmem_phy_set_next(phy_addr, cvmx_bootmem_phy_get_next(cur_addr));
            cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
            retval = 1;
            goto bootmem_free_done;
        }

        /* It is a standalone block, add in between prev and cur */
        cvmx_bootmem_phy_set_size(phy_addr, size);
        cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
        cvmx_bootmem_phy_set_next(prev_addr, phy_addr);


    }
    retval = 1;

bootmem_free_done:
    __cvmx_bootmem_unlock(flags);
    return(retval);

}



void cvmx_bootmem_phy_list_print(void)
{
    uint64_t addr;

    addr = CVMX_BOOTMEM_DESC_GET_FIELD(head_addr);
    cvmx_dprintf("\n\n\nPrinting bootmem block list, descriptor: 0x%llx, head is 0x%llx\n",
                 (ULL)cvmx_bootmem_desc_addr, (ULL)addr);
    cvmx_dprintf("Descriptor version: %d.%d\n",
                 (int)CVMX_BOOTMEM_DESC_GET_FIELD(major_version),
                 (int)CVMX_BOOTMEM_DESC_GET_FIELD(minor_version));
    if (CVMX_BOOTMEM_DESC_GET_FIELD(major_version) > 3)
    {
        cvmx_dprintf("Warning: Bootmem descriptor version is newer than expected\n");
    }
    if (!addr)
    {
        cvmx_dprintf("mem list is empty!\n");
    }
    while (addr)
    {
        cvmx_dprintf("Block address: 0x%08llx, size: 0x%08llx, next: 0x%08llx\n",
                     (ULL)addr,
                     (ULL)cvmx_bootmem_phy_get_size(addr),
                     (ULL)cvmx_bootmem_phy_get_next(addr));
        addr = cvmx_bootmem_phy_get_next(addr);
    }
    cvmx_dprintf("\n\n");

}


uint64_t cvmx_bootmem_phy_available_mem(uint64_t min_block_size)
{
    uint64_t addr;

    uint64_t available_mem = 0;

    __cvmx_bootmem_lock(0);
    addr = CVMX_BOOTMEM_DESC_GET_FIELD(head_addr);
    while (addr)
    {
        if (cvmx_bootmem_phy_get_size(addr) >= min_block_size)
            available_mem += cvmx_bootmem_phy_get_size(addr);
        addr = cvmx_bootmem_phy_get_next(addr);
    }
    __cvmx_bootmem_unlock(0);
    return(available_mem);

}
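
/*
 * Usage sketch (illustrative only; the threshold is an arbitrary example):
 * small fragments may be unusable for large allocations, so callers can pass a
 * minimum block size and only blocks at least that large are counted.
 *
 *     uint64_t usable = cvmx_bootmem_available_mem(1 << 20);
 *     cvmx_dprintf("Bootmem free in blocks >= 1 MB: 0x%llx bytes\n", (ULL)usable);
 */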



uint64_t cvmx_bootmem_phy_named_block_find(const char *name, uint32_t flags)
{
    uint64_t result = 0;

#ifdef DEBUG
    cvmx_dprintf("cvmx_bootmem_phy_named_block_find: %s\n", name);
#endif
    __cvmx_bootmem_lock(flags);
    if (!__cvmx_bootmem_check_version(3))
    {
        int i;
        uint64_t named_block_array_addr = CVMX_BOOTMEM_DESC_GET_FIELD(named_block_array_addr);
        int num_blocks = CVMX_BOOTMEM_DESC_GET_FIELD(named_block_num_blocks);
        int name_length = CVMX_BOOTMEM_DESC_GET_FIELD(named_block_name_len);
        uint64_t named_addr = named_block_array_addr;
        for (i = 0; i < num_blocks; i++)
        {
            uint64_t named_size = CVMX_BOOTMEM_NAMED_GET_FIELD(named_addr, size);
            if (name && named_size)
            {
                char name_tmp[name_length + 1];  /* +1 for the terminating zero written by GET_NAME */
                CVMX_BOOTMEM_NAMED_GET_NAME(named_addr, name_tmp, name_length);
                if (!strncmp(name, name_tmp, name_length - 1))
                {
                    result = named_addr;
                    break;
                }
            }
            else if (!name && !named_size)
            {
                result = named_addr;
                break;
            }
            named_addr += sizeof(cvmx_bootmem_named_block_desc_t);
        }
    }
    __cvmx_bootmem_unlock(flags);
    return result;
}


int cvmx_bootmem_phy_named_block_free(const char *name, uint32_t flags)
{
    uint64_t named_block_addr;

    if (__cvmx_bootmem_check_version(3))
        return(0);
#ifdef DEBUG
    cvmx_dprintf("cvmx_bootmem_phy_named_block_free: %s\n", name);
#endif

    /* Take lock here, as name lookup/block free/name free need to be atomic */
    __cvmx_bootmem_lock(flags);

    named_block_addr = cvmx_bootmem_phy_named_block_find(name, CVMX_BOOTMEM_FLAG_NO_LOCKING);
    if (named_block_addr)
    {
        uint64_t named_addr = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_addr, base_addr);
        uint64_t named_size = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_addr, size);
#ifdef DEBUG
        cvmx_dprintf("cvmx_bootmem_phy_named_block_free: %s, base: 0x%llx, size: 0x%llx\n",
                     name, (ULL)named_addr, (ULL)named_size);
#endif
        __cvmx_bootmem_phy_free(named_addr, named_size, CVMX_BOOTMEM_FLAG_NO_LOCKING);
        /* Set size to zero to indicate block not used. */
        CVMX_BOOTMEM_NAMED_SET_FIELD(named_block_addr, size, 0);
    }
    __cvmx_bootmem_unlock(flags);
    return(!!named_block_addr);  /* 0 on failure, 1 on success */
}




int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t alignment, const char *name, uint32_t flags)
{
    int64_t addr_allocated;
    uint64_t named_block_desc_addr;

#ifdef DEBUG
    cvmx_dprintf("cvmx_bootmem_phy_named_block_alloc: size: 0x%llx, min: 0x%llx, max: 0x%llx, align: 0x%llx, name: %s\n",
                 (ULL)size,
                 (ULL)min_addr,
                 (ULL)max_addr,
                 (ULL)alignment,
                 name);
#endif

    if (__cvmx_bootmem_check_version(3))
        return(-1);

    /* Take lock here, as name lookup/block alloc/name add need to be atomic */

    __cvmx_bootmem_lock(flags);

    named_block_desc_addr = cvmx_bootmem_phy_named_block_find(name, flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
    if (named_block_desc_addr)
    {
        __cvmx_bootmem_unlock(flags);
        return(-1);
    }

    /* Get pointer to first available named block descriptor */
    named_block_desc_addr = cvmx_bootmem_phy_named_block_find(NULL, flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
    if (!named_block_desc_addr)
    {
        __cvmx_bootmem_unlock(flags);
        return(-1);
    }

    /* Round size up to a multiple of the minimum alignment bytes.
    ** We need the actual size allocated to allow for blocks to be coalesced
    ** when they are freed. The alloc routine does the same rounding up
    ** on all allocations. */
    size = (size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) & ~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);

    addr_allocated = cvmx_bootmem_phy_alloc(size, min_addr, max_addr, alignment, flags | CVMX_BOOTMEM_FLAG_NO_LOCKING);
    if (addr_allocated >= 0)
    {
        CVMX_BOOTMEM_NAMED_SET_FIELD(named_block_desc_addr, base_addr, addr_allocated);
        CVMX_BOOTMEM_NAMED_SET_FIELD(named_block_desc_addr, size, size);
        CVMX_BOOTMEM_NAMED_SET_NAME(named_block_desc_addr, name, CVMX_BOOTMEM_DESC_GET_FIELD(named_block_name_len));
    }

    __cvmx_bootmem_unlock(flags);
    return(addr_allocated);
}




void cvmx_bootmem_phy_named_block_print(void)
{
    int i;
    int printed = 0;

    uint64_t named_block_array_addr = CVMX_BOOTMEM_DESC_GET_FIELD(named_block_array_addr);
    int num_blocks = CVMX_BOOTMEM_DESC_GET_FIELD(named_block_num_blocks);
    int name_length = CVMX_BOOTMEM_DESC_GET_FIELD(named_block_name_len);
    uint64_t named_block_addr = named_block_array_addr;

#ifdef DEBUG
    cvmx_dprintf("cvmx_bootmem_phy_named_block_print, desc addr: 0x%llx\n",
                 (ULL)cvmx_bootmem_desc_addr);
#endif
    if (__cvmx_bootmem_check_version(3))
        return;
    cvmx_dprintf("List of currently allocated named bootmem blocks:\n");
    for (i = 0; i < num_blocks; i++)
    {
        uint64_t named_size = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_addr, size);
        if (named_size)
        {
            char name_tmp[name_length + 1];  /* +1 for the terminating zero written by GET_NAME */
            uint64_t named_addr = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_addr, base_addr);
            CVMX_BOOTMEM_NAMED_GET_NAME(named_block_addr, name_tmp, name_length);
            printed++;
            cvmx_dprintf("Name: %s, address: 0x%08llx, size: 0x%08llx, index: %d\n",
                         name_tmp, (ULL)named_addr, (ULL)named_size, i);
        }
        named_block_addr += sizeof(cvmx_bootmem_named_block_desc_t);
    }
    if (!printed)
    {
        cvmx_dprintf("No named bootmem blocks exist.\n");
    }

}


int64_t cvmx_bootmem_phy_mem_list_init(uint64_t mem_size, uint32_t low_reserved_bytes, cvmx_bootmem_desc_t *desc_buffer)
{
    uint64_t cur_block_addr;
    int64_t addr;
    int i;

#ifdef DEBUG
    cvmx_dprintf("cvmx_bootmem_phy_mem_list_init (arg desc ptr: %p, cvmx_bootmem_desc: 0x%llx)\n",
                 desc_buffer, (ULL)cvmx_bootmem_desc_addr);
#endif

    /* Descriptor buffer needs to be in 32 bit addressable space to be compatible with
    ** 32 bit applications */
    if (!desc_buffer)
    {
        cvmx_dprintf("ERROR: no memory for cvmx_bootmem descriptor provided\n");
        return 0;
    }

    if (mem_size > OCTEON_MAX_PHY_MEM_SIZE)
    {
        mem_size = OCTEON_MAX_PHY_MEM_SIZE;
        cvmx_dprintf("ERROR: requested memory size too large, truncating to maximum size\n");
    }

    if (cvmx_bootmem_desc_addr)
        return 1;

    /* Initialize cvmx pointer to descriptor */
#ifndef CVMX_BUILD_FOR_LINUX_HOST
    cvmx_bootmem_init(cvmx_ptr_to_phys(desc_buffer));
#else
    cvmx_bootmem_init((unsigned long)desc_buffer);
#endif

    /* Fill the bootmem descriptor */
    CVMX_BOOTMEM_DESC_SET_FIELD(lock, 0);
    CVMX_BOOTMEM_DESC_SET_FIELD(flags, 0);
    CVMX_BOOTMEM_DESC_SET_FIELD(head_addr, 0);
    CVMX_BOOTMEM_DESC_SET_FIELD(major_version, CVMX_BOOTMEM_DESC_MAJ_VER);
    CVMX_BOOTMEM_DESC_SET_FIELD(minor_version, CVMX_BOOTMEM_DESC_MIN_VER);
    CVMX_BOOTMEM_DESC_SET_FIELD(app_data_addr, 0);
    CVMX_BOOTMEM_DESC_SET_FIELD(app_data_size, 0);

    /* Set up global pointer to start of list, exclude low 64k for exception vectors, space for global descriptor */
    cur_block_addr = (OCTEON_DDR0_BASE + low_reserved_bytes);

    if (mem_size <= OCTEON_DDR0_SIZE)
    {
        __cvmx_bootmem_phy_free(cur_block_addr, mem_size - low_reserved_bytes, 0);
        goto frees_done;
    }

    __cvmx_bootmem_phy_free(cur_block_addr, OCTEON_DDR0_SIZE - low_reserved_bytes, 0);

    mem_size -= OCTEON_DDR0_SIZE;

    /* Add DDR2 block next if present */
    if (mem_size > OCTEON_DDR1_SIZE)
    {
        __cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, OCTEON_DDR1_SIZE, 0);
        __cvmx_bootmem_phy_free(OCTEON_DDR2_BASE, mem_size - OCTEON_DDR1_SIZE, 0);
    }
    else
    {
        __cvmx_bootmem_phy_free(OCTEON_DDR1_BASE, mem_size, 0);

    }
frees_done:

    /* Initialize the named block structure */
    CVMX_BOOTMEM_DESC_SET_FIELD(named_block_name_len, CVMX_BOOTMEM_NAME_LEN);
    CVMX_BOOTMEM_DESC_SET_FIELD(named_block_num_blocks, CVMX_BOOTMEM_NUM_NAMED_BLOCKS);
    CVMX_BOOTMEM_DESC_SET_FIELD(named_block_array_addr, 0);

    /* Allocate this near the top of the low 256 MBytes of memory */
    addr = cvmx_bootmem_phy_alloc(CVMX_BOOTMEM_NUM_NAMED_BLOCKS * sizeof(cvmx_bootmem_named_block_desc_t), 0, 0x10000000, 0, CVMX_BOOTMEM_FLAG_END_ALLOC);
    if (addr >= 0)
        CVMX_BOOTMEM_DESC_SET_FIELD(named_block_array_addr, addr);

#ifdef DEBUG
    cvmx_dprintf("cvmx_bootmem_phy_mem_list_init: named_block_array_addr: 0x%llx)\n",
                 (ULL)addr);
#endif
    if (addr < 0)
    {
        cvmx_dprintf("FATAL ERROR: unable to allocate memory for bootmem descriptor!\n");
        return(0);
    }
    for (i = 0; i < CVMX_BOOTMEM_NUM_NAMED_BLOCKS; i++)
    {
        CVMX_BOOTMEM_NAMED_SET_FIELD(addr, base_addr, 0);
        CVMX_BOOTMEM_NAMED_SET_FIELD(addr, size, 0);
        addr += sizeof(cvmx_bootmem_named_block_desc_t);
    }

    return(1);
}
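
/*
 * Initialization sketch (illustrative only; the memory size, reserved size and
 * descriptor placement are arbitrary examples): this setup is normally done
 * once, very early, before any of the allocation functions above are called.
 *
 *     static CVMX_SHARED cvmx_bootmem_desc_t bootmem_desc;
 *
 *     // Describe 1 GB of memory, keeping the low 1 MB reserved
 *     if (!cvmx_bootmem_phy_mem_list_init(1ull << 30, 1 << 20, &bootmem_desc))
 *         cvmx_dprintf("bootmem list init failed\n");
 */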


void cvmx_bootmem_lock(void)
{
    __cvmx_bootmem_lock(0);
}

void cvmx_bootmem_unlock(void)
{
    __cvmx_bootmem_unlock(0);
}

#ifndef CVMX_BUILD_FOR_LINUX_HOST
void *__cvmx_bootmem_internal_get_desc_ptr(void)
{
    return cvmx_phys_to_ptr(cvmx_bootmem_desc_addr);
}
#endif