/***********************license start***************
 * Copyright (c) 2003-2010  Cavium Inc. ([email protected]). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Inc. nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export  control
 * laws, including the U.S. Export Administration Act and its  associated
 * regulations, and may be subject to export or import  regulations in other
 * countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/


/**
 * @file
 *
 * cvmx-tlb supplies per core TLB access functions for simple executive
 * applications.
 *
 * <hr>$Revision: 41586 $<hr>
 */
#include "cvmx.h"
#include "cvmx-tlb.h"
#include "cvmx-core.h"
#include <math.h>

extern __uint32_t  __log2(__uint32_t);
//#define DEBUG

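/*
 * Note on the helpers below (descriptive comment, assuming the usual
 * meaning of the CVMX_* macros): each TLB operation is bracketed with
 * CVMX_EHB, which is expected to emit the MIPS "ehb" execution hazard
 * barrier so that moves to the CP0 Index/EntryHi/EntryLo0/EntryLo1/
 * PageMask registers are visible to the tlbr/tlbwi/tlbp instruction,
 * and so that the instruction's results are visible to the CP0 reads
 * that follow it.
 */
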
/**
 * @INTERNAL
 * Issue the tlb read instruction
 */
static inline void __tlb_read(void){
    CVMX_EHB;
    CVMX_TLBR;
    CVMX_EHB;
}

/**
 * @INTERNAL
 * Issue the tlb write instruction
 */
static inline void __tlb_write(void){

    CVMX_EHB;
    CVMX_TLBWI;
    CVMX_EHB;
}

/**
 * @INTERNAL
 * Issue the tlb probe instruction
 */
static inline int __tlb_probe(uint64_t hi){
    int index;
    CVMX_EHB;
    CVMX_MT_ENTRY_HIGH(hi);
    CVMX_TLBP;
    CVMX_EHB;

    CVMX_MF_TLB_INDEX(index);

    /* A failed probe sets the P bit (bit 31) of the Index register, so
       any negative value means no matching entry; normalize to -1 */
    if (index < 0) index = -1;

    return index;
}

/**
 * @INTERNAL
 * read a single tlb entry
 *
 * return 0: tlb entry is read
 *    -1: index is invalid
 */
static inline int __tlb_read_index(uint32_t tlbi){

    if (tlbi >= (uint32_t)cvmx_core_get_tlb_entries()) {
        return -1;
    }

    CVMX_MT_TLB_INDEX(tlbi);
    __tlb_read();

    return 0;
}

/**
 * @INTERNAL
 * write a single tlb entry
 *
 * return 0: tlb entry is written
 *    -1: index is invalid
 */
static inline int __tlb_write_index(uint32_t tlbi,
                                    uint64_t hi, uint64_t lo0,
                                    uint64_t lo1, uint64_t pagemask)
{

    if (tlbi >= (uint32_t)cvmx_core_get_tlb_entries()) {
        return -1;
    }

#ifdef DEBUG
    cvmx_dprintf("cvmx-tlb-dbg: "
            "write TLB %d: hi %lx, lo0 %lx, lo1 %lx, pagemask %lx \n",
            tlbi, hi, lo0, lo1, pagemask);
#endif

    CVMX_MT_TLB_INDEX(tlbi);
    CVMX_MT_ENTRY_HIGH(hi);
    CVMX_MT_ENTRY_LO_0(lo0);
    CVMX_MT_ENTRY_LO_1(lo1);
    CVMX_MT_PAGEMASK(pagemask);
    __tlb_write();

    return 0;
}

/**
 * @INTERNAL
 * Determine if a TLB entry is free to use
 */
static inline int __tlb_entry_is_free(uint32_t tlbi) {
    int ret = 0;
    uint64_t lo0 = 0, lo1 = 0;

    if (tlbi < (uint32_t)cvmx_core_get_tlb_entries()) {

        __tlb_read_index(tlbi);

        /* Unused entries have neither even nor odd page mapped */
        CVMX_MF_ENTRY_LO_0(lo0);
        CVMX_MF_ENTRY_LO_1(lo1);

        if (!(lo0 & TLB_VALID) && !(lo1 & TLB_VALID)) {
            ret = 1;
        }
    }

    return ret;
}


/**
 * @INTERNAL
 * dump a single tlb entry
 */
static inline void __tlb_dump_index(uint32_t tlbi)
{
    if (tlbi < (uint32_t)cvmx_core_get_tlb_entries()) {

        if (__tlb_entry_is_free(tlbi)) {
#ifdef DEBUG
            cvmx_dprintf("Index: %3d Free \n", tlbi);
#endif
        } else {
            uint64_t lo0, lo1, pgmask;
            uint32_t hi;
#ifdef DEBUG
            uint32_t c0, c1;
            int width = 13;
#endif

            __tlb_read_index(tlbi);

            CVMX_MF_ENTRY_HIGH(hi);
            CVMX_MF_ENTRY_LO_0(lo0);
            CVMX_MF_ENTRY_LO_1(lo1);
            CVMX_MF_PAGEMASK(pgmask);

#ifdef DEBUG
            c0 = (lo0 >> 3) & 7;
            c1 = (lo1 >> 3) & 7;

            cvmx_dprintf("va=%0*lx asid=%02x\n",
                               width, (hi & ~0x1fffUL), hi & 0xff);

            cvmx_dprintf("\t[pa=%0*lx c=%d d=%d v=%d g=%d] ",
                               width,
                               (lo0 << 6) & PAGE_MASK, c0,
                               (lo0 & 4) ? 1 : 0,
                               (lo0 & 2) ? 1 : 0,
                               (lo0 & 1) ? 1 : 0);
            cvmx_dprintf("[pa=%0*lx c=%d d=%d v=%d g=%d]\n",
                               width,
                               (lo1 << 6) & PAGE_MASK, c1,
                               (lo1 & 4) ? 1 : 0,
                               (lo1 & 2) ? 1 : 0,
                               (lo1 & 1) ? 1 : 0);

#endif
        }
    }
}

/**
 * @INTERNAL
 * Read the CP0 Wired register, i.e. the index of the first non-wired
 * (replaceable) TLB entry
 */
static inline uint32_t __tlb_wired_index(void) {
    uint32_t  tlbi;

    CVMX_MF_TLB_WIRED(tlbi);
    return tlbi;
}

/**
 *  Find a free entry that can be used for shared memory mapping.
 *
 *  @return -1: no free entry found
 *  @return >=0: index of a free entry
 */
int cvmx_tlb_allocate_runtime_entry(void)
{
    uint32_t i;
    int ret = -1;

    for (i = __tlb_wired_index(); i < (uint32_t)cvmx_core_get_tlb_entries(); i++) {

        /* Check to make sure the index is free to use */
        if (__tlb_entry_is_free(i)) {
            /* Found and return */
            ret = i;
            break;
        }
    }

    return ret;
}

/**
 *  Invalidate the TLB entry. Remove previous mapping if one was set up
 */
void cvmx_tlb_free_runtime_entry(uint32_t tlbi)
{
    /* Invalidate an unwired TLB entry by pointing it at an unmapped
       CKSEG0 virtual address with both EntryLo valid bits clear, so the
       entry can never produce a match */
    if ((tlbi < (uint32_t)cvmx_core_get_tlb_entries()) && (tlbi >= __tlb_wired_index())) {
        __tlb_write_index(tlbi, 0xffffffff80000000ULL, 0, 0, 0);
    }
}


/**
 *  Program a single TLB entry to enable the provided vaddr to paddr mapping.
 *
 *  @param index  Index of the TLB entry
 *  @param vaddr  The virtual address for this mapping
 *  @param paddr  The physical address for this mapping
 *  @param size   Size of the mapping
 *  @param tlb_flags  Entry mapping flags
 */
void cvmx_tlb_write_entry(int index, uint64_t vaddr, uint64_t paddr,
                          uint64_t size, uint64_t tlb_flags) {
    uint64_t lo0, lo1, hi, pagemask;

    if (__is_power_of_two(size)) {
        if ((__log2(size) & 1) == 0) {
            /* log2(size) is even, so size matches a single page size.
               Map only one page of the even/odd pair and figure out
               whether it is the even or the odd page */
            if ((vaddr >> __log2(size) & 1)) {
                lo0 = 0;
                lo1 = ((paddr >> 12) << 6) | tlb_flags;
                hi  = ((vaddr - size) >> 12) << 12;
            } else {
                lo0 = ((paddr >> 12) << 6) | tlb_flags;
                lo1 = 0;
                hi  = ((vaddr) >> 12) << 12;
            }
            /* Clear the low 12 bits; PageMask only covers bits above
               the fixed 4KB page offset */
            pagemask = (size - 1) & (~1 << 11);
        } else {
            /* log2(size) is odd; split the mapping into two pages of
               size/2, one in EntryLo0 and one in EntryLo1 */
            lo0 = ((paddr >> 12) << 6) | tlb_flags;
            lo1 = (((paddr + size / 2) >> 12) << 6) | tlb_flags;
            hi  = ((vaddr) >> 12) << 12;
            pagemask = ((size / 2) - 1) & (~1 << 11);
        }


        __tlb_write_index(index, hi, lo0, lo1, pagemask);

    }
}
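
/*
 * Illustration (added comment, not part of the original code): given the
 * logic above, an 8KB request fills both halves of one entry with 4KB
 * pages, while a 4KB request fills only the half selected by bit 12 of
 * vaddr. TLB_VALID is the only flag bit referenced in this file; any
 * further bits depend on cvmx-tlb.h, and "idx" is a caller-chosen index,
 * e.g. from cvmx_tlb_allocate_runtime_entry().
 *
 *   cvmx_tlb_write_entry(idx, 0x10000000, 0x20000000, 0x2000, TLB_VALID);
 *       -> EntryLo0 maps 0x20000000, EntryLo1 maps 0x20001000, 4KB pages
 *
 *   cvmx_tlb_write_entry(idx, 0x10001000, 0x20001000, 0x1000, TLB_VALID);
 *       -> only EntryLo1 is valid; EntryHi covers the pair at 0x10000000
 */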


/**
 *  Program a single TLB entry to enable the provided vaddr to paddr mapping.
 *  This version adds a wired entry that should not be changed at run time
 *
 *  @param vaddr  The virtual address for this mapping
 *  @param paddr  The physical address for this mapping
 *  @param size   Size of the mapping
 *  @param tlb_flags  Entry mapping flags
 *  @return 0: no free wired TLB entry, nothing was added
 *  @return 1: fixed entry added
 */
int cvmx_tlb_add_fixed_entry(uint64_t vaddr, uint64_t paddr, uint64_t size, uint64_t tlb_flags) {

    uint64_t index;
    int ret = 0;

    CVMX_MF_TLB_WIRED(index);

    /* Check to make sure the index is free to use */
    if (index < (uint32_t)cvmx_core_get_tlb_entries() && __tlb_entry_is_free(index)) {
        cvmx_tlb_write_entry(index, vaddr, paddr, size, tlb_flags);

        if (!__tlb_entry_is_free(index)) {
            /* Bump up the wired register */
            CVMX_MT_TLB_WIRED(index + 1);
            ret = 1;
        }
    }
    return ret;
}
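
/*
 * Illustration (added comment, not part of the original code): fixed
 * entries always land at the current Wired index, so a boot-time sequence
 * might reserve a mapping before any runtime entries are handed out.
 * The addresses, size and flag below are arbitrary examples.
 *
 *   if (!cvmx_tlb_add_fixed_entry(0x30000000, 0x30000000, 0x400000, TLB_VALID))
 *       cvmx_dprintf("no free wired TLB slot\n");
 */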


/**
 *  Program a single TLB entry to enable the provided vaddr to paddr mapping.
 *  This version writes a runtime entry. It will check the index to make sure
 *  not to overwrite any fixed entries.
 *
 *  @param index  Index of the TLB entry
 *  @param vaddr  The virtual address for this mapping
 *  @param paddr  The physical address for this mapping
 *  @param size   Size of the mapping
 *  @param tlb_flags  Entry mapping flags
 */
void cvmx_tlb_write_runtime_entry(int index, uint64_t vaddr, uint64_t paddr,
                          uint64_t size, uint64_t tlb_flags)
{

    int wired_index;
    CVMX_MF_TLB_WIRED(wired_index);

    if (index >= wired_index) {
        cvmx_tlb_write_entry(index, vaddr, paddr, size, tlb_flags);
    }

}



/**
 * Find the TLB index of a given virtual address
 *
 *  @param vaddr  The virtual address to look up
 *  @return  -1  not TLB mapped
 *           >=0 TLB index
 */
int cvmx_tlb_lookup(uint64_t vaddr) {
    uint64_t hi = (vaddr >> 13) << 13; /* We always use ASID 0 */

    return  __tlb_probe(hi);
}
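
/*
 * Illustration (added comment, not part of the original code): a typical
 * runtime-mapping sequence using only the functions in this file.  The
 * vaddr/paddr values and the 0x2000 size are placeholders.
 *
 *   int idx = cvmx_tlb_allocate_runtime_entry();
 *   if (idx >= 0) {
 *       cvmx_tlb_write_runtime_entry(idx, vaddr, paddr, 0x2000, TLB_VALID);
 *       ...use the mapping...
 *       cvmx_tlb_free_runtime_entry(idx);
 *   }
 *
 *   cvmx_tlb_lookup(vaddr) returns idx while the mapping is installed and
 *   -1 after it has been freed.
 */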

/**
 *  Debug routine to show all shared memory mappings
 */
void cvmx_tlb_dump_shared_mapping(void) {
    uint32_t tlbi;

    for (tlbi = __tlb_wired_index(); tlbi < (uint32_t)cvmx_core_get_tlb_entries(); tlbi++) {
        __tlb_dump_index(tlbi);
    }
}

/**
 *  Debug routine to show all TLB entries of this core
 *
 */
void cvmx_tlb_dump_all(void) {

    uint32_t tlbi;

    for (tlbi = 0; tlbi < (uint32_t)cvmx_core_get_tlb_entries(); tlbi++) {
        __tlb_dump_index(tlbi);
    }
}
