/***********************license start***************
 * Copyright (c) 2003-2010 Cavium Inc. ([email protected]). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Inc. nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export control
 * laws, including the U.S. Export Administration Act and its associated
 * regulations, and may be subject to export or import regulations in other
 * countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/

/**
 * @file
 *
 * Implementation of spinlocks.
 *
 * <hr>$Revision: 70030 $<hr>
 */

#ifndef __CVMX_SPINLOCK_H__
#define __CVMX_SPINLOCK_H__

#include "cvmx-asm.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Spinlocks for Octeon */

// define this to enable recursive spinlock debugging
//#define CVMX_SPINLOCK_DEBUG

/**
 * Spinlocks for Octeon
 */
typedef struct {
    volatile uint32_t value;
} cvmx_spinlock_t;

// note - macros not expanded in inline ASM, so values hardcoded
#define CVMX_SPINLOCK_UNLOCKED_VAL  0
#define CVMX_SPINLOCK_LOCKED_VAL    1

#define CVMX_SPINLOCK_UNLOCKED_INITIALIZER  {CVMX_SPINLOCK_UNLOCKED_VAL}

/**
 * Initialize a spinlock
 *
 * @param lock   Lock to initialize
 */
static inline void cvmx_spinlock_init(cvmx_spinlock_t *lock)
{
    lock->value = CVMX_SPINLOCK_UNLOCKED_VAL;
}

/**
 * Return non-zero if the spinlock is currently locked
 *
 * @param lock   Lock to check
 * @return Non-zero if locked
 */
static inline int cvmx_spinlock_locked(cvmx_spinlock_t *lock)
{
    return (lock->value != CVMX_SPINLOCK_UNLOCKED_VAL);
}

/**
 * Releases lock
 *
 * @param lock   pointer to lock structure
 */
static inline void cvmx_spinlock_unlock(cvmx_spinlock_t *lock)
{
    CVMX_SYNCWS;
    lock->value = 0;
    CVMX_SYNCWS;
}

/**
 * Attempts to take the lock, but does not spin if the lock is not available.
 * May take some time to acquire the lock even if it is available
 * due to the ll/sc not succeeding.
 *
 * @param lock   pointer to lock structure
 *
 * @return 0: lock successfully taken
 *         1: lock not taken, held by someone else
 * These return values match the Linux semantics.
 */
static inline unsigned int cvmx_spinlock_trylock(cvmx_spinlock_t *lock)
{
    unsigned int tmp;

    __asm__ __volatile__(
        ".set noreorder            \n"
        "1: ll   %[tmp], %[val]    \n"
        "   bnez %[tmp], 2f        \n"   // if lock held, fail immediately
        "   li   %[tmp], 1         \n"
        "   sc   %[tmp], %[val]    \n"
        "   beqz %[tmp], 1b        \n"
        "   li   %[tmp], 0         \n"
        "2:                        \n"
        ".set reorder              \n"
        : [val] "+m" (lock->value), [tmp] "=&r" (tmp)
        :
        : "memory");

    return (!!tmp);  /* normalize to 0 or 1 */
}

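/*
 * Usage sketch for cvmx_spinlock_trylock(); the lock and counter names
 * below are hypothetical, not part of this header. A return of 0 means
 * the lock was taken:
 *
 *   static cvmx_spinlock_t stats_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
 *   static uint64_t stats_drops = 0;
 *
 *   static void try_count_drop(void)
 *   {
 *       if (cvmx_spinlock_trylock(&stats_lock) == 0)
 *       {
 *           stats_drops++;                     // lock taken; update shared state
 *           cvmx_spinlock_unlock(&stats_lock);
 *       }
 *       // else: lock busy; caller can retry later instead of spinning
 *   }
 */
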
/**
 * Gets lock, spins until lock is taken
 *
 * @param lock   pointer to lock structure
 */
static inline void cvmx_spinlock_lock(cvmx_spinlock_t *lock)
{
    unsigned int tmp;

    __asm__ __volatile__(
        ".set noreorder            \n"
        "1: ll   %[tmp], %[val]    \n"
        "   bnez %[tmp], 1b        \n"
        "   li   %[tmp], 1         \n"
        "   sc   %[tmp], %[val]    \n"
        "   beqz %[tmp], 1b        \n"
        "   nop                    \n"
        ".set reorder              \n"
        : [val] "+m" (lock->value), [tmp] "=&r" (tmp)
        :
        : "memory");
}

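/*
 * Usage sketch for cvmx_spinlock_lock()/cvmx_spinlock_unlock(); the lock
 * and counter names are hypothetical, not part of this header. The lock
 * may be initialized statically, as here, or at runtime with
 * cvmx_spinlock_init():
 *
 *   static cvmx_spinlock_t counter_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
 *   static uint64_t shared_counter = 0;
 *
 *   static void increment_counter(void)
 *   {
 *       cvmx_spinlock_lock(&counter_lock);    // spins until acquired
 *       shared_counter++;                     // critical section
 *       cvmx_spinlock_unlock(&counter_lock);
 *   }
 */
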
/** ********************************************************************
 * Bit spinlocks
 * These spinlocks use a single bit (bit 31) of a 32 bit word for locking.
 * The rest of the bits in the word are left undisturbed. This enables more
 * compact data structures as only 1 bit is consumed for the lock.
 *
 */

/**
 * Gets lock, spins until lock is taken
 * Preserves the low 31 bits of the 32 bit
 * word used for the lock.
 *
 * @param word  word to lock bit 31 of
 */
static inline void cvmx_spinlock_bit_lock(uint32_t *word)
{
    unsigned int tmp;

    __asm__ __volatile__(
        ".set noreorder              \n"
        ".set noat                   \n"
        "1: ll    %[tmp], %[val]     \n"
        "   bbit1 %[tmp], 31, 1b     \n"   // spin while the lock bit is set
        "   li    $at, 1             \n"
        "   ins   %[tmp], $at, 31, 1 \n"   // set bit 31, preserve the rest
        "   sc    %[tmp], %[val]     \n"
        "   beqz  %[tmp], 1b         \n"
        "   nop                      \n"
        ".set at                     \n"
        ".set reorder                \n"
        : [val] "+m" (*word), [tmp] "=&r" (tmp)
        :
        : "memory");
}

/**
 * Attempts to get lock, returns immediately with success/failure
 * Preserves the low 31 bits of the 32 bit
 * word used for the lock.
 *
 * @param word  word to lock bit 31 of
 * @return 0: lock successfully taken
 *         1: lock not taken, held by someone else
 * These return values match the Linux semantics.
 */
static inline unsigned int cvmx_spinlock_bit_trylock(uint32_t *word)
{
    unsigned int tmp;

    __asm__ __volatile__(
        ".set noreorder              \n"
        ".set noat                   \n"
        "1: ll    %[tmp], %[val]     \n"
        "   bbit1 %[tmp], 31, 2f     \n"   // if lock held, fail immediately
        "   li    $at, 1             \n"
        "   ins   %[tmp], $at, 31, 1 \n"
        "   sc    %[tmp], %[val]     \n"
        "   beqz  %[tmp], 1b         \n"
        "   li    %[tmp], 0          \n"
        "2:                          \n"
        ".set at                     \n"
        ".set reorder                \n"
        : [val] "+m" (*word), [tmp] "=&r" (tmp)
        :
        : "memory");

    return (!!tmp);  /* normalize to 0 or 1 */
}

/**
 * Releases bit lock
 *
 * Unconditionally clears bit 31 of the lock word. Note that this is
 * done non-atomically, as this implementation assumes that the rest
 * of the bits in the word are protected by the lock.
 *
 * @param word  word to unlock bit 31 in
 */
static inline void cvmx_spinlock_bit_unlock(uint32_t *word)
{
    CVMX_SYNCWS;
    *word &= ~(1U << 31);
    CVMX_SYNCWS;
}

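/*
 * Usage sketch for the bit spinlocks; the flow_entry_t layout is
 * hypothetical, not part of this header. Bit 31 of lock_and_flags is the
 * lock; the low 31 bits hold flags that may only be changed while the
 * lock is held:
 *
 *   typedef struct {
 *       uint32_t lock_and_flags;   // bit 31: lock, bits 0..30: flags
 *       uint32_t packet_count;
 *   } flow_entry_t;
 *
 *   static void flow_mark_active(flow_entry_t *flow)
 *   {
 *       cvmx_spinlock_bit_lock(&flow->lock_and_flags);
 *       flow->packet_count++;
 *       flow->lock_and_flags |= 1;   // low bits are protected by the lock
 *       cvmx_spinlock_bit_unlock(&flow->lock_and_flags);
 *   }
 */
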
/** ********************************************************************
 * Recursive spinlocks
 */
typedef struct {
    volatile unsigned int value;
    volatile unsigned int core_num;
} cvmx_spinlock_rec_t;

/**
 * Initialize a recursive spinlock
 *
 * @param lock  Lock to initialize
 */
static inline void cvmx_spinlock_rec_init(cvmx_spinlock_rec_t *lock)
{
    lock->value = CVMX_SPINLOCK_UNLOCKED_VAL;
}

/**
 * Return non-zero if the recursive spinlock is currently locked
 *
 * @param lock   Lock to check
 * @return Non-zero if locked
 */
static inline int cvmx_spinlock_rec_locked(cvmx_spinlock_rec_t *lock)
{
    return (lock->value != CVMX_SPINLOCK_UNLOCKED_VAL);
}

/**
 * Unlocks one level of recursive spinlock. The lock is not released
 * unless this is the final unlock call for that spinlock.
 *
 * @param lock  ptr to recursive spinlock structure
 */
static inline void cvmx_spinlock_rec_unlock(cvmx_spinlock_rec_t *lock);

#ifdef CVMX_SPINLOCK_DEBUG
#define cvmx_spinlock_rec_unlock(x) _int_cvmx_spinlock_rec_unlock((x), __FILE__, __LINE__)
static inline void _int_cvmx_spinlock_rec_unlock(cvmx_spinlock_rec_t *lock, char *filename, int linenum)
#else
static inline void cvmx_spinlock_rec_unlock(cvmx_spinlock_rec_t *lock)
#endif
{
    unsigned int temp, result;
    int core_num;

    core_num = cvmx_get_core_num();

#ifdef CVMX_SPINLOCK_DEBUG
    if (lock->core_num != core_num)
    {
        cvmx_dprintf("ERROR: Recursive spinlock release attempted by non-owner! file: %s, line: %d\n", filename, linenum);
        return;
    }
#endif

    __asm__ __volatile__(
        ".set noreorder               \n"
        "   addi %[tmp], %[pid], 0x80 \n"
        "   sw   %[tmp], %[lid]       # set lid to invalid value\n"
        CVMX_SYNCWS_STR
        "1: ll   %[tmp], %[val]       \n"
        "   addu %[res], %[tmp], -1   # decrement lock count\n"
        "   sc   %[res], %[val]       \n"
        "   beqz %[res], 1b           \n"
        "   nop                       \n"
        "   beq  %[tmp], %[res], 2f   # res is 1 on successful sc\n"
        "   nop                       \n"
        "   sw   %[pid], %[lid]       # set lid to pid, only if lock still held\n"
        "2:                           \n"
        CVMX_SYNCWS_STR
        ".set reorder                 \n"
        : [res] "=&r" (result), [tmp] "=&r" (temp), [val] "+m" (lock->value), [lid] "+m" (lock->core_num)
        : [pid] "r" (core_num)
        : "memory");

#ifdef CVMX_SPINLOCK_DEBUG
    if (lock->value == ~0U)   /* ~0U, not ~0UL: value is 32 bits */
    {
        cvmx_dprintf("ERROR: Recursive spinlock released too many times! file: %s, line: %d\n", filename, linenum);
    }
#endif
}

/**
 * Takes recursive spinlock for a given core. A core can take the lock multiple
 * times, and the lock is released only when the corresponding number of
 * unlocks have taken place.
 *
 * NOTE: This assumes only one thread per core, and that the core ID is used as
 * the lock 'key'. (This implementation cannot be generalized to allow
 * multiple threads to use the same key (core id).)
 *
 * @param lock  address of recursive spinlock structure. Note that this is
 *              distinct from the standard spinlock
 */
static inline void cvmx_spinlock_rec_lock(cvmx_spinlock_rec_t *lock);

#ifdef CVMX_SPINLOCK_DEBUG
#define cvmx_spinlock_rec_lock(x) _int_cvmx_spinlock_rec_lock((x), __FILE__, __LINE__)
static inline void _int_cvmx_spinlock_rec_lock(cvmx_spinlock_rec_t *lock, char *filename, int linenum)
#else
static inline void cvmx_spinlock_rec_lock(cvmx_spinlock_rec_t *lock)
#endif
{
    volatile unsigned int tmp;
    volatile int core_num;

    core_num = cvmx_get_core_num();

    __asm__ __volatile__(
        ".set noreorder              \n"
        "1: ll   %[tmp], %[val]      # load the count\n"
        "   bnez %[tmp], 2f          # if count!=zero branch to 2\n"
        "   addu %[tmp], %[tmp], 1   \n"
        "   sc   %[tmp], %[val]      \n"
        "   beqz %[tmp], 1b          # go back if not success\n"
        "   nop                      \n"
        "   j    3f                  # go to write core_num\n"
        "   nop                      # branch delay slot\n"
        "2: lw   %[tmp], %[lid]      # load the core_num\n"
        "   bne  %[tmp], %[pid], 1b  # core_num no match, restart\n"
        "   nop                      \n"
        "   lw   %[tmp], %[val]      \n"
        "   addu %[tmp], %[tmp], 1   \n"
        "   sw   %[tmp], %[val]      # update the count\n"
        "3: sw   %[pid], %[lid]      # store the core_num\n"
        CVMX_SYNCWS_STR
        ".set reorder                \n"
        : [tmp] "=&r" (tmp), [val] "+m" (lock->value), [lid] "+m" (lock->core_num)
        : [pid] "r" (core_num)
        : "memory");

#ifdef CVMX_SPINLOCK_DEBUG
    if (lock->core_num != core_num)
    {
        cvmx_dprintf("cvmx_spinlock_rec_lock: lock taken, but core_num is incorrect. file: %s, line: %d\n", filename, linenum);
    }
#endif
}

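/*
 * Usage sketch for the recursive spinlock; the table_lock and table_*
 * functions are hypothetical, not part of this header. A core may
 * re-acquire a lock it already holds; the lock is only released after
 * the matching number of unlocks:
 *
 *   static cvmx_spinlock_rec_t table_lock;   // cvmx_spinlock_rec_init() at startup
 *
 *   static void table_remove(int key)
 *   {
 *       cvmx_spinlock_rec_lock(&table_lock);  // may already be held by this core
 *       // ... remove the entry for key ...
 *       cvmx_spinlock_rec_unlock(&table_lock);
 *   }
 *
 *   static void table_replace(int old_key, int new_key)
 *   {
 *       cvmx_spinlock_rec_lock(&table_lock);
 *       table_remove(old_key);                // nested acquire; no deadlock
 *       // ... insert the entry for new_key ...
 *       cvmx_spinlock_rec_unlock(&table_lock);
 *   }
 */
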
#ifdef __cplusplus
}
#endif

#endif /* __CVMX_SPINLOCK_H__ */