/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SEQLOCK_TYPES_H
#define __LINUX_SEQLOCK_TYPES_H

#include <linux/lockdep_types.h>
#include <linux/mutex_types.h>
#include <linux/spinlock_types.h>

/*
 * Sequence counters (seqcount_t)
 *
 * This is the raw counting mechanism, without any writer protection.
 *
 * Write side critical sections must be serialized and non-preemptible.
 *
 * If readers can be invoked from hardirq or softirq contexts,
 * interrupts or bottom halves must also be respectively disabled before
 * entering the write section.
 *
 * This mechanism can't be used if the protected data contains pointers,
 * as the writer can invalidate a pointer that a reader is following.
 *
 * If the write serialization mechanism is one of the common kernel
 * locking primitives, use a sequence counter with associated lock
 * (seqcount_LOCKNAME_t) instead.
 *
 * If it's desired to automatically handle the sequence counter writer
 * serialization and non-preemptibility requirements, use a sequential
 * lock (seqlock_t) instead.
 *
 * See Documentation/locking/seqlock.rst
 */
typedef struct seqcount {
	unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} seqcount_t;
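
/*
 * Illustrative usage sketch (not part of the original header), assuming
 * a hypothetical struct foo and the read/write helpers declared in
 * <linux/seqlock.h>. Writers must already be serialized and
 * non-preemptible, e.g. by holding a spinlock, as required above.
 *
 *	static seqcount_t foo_seq = SEQCNT_ZERO(foo_seq);
 *	static struct foo foo_data;
 *
 *	Reader, retrying until it copies out a consistent snapshot:
 *
 *	struct foo foo_read(void)
 *	{
 *		struct foo snapshot;
 *		unsigned int seq;
 *
 *		do {
 *			seq = read_seqcount_begin(&foo_seq);
 *			snapshot = foo_data;
 *		} while (read_seqcount_retry(&foo_seq, seq));
 *
 *		return snapshot;
 *	}
 *
 *	Writer, with serialization and preemption handled by the caller:
 *
 *	void foo_write(const struct foo *new)
 *	{
 *		write_seqcount_begin(&foo_seq);
 *		foo_data = *new;
 *		write_seqcount_end(&foo_seq);
 *	}
 */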

/*
 * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
 * disable preemption. It can lead to higher latencies, and the write side
 * sections will not be able to acquire locks which become sleeping locks
 * (e.g. spinlock_t).
 *
 * To remain preemptible while avoiding a possible livelock caused by the
 * reader preempting the writer, use a different technique: let the reader
 * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
 * case, acquire then release the associated LOCKNAME writer serialization
 * lock. This will allow any possibly-preempted writer to make progress
 * until the end of its writer serialization lock critical section.
 *
 * This lock-unlock technique must be implemented for all of PREEMPT_RT
 * sleeping locks.  See Documentation/locking/locktypes.rst
 */
#if defined(CONFIG_LOCKDEP) || defined(CONFIG_PREEMPT_RT)
#define __SEQ_LOCK(expr)	expr
#else
#define __SEQ_LOCK(expr)
#endif

#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase)	\
typedef struct seqcount_##lockname {					\
	seqcount_t		seqcount;				\
	__SEQ_LOCK(locktype	*lock);					\
} seqcount_##lockname##_t;

SEQCOUNT_LOCKNAME(raw_spinlock,	raw_spinlock_t,	false,		raw_spin)
SEQCOUNT_LOCKNAME(spinlock,	spinlock_t,	__SEQ_RT,	spin)
SEQCOUNT_LOCKNAME(rwlock,	rwlock_t,	__SEQ_RT,	read)
SEQCOUNT_LOCKNAME(mutex,	struct mutex,	true,		mutex)
#undef SEQCOUNT_LOCKNAME

/*
 * Sequential locks (seqlock_t)
 *
 * Sequence counters with an embedded spinlock for writer serialization
 * and non-preemptibility.
 *
 * For more info, see:
 *    - Comments on top of seqcount_t
 *    - Documentation/locking/seqlock.rst
 */
typedef struct {
	/*
	 * Make sure that readers don't starve writers on PREEMPT_RT: use
	 * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
	 */
	seqcount_spinlock_t seqcount;
	spinlock_t lock;
} seqlock_t;

#endif /* __LINUX_SEQLOCK_TYPES_H */
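
/*
 * Illustrative usage sketch (not part of the original header): the same
 * pattern with a seqlock_t, which embeds the spinlock and therefore
 * handles writer serialization and non-preemptibility itself. Assumes a
 * hypothetical struct foo and the helpers declared in <linux/seqlock.h>.
 *
 *	static DEFINE_SEQLOCK(foo_lock);
 *	static struct foo foo_data;
 *
 *	Reader:
 *
 *	struct foo foo_read(void)
 *	{
 *		struct foo snapshot;
 *		unsigned int seq;
 *
 *		do {
 *			seq = read_seqbegin(&foo_lock);
 *			snapshot = foo_data;
 *		} while (read_seqretry(&foo_lock, seq));
 *
 *		return snapshot;
 *	}
 *
 *	Writer:
 *
 *	void foo_write(const struct foo *new)
 *	{
 *		write_seqlock(&foo_lock);
 *		foo_data = *new;
 *		write_sequnlock(&foo_lock);
 *	}
 */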