/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 * tiny variant.
 *
 * Copyright (C) IBM Corporation, 2017
 *
 * Author: Paul McKenney <[email protected]>
 */

#ifndef _LINUX_SRCU_TINY_H
#define _LINUX_SRCU_TINY_H

#include <linux/swait.h>

struct srcu_struct {
	short srcu_lock_nesting[2];	/* srcu_read_lock() nesting depth. */
	u8 srcu_gp_running;		/* GP workqueue running? */
	u8 srcu_gp_waiting;		/* GP waiting for readers? */
	unsigned long srcu_idx;		/* Current reader array element in bit 0x2. */
	unsigned long srcu_idx_max;	/* Furthest future srcu_idx request. */
	struct swait_queue_head srcu_wq;
					/* Last srcu_read_unlock() wakes GP. */
	struct rcu_head *srcu_cb_head;	/* Pending callbacks: Head. */
	struct rcu_head **srcu_cb_tail;	/* Pending callbacks: Tail. */
	struct work_struct srcu_work;	/* For driving grace periods. */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
};

void srcu_drive_gp(struct work_struct *wp);

#define __SRCU_STRUCT_INIT(name, __ignored, ___ignored)			\
{									\
	.srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq),	\
	.srcu_cb_tail = &name.srcu_cb_head,				\
	.srcu_work = __WORK_INITIALIZER(name.srcu_work, srcu_drive_gp),	\
	__SRCU_DEP_MAP_INIT(name)					\
}

/*
 * This odd _STATIC_ arrangement is needed for API compatibility with
 * Tree SRCU, which needs some per-CPU data.
 */
#define DEFINE_SRCU(name) \
	struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name)
#define DEFINE_STATIC_SRCU(name) \
	static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name)

// Dummy structure for srcu_notifier_head.
struct srcu_usage { };
#define __SRCU_USAGE_INIT(name) { }

void synchronize_srcu(struct srcu_struct *ssp);
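
/*
 * Illustrative sketch only, not part of this header's API: a minimal
 * reader/updater pairing built on the generic SRCU entry points declared
 * in <linux/srcu.h> (srcu_read_lock(), srcu_read_unlock(),
 * synchronize_srcu()).  The names my_srcu, my_reader(), and my_update()
 * are hypothetical.
 *
 *	DEFINE_STATIC_SRCU(my_srcu);
 *
 *	void my_reader(void)
 *	{
 *		int idx = srcu_read_lock(&my_srcu);
 *		// ... dereference SRCU-protected data here, may sleep ...
 *		srcu_read_unlock(&my_srcu, idx);
 *	}
 *
 *	void my_update(void)
 *	{
 *		// ... unpublish the old data here ...
 *		synchronize_srcu(&my_srcu);	// Wait for pre-existing readers.
 *		// ... now safe to free the old data ...
 *	}
 */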

/*
 * Counts the new reader in the appropriate element of the srcu_struct.
 * Can be invoked from irq/bh handlers, but the matching
 * __srcu_read_unlock() must be in the same handler instance.  Returns an
 * index that must be passed to the matching srcu_read_unlock().
 */
static inline int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	preempt_disable();  // Needed for PREEMPT_LAZY
	idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
	WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
	preempt_enable();
	return idx;
}

struct srcu_ctr;

/* Tiny SRCU has no per-CPU counters, so the "pointer" encodes the index itself. */
static inline int __srcu_ptr_to_ctr(struct srcu_struct *ssp, struct srcu_ctr __percpu *scpp)
{
	return (int)(intptr_t)(struct srcu_ctr __force __kernel *)scpp;
}

static inline struct srcu_ctr __percpu *__srcu_ctr_to_ptr(struct srcu_struct *ssp, int idx)
{
	return (struct srcu_ctr __percpu *)(intptr_t)idx;
}

static inline struct srcu_ctr __percpu *__srcu_read_lock_fast(struct srcu_struct *ssp)
{
	return __srcu_ctr_to_ptr(ssp, __srcu_read_lock(ssp));
}

static inline void __srcu_read_unlock_fast(struct srcu_struct *ssp, struct srcu_ctr __percpu *scp)
{
	__srcu_read_unlock(ssp, __srcu_ptr_to_ctr(ssp, scp));
}

#define __srcu_read_lock_lite __srcu_read_lock
#define __srcu_read_unlock_lite __srcu_read_unlock

static inline void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	synchronize_srcu(ssp);
}

static inline void srcu_barrier(struct srcu_struct *ssp)
{
	synchronize_srcu(ssp);
}

#define srcu_check_read_flavor(ssp, read_flavor) do { } while (0)
#define srcu_check_read_flavor_force(ssp, read_flavor) do { } while (0)

/* Defined here to avoid size increase for non-torture kernels. */
static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
					    char *tt, char *tf)
{
	int idx;

	idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1;
	pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd) gp: %lu->%lu\n",
		 tt, tf, idx,
		 data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])),
		 data_race(READ_ONCE(ssp->srcu_lock_nesting[idx])),
		 data_race(READ_ONCE(ssp->srcu_idx)),
		 data_race(READ_ONCE(ssp->srcu_idx_max)));
}

#endif