/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <[email protected]>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_TYPES_H
#define __LINUX_LOCKDEP_TYPES_H

#include <linux/types.h>

#define MAX_LOCKDEP_SUBCLASSES		8UL

enum lockdep_wait_type {
	LD_WAIT_INV = 0,	/* not checked, catch all */

	LD_WAIT_FREE,		/* wait free, rcu etc.. */
	LD_WAIT_SPIN,		/* spin loops, raw_spinlock_t etc.. */

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
	LD_WAIT_CONFIG,		/* preemptible in PREEMPT_RT, spinlock_t etc.. */
#else
	LD_WAIT_CONFIG = LD_WAIT_SPIN,
#endif
	LD_WAIT_SLEEP,		/* sleeping locks, mutex_t etc.. */

	LD_WAIT_MAX,		/* must be last */
};

enum lockdep_lock_type {
	LD_LOCK_NORMAL = 0,	/* normal, catch all */
	LD_LOCK_PERCPU,		/* percpu */
	LD_LOCK_WAIT_OVERRIDE,	/* annotation */
	LD_LOCK_MAX,
};

#ifdef CONFIG_LOCKDEP

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 *
 * XXX_LOCK_USAGE_STATES is the number of lines in lockdep_states.h; for each
 * of those we generate 4 states. Additionally we report on USED and USED_READ.
 */
#define XXX_LOCK_USAGE_STATES		2
#define LOCK_TRACE_STATES		(XXX_LOCK_USAGE_STATES*4 + 2)

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires this highly contended lock at single depth.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2
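
/*
 * Illustrative sketch (hypothetical locks "a" and "b", not part of
 * this header): with two cache slots, both the plain (subclass 0) and
 * the depth-1 acquisition of a class hit class_cache[] in lockdep_map
 * instead of the global class hash table:
 *
 *	spin_lock(&a->lock);
 *	spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 */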

/*
 * A lockdep key is associated with each lock object. For static locks we use
 * the lock address itself as the key. Dynamically allocated lock objects can
 * have a statically or dynamically allocated key. Dynamically allocated lock
 * keys must be registered before being used and must be unregistered before
 * the key memory is freed.
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

/* hash_entry is used to keep track of dynamically allocated keys. */
struct lock_class_key {
	union {
		struct hlist_node		hash_entry;
		struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
	};
};

extern struct lock_class_key __lockdep_no_validate__;
extern struct lock_class_key __lockdep_no_track__;

struct lock_trace;

#define LOCKSTAT_POINTS		4

struct lockdep_map;
typedef int (*lock_cmp_fn)(const struct lockdep_map *a,
			   const struct lockdep_map *b);
typedef void (*lock_print_fn)(const struct lockdep_map *map);
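
/*
 * Illustrative sketch (hypothetical btree_node type and helpers): a
 * cmp_fn, installed with lock_set_cmp_fn(), lets two locks of the same
 * class nest, provided the function imposes a consistent order on the
 * instances, roughly:
 *
 *	static int btree_lock_cmp_fn(const struct lockdep_map *a,
 *				     const struct lockdep_map *b)
 *	{
 *		return node_level(a) - node_level(b);
 *	}
 *
 *	lock_set_cmp_fn(&node->lock, btree_lock_cmp_fn,
 *			btree_lock_print_fn);
 *
 * The print_fn is used to describe the two instances when lockdep does
 * report a same-class nesting as invalid.
 */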

/*
 * The lock-class itself. The order of the structure members matters.
 * reinit_class() zeroes the key member and all subsequent members.
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node		hash_entry;

	/*
	 * Entry in all_lock_classes when in use. Entry in free_lock_classes
	 * when not in use. Instances that are being freed are on one of the
	 * zapped_classes lists.
	 */
	struct list_head		lock_entry;

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	const struct lockdep_subclass_key *key;
	lock_cmp_fn			cmp_fn;
	lock_print_fn			print_fn;

	unsigned int			subclass;
	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	const struct lock_trace		*usage_traces[LOCK_TRACE_STATES];

	const char			*name;
	/*
	 * Distinguishes classes that share the same name (a "#<n>" suffix
	 * is printed for the duplicates):
	 */
	int				name_version;

	u8				wait_type_inner;
	u8				wait_type_outer;
	u8				lock_type;
	/* u8				hole; */

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
} __no_randomize_layout;

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif
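
/*
 * Usage note: with CONFIG_LOCK_STAT=y these statistics are exported
 * through /proc/lock_stat (see Documentation/locking/lockstat.rst);
 * writing a 0 to that file clears the counters:
 *
 *	echo 0 > /proc/lock_stat
 *	cat /proc/lock_stat
 */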

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
	u8				wait_type_outer; /* can be taken in this context */
	u8				wait_type_inner; /* represents this context */
	u8				lock_type;
	/* u8				hole; */
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

struct pin_cookie { unsigned int val; };
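
/*
 * Illustrative sketch (hypothetical "q" object): pinning asserts that
 * a held lock is not released behind the caller's back, e.g. across a
 * helper that must keep it held:
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(&q->lock);
 *	helper_that_must_keep_q_locked(q);
 *	lockdep_unpin_lock(&q->lock, cookie);
 *
 * Unlocking q->lock while it is pinned triggers a lockdep warning.
 */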

#define MAX_LOCKDEP_KEYS_BITS		13
#define MAX_LOCKDEP_KEYS		(1UL << MAX_LOCKDEP_KEYS_BITS)
#define INITIAL_CHAIN_KEY		-1

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int sync:1;
	unsigned int references:11;					/* 32 bits */
	unsigned int pin_count;
};

#else /* !CONFIG_LOCKDEP */

/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

/*
 * The lockdep_map takes no space if lockdep is disabled:
 */
struct lockdep_map { };

struct pin_cookie { };

#endif /* !LOCKDEP */

#endif /* __LINUX_LOCKDEP_TYPES_H */