#ifndef _LINUX_BLOCKGROUP_LOCK_H
#define _LINUX_BLOCKGROUP_LOCK_H
/*
 * Per-blockgroup locking for ext2 and ext3.
 *
 * Simple hashed spinlocking.
 */

#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/cache.h>

#ifdef CONFIG_SMP

/*
 * We want a power-of-two: sb_bgl_lock() below hashes a block group number
 * into the lock array by masking with NR_BG_LOCKS-1.  Scale the number of
 * locks with the size of the machine.
 */

#if NR_CPUS >= 32
#define NR_BG_LOCKS	128
#elif NR_CPUS >= 16
#define NR_BG_LOCKS	64
#elif NR_CPUS >= 8
#define NR_BG_LOCKS	32
#elif NR_CPUS >= 4
#define NR_BG_LOCKS	16
#elif NR_CPUS >= 2
#define NR_BG_LOCKS	8
#else
#define NR_BG_LOCKS	4
#endif

#else	/* CONFIG_SMP */
#define NR_BG_LOCKS	1
#endif	/* CONFIG_SMP */
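
/*
 * Illustration: with NR_BG_LOCKS == 8, block groups 0, 8, 16, ... all map
 * to lock 0 and groups 1, 9, 17, ... to lock 1, so concurrent operations
 * on different block groups usually take different locks while the lock
 * array itself stays small.
 */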

/*
 * Each lock is padded out to its own cacheline on SMP so that CPUs
 * working on different block groups do not false-share.
 */
struct bgl_lock {
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

struct blockgroup_lock {
	struct bgl_lock locks[NR_BG_LOCKS];
};

static inline void bgl_lock_init(struct blockgroup_lock *bgl)
{
	int i;

	for (i = 0; i < NR_BG_LOCKS; i++)
		spin_lock_init(&bgl->locks[i].lock);
}
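
/*
 * Typical use (a sketch, not mandated by this header): the filesystem
 * embeds a struct blockgroup_lock in its in-core superblock info under
 * the name s_blockgroup_lock (the name sb_bgl_lock() below expects) and
 * initialises it once at mount time.  For ext2 that would look like:
 *
 *	struct ext2_sb_info *sbi = EXT2_SB(sb);
 *	bgl_lock_init(&sbi->s_blockgroup_lock);
 */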

/*
 * The accessor is a macro so we can embed a blockgroup_lock into different
 * superblock types.  The block group number is hashed down to a lock index
 * by masking with NR_BG_LOCKS-1, which is why NR_BG_LOCKS must be a power
 * of two.
 */
#define sb_bgl_lock(sb, block_group) \
	(&(sb)->s_blockgroup_lock.locks[(block_group) & (NR_BG_LOCKS-1)].lock)
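
/*
 * Example (a sketch based on ext2's use of this lock): serialise an
 * update to a group descriptor's free-blocks count.  EXT2_SB() returns
 * the ext2-private superblock info, which embeds the blockgroup_lock;
 * "desc" and "count" are illustrative names for the group descriptor
 * being updated and the number of blocks freed:
 *
 *	spin_lock(sb_bgl_lock(EXT2_SB(sb), block_group));
 *	desc->bg_free_blocks_count =
 *		cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) + count);
 *	spin_unlock(sb_bgl_lock(EXT2_SB(sb), block_group));
 */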

#endif	/* _LINUX_BLOCKGROUP_LOCK_H */