/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/sched/user.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.uid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.gid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.projid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.count = ATOMIC_INIT(3),
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
	.ns.inum = PROC_USER_INIT_INO,
#ifdef CONFIG_USER_NS
	.ns.ops = &userns_operations,
#endif
	.flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_KEYS
	.keyring_name_list = LIST_HEAD_INIT(init_user_ns.keyring_name_list),
	.keyring_sem = __RWSEM_INITIALIZER(init_user_ns.keyring_sem),
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_BITS (CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ (1 << UIDHASH_BITS)
#define UIDHASH_MASK (UIDHASH_SZ - 1)
#define __uidhashfn(uid) (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid) (uidhash_table + __uidhashfn((__kuid_val(uid))))

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that.
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count = REFCOUNT_INIT(1),
	.processes = ATOMIC_INIT(1),
	.sigpending = ATOMIC_INIT(0),
	.locked_shm = 0,
	.uid = GLOBAL_ROOT_UID,
	.ratelimit = RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
};

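/*
 * Bucket selection, worked example (illustrative only, assuming the
 * default UIDHASH_BITS == 7, i.e. 128 buckets): __uidhashfn() folds
 * the bits above UIDHASH_BITS back into the index, so UIDs that
 * differ only in their high bits still land in different buckets.
 *
 *	uid = 1000
 *	(1000 >> 7) + 1000 = 7 + 1000 = 1007
 *	1007 & 127 = 111
 *
 * so uid 1000 hashes to bucket 111 of uidhash_table, while uid 1128
 * (the same low bits, high bits one greater) hashes to bucket 112.
 */
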
/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;

	hlist_for_each_entry(user, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			refcount_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	if (refcount_dec_and_lock_irqsave(&up->__count, &uidhash_lock, &flags))
		free_user(up, flags);
}

struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			return NULL;

		new->uid = uid;
		refcount_set(&new->__count, 1);
		ratelimit_state_init(&new->ratelimit, HZ, 100);
		ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already.
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;
}

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}
subsys_initcall(uid_cache_init);
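
/*
 * Usage sketch: how the alloc_uid()/free_uid() pair is meant to be
 * used by code that changes a task's UID, in the style of set_user()
 * in kernel/sys.c. This is an illustration only: the function name
 * example_switch_user() is hypothetical, it assumes the caller owns
 * a prepared struct cred, and it is kept out of the build with #if 0.
 */
#if 0
static int example_switch_user(struct cred *new)
{
	struct user_struct *new_user;

	/*
	 * Find the user_struct for the new UID and take a reference,
	 * allocating, hashing and initializing a fresh one if this
	 * UID currently has no tasks. Fails only on memory exhaustion.
	 */
	new_user = alloc_uid(new->uid);
	if (!new_user)
		return -EAGAIN;

	/*
	 * Drop the reference on the old user. If it was the last one,
	 * free_uid() takes uidhash_lock with IRQs disabled, unhashes
	 * the entry and frees it (see free_user() above).
	 */
	free_uid(new->user);
	new->user = new_user;
	return 0;
}
#endif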