/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_RANDOM_H
#define _LINUX_RANDOM_H

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/once.h>

#include <uapi/linux/random.h>

struct notifier_block;

/*
 * Entropy-input entry points: each mixes caller-supplied bytes or event data
 * into the RNG's input pool.
 */
void add_device_randomness(const void *buf, size_t len);
void __init add_bootloader_randomness(const void *buf, size_t len);
void add_input_randomness(unsigned int type, unsigned int code,
			  unsigned int value) __latent_entropy;
void add_interrupt_randomness(int irq) __latent_entropy;
void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);

#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
/*
 * Mix the plugin-maintained latent_entropy value into the pool.
 * NOTE(review): latent_entropy itself is declared by the GCC latent_entropy
 * plugin, not by this header — confirm against the plugin docs.
 */
static inline void add_latent_entropy(void)
{
	add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
}
#else
/* Plugin disabled (or sparse run): nothing to mix in. */
static inline void add_latent_entropy(void) { }
#endif

#if IS_ENABLED(CONFIG_VMGENID)
void add_vmfork_randomness(const void *unique_vm_id, size_t len);
int register_random_vmfork_notifier(struct notifier_block *nb);
int unregister_random_vmfork_notifier(struct notifier_block *nb);
#else
/* No VM generation-ID support: notifier (un)registration is a successful no-op. */
static inline int register_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
#endif

/* Output side: fill a buffer, or fetch one fixed-width random integer. */
void get_random_bytes(void *buf, size_t len);
u8 get_random_u8(void);
u16 get_random_u16(void);
u32 get_random_u32(void);
u64 get_random_u64(void);

/* Native-word-sized random value; generator width chosen at compile time. */
static inline unsigned long get_random_long(void)
{
#if BITS_PER_LONG == 64
	return get_random_u64();
#else
	return get_random_u32();
#endif
}

/* Out-of-line slow path used when ceil is not a compile-time constant. */
u32 __get_random_u32_below(u32 ceil);

/*
 * Returns a random integer in the interval [0, ceil), with uniform
 * distribution, suitable for all uses. Fastest when ceil is a constant, but
 * still fast for variable ceil as well.
 */
static inline u32 get_random_u32_below(u32 ceil)
{
	if (!__builtin_constant_p(ceil))
		return __get_random_u32_below(ceil);

	/*
	 * For the fast path, below, all operations on ceil are precomputed by
	 * the compiler, so this incurs no overhead for checking pow2, doing
	 * divisions, or branching based on integer size. The resultant
	 * algorithm does traditional reciprocal multiplication (typically
	 * optimized by the compiler into shifts and adds), rejecting samples
	 * whose lower half would indicate a range indivisible by ceil.
	 */
	BUILD_BUG_ON_MSG(!ceil, "get_random_u32_below() must take ceil > 0");
	if (ceil <= 1)
		return 0;
	for (;;) {
		/*
		 * Use the narrowest generator whose range covers ceil. In each
		 * arm, the high half of the widening multiply is the uniform
		 * result and the low half drives the rejection test; a power-
		 * of-2 ceil never needs rejection.
		 */
		if (ceil <= 1U << 8) {
			u32 mult = ceil * get_random_u8();
			if (likely(is_power_of_2(ceil) || (u8)mult >= (1U << 8) % ceil))
				return mult >> 8;
		} else if (ceil <= 1U << 16) {
			u32 mult = ceil * get_random_u16();
			if (likely(is_power_of_2(ceil) || (u16)mult >= (1U << 16) % ceil))
				return mult >> 16;
		} else {
			u64 mult = (u64)ceil * get_random_u32();
			/* -ceil % ceil == (2^32 - ceil) % ceil == 2^32 % ceil in u32. */
			if (likely(is_power_of_2(ceil) || (u32)mult >= -ceil % ceil))
				return mult >> 32;
		}
	}
}

/*
 * On 64-bit architectures, protect against non-terminated C string overflows
 * by zeroing out the first byte of the canary; this leaves 56 bits of entropy.
 */
#ifdef CONFIG_64BIT
# ifdef __LITTLE_ENDIAN
# define CANARY_MASK 0xffffffffffffff00UL
# else /* big endian, 64 bits: */
# define CANARY_MASK 0x00ffffffffffffffUL
# endif
#else /* 32 bits: */
# define CANARY_MASK 0xffffffffUL
#endif

/* Random stack-canary value with the in-memory first byte forced to zero. */
static inline unsigned long get_random_canary(void)
{
	return get_random_long() & CANARY_MASK;
}

void __init random_init_early(const char *command_line);
void __init random_init(void);
bool rng_is_initialized(void);
int wait_for_random_bytes(void);

/* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
119 * Returns the result of the call to wait_for_random_bytes. */ 120 static inline int get_random_bytes_wait(void *buf, size_t nbytes) 121 { 122 int ret = wait_for_random_bytes(); 123 get_random_bytes(buf, nbytes); 124 return ret; 125 } 126 127 #define declare_get_random_var_wait(name, ret_type) \ 128 static inline int get_random_ ## name ## _wait(ret_type *out) { \ 129 int ret = wait_for_random_bytes(); \ 130 if (unlikely(ret)) \ 131 return ret; \ 132 *out = get_random_ ## name(); \ 133 return 0; \ 134 } 135 declare_get_random_var_wait(u8, u8) 136 declare_get_random_var_wait(u16, u16) 137 declare_get_random_var_wait(u32, u32) 138 declare_get_random_var_wait(u64, u32) 139 declare_get_random_var_wait(long, unsigned long) 140 #undef declare_get_random_var 141 142 /* 143 * This is designed to be standalone for just prandom 144 * users, but for now we include it from <linux/random.h> 145 * for legacy reasons. 146 */ 147 #include <linux/prandom.h> 148 149 #include <asm/archrandom.h> 150 151 /* 152 * Called from the boot CPU during startup; not valid to call once 153 * secondary CPUs are up and preemption is possible. 154 */ 155 #ifndef arch_get_random_seed_longs_early 156 static inline size_t __init arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs) 157 { 158 WARN_ON(system_state != SYSTEM_BOOTING); 159 return arch_get_random_seed_longs(v, max_longs); 160 } 161 #endif 162 163 #ifndef arch_get_random_longs_early 164 static inline bool __init arch_get_random_longs_early(unsigned long *v, size_t max_longs) 165 { 166 WARN_ON(system_state != SYSTEM_BOOTING); 167 return arch_get_random_longs(v, max_longs); 168 } 169 #endif 170 171 #ifdef CONFIG_SMP 172 int random_prepare_cpu(unsigned int cpu); 173 int random_online_cpu(unsigned int cpu); 174 #endif 175 176 #ifndef MODULE 177 extern const struct file_operations random_fops, urandom_fops; 178 #endif 179 180 #endif /* _LINUX_RANDOM_H */ 181