/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <[email protected]>
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <linux/crypto.h>
#include <linux/list.h>
#include <linux/kernel.h>

/*
 * Maximum values for blocksize and alignmask, used to allocate
 * static buffers that are big enough for any combination of
 * algs and architectures. Ciphers have a lower maximum size.
 */
#define MAX_ALGAPI_BLOCKSIZE		160
#define MAX_ALGAPI_ALIGNMASK		63
#define MAX_CIPHER_BLOCKSIZE		16
#define MAX_CIPHER_ALIGNMASK		15

struct crypto_aead;
struct crypto_instance;
struct module;
struct rtattr;
struct seq_file;
struct sk_buff;

struct crypto_type {
	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
	unsigned int (*extsize)(struct crypto_alg *alg);
	int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
	int (*init_tfm)(struct crypto_tfm *tfm);
	void (*show)(struct seq_file *m, struct crypto_alg *alg);
	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
	void (*free)(struct crypto_instance *inst);

	unsigned int type;
	unsigned int maskclear;
	unsigned int maskset;
	unsigned int tfmsize;
};

struct crypto_instance {
	struct crypto_alg alg;

	struct crypto_template *tmpl;

	union {
		/* Node in list of instances after registration. */
		struct hlist_node list;
		/* List of attached spawns before registration. */
		struct crypto_spawn *spawns;
	};

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_template {
	struct list_head list;
	struct hlist_head instances;
	struct module *module;

	int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_spawn {
	struct list_head list;
	struct crypto_alg *alg;
	union {
		/* Back pointer to instance after registration. */
		struct crypto_instance *inst;
		/* Spawn list pointer prior to registration. */
		struct crypto_spawn *next;
	};
	const struct crypto_type *frontend;
	u32 mask;
	bool dead;
	bool registered;
};
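
/*
 * Example: how these structures fit together.  A template's ->create()
 * callback allocates a crypto_instance, attaches the underlying algorithm
 * through a crypto_spawn, and registers the result.  The sketch below is
 * illustrative only, using the generic helpers declared later in this
 * header: the names example_ctx, example_create and example_tmpl are
 * hypothetical, and real templates normally build a type-specific
 * instance (e.g. skcipher_instance) and fill in the crypto_alg fields
 * before registering.
 *
 *	struct example_ctx {
 *		struct crypto_spawn spawn;
 *	};
 *
 *	static int example_create(struct crypto_template *tmpl,
 *				  struct rtattr **tb)
 *	{
 *		struct crypto_instance *inst;
 *		struct example_ctx *ctx;
 *		const char *name;
 *		int err;
 *
 *		name = crypto_attr_alg_name(tb[1]);
 *		if (IS_ERR(name))
 *			return PTR_ERR(name);
 *
 *		inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
 *		if (!inst)
 *			return -ENOMEM;
 *		ctx = crypto_instance_ctx(inst);
 *
 *		err = crypto_grab_spawn(&ctx->spawn, inst, name, 0, 0);
 *		if (err)
 *			goto err_free;
 *
 *		err = crypto_inst_setname(inst, tmpl->name, ctx->spawn.alg);
 *		if (err)
 *			goto err_drop;
 *
 *		err = crypto_register_instance(tmpl, inst);
 *		if (err)
 *			goto err_drop;
 *		return 0;
 *
 *	err_drop:
 *		crypto_drop_spawn(&ctx->spawn);
 *	err_free:
 *		kfree(inst);
 *		return err;
 *	}
 *
 *	static struct crypto_template example_tmpl = {
 *		.name = "example",
 *		.create = example_create,
 *		.module = THIS_MODULE,
 *	};
 */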
struct crypto_queue {
	struct list_head list;
	struct list_head *backlog;

	unsigned int qlen;
	unsigned int max_qlen;
};

struct scatter_walk {
	struct scatterlist *sg;
	unsigned int offset;
};

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
int crypto_register_templates(struct crypto_template *tmpls, int count);
void crypto_unregister_template(struct crypto_template *tmpl);
void crypto_unregister_templates(struct crypto_template *tmpls, int count);
struct crypto_template *crypto_lookup_template(const char *name);

int crypto_register_instance(struct crypto_template *tmpl,
			     struct crypto_instance *inst);
void crypto_unregister_instance(struct crypto_instance *inst);

int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask);
void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
				    u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);

struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
const char *crypto_attr_alg_name(struct rtattr *rta);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
			struct crypto_alg *alg);

void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
			   struct crypto_async_request *request);
void crypto_enqueue_request_head(struct crypto_queue *queue,
				 struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
	return queue->qlen;
}

void crypto_inc(u8 *a, unsigned int size);
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);

static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s = (unsigned long *)src;

		while (size > 0) {
			*d++ ^= *s++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, dst, src, size);
	}
}

static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
				  unsigned int size)
{
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
	    __builtin_constant_p(size) &&
	    (size % sizeof(unsigned long)) == 0) {
		unsigned long *d = (unsigned long *)dst;
		unsigned long *s1 = (unsigned long *)src1;
		unsigned long *s2 = (unsigned long *)src2;

		while (size > 0) {
			*d++ = *s1++ ^ *s2++;
			size -= sizeof(unsigned long);
		}
	} else {
		__crypto_xor(dst, src1, src2, size);
	}
}
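
/*
 * Example (not part of the upstream header): a minimal sketch of how
 * crypto_xor() and crypto_inc() combine in a CTR-style step.  The
 * function name and the hard-coded 16-byte (AES-sized) block length are
 * hypothetical.  Because the length is a compile-time constant multiple
 * of sizeof(unsigned long), crypto_xor() takes the word-at-a-time fast
 * path above when CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set, and
 * falls back to __crypto_xor() otherwise.
 */
static inline void crypto_example_ctr_step(u8 *block, const u8 *keystream,
					   u8 *ctr)
{
	/* XOR one keystream block into the data in place. */
	crypto_xor(block, keystream, 16);
	/* Advance the big-endian counter for the next block. */
	crypto_inc(ctr, 16);
}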
static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
	return PTR_ALIGN(crypto_tfm_ctx(tfm),
			 crypto_tfm_alg_alignmask(tfm) + 1);
}

static inline struct crypto_instance *crypto_tfm_alg_instance(
	struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
	return inst->__ctx;
}

static inline struct crypto_async_request *crypto_get_backlog(
	struct crypto_queue *queue)
{
	return queue->backlog == &queue->list ? NULL :
	       container_of(queue->backlog, struct crypto_async_request, list);
}

static inline u32 crypto_requires_off(struct crypto_attr_type *algt, u32 off)
{
	return (algt->type ^ off) & algt->mask & off;
}

/*
 * When an algorithm uses another algorithm (e.g., if it's an instance of a
 * template), these are the flags that should always be set on the "outer"
 * algorithm if any "inner" algorithm has them set.
 */
#define CRYPTO_ALG_INHERITED_FLAGS	\
	(CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |	\
	 CRYPTO_ALG_ALLOCATES_MEMORY)

/*
 * Given the type and mask that specify the flags restrictions on a template
 * instance being created, return the mask that should be passed to
 * crypto_grab_*() (along with type=0) to honor any request the user made to
 * have any of the CRYPTO_ALG_INHERITED_FLAGS clear.
 */
static inline u32 crypto_algt_inherited_mask(struct crypto_attr_type *algt)
{
	return crypto_requires_off(algt, CRYPTO_ALG_INHERITED_FLAGS);
}

noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);

/**
 * crypto_memneq - Compare two areas of memory without leaking
 *		   timing information.
 *
 * @a: One area of memory
 * @b: Another area of memory
 * @size: The size of the area.
 *
 * Returns 0 when data is equal, 1 otherwise.
 */
static inline int crypto_memneq(const void *a, const void *b, size_t size)
{
	return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
}

int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);

/* Crypto notification events. */
enum {
	CRYPTO_MSG_ALG_REQUEST,
	CRYPTO_MSG_ALG_REGISTER,
	CRYPTO_MSG_ALG_LOADED,
};

#endif	/* _CRYPTO_ALGAPI_H */