// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linear symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers.
 *
 * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
 */
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>

#include "skcipher.h"
static inline struct crypto_lskcipher *__crypto_lskcipher_cast(
	struct crypto_tfm *tfm)
{
	return container_of(tfm, struct crypto_lskcipher, base);
}
static inline struct lskcipher_alg *__crypto_lskcipher_alg(
	struct crypto_alg *alg)
{
	return container_of(alg, struct lskcipher_alg, co.base);
}
static inline struct crypto_istat_cipher *lskcipher_get_stat(
	struct lskcipher_alg *alg)
{
	return skcipher_get_stat_common(&alg->co);
}
static inline int crypto_lskcipher_errstat(struct lskcipher_alg *alg, int err)
{
	struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);

	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
		return err;

	if (err)
		atomic64_inc(&istat->err_cnt);

	return err;
}
static int lskcipher_setkey_unaligned(struct crypto_lskcipher *tfm,
				      const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
	struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}
int crypto_lskcipher_setkey(struct crypto_lskcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
	struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm);

	if (keylen < cipher->co.min_keysize || keylen > cipher->co.max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		return lskcipher_setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_lskcipher_setkey);
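
/*
 * Handle misaligned src/dst/iv by bouncing the data through page-sized
 * temporary buffers: copy a chunk in, encrypt/decrypt it in place, copy
 * it back out, carrying the IV across chunks in an aligned copy.
 */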
static int crypto_lskcipher_crypt_unaligned(
	struct crypto_lskcipher *tfm, const u8 *src, u8 *dst, unsigned len,
	u8 *iv, int (*crypt)(struct crypto_lskcipher *tfm, const u8 *src,
			     u8 *dst, unsigned len, u8 *iv, bool final))
{
	unsigned ivsize = crypto_lskcipher_ivsize(tfm);
	unsigned bs = crypto_lskcipher_blocksize(tfm);
	unsigned cs = crypto_lskcipher_chunksize(tfm);
	int err;
	u8 *tiv;
	u8 *p;

	BUILD_BUG_ON(MAX_CIPHER_BLOCKSIZE > PAGE_SIZE ||
		     MAX_CIPHER_ALIGNMASK >= PAGE_SIZE);

	tiv = kmalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!tiv)
		return -ENOMEM;

	memcpy(tiv, iv, ivsize);

	p = kmalloc(PAGE_SIZE, GFP_ATOMIC);
	err = -ENOMEM;
	if (!p)
		goto out;

	while (len >= bs) {
		unsigned chunk = min((unsigned)PAGE_SIZE, len);

		/* Keep each bounce a multiple of the chunk size. */
		if (chunk > cs)
			chunk &= ~(cs - 1);

		memcpy(p, src, chunk);
		err = crypt(tfm, p, p, chunk, tiv, true);
		if (err)
			goto out;

		memcpy(dst, p, chunk);

		src += chunk;
		dst += chunk;
		len -= chunk;
	}

	err = len ? -EINVAL : 0;

out:
	memcpy(iv, tiv, ivsize);
	kfree_sensitive(p);
	kfree_sensitive(tiv);
	return err;
}
static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src,
				  u8 *dst, unsigned len, u8 *iv,
				  int (*crypt)(struct crypto_lskcipher *tfm,
					       const u8 *src, u8 *dst,
					       unsigned len, u8 *iv,
					       bool final))
{
	unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
	struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
	int ret;

	if (((unsigned long)src | (unsigned long)dst | (unsigned long)iv) &
	    alignmask) {
		ret = crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv,
						       crypt);
		goto out;
	}

	ret = crypt(tfm, src, dst, len, iv, true);

out:
	return crypto_lskcipher_errstat(alg, ret);
}
int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
			     u8 *dst, unsigned len, u8 *iv)
{
	struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);

		atomic64_inc(&istat->encrypt_cnt);
		atomic64_add(len, &istat->encrypt_tlen);
	}

	return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->encrypt);
}
EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt);
int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
			     u8 *dst, unsigned len, u8 *iv)
{
	struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_cipher *istat = lskcipher_get_stat(alg);

		atomic64_inc(&istat->decrypt_cnt);
		atomic64_add(len, &istat->decrypt_tlen);
	}

	return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->decrypt);
}
EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt);
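
/*
 * Minimal usage sketch for the one-shot API above (illustrative only;
 * "ecb(aes)", key, src, dst and len are placeholders, not taken from a
 * real caller):
 *
 *	struct crypto_lskcipher *tfm;
 *	int err;
 *
 *	tfm = crypto_alloc_lskcipher("ecb(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_lskcipher_setkey(tfm, key, 16);
 *	if (!err)
 *		err = crypto_lskcipher_encrypt(tfm, src, dst, len, NULL);
 *	crypto_free_lskcipher(tfm);
 *
 * Unlike skcipher, src/dst are plain linear buffers: no scatterlists
 * and no request objects are involved.
 */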
int crypto_lskcipher_setkey_sg(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm);

	return crypto_lskcipher_setkey(*ctx, key, keylen);
}
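
/*
 * Bridge from the scatterlist-based skcipher interface to a linear
 * lskcipher: walk the request with skcipher_walk and feed each mapped
 * segment to the underlying lskcipher, flagging the final segment so
 * the algorithm can complete any end-of-message processing.
 */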
static int crypto_lskcipher_crypt_sg(struct skcipher_request *req,
				     int (*crypt)(struct crypto_lskcipher *tfm,
						  const u8 *src, u8 *dst,
						  unsigned len, u8 *iv,
						  bool final))
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
	struct crypto_lskcipher *tfm = *ctx;
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		err = crypt(tfm, walk.src.virt.addr, walk.dst.virt.addr,
			    walk.nbytes, walk.iv, walk.nbytes == walk.total);
		err = skcipher_walk_done(&walk, err);
	}

	return err;
}
int crypto_lskcipher_encrypt_sg(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
	struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx);

	return crypto_lskcipher_crypt_sg(req, alg->encrypt);
}
int crypto_lskcipher_decrypt_sg(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
	struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx);

	return crypto_lskcipher_crypt_sg(req, alg->decrypt);
}
static void crypto_lskcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_lskcipher *skcipher = __crypto_lskcipher_cast(tfm);
	struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher);

	alg->exit(skcipher);
}
static int crypto_lskcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_lskcipher *skcipher = __crypto_lskcipher_cast(tfm);
	struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher);

	if (alg->exit)
		skcipher->base.exit = crypto_lskcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}
static void crypto_lskcipher_free_instance(struct crypto_instance *inst)
{
	struct lskcipher_instance *skcipher =
		container_of(inst, struct lskcipher_instance, s.base);

	skcipher->free(skcipher);
}
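
/* /proc/crypto formatting for lskcipher algorithms. */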
static void __maybe_unused crypto_lskcipher_show(
	struct seq_file *m, struct crypto_alg *alg)
{
	struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);

	seq_printf(m, "type         : lskcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->co.min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->co.max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->co.ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->co.chunksize);
}
static int __maybe_unused crypto_lskcipher_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "lskcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->co.min_keysize;
	rblkcipher.max_keysize = skcipher->co.max_keysize;
	rblkcipher.ivsize = skcipher->co.ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
static int __maybe_unused crypto_lskcipher_report_stat(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);
	struct crypto_istat_cipher *istat;
	struct crypto_stat_cipher rcipher;

	istat = lskcipher_get_stat(skcipher);

	memset(&rcipher, 0, sizeof(rcipher));

	strscpy(rcipher.type, "cipher", sizeof(rcipher.type));

	rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
	rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
	rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
	rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
	rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);

	return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
}
static const struct crypto_type crypto_lskcipher_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_lskcipher_init_tfm,
	.free = crypto_lskcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_lskcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_lskcipher_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
	.report_stat = crypto_lskcipher_report_stat,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_LSKCIPHER,
	.tfmsize = offsetof(struct crypto_lskcipher, base),
};
static void crypto_lskcipher_exit_tfm_sg(struct crypto_tfm *tfm)
{
	struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_lskcipher(*ctx);
}
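
/*
 * Set up an skcipher tfm whose context holds a pointer to an lskcipher,
 * so that scatterlist users can be serviced by a linear-only algorithm.
 */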
int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm)
{
	struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_lskcipher *skcipher;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	skcipher = crypto_create_tfm(calg, &crypto_lskcipher_type);
	if (IS_ERR(skcipher)) {
		crypto_mod_put(calg);
		return PTR_ERR(skcipher);
	}

	*ctx = skcipher;
	tfm->exit = crypto_lskcipher_exit_tfm_sg;

	return 0;
}
int crypto_grab_lskcipher(struct crypto_lskcipher_spawn *spawn,
			  struct crypto_instance *inst,
			  const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_lskcipher_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_lskcipher);
struct crypto_lskcipher *crypto_alloc_lskcipher(const char *alg_name,
						u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_lskcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_lskcipher);
static int lskcipher_prepare_alg(struct lskcipher_alg *alg)
{
	struct crypto_alg *base = &alg->co.base;
	int err;

	err = skcipher_prepare_alg_common(&alg->co);
	if (err)
		return err;

	/* The chunk size must be a power of two. */
	if (alg->co.chunksize & (alg->co.chunksize - 1))
		return -EINVAL;

	base->cra_type = &crypto_lskcipher_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_LSKCIPHER;

	return 0;
}
int crypto_register_lskcipher(struct lskcipher_alg *alg)
{
	struct crypto_alg *base = &alg->co.base;
	int err;

	err = lskcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_lskcipher);
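
/*
 * A driver typically registers a static lskcipher_alg from module init.
 * Minimal sketch (the names, sizes and callbacks below are illustrative,
 * not taken from a real driver):
 *
 *	static struct lskcipher_alg my_alg = {
 *		.setkey		= my_setkey,
 *		.encrypt	= my_encrypt,
 *		.decrypt	= my_decrypt,
 *		.co = {
 *			.min_keysize	= 16,
 *			.max_keysize	= 32,
 *			.base = {
 *				.cra_name	 = "ecb(myalg)",
 *				.cra_driver_name = "ecb-myalg-generic",
 *				.cra_blocksize	 = 16,
 *				.cra_module	 = THIS_MODULE,
 *			},
 *		},
 *	};
 *
 *	err = crypto_register_lskcipher(&my_alg);
 */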
void crypto_unregister_lskcipher(struct lskcipher_alg *alg)
{
	crypto_unregister_alg(&alg->co.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_lskcipher);
int crypto_register_lskciphers(struct lskcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_lskcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_lskcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_lskciphers);
void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_lskcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_lskciphers);
int lskcipher_register_instance(struct crypto_template *tmpl,
				struct lskcipher_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = lskcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, lskcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(lskcipher_register_instance);
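
/*
 * The helpers below provide the default setkey/init/exit behaviour
 * installed by lskcipher_alloc_instance_simple(): the instance context
 * is a single spawn of the underlying cipher, and the tfm context is
 * just a pointer to the spawned transform.
 */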
static int lskcipher_setkey_simple(struct crypto_lskcipher *tfm, const u8 *key,
				   unsigned int keylen)
{
	struct crypto_lskcipher *cipher = lskcipher_cipher_simple(tfm);

	crypto_lskcipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_lskcipher_set_flags(cipher, crypto_lskcipher_get_flags(tfm) &
				   CRYPTO_TFM_REQ_MASK);
	return crypto_lskcipher_setkey(cipher, key, keylen);
}
static int lskcipher_init_tfm_simple(struct crypto_lskcipher *tfm)
{
	struct lskcipher_instance *inst = lskcipher_alg_instance(tfm);
	struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
	struct crypto_lskcipher_spawn *spawn;
	struct crypto_lskcipher *cipher;

	spawn = lskcipher_instance_ctx(inst);
	cipher = crypto_spawn_lskcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	*ctx = cipher;
	return 0;
}
static void lskcipher_exit_tfm_simple(struct crypto_lskcipher *tfm)
{
	struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);

	crypto_free_lskcipher(*ctx);
}
static void lskcipher_free_instance_simple(struct lskcipher_instance *inst)
{
	crypto_drop_lskcipher(lskcipher_instance_ctx(inst));
	kfree(inst);
}
/**
 * lskcipher_alloc_instance_simple - allocate instance of simple block cipher
 *
 * Allocate an lskcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to
 * struct crypto_lskcipher *, and default ->setkey(), ->init(), and
 * ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *	   needs to register the instance.
 */
struct lskcipher_instance *lskcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct lskcipher_instance *inst;
	struct crypto_lskcipher_spawn *spawn;
	char ecb_name[CRYPTO_MAX_ALG_NAME];
	struct lskcipher_alg *cipher_alg;
	const char *cipher_name;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_LSKCIPHER, &mask);
	if (err)
		return ERR_PTR(err);

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return ERR_CAST(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = lskcipher_instance_ctx(inst);
	err = crypto_grab_lskcipher(spawn,
				    lskcipher_crypto_instance(inst),
				    cipher_name, 0, mask);

	/* Fall back to the ecb-wrapped cipher unless we are ecb itself. */
	ecb_name[0] = 0;
	if (err == -ENOENT && !!memcmp(tmpl->name, "ecb", 4)) {
		err = -ENAMETOOLONG;
		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_lskcipher(spawn,
					    lskcipher_crypto_instance(inst),
					    ecb_name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	cipher_alg = crypto_lskcipher_spawn_alg(spawn);

	err = crypto_inst_setname(lskcipher_crypto_instance(inst), tmpl->name,
				  &cipher_alg->co.base);
	if (err)
		goto err_free_inst;

	if (ecb_name[0]) {
		int len;

		err = -EINVAL;
		len = strscpy(ecb_name, &cipher_alg->co.base.cra_name[4],
			      sizeof(ecb_name));
		if (len < 2)
			goto err_free_inst;

		if (ecb_name[len - 1] != ')')
			goto err_free_inst;

		ecb_name[len - 1] = 0;

		err = -ENAMETOOLONG;
		if (snprintf(inst->alg.co.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, ecb_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		if (strcmp(ecb_name, cipher_name) &&
		    snprintf(inst->alg.co.base.cra_driver_name,
			     CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, cipher_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;
	} else {
		/* Don't allow nesting. */
		err = -ELOOP;
		if ((cipher_alg->co.base.cra_flags & CRYPTO_ALG_INSTANCE))
			goto err_free_inst;
	}

	err = -EINVAL;
	if (cipher_alg->co.ivsize)
		goto err_free_inst;

	inst->free = lskcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.co.base.cra_blocksize = cipher_alg->co.base.cra_blocksize;
	inst->alg.co.base.cra_alignmask = cipher_alg->co.base.cra_alignmask;
	inst->alg.co.base.cra_priority = cipher_alg->co.base.cra_priority;
	inst->alg.co.min_keysize = cipher_alg->co.min_keysize;
	inst->alg.co.max_keysize = cipher_alg->co.max_keysize;
	inst->alg.co.ivsize = cipher_alg->co.base.cra_blocksize;

	/* Use struct crypto_lskcipher * by default, can be overridden */
	inst->alg.co.base.cra_ctxsize = sizeof(struct crypto_lskcipher *);
	inst->alg.setkey = lskcipher_setkey_simple;
	inst->alg.init = lskcipher_init_tfm_simple;
	inst->alg.exit = lskcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	lskcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(lskcipher_alloc_instance_simple);
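
/*
 * Typical use from a mode template's ->create() callback (sketch only;
 * "my_create" and the encrypt/decrypt hooks are placeholders):
 *
 *	static int my_create(struct crypto_template *tmpl, struct rtattr **tb)
 *	{
 *		struct lskcipher_instance *inst;
 *		int err;
 *
 *		inst = lskcipher_alloc_instance_simple(tmpl, tb);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *
 *		inst->alg.encrypt = my_encrypt;
 *		inst->alg.decrypt = my_decrypt;
 *
 *		err = lskcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *		return err;
 *	}
 */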