// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};

struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
	if (PageHighMem(scatterwalk_page(walk)))
		kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
	struct page *page = scatterwalk_page(walk);

	return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
	       offset_in_page(walk->offset);
}

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->out, walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/*
 * Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

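/*
 * Flush the bounce buffer used by the slow path back out to the
 * destination scatterlist once the algorithm has processed it.
 */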
static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
}

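/*
 * Finish the current step of the walk: err is the number of bytes the
 * caller left unprocessed (or a negative errno).  The walk is advanced
 * past the consumed bytes and, if data remains, continued via
 * skcipher_walk_next().
 */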
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n; /* bytes processed */
	bool more;

	if (unlikely(err < 0))
		goto finish;

	n = walk->nbytes - err;
	walk->total -= n;
	more = (walk->total != 0);

	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
				    SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (err) {
			/*
			 * Didn't process all bytes.  Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			err = -EINVAL;
			goto finish;
		}
		skcipher_done_slow(walk, n);
		goto already_advanced;
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
already_advanced:
	scatterwalk_done(&walk->in, 0, more);
	scatterwalk_done(&walk->out, 1, more);

	if (more) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}
	err = 0;
finish:
	walk->nbytes = 0;

	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

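/*
 * Complete an asynchronous (physical-address) walk: flush any queued
 * bounce buffers out to their destinations and release them.
 */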
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

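/*
 * Slow path: fewer than bsize contiguous bytes are available in the
 * source or destination scatterlist, so process one block through an
 * aligned bounce buffer that is copied in here and written back out in
 * skcipher_done_slow().
 */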
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

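/*
 * Copy path: the data is misaligned for the algorithm, so stage it
 * through a page-sized scratch buffer that can be processed in place.
 */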
static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

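/*
 * Fast path: source and destination are suitably aligned, so map the
 * scatterlist pages directly and let the algorithm work on them.
 */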
static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

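/*
 * Set up the next chunk of the walk, choosing the fast, copy or slow
 * path depending on alignment and on how much contiguous data the
 * scatterlists can provide.
 */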
static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

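/*
 * The caller's IV is not sufficiently aligned for the algorithm, so
 * duplicate it into an aligned buffer; skcipher_walk_done() copies it
 * back to the original on completion.
 */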
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

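/*
 * Common entry point for a new walk: reject IRQ context, make sure the
 * IV is usable, and set up the first chunk.
 */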
static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);

		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->stride = crypto_skcipher_walksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
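
/*
 * Sketch of how a cipher implementation typically drives this walk
 * (illustrative only, not code from this file; error handling is
 * elided and "n" stands for the bytes actually transformed):
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while (walk.nbytes) {
 *		n = walk.nbytes;
 *		(transform n bytes from walk.src.virt.addr
 *		 into walk.dst.virt.addr)
 *		err = skcipher_walk_done(&walk, walk.nbytes - n);
 *	}
 */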

void skcipher_walk_atomise(struct skcipher_walk *walk)
{
	walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);

int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

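/*
 * AEAD walks reuse the skcipher walk machinery; the associated data is
 * skipped in both scatterlists before the walk starts.
 */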
static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
		       bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead);

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

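/*
 * What follows is the glue that lets the legacy blkcipher and
 * ablkcipher implementations be driven through the skcipher API; the
 * wrapped tfm is stored as the skcipher's context.
 */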
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_blkcipher_type)
		return sizeof(struct crypto_blkcipher *);

	if (alg->cra_type == &crypto_ablkcipher_type)
		return sizeof(struct crypto_ablkcipher *);

	return crypto_alg_extsize(alg);
}

static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (tfm->keysize)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_blkcipher *blkcipher = *ctx;
	int err;

	crypto_blkcipher_clear_flags(blkcipher, ~0);
	crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
					      CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(blkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
				       CRYPTO_TFM_RES_MASK);
	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}

static int skcipher_crypt_blkcipher(struct skcipher_request *req,
				    int (*crypt)(struct blkcipher_desc *,
						 struct scatterlist *,
						 struct scatterlist *,
						 unsigned int))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct blkcipher_desc desc = {
		.tfm = *ctx,
		.info = req->iv,
		.flags = req->base.flags,
	};

	return crypt(&desc, req->dst, req->src, req->cryptlen);
}

static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(*ctx);
}

static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *blkcipher;
	struct crypto_tfm *btfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(btfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(btfm);
	}

	blkcipher = __crypto_blkcipher_cast(btfm);
	*ctx = blkcipher;
	tfm->exit = crypto_exit_skcipher_ops_blkcipher;

	skcipher->setkey = skcipher_setkey_blkcipher;
	skcipher->encrypt = skcipher_encrypt_blkcipher;
	skcipher->decrypt = skcipher_decrypt_blkcipher;

	skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
	skcipher->keysize = calg->cra_blkcipher.max_keysize;

	skcipher_set_needkey(skcipher);

	return 0;
}

static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
				      const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher = *ctx;
	int err;

	crypto_ablkcipher_clear_flags(ablkcipher, ~0);
	crypto_ablkcipher_set_flags(ablkcipher,
				    crypto_skcipher_get_flags(tfm) &
				    CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm,
				  crypto_ablkcipher_get_flags(ablkcipher) &
				  CRYPTO_TFM_RES_MASK);
	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}

static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
				     int (*crypt)(struct ablkcipher_request *))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct ablkcipher_request *subreq = skcipher_request_ctx(req);

	ablkcipher_request_set_tfm(subreq, *ctx);
	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				     req->iv);

	return crypt(subreq);
}

static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(*ctx);
}

static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher;
	struct crypto_tfm *abtfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	abtfm = __crypto_alloc_tfm(calg, 0, 0);
	if (IS_ERR(abtfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(abtfm);
	}

	ablkcipher = __crypto_ablkcipher_cast(abtfm);
	*ctx = ablkcipher;
	tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

	skcipher->setkey = skcipher_setkey_ablkcipher;
	skcipher->encrypt = skcipher_encrypt_ablkcipher;
	skcipher->decrypt = skcipher_decrypt_ablkcipher;

	skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
			    sizeof(struct ablkcipher_request);
	skcipher->keysize = calg->cra_ablkcipher.max_keysize;

	skcipher_set_needkey(skcipher);

	return 0;
}

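/*
 * If the key is not aligned as the algorithm requires, copy it into a
 * temporary aligned buffer before calling the algorithm's ->setkey().
 */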
static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}

int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int cryptlen = req->cryptlen;
	int ret;

	crypto_stats_get(alg);
	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = tfm->encrypt(req);
	crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int cryptlen = req->cryptlen;
	int ret;

	crypto_stats_get(alg);
	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = tfm->decrypt(req);
	crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
		return crypto_init_skcipher_ops_blkcipher(tfm);

	if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type)
		return crypto_init_skcipher_ops_ablkcipher(tfm);

	skcipher->setkey = skcipher_setkey;
	skcipher->encrypt = alg->encrypt;
	skcipher->decrypt = alg->decrypt;
	skcipher->ivsize = alg->ivsize;
	skcipher->keysize = alg->max_keysize;

	skcipher_set_needkey(skcipher);

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type2 = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
	.report = crypto_skcipher_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type2;
	return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
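
/*
 * Minimal caller-side sketch (illustrative only; assumes a "cbc(aes)"
 * implementation is available and omits key/IV/scatterlist setup and
 * all error handling):
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, keylen);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_crypt(req, src, dst, nbytes, iv);
 *	err = crypto_skcipher_encrypt(req);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */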

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
			      const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);

int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
				   type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher2);

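/*
 * Validate and fill in the common fields of an skcipher algorithm
 * before registration; chunksize defaults to the block size and
 * walksize to the chunksize when an algorithm does not set them.
 */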
static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;
	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	base->cra_type = &crypto_skcipher_type2;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
	int err;

	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(cipher, key, keylen);
	crypto_skcipher_set_flags(tfm, crypto_cipher_get_flags(cipher) &
				  CRYPTO_TFM_RES_MASK);
	return err;
}

static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}

static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
}

static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
	crypto_drop_spawn(skcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 * @cipher_alg_ret: on success, a pointer to the underlying cipher algorithm is
 *		    returned here.  It must be dropped with crypto_mod_put().
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *
skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb,
			       struct crypto_alg **cipher_alg_ret)
{
	struct crypto_attr_type *algt;
	struct crypto_alg *cipher_alg;
	struct skcipher_instance *inst;
	struct crypto_spawn *spawn;
	u32 mask;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return ERR_PTR(-EINVAL);

	mask = CRYPTO_ALG_TYPE_MASK |
		crypto_requires_off(algt->type, algt->mask,
				    CRYPTO_ALG_NEED_FALLBACK);

	cipher_alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
	if (IS_ERR(cipher_alg))
		return ERR_CAST(cipher_alg);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst) {
		err = -ENOMEM;
		goto err_put_cipher_alg;
	}
	spawn = skcipher_instance_ctx(inst);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	err = crypto_init_spawn(spawn, cipher_alg,
				skcipher_crypto_instance(inst),
				CRYPTO_ALG_TYPE_MASK);
	if (err)
		goto err_free_inst;
	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	*cipher_alg_ret = cipher_alg;
	return inst;

err_free_inst:
	kfree(inst);
err_put_cipher_alg:
	crypto_mod_put(cipher_alg);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");