// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "skcipher.h"

#define CRYPTO_ALG_TYPE_SKCIPHER_MASK 0x0000000e

enum {
        SKCIPHER_WALK_PHYS = 1 << 0,
        SKCIPHER_WALK_SLOW = 1 << 1,
        SKCIPHER_WALK_COPY = 1 << 2,
        SKCIPHER_WALK_DIFF = 1 << 3,
        SKCIPHER_WALK_SLEEP = 1 << 4,
};
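
/*
 * PHYS selects the physical-address walk used by skcipher_walk_async();
 * SLOW bounces a single block through an aligned temporary buffer; COPY
 * bounces data through a spare page; DIFF records that src and dst were
 * mapped separately and both need unmapping; SLEEP records whether
 * GFP_KERNEL allocations are permitted during this walk.
 */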

struct skcipher_walk_buffer {
        struct list_head entry;
        struct scatter_walk dst;
        unsigned int len;
        u8 *data;
        u8 buffer[];
};

static const struct crypto_type crypto_skcipher_type;

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
        walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
        walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
        scatterwalk_unmap(walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
        scatterwalk_unmap(walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
        return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

        return max(start, end_page);
}
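
/*
 * Worked example: with 4 KiB pages, a 16-byte spot starting at an address
 * ending in 0xff8 would straddle a boundary; end_page then points at the
 * start of the next page (...0x000), so max() moves the spot there.  A
 * spot that already fits in start's page is left unchanged, because
 * end_page then points at or before start.
 */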

static inline struct skcipher_alg *__crypto_skcipher_alg(
        struct crypto_alg *alg)
{
        return container_of(alg, struct skcipher_alg, base);
}

static inline struct crypto_istat_cipher *skcipher_get_stat(
        struct skcipher_alg *alg)
{
        return skcipher_get_stat_common(&alg->co);
}

static inline int crypto_skcipher_errstat(struct skcipher_alg *alg, int err)
{
        struct crypto_istat_cipher *istat = skcipher_get_stat(alg);

        if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
                return err;

        if (err && err != -EINPROGRESS && err != -EBUSY)
                atomic64_inc(&istat->err_cnt);

        return err;
}

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
        u8 *addr;

        addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
        addr = skcipher_get_spot(addr, bsize);
        scatterwalk_copychunks(addr, &walk->out, bsize,
                               (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
        return 0;
}

int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
        unsigned int n = walk->nbytes;
        unsigned int nbytes = 0;

        if (!n)
                goto finish;

        if (likely(err >= 0)) {
                n -= err;
                nbytes = walk->total - n;
        }

        if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
                                    SKCIPHER_WALK_SLOW |
                                    SKCIPHER_WALK_COPY |
                                    SKCIPHER_WALK_DIFF)))) {
unmap_src:
                skcipher_unmap_src(walk);
        } else if (walk->flags & SKCIPHER_WALK_DIFF) {
                skcipher_unmap_dst(walk);
                goto unmap_src;
        } else if (walk->flags & SKCIPHER_WALK_COPY) {
                skcipher_map_dst(walk);
                memcpy(walk->dst.virt.addr, walk->page, n);
                skcipher_unmap_dst(walk);
        } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
                if (err > 0) {
                        /*
                         * Didn't process all bytes.  Either the algorithm is
                         * broken, or this was the last step and it turned out
                         * the message wasn't evenly divisible into blocks but
                         * the algorithm requires it.
                         */
                        err = -EINVAL;
                        nbytes = 0;
                } else
                        n = skcipher_done_slow(walk, n);
        }

        if (err > 0)
                err = 0;

        walk->total = nbytes;
        walk->nbytes = 0;

        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);
        scatterwalk_done(&walk->in, 0, nbytes);
        scatterwalk_done(&walk->out, 1, nbytes);

        if (nbytes) {
                crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
                             CRYPTO_TFM_REQ_MAY_SLEEP : 0);
                return skcipher_walk_next(walk);
        }

finish:
        /* Short-circuit for the common/fast path. */
        if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
                goto out;

        if (walk->flags & SKCIPHER_WALK_PHYS)
                goto out;

        if (walk->iv != walk->oiv)
                memcpy(walk->oiv, walk->iv, walk->ivsize);
        if (walk->buffer != walk->page)
                kfree(walk->buffer);
        if (walk->page)
                free_page((unsigned long)walk->page);

out:
        return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);
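
/*
 * A minimal sketch of how a cipher implementation typically drives the
 * walk API above (illustrative only: "bsize" stands for the algorithm's
 * block size and crypt_blocks() is a made-up helper):
 *
 *	struct skcipher_walk walk;
 *	int err;
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while (walk.nbytes) {
 *		crypt_blocks(walk.dst.virt.addr, walk.src.virt.addr,
 *			     walk.nbytes - (walk.nbytes % bsize));
 *		err = skcipher_walk_done(&walk, walk.nbytes % bsize);
 *	}
 *	return err;
 */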

void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
        struct skcipher_walk_buffer *p, *tmp;

        list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
                u8 *data;

                if (err)
                        goto done;

                data = p->data;
                if (!data) {
                        data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
                        data = skcipher_get_spot(data, walk->stride);
                }

                scatterwalk_copychunks(data, &p->dst, p->len, 1);

                if (offset_in_page(p->data) + p->len + walk->stride >
                    PAGE_SIZE)
                        free_page((unsigned long)p->data);

done:
                list_del(&p->entry);
                kfree(p);
        }

        if (!err && walk->iv != walk->oiv)
                memcpy(walk->oiv, walk->iv, walk->ivsize);
        if (walk->buffer != walk->page)
                kfree(walk->buffer);
        if (walk->page)
                free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
                                 struct skcipher_walk_buffer *p)
{
        p->dst = walk->out;
        list_add_tail(&p->entry, &walk->buffers);
}

static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
        bool phys = walk->flags & SKCIPHER_WALK_PHYS;
        unsigned alignmask = walk->alignmask;
        struct skcipher_walk_buffer *p;
        unsigned a;
        unsigned n;
        u8 *buffer;
        void *v;

        if (!phys) {
                if (!walk->buffer)
                        walk->buffer = walk->page;
                buffer = walk->buffer;
                if (buffer)
                        goto ok;
        }

        /* Start with the minimum alignment of kmalloc. */
        a = crypto_tfm_ctx_alignment() - 1;
        n = bsize;

        if (phys) {
                /* Calculate the minimum alignment of p->buffer. */
                a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
                n += sizeof(*p);
        }

        /* Minimum size to align p->buffer by alignmask. */
        n += alignmask & ~a;

        /* Minimum size to ensure p->buffer does not straddle a page. */
        n += (bsize - 1) & ~(alignmask | a);

        v = kzalloc(n, skcipher_walk_gfp(walk));
        if (!v)
                return skcipher_walk_done(walk, -ENOMEM);

        if (phys) {
                p = v;
                p->len = bsize;
                skcipher_queue_write(walk, p);
                buffer = p->buffer;
        } else {
                walk->buffer = v;
                buffer = v;
        }

ok:
        walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
        walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
        walk->src.virt.addr = walk->dst.virt.addr;

        scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

        walk->nbytes = bsize;
        walk->flags |= SKCIPHER_WALK_SLOW;

        return 0;
}
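
/*
 * Note on the sizing arithmetic above: "a" is the alignment kmalloc()
 * already guarantees, so only the remaining "alignmask & ~a" bytes of
 * padding are needed to reach alignmask alignment, and the final
 * "(bsize - 1) & ~(alignmask | a)" bytes give skcipher_get_spot() room
 * to shift the aligned buffer onto the next page without overrunning
 * the allocation.
 */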

static int skcipher_next_copy(struct skcipher_walk *walk)
{
        struct skcipher_walk_buffer *p;
        u8 *tmp = walk->page;

        skcipher_map_src(walk);
        memcpy(tmp, walk->src.virt.addr, walk->nbytes);
        skcipher_unmap_src(walk);

        walk->src.virt.addr = tmp;
        walk->dst.virt.addr = tmp;

        if (!(walk->flags & SKCIPHER_WALK_PHYS))
                return 0;

        p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
        if (!p)
                return -ENOMEM;

        p->data = walk->page;
        p->len = walk->nbytes;
        skcipher_queue_write(walk, p);

        if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
            PAGE_SIZE)
                walk->page = NULL;
        else
                walk->page += walk->nbytes;

        return 0;
}

static int skcipher_next_fast(struct skcipher_walk *walk)
{
        unsigned long diff;

        walk->src.phys.page = scatterwalk_page(&walk->in);
        walk->src.phys.offset = offset_in_page(walk->in.offset);
        walk->dst.phys.page = scatterwalk_page(&walk->out);
        walk->dst.phys.offset = offset_in_page(walk->out.offset);

        if (walk->flags & SKCIPHER_WALK_PHYS)
                return 0;

        diff = walk->src.phys.offset - walk->dst.phys.offset;
        diff |= walk->src.virt.page - walk->dst.virt.page;

        skcipher_map_src(walk);
        walk->dst.virt.addr = walk->src.virt.addr;

        if (diff) {
                walk->flags |= SKCIPHER_WALK_DIFF;
                skcipher_map_dst(walk);
        }

        return 0;
}
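
/*
 * In the fast path above, src and dst are mapped separately (and
 * SKCIPHER_WALK_DIFF set) only when they resolve to different pages or
 * page offsets; a fully in-place request shares a single mapping.
 */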

static int skcipher_walk_next(struct skcipher_walk *walk)
{
        unsigned int bsize;
        unsigned int n;
        int err;

        walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
                         SKCIPHER_WALK_DIFF);

        n = walk->total;
        bsize = min(walk->stride, max(n, walk->blocksize));
        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (unlikely(n < bsize)) {
                if (unlikely(walk->total < walk->blocksize))
                        return skcipher_walk_done(walk, -EINVAL);

slow_path:
                err = skcipher_next_slow(walk, bsize);
                goto set_phys_lowmem;
        }

        if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
                if (!walk->page) {
                        gfp_t gfp = skcipher_walk_gfp(walk);

                        walk->page = (void *)__get_free_page(gfp);
                        if (!walk->page)
                                goto slow_path;
                }

                walk->nbytes = min_t(unsigned, n,
                                     PAGE_SIZE - offset_in_page(walk->page));
                walk->flags |= SKCIPHER_WALK_COPY;
                err = skcipher_next_copy(walk);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;

        return skcipher_next_fast(walk);

set_phys_lowmem:
        if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
                walk->src.phys.page = virt_to_page(walk->src.virt.addr);
                walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
                walk->src.phys.offset &= PAGE_SIZE - 1;
                walk->dst.phys.offset &= PAGE_SIZE - 1;
        }
        return err;
}

static int skcipher_copy_iv(struct skcipher_walk *walk)
{
        unsigned a = crypto_tfm_ctx_alignment() - 1;
        unsigned alignmask = walk->alignmask;
        unsigned ivsize = walk->ivsize;
        unsigned bs = walk->stride;
        unsigned aligned_bs;
        unsigned size;
        u8 *iv;

        aligned_bs = ALIGN(bs, alignmask + 1);

        /* Minimum size to align buffer by alignmask. */
        size = alignmask & ~a;

        if (walk->flags & SKCIPHER_WALK_PHYS)
                size += ivsize;
        else {
                size += aligned_bs + ivsize;

                /* Minimum size to ensure buffer does not straddle a page. */
                size += (bs - 1) & ~(alignmask | a);
        }

        walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
        if (!walk->buffer)
                return -ENOMEM;

        iv = PTR_ALIGN(walk->buffer, alignmask + 1);
        iv = skcipher_get_spot(iv, bs) + aligned_bs;

        walk->iv = memcpy(iv, walk->iv, walk->ivsize);
        return 0;
}
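
/*
 * The buffer allocated above does double duty: in the virtual walk case
 * the first aligned_bs bytes are reserved for later use as the slow-path
 * bounce buffer (see skcipher_next_slow()), and the IV copy is placed
 * directly after them.  It is freed in skcipher_walk_done() once the
 * final IV has been copied back to walk->oiv.
 */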

static int skcipher_walk_first(struct skcipher_walk *walk)
{
        if (WARN_ON_ONCE(in_hardirq()))
                return -EDEADLK;

        walk->buffer = NULL;
        if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
                int err = skcipher_copy_iv(walk);
                if (err)
                        return err;
        }

        walk->page = NULL;

        return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
                                  struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

        walk->total = req->cryptlen;
        walk->nbytes = 0;
        walk->iv = req->iv;
        walk->oiv = req->iv;

        if (unlikely(!walk->total))
                return 0;

        scatterwalk_start(&walk->in, req->src);
        scatterwalk_start(&walk->out, req->dst);

        walk->flags &= ~SKCIPHER_WALK_SLEEP;
        walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                       SKCIPHER_WALK_SLEEP : 0;

        walk->blocksize = crypto_skcipher_blocksize(tfm);
        walk->ivsize = crypto_skcipher_ivsize(tfm);
        walk->alignmask = crypto_skcipher_alignmask(tfm);

        if (alg->co.base.cra_type != &crypto_skcipher_type)
                walk->stride = alg->co.chunksize;
        else
                walk->stride = alg->walksize;

        return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
                       struct skcipher_request *req, bool atomic)
{
        int err;

        might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

        walk->flags &= ~SKCIPHER_WALK_PHYS;

        err = skcipher_walk_skcipher(walk, req);

        walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

        return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);

int skcipher_walk_async(struct skcipher_walk *walk,
                        struct skcipher_request *req)
{
        walk->flags |= SKCIPHER_WALK_PHYS;

        INIT_LIST_HEAD(&walk->buffers);

        return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

static int skcipher_walk_aead_common(struct skcipher_walk *walk,
                                     struct aead_request *req, bool atomic)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int err;

        walk->nbytes = 0;
        walk->iv = req->iv;
        walk->oiv = req->iv;

        if (unlikely(!walk->total))
                return 0;

        walk->flags &= ~SKCIPHER_WALK_PHYS;

        scatterwalk_start(&walk->in, req->src);
        scatterwalk_start(&walk->out, req->dst);

        scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
        scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

        scatterwalk_done(&walk->in, 0, walk->total);
        scatterwalk_done(&walk->out, 0, walk->total);

        if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
                walk->flags |= SKCIPHER_WALK_SLEEP;
        else
                walk->flags &= ~SKCIPHER_WALK_SLEEP;

        walk->blocksize = crypto_aead_blocksize(tfm);
        walk->stride = crypto_aead_chunksize(tfm);
        walk->ivsize = crypto_aead_ivsize(tfm);
        walk->alignmask = crypto_aead_alignmask(tfm);

        err = skcipher_walk_first(walk);

        if (atomic)
                walk->flags &= ~SKCIPHER_WALK_SLEEP;

        return err;
}

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
                               struct aead_request *req, bool atomic)
{
        walk->total = req->cryptlen;

        return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
                               struct aead_request *req, bool atomic)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);

        walk->total = req->cryptlen - crypto_aead_authsize(tfm);

        return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
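
/*
 * For the AEAD variants above, the walk deliberately skips
 * req->assoclen bytes of associated data in both scatterlists before
 * any ciphertext processing starts, and the decrypt variant excludes
 * the trailing authentication tag from walk->total.
 */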

static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
        if (crypto_skcipher_max_keysize(tfm) != 0)
                crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
                                     const u8 *key, unsigned int keylen)
{
        unsigned long alignmask = crypto_skcipher_alignmask(tfm);
        struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
        u8 *buffer, *alignbuffer;
        unsigned long absize;
        int ret;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        kfree_sensitive(buffer);
        return ret;
}

int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
                           unsigned int keylen)
{
        struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
        unsigned long alignmask = crypto_skcipher_alignmask(tfm);
        int err;

        if (cipher->co.base.cra_type != &crypto_skcipher_type) {
                struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm);

                crypto_lskcipher_clear_flags(*ctx, CRYPTO_TFM_REQ_MASK);
                crypto_lskcipher_set_flags(*ctx,
                                           crypto_skcipher_get_flags(tfm) &
                                           CRYPTO_TFM_REQ_MASK);
                err = crypto_lskcipher_setkey(*ctx, key, keylen);
                goto out;
        }

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
                return -EINVAL;

        if ((unsigned long)key & alignmask)
                err = skcipher_setkey_unaligned(tfm, key, keylen);
        else
                err = cipher->setkey(tfm, key, keylen);

out:
        if (unlikely(err)) {
                skcipher_set_needkey(tfm);
                return err;
        }

        crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
        return 0;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);

int crypto_skcipher_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        int ret;

        if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
                struct crypto_istat_cipher *istat = skcipher_get_stat(alg);

                atomic64_inc(&istat->encrypt_cnt);
                atomic64_add(req->cryptlen, &istat->encrypt_tlen);
        }

        if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                ret = -ENOKEY;
        else if (alg->co.base.cra_type != &crypto_skcipher_type)
                ret = crypto_lskcipher_encrypt_sg(req);
        else
                ret = alg->encrypt(req);

        return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

int crypto_skcipher_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        int ret;

        if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
                struct crypto_istat_cipher *istat = skcipher_get_stat(alg);

                atomic64_inc(&istat->decrypt_cnt);
                atomic64_add(req->cryptlen, &istat->decrypt_tlen);
        }

        if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                ret = -ENOKEY;
        else if (alg->co.base.cra_type != &crypto_skcipher_type)
                ret = crypto_lskcipher_decrypt_sg(req);
        else
                ret = alg->decrypt(req);

        return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
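
/*
 * For reference, a minimal synchronous caller of the entry points above
 * might look like this (illustrative sketch: key/keylen, the src/dst
 * scatterlists and the iv buffer are assumed to exist, and a real
 * caller must also handle allocation errors and -EINPROGRESS/-EBUSY
 * from async tfms):
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *req;
 *	int err;
 *
 *	err = crypto_skcipher_setkey(tfm, key, keylen);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      NULL, NULL);
 *	skcipher_request_set_crypt(req, src, dst, len, iv);
 *	err = crypto_skcipher_encrypt(req);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */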

static int crypto_lskcipher_export(struct skcipher_request *req, void *out)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        u8 *ivs = skcipher_request_ctx(req);

        ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

        memcpy(out, ivs + crypto_skcipher_ivsize(tfm),
               crypto_skcipher_statesize(tfm));

        return 0;
}

static int crypto_lskcipher_import(struct skcipher_request *req, const void *in)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        u8 *ivs = skcipher_request_ctx(req);

        ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

        memcpy(ivs + crypto_skcipher_ivsize(tfm), in,
               crypto_skcipher_statesize(tfm));

        return 0;
}

static int skcipher_noexport(struct skcipher_request *req, void *out)
{
        return 0;
}

static int skcipher_noimport(struct skcipher_request *req, const void *in)
{
        return 0;
}

int crypto_skcipher_export(struct skcipher_request *req, void *out)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

        if (alg->co.base.cra_type != &crypto_skcipher_type)
                return crypto_lskcipher_export(req, out);
        return alg->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_export);

int crypto_skcipher_import(struct skcipher_request *req, const void *in)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

        if (alg->co.base.cra_type != &crypto_skcipher_type)
                return crypto_lskcipher_import(req, in);
        return alg->import(req, in);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_import);
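
/*
 * Layout note: for lskcipher-backed tfms the request context holds the
 * IV followed immediately by the algorithm state, which is why the
 * export/import helpers above copy statesize bytes starting at
 * ivs + crypto_skcipher_ivsize(tfm).
 */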

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

        alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
        struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

        skcipher_set_needkey(skcipher);

        if (tfm->__crt_alg->cra_type != &crypto_skcipher_type) {
                unsigned am = crypto_skcipher_alignmask(skcipher);
                unsigned reqsize;

                reqsize = am & ~(crypto_tfm_ctx_alignment() - 1);
                reqsize += crypto_skcipher_ivsize(skcipher);
                reqsize += crypto_skcipher_statesize(skcipher);
                crypto_skcipher_set_reqsize(skcipher, reqsize);

                return crypto_init_lskcipher_ops_sg(tfm);
        }

        if (alg->exit)
                skcipher->base.exit = crypto_skcipher_exit_tfm;

        if (alg->init)
                return alg->init(skcipher);

        return 0;
}

static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
        if (alg->cra_type != &crypto_skcipher_type)
                return sizeof(struct crypto_lskcipher *);

        return crypto_alg_extsize(alg);
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
        struct skcipher_instance *skcipher =
                container_of(inst, struct skcipher_instance, s.base);

        skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);

        seq_printf(m, "type         : skcipher\n");
        seq_printf(m, "async        : %s\n",
                   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
        seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
        seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
        seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
        seq_printf(m, "walksize     : %u\n", skcipher->walksize);
        seq_printf(m, "statesize    : %u\n", skcipher->statesize);
}

static int __maybe_unused crypto_skcipher_report(
        struct sk_buff *skb, struct crypto_alg *alg)
{
        struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
        struct crypto_report_blkcipher rblkcipher;

        memset(&rblkcipher, 0, sizeof(rblkcipher));

        strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
        strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = skcipher->min_keysize;
        rblkcipher.max_keysize = skcipher->max_keysize;
        rblkcipher.ivsize = skcipher->ivsize;

        return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                       sizeof(rblkcipher), &rblkcipher);
}

static int __maybe_unused crypto_skcipher_report_stat(
        struct sk_buff *skb, struct crypto_alg *alg)
{
        struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
        struct crypto_istat_cipher *istat;
        struct crypto_stat_cipher rcipher;

        istat = skcipher_get_stat(skcipher);

        memset(&rcipher, 0, sizeof(rcipher));

        strscpy(rcipher.type, "cipher", sizeof(rcipher.type));

        rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
        rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
        rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
        rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
        rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);

        return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
}

static const struct crypto_type crypto_skcipher_type = {
        .extsize = crypto_skcipher_extsize,
        .init_tfm = crypto_skcipher_init_tfm,
        .free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
        .show = crypto_skcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
        .report = crypto_skcipher_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
        .report_stat = crypto_skcipher_report_stat,
#endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
                         struct crypto_instance *inst,
                         const char *name, u32 type, u32 mask)
{
        spawn->base.frontend = &crypto_skcipher_type;
        return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
                                              u32 type, u32 mask)
{
        return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
        const char *alg_name, u32 type, u32 mask)
{
        struct crypto_skcipher *tfm;

        /* Only sync algorithms allowed. */
        mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;

        tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

        /*
         * Make sure we do not allocate something that might get used with
         * an on-stack request: check the request size.
         */
        if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
                                    MAX_SYNC_SKCIPHER_REQSIZE)) {
                crypto_free_skcipher(tfm);
                return ERR_PTR(-EINVAL);
        }

        return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);
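
/*
 * Because the request size of a sync tfm is bounded by the check above,
 * callers may place the request on the stack (illustrative sketch; src,
 * dst, len and iv are assumed to exist):
 *
 *	struct crypto_sync_skcipher *tfm =
 *		crypto_alloc_sync_skcipher("ctr(aes)", 0, 0);
 *	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 *
 *	skcipher_request_set_sync_tfm(req, tfm);
 *	skcipher_request_set_callback(req, 0, NULL, NULL);
 *	skcipher_request_set_crypt(req, src, dst, len, iv);
 *	err = crypto_skcipher_encrypt(req);
 */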

int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
        return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);

int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
{
        struct crypto_istat_cipher *istat = skcipher_get_stat_common(alg);
        struct crypto_alg *base = &alg->base;

        if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
            alg->statesize > PAGE_SIZE / 2 ||
            (alg->ivsize + alg->statesize) > PAGE_SIZE / 2)
                return -EINVAL;

        if (!alg->chunksize)
                alg->chunksize = base->cra_blocksize;

        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;

        if (IS_ENABLED(CONFIG_CRYPTO_STATS))
                memset(istat, 0, sizeof(*istat));

        return 0;
}

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
        struct crypto_alg *base = &alg->base;
        int err;

        err = skcipher_prepare_alg_common(&alg->co);
        if (err)
                return err;

        if (alg->walksize > PAGE_SIZE / 8)
                return -EINVAL;

        if (!alg->walksize)
                alg->walksize = alg->chunksize;

        if (!alg->statesize) {
                alg->import = skcipher_noimport;
                alg->export = skcipher_noexport;
        } else if (!(alg->import && alg->export))
                return -EINVAL;

        base->cra_type = &crypto_skcipher_type;
        base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

        return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
        struct crypto_alg *base = &alg->base;
        int err;

        err = skcipher_prepare_alg(alg);
        if (err)
                return err;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
        crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = crypto_register_skcipher(&algs[i]);
                if (ret)
                        goto err;
        }

        return 0;

err:
        for (--i; i >= 0; --i)
                crypto_unregister_skcipher(&algs[i]);

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
        int i;

        for (i = count - 1; i >= 0; --i)
                crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
                               struct skcipher_instance *inst)
{
        int err;

        if (WARN_ON(!inst->free))
                return -EINVAL;

        err = skcipher_prepare_alg(&inst->alg);
        if (err)
                return err;

        return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
                                  unsigned int keylen)
{
        struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);

        crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
        crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
                                CRYPTO_TFM_REQ_MASK);
        return crypto_cipher_setkey(cipher, key, keylen);
}

static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
        struct skcipher_instance *inst = skcipher_alg_instance(tfm);
        struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
        struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_cipher *cipher;

        cipher = crypto_spawn_cipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->cipher = cipher;
        return 0;
}

static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
        struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

        crypto_free_cipher(ctx->cipher);
}

static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
        crypto_drop_cipher(skcipher_instance_ctx(inst));
        kfree(inst);
}

/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *         needs to register the instance.
 */
struct skcipher_instance *skcipher_alloc_instance_simple(
        struct crypto_template *tmpl, struct rtattr **tb)
{
        u32 mask;
        struct skcipher_instance *inst;
        struct crypto_cipher_spawn *spawn;
        struct crypto_alg *cipher_alg;
        int err;

        err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
        if (err)
                return ERR_PTR(err);

        inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
        if (!inst)
                return ERR_PTR(-ENOMEM);
        spawn = skcipher_instance_ctx(inst);

        err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
                                 crypto_attr_alg_name(tb[1]), 0, mask);
        if (err)
                goto err_free_inst;
        cipher_alg = crypto_spawn_cipher_alg(spawn);

        err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
                                  cipher_alg);
        if (err)
                goto err_free_inst;

        inst->free = skcipher_free_instance_simple;

        /* Default algorithm properties, can be overridden */
        inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
        inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
        inst->alg.base.cra_priority = cipher_alg->cra_priority;
        inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
        inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
        inst->alg.ivsize = cipher_alg->cra_blocksize;

        /* Use skcipher_ctx_simple by default, can be overridden */
        inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
        inst->alg.setkey = skcipher_setkey_simple;
        inst->alg.init = skcipher_init_tfm_simple;
        inst->alg.exit = skcipher_exit_tfm_simple;

        return inst;

err_free_inst:
        skcipher_free_instance_simple(inst);
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
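
/*
 * A template's create() callback can use the helper above roughly as
 * follows (illustrative sketch modelled on the simple mode templates;
 * example_encrypt/example_decrypt are assumed to exist elsewhere):
 *
 *	static int example_create(struct crypto_template *tmpl,
 *				  struct rtattr **tb)
 *	{
 *		struct skcipher_instance *inst;
 *		int err;
 *
 *		inst = skcipher_alloc_instance_simple(tmpl, tb);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *
 *		inst->alg.encrypt = example_encrypt;
 *		inst->alg.decrypt = example_decrypt;
 *
 *		err = skcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *		return err;
 *	}
 */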

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);