// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};

struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
	if (PageHighMem(scatterwalk_page(walk)))
		kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
	struct page *page = scatterwalk_page(walk);

	return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
	       offset_in_page(walk->offset);
}

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->out, walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}
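
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): for a
 * start address ending in 0xff0 and len == 32, start + len - 1 lands
 * at offset 0x00f of the next page, so end_page points at that page
 * boundary and max() pushes the spot there, keeping all 32 bytes in
 * one page.  If the range already fits (start ending in 0xf00,
 * len == 32), end_page <= start and start is returned unchanged.
 */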

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}

int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes;
	unsigned int nbytes = 0;

	if (!n)
		goto finish;

	if (likely(err >= 0)) {
		n -= err;
		nbytes = walk->total - n;
	}

	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
				    SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (err > 0) {
			/*
			 * Didn't process all bytes.  Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = 0;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);
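
/*
 * Convention note: on success a caller passes skcipher_walk_done() the
 * number of bytes it did NOT process in this step (usually 0), so
 * err >= 0 above means progress was made and walk->total - n is what
 * remains.  A sketch of the usual caller loop appears with
 * skcipher_walk_virt() below.
 */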

void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
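		/*
		 * (x ^ (x - 1)) >> 1, for x = sizeof(*p), is one less
		 * than the largest power of two dividing sizeof(*p):
		 * e.g. for a hypothetical sizeof(*p) == 40 (0b101000),
		 * 40 ^ 39 == 0b1111 and 0b1111 >> 1 == 7, i.e. the
		 * buffer member is known to be 8-byte aligned within
		 * the allocation.
		 */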
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;
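
	/*
	 * src and dst are unions, so virt.page below aliases the
	 * phys.page pointers just written: diff ends up non-zero iff
	 * source and destination differ in page or in offset, in which
	 * case the destination needs its own mapping.
	 */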
	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_hardirq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);
		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->stride = crypto_skcipher_walksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
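
/*
 * Usual caller pattern (illustrative sketch, not a definitive driver;
 * "unprocessed" is hypothetical and stands for however many bytes the
 * cipher hook chose to leave for the next step, usually 0 or a partial
 * tail smaller than the block size):
 *
 *	struct skcipher_walk walk;
 *	int err;
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while (walk.nbytes) {
 *		// transform walk.nbytes bytes from walk.src.virt.addr
 *		// into walk.dst.virt.addr, with the IV at walk.iv
 *		err = skcipher_walk_done(&walk, unprocessed);
 *	}
 *	return err;
 */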

int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
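
/*
 * Note: both AEAD variants skip req->assoclen bytes of associated data
 * before the walk starts, and on the decrypt side req->cryptlen still
 * counts the authentication tag, which is never walked -- hence the
 * authsize subtraction above.
 */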

static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (crypto_skcipher_max_keysize(tfm) != 0)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);

int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int cryptlen = req->cryptlen;
	int ret;

	crypto_stats_get(alg);
	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_skcipher_alg(tfm)->encrypt(req);
	crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int cryptlen = req->cryptlen;
	int ret;

	crypto_stats_get(alg);
	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_skcipher_alg(tfm)->decrypt(req);
	crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
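
/*
 * End-to-end usage sketch (illustrative only; error handling trimmed,
 * and "cbc(aes)", key, iv, buf and len are caller-provided assumptions;
 * len must be a multiple of the block size for CBC):
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	struct scatterlist sg;
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, 16);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, buf, len);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */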

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	skcipher_set_needkey(skcipher);

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
	.report = crypto_skcipher_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 struct crypto_instance *inst,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
				const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);
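
/*
 * The reqsize cap above exists so requests can live on the stack
 * (sketch, assuming a tfm from crypto_alloc_sync_skcipher() and
 * caller-provided src, dst, len and iv):
 *
 *	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 *
 *	skcipher_request_set_sync_tfm(req, tfm);
 *	skcipher_request_set_callback(req, 0, NULL, NULL);
 *	skcipher_request_set_crypt(req, src, dst, len, iv);
 *	err = crypto_skcipher_encrypt(req);
 *	skcipher_request_zero(req);
 */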

int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;
	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	base->cra_type = &crypto_skcipher_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);
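
/*
 * Registration sketch (illustrative; the "example" names, context
 * struct and callbacks are hypothetical, not part of this file):
 *
 *	static struct skcipher_alg example_alg = {
 *		.base.cra_name		= "ecb(example)",
 *		.base.cra_driver_name	= "ecb-example-generic",
 *		.base.cra_priority	= 100,
 *		.base.cra_blocksize	= 16,
 *		.base.cra_ctxsize	= sizeof(struct example_ctx),
 *		.base.cra_module	= THIS_MODULE,
 *		.min_keysize		= 16,
 *		.max_keysize		= 32,
 *		.setkey			= example_setkey,
 *		.encrypt		= example_encrypt,
 *		.decrypt		= example_decrypt,
 *	};
 *
 * A module would then call crypto_register_skcipher(&example_alg) from
 * its init hook and crypto_unregister_skcipher(&example_alg) on exit.
 */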

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);

	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	return crypto_cipher_setkey(cipher, key, keylen);
}

static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}

static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
}

static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
	crypto_drop_cipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *skcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct skcipher_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *cipher_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);
	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	cipher_alg = crypto_spawn_cipher_alg(spawn);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	skcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
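
/*
 * Template usage sketch (illustrative; modeled on how a simple mode
 * such as cbc plugs in, with hypothetical crypto_example_{encrypt,
 * decrypt} hooks):
 *
 *	static int crypto_example_create(struct crypto_template *tmpl,
 *					 struct rtattr **tb)
 *	{
 *		struct skcipher_instance *inst;
 *		int err;
 *
 *		inst = skcipher_alloc_instance_simple(tmpl, tb);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *
 *		inst->alg.encrypt = crypto_example_encrypt;
 *		inst->alg.decrypt = crypto_example_decrypt;
 *
 *		err = skcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *		return err;
 *	}
 */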

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);