Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Cryptographic API. | |
3 | * | |
4 | * Cipher operations. | |
5 | * | |
6 | * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> | |
c774e93e | 7 | * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au> |
1da177e4 LT |
8 | * |
9 | * This program is free software; you can redistribute it and/or modify it | |
10 | * under the terms of the GNU General Public License as published by the Free | |
11 | * Software Foundation; either version 2 of the License, or (at your option) | |
12 | * any later version. | |
13 | * | |
14 | */ | |
15 | #include <linux/compiler.h> | |
16 | #include <linux/kernel.h> | |
17 | #include <linux/crypto.h> | |
18 | #include <linux/errno.h> | |
19 | #include <linux/mm.h> | |
20 | #include <linux/slab.h> | |
21 | #include <linux/string.h> | |
22 | #include <asm/scatterlist.h> | |
23 | #include "internal.h" | |
24 | #include "scatterwalk.h" | |
25 | ||
7226bc87 HX |
/*
 * Compatibility view of the algorithm's cipher ops that additionally
 * exposes optional multi-block ECB/CBC fast paths.  The mode helpers
 * below obtain it by casting &tfm->__crt_alg->cra_cipher, so the
 * leading members must stay layout-compatible with struct cipher_alg.
 */
struct cipher_alg_compat {
	unsigned int cia_min_keysize;	/* smallest accepted key length */
	unsigned int cia_max_keysize;	/* largest accepted key length */
	int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
	                  unsigned int keylen);
	/* single-block encrypt/decrypt primitives */
	void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);

	/*
	 * Optional multi-block fast paths; when NULL the generic
	 * ecb_process/cbc_process_* loops are used instead.
	 */
	unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc,
					u8 *dst, const u8 *src,
					unsigned int nbytes);
	unsigned int (*cia_decrypt_ecb)(const struct cipher_desc *desc,
					u8 *dst, const u8 *src,
					unsigned int nbytes);
	unsigned int (*cia_encrypt_cbc)(const struct cipher_desc *desc,
					u8 *dst, const u8 *src,
					unsigned int nbytes);
	unsigned int (*cia_decrypt_cbc)(const struct cipher_desc *desc,
					u8 *dst, const u8 *src,
					unsigned int nbytes);
};
47 | ||
1da177e4 LT |
48 | static inline void xor_64(u8 *a, const u8 *b) |
49 | { | |
50 | ((u32 *)a)[0] ^= ((u32 *)b)[0]; | |
51 | ((u32 *)a)[1] ^= ((u32 *)b)[1]; | |
52 | } | |
53 | ||
54 | static inline void xor_128(u8 *a, const u8 *b) | |
55 | { | |
56 | ((u32 *)a)[0] ^= ((u32 *)b)[0]; | |
57 | ((u32 *)a)[1] ^= ((u32 *)b)[1]; | |
58 | ((u32 *)a)[2] ^= ((u32 *)b)[2]; | |
59 | ((u32 *)a)[3] ^= ((u32 *)b)[3]; | |
60 | } | |
c774e93e HX |
61 | |
/*
 * Slow path: the current scatterlist position cannot supply a whole,
 * suitably placed cipher block, so one block is gathered into an
 * aligned stack buffer, processed there, and scattered back out.
 * Always handles exactly one block and returns bsize.
 */
static unsigned int crypt_slow(const struct cipher_desc *desc,
			       struct scatter_walk *in,
			       struct scatter_walk *out, unsigned int bsize)
{
	unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);
	/* VLA: one input block + one output block + alignment slack */
	u8 buffer[bsize * 2 + alignmask];
	u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	u8 *dst = src + bsize;

	/* gather one block from the input walk... */
	scatterwalk_copychunks(src, in, bsize, 0);
	/* ...run the per-block processing routine on the copies... */
	desc->prfn(desc, dst, src, bsize);
	/* ...and scatter the result through the output walk */
	scatterwalk_copychunks(dst, out, bsize, 1);

	return bsize;
}
77 | ||
c774e93e HX |
/*
 * Fast path: both walks can supply nbytes contiguously, so the pages
 * are mapped and the whole span is handed to desc->prfn in one call.
 *
 * @tmp: non-NULL when the data is misaligned for this algorithm; the
 *       span is then bounced through this pre-allocated page instead
 *       of being processed in the mapped buffers directly.
 *
 * Returns the number of bytes actually processed (as reported by the
 * processing routine) and advances both walks by that amount.
 */
static inline unsigned int crypt_fast(const struct cipher_desc *desc,
				      struct scatter_walk *in,
				      struct scatter_walk *out,
				      unsigned int nbytes, u8 *tmp)
{
	u8 *src, *dst;
	u8 *real_src, *real_dst;

	real_src = scatterwalk_map(in, 0);
	real_dst = scatterwalk_map(out, 1);

	src = real_src;
	/* in-place operation: read and write through the same mapping */
	dst = scatterwalk_samebuf(in, out) ? src : real_dst;

	if (tmp) {
		/* bounce misaligned data through the temporary page */
		memcpy(tmp, src, nbytes);
		src = tmp;
		dst = tmp;
	}

	nbytes = desc->prfn(desc, dst, src, nbytes);

	if (tmp)
		/* copy the processed result back to the real destination */
		memcpy(real_dst, tmp, nbytes);

	scatterwalk_unmap(real_src, 0);
	scatterwalk_unmap(real_dst, 1);

	scatterwalk_advance(in, nbytes);
	scatterwalk_advance(out, nbytes);

	return nbytes;
}
111 | ||
/*
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per page.
 */
static int crypt(const struct cipher_desc *desc,
		 struct scatterlist *dst,
		 struct scatterlist *src,
		 unsigned int nbytes)
{
	struct scatter_walk walk_in, walk_out;
	struct crypto_tfm *tfm = desc->tfm;
	const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
	unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
	/* lazily allocated bounce page for misaligned chunks */
	unsigned long buffer = 0;

	if (!nbytes)
		return 0;

	/* block ciphers operate only on whole blocks */
	if (nbytes % bsize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return -EINVAL;
	}

	scatterwalk_start(&walk_in, src);
	scatterwalk_start(&walk_out, dst);

	for(;;) {
		unsigned int n = nbytes;
		u8 *tmp = NULL;

		if (!scatterwalk_aligned(&walk_in, alignmask) ||
		    !scatterwalk_aligned(&walk_out, alignmask)) {
			if (!buffer) {
				buffer = __get_free_page(GFP_ATOMIC);
				if (!buffer)
					/* no bounce page: force the
					   one-block slow path below */
					n = 0;
			}
			tmp = (u8 *)buffer;
		}

		/* limit n to what both walks can supply contiguously */
		n = scatterwalk_clamp(&walk_in, n);
		n = scatterwalk_clamp(&walk_out, n);

		if (likely(n >= bsize))
			n = crypt_fast(desc, &walk_in, &walk_out, n, tmp);
		else
			n = crypt_slow(desc, &walk_in, &walk_out, bsize);

		nbytes -= n;

		scatterwalk_done(&walk_in, 0, nbytes);
		scatterwalk_done(&walk_out, 1, nbytes);

		if (!nbytes)
			break;

		/* give the scheduler a chance between chunks */
		crypto_yield(tfm->crt_flags);
	}

	if (buffer)
		free_page(buffer);

	return 0;
}
177 | ||
915e8561 HX |
/*
 * Run crypt() while guaranteeing that the IV seen by the mode routines
 * satisfies the algorithm's alignment mask.  A misaligned caller IV is
 * bounced through an aligned stack copy, and the (possibly updated)
 * chaining value is copied back so the caller still observes it.
 */
static int crypt_iv_unaligned(struct cipher_desc *desc,
			      struct scatterlist *dst,
			      struct scatterlist *src,
			      unsigned int nbytes)
{
	struct crypto_tfm *tfm = desc->tfm;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	u8 *iv = desc->info;

	if (unlikely(((unsigned long)iv & alignmask))) {
		unsigned int ivsize = tfm->crt_cipher.cit_ivsize;
		/* VLA with slack so tmp can be rounded up to alignment */
		u8 buffer[ivsize + alignmask];
		u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
		int err;

		desc->info = memcpy(tmp, iv, ivsize);
		err = crypt(desc, dst, src, nbytes);
		/* propagate the updated IV back to the caller's buffer */
		memcpy(iv, tmp, ivsize);

		return err;
	}

	return crypt(desc, dst, src, nbytes);
}
202 | ||
c774e93e HX |
203 | static unsigned int cbc_process_encrypt(const struct cipher_desc *desc, |
204 | u8 *dst, const u8 *src, | |
205 | unsigned int nbytes) | |
1da177e4 | 206 | { |
c774e93e HX |
207 | struct crypto_tfm *tfm = desc->tfm; |
208 | void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block; | |
209 | int bsize = crypto_tfm_alg_blocksize(tfm); | |
210 | ||
6c2bb98b | 211 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn; |
c774e93e HX |
212 | u8 *iv = desc->info; |
213 | unsigned int done = 0; | |
214 | ||
fe2d5295 HX |
215 | nbytes -= bsize; |
216 | ||
c774e93e HX |
217 | do { |
218 | xor(iv, src); | |
6c2bb98b | 219 | fn(tfm, dst, iv); |
c774e93e | 220 | memcpy(iv, dst, bsize); |
1da177e4 | 221 | |
c774e93e HX |
222 | src += bsize; |
223 | dst += bsize; | |
fe2d5295 | 224 | } while ((done += bsize) <= nbytes); |
c774e93e HX |
225 | |
226 | return done; | |
1da177e4 LT |
227 | } |
228 | ||
c774e93e HX |
229 | static unsigned int cbc_process_decrypt(const struct cipher_desc *desc, |
230 | u8 *dst, const u8 *src, | |
231 | unsigned int nbytes) | |
1da177e4 | 232 | { |
c774e93e HX |
233 | struct crypto_tfm *tfm = desc->tfm; |
234 | void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block; | |
235 | int bsize = crypto_tfm_alg_blocksize(tfm); | |
827c3911 | 236 | unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm); |
c774e93e | 237 | |
827c3911 HX |
238 | u8 stack[src == dst ? bsize + alignmask : 0]; |
239 | u8 *buf = (u8 *)ALIGN((unsigned long)stack, alignmask + 1); | |
c774e93e HX |
240 | u8 **dst_p = src == dst ? &buf : &dst; |
241 | ||
6c2bb98b | 242 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn; |
c774e93e HX |
243 | u8 *iv = desc->info; |
244 | unsigned int done = 0; | |
245 | ||
fe2d5295 HX |
246 | nbytes -= bsize; |
247 | ||
c774e93e HX |
248 | do { |
249 | u8 *tmp_dst = *dst_p; | |
1da177e4 | 250 | |
6c2bb98b | 251 | fn(tfm, tmp_dst, src); |
c774e93e HX |
252 | xor(tmp_dst, iv); |
253 | memcpy(iv, src, bsize); | |
254 | if (tmp_dst != dst) | |
255 | memcpy(dst, tmp_dst, bsize); | |
256 | ||
257 | src += bsize; | |
258 | dst += bsize; | |
fe2d5295 | 259 | } while ((done += bsize) <= nbytes); |
c774e93e HX |
260 | |
261 | return done; | |
1da177e4 LT |
262 | } |
263 | ||
c774e93e HX |
264 | static unsigned int ecb_process(const struct cipher_desc *desc, u8 *dst, |
265 | const u8 *src, unsigned int nbytes) | |
1da177e4 | 266 | { |
c774e93e HX |
267 | struct crypto_tfm *tfm = desc->tfm; |
268 | int bsize = crypto_tfm_alg_blocksize(tfm); | |
6c2bb98b | 269 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn; |
c774e93e HX |
270 | unsigned int done = 0; |
271 | ||
fe2d5295 HX |
272 | nbytes -= bsize; |
273 | ||
c774e93e | 274 | do { |
6c2bb98b | 275 | fn(tfm, dst, src); |
c774e93e HX |
276 | |
277 | src += bsize; | |
278 | dst += bsize; | |
fe2d5295 | 279 | } while ((done += bsize) <= nbytes); |
c774e93e HX |
280 | |
281 | return done; | |
1da177e4 LT |
282 | } |
283 | ||
284 | static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) | |
285 | { | |
286 | struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher; | |
287 | ||
560c06ae | 288 | tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; |
1da177e4 LT |
289 | if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) { |
290 | tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; | |
291 | return -EINVAL; | |
292 | } else | |
560c06ae | 293 | return cia->cia_setkey(tfm, key, keylen); |
1da177e4 LT |
294 | } |
295 | ||
296 | static int ecb_encrypt(struct crypto_tfm *tfm, | |
297 | struct scatterlist *dst, | |
298 | struct scatterlist *src, unsigned int nbytes) | |
299 | { | |
c774e93e | 300 | struct cipher_desc desc; |
7226bc87 | 301 | struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher; |
c774e93e HX |
302 | |
303 | desc.tfm = tfm; | |
40725181 HX |
304 | desc.crfn = cipher->cia_encrypt; |
305 | desc.prfn = cipher->cia_encrypt_ecb ?: ecb_process; | |
c774e93e HX |
306 | |
307 | return crypt(&desc, dst, src, nbytes); | |
1da177e4 LT |
308 | } |
309 | ||
310 | static int ecb_decrypt(struct crypto_tfm *tfm, | |
311 | struct scatterlist *dst, | |
312 | struct scatterlist *src, | |
313 | unsigned int nbytes) | |
314 | { | |
c774e93e | 315 | struct cipher_desc desc; |
7226bc87 | 316 | struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher; |
c774e93e HX |
317 | |
318 | desc.tfm = tfm; | |
40725181 HX |
319 | desc.crfn = cipher->cia_decrypt; |
320 | desc.prfn = cipher->cia_decrypt_ecb ?: ecb_process; | |
c774e93e HX |
321 | |
322 | return crypt(&desc, dst, src, nbytes); | |
1da177e4 LT |
323 | } |
324 | ||
325 | static int cbc_encrypt(struct crypto_tfm *tfm, | |
326 | struct scatterlist *dst, | |
327 | struct scatterlist *src, | |
328 | unsigned int nbytes) | |
329 | { | |
c774e93e | 330 | struct cipher_desc desc; |
7226bc87 | 331 | struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher; |
c774e93e HX |
332 | |
333 | desc.tfm = tfm; | |
40725181 HX |
334 | desc.crfn = cipher->cia_encrypt; |
335 | desc.prfn = cipher->cia_encrypt_cbc ?: cbc_process_encrypt; | |
c774e93e HX |
336 | desc.info = tfm->crt_cipher.cit_iv; |
337 | ||
338 | return crypt(&desc, dst, src, nbytes); | |
1da177e4 LT |
339 | } |
340 | ||
341 | static int cbc_encrypt_iv(struct crypto_tfm *tfm, | |
342 | struct scatterlist *dst, | |
343 | struct scatterlist *src, | |
344 | unsigned int nbytes, u8 *iv) | |
345 | { | |
c774e93e | 346 | struct cipher_desc desc; |
7226bc87 | 347 | struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher; |
c774e93e HX |
348 | |
349 | desc.tfm = tfm; | |
40725181 HX |
350 | desc.crfn = cipher->cia_encrypt; |
351 | desc.prfn = cipher->cia_encrypt_cbc ?: cbc_process_encrypt; | |
c774e93e HX |
352 | desc.info = iv; |
353 | ||
915e8561 | 354 | return crypt_iv_unaligned(&desc, dst, src, nbytes); |
1da177e4 LT |
355 | } |
356 | ||
357 | static int cbc_decrypt(struct crypto_tfm *tfm, | |
358 | struct scatterlist *dst, | |
359 | struct scatterlist *src, | |
360 | unsigned int nbytes) | |
361 | { | |
c774e93e | 362 | struct cipher_desc desc; |
7226bc87 | 363 | struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher; |
c774e93e HX |
364 | |
365 | desc.tfm = tfm; | |
40725181 HX |
366 | desc.crfn = cipher->cia_decrypt; |
367 | desc.prfn = cipher->cia_decrypt_cbc ?: cbc_process_decrypt; | |
c774e93e HX |
368 | desc.info = tfm->crt_cipher.cit_iv; |
369 | ||
370 | return crypt(&desc, dst, src, nbytes); | |
1da177e4 LT |
371 | } |
372 | ||
373 | static int cbc_decrypt_iv(struct crypto_tfm *tfm, | |
374 | struct scatterlist *dst, | |
375 | struct scatterlist *src, | |
376 | unsigned int nbytes, u8 *iv) | |
377 | { | |
c774e93e | 378 | struct cipher_desc desc; |
7226bc87 | 379 | struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher; |
c774e93e HX |
380 | |
381 | desc.tfm = tfm; | |
40725181 HX |
382 | desc.crfn = cipher->cia_decrypt; |
383 | desc.prfn = cipher->cia_decrypt_cbc ?: cbc_process_decrypt; | |
c774e93e HX |
384 | desc.info = iv; |
385 | ||
915e8561 | 386 | return crypt_iv_unaligned(&desc, dst, src, nbytes); |
1da177e4 LT |
387 | } |
388 | ||
/* Stub for cipher modes wired up but not implemented here (CFB/CTR). */
static int nocrypt(struct crypto_tfm *tfm,
		   struct scatterlist *dst,
		   struct scatterlist *src,
		   unsigned int nbytes)
{
	return -ENOSYS;
}
396 | ||
/* IV-taking stub for modes without an implementation, and for ECB,
 * which has no IV-based entry points. */
static int nocrypt_iv(struct crypto_tfm *tfm,
		      struct scatterlist *dst,
		      struct scatterlist *src,
		      unsigned int nbytes, u8 *iv)
{
	return -ENOSYS;
}
404 | ||
405 | int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags) | |
406 | { | |
407 | u32 mode = flags & CRYPTO_TFM_MODE_MASK; | |
1da177e4 | 408 | tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB; |
1da177e4 LT |
409 | return 0; |
410 | } | |
411 | ||
f28776a3 HX |
/*
 * Process one block whose source and/or destination pointer violates
 * the algorithm's alignment mask: copy the block into an aligned stack
 * buffer, run @fn on it in place, then copy the result to dst.
 */
static void cipher_crypt_unaligned(void (*fn)(struct crypto_tfm *, u8 *,
					      const u8 *),
				   struct crypto_tfm *tfm,
				   u8 *dst, const u8 *src)
{
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	unsigned int size = crypto_tfm_alg_blocksize(tfm);
	/* VLA with slack so tmp can be rounded up to alignment */
	u8 buffer[size + alignmask];
	u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);

	memcpy(tmp, src, size);
	fn(tfm, tmp, tmp);
	memcpy(dst, tmp, size);
}
426 | ||
427 | static void cipher_encrypt_unaligned(struct crypto_tfm *tfm, | |
428 | u8 *dst, const u8 *src) | |
429 | { | |
430 | unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); | |
431 | struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; | |
432 | ||
433 | if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) { | |
434 | cipher_crypt_unaligned(cipher->cia_encrypt, tfm, dst, src); | |
435 | return; | |
436 | } | |
437 | ||
438 | cipher->cia_encrypt(tfm, dst, src); | |
439 | } | |
440 | ||
441 | static void cipher_decrypt_unaligned(struct crypto_tfm *tfm, | |
442 | u8 *dst, const u8 *src) | |
443 | { | |
444 | unsigned long alignmask = crypto_tfm_alg_alignmask(tfm); | |
445 | struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher; | |
446 | ||
447 | if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) { | |
448 | cipher_crypt_unaligned(cipher->cia_decrypt, tfm, dst, src); | |
449 | return; | |
450 | } | |
451 | ||
452 | cipher->cia_decrypt(tfm, dst, src); | |
453 | } | |
454 | ||
1da177e4 LT |
/*
 * Wire up the cipher_tfm operation table for @tfm according to the
 * mode recorded by crypto_init_cipher_flags().  For CBC this also
 * selects the block-size-specific XOR helper and points cit_iv at
 * space located just past the (aligned) context area.
 *
 * Returns 0 on success, or -EINVAL for a CBC block size with no XOR
 * helper (only 8- and 16-byte blocks are supported).
 */
int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
	int ret = 0;
	struct cipher_tfm *ops = &tfm->crt_cipher;
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	ops->cit_setkey = setkey;
	/* use the bounce-buffer wrappers only when the algorithm
	   declares an alignment requirement */
	ops->cit_encrypt_one = crypto_tfm_alg_alignmask(tfm) ?
		cipher_encrypt_unaligned : cipher->cia_encrypt;
	ops->cit_decrypt_one = crypto_tfm_alg_alignmask(tfm) ?
		cipher_decrypt_unaligned : cipher->cia_decrypt;

	switch (tfm->crt_cipher.cit_mode) {
	case CRYPTO_TFM_MODE_ECB:
		ops->cit_encrypt = ecb_encrypt;
		ops->cit_decrypt = ecb_decrypt;
		/* ECB takes no IV */
		ops->cit_encrypt_iv = nocrypt_iv;
		ops->cit_decrypt_iv = nocrypt_iv;
		break;

	case CRYPTO_TFM_MODE_CBC:
		ops->cit_encrypt = cbc_encrypt;
		ops->cit_decrypt = cbc_decrypt;
		ops->cit_encrypt_iv = cbc_encrypt_iv;
		ops->cit_decrypt_iv = cbc_decrypt_iv;
		break;

	/* CFB and CTR are recognised but not implemented: all their
	   entry points return -ENOSYS via the stubs */
	case CRYPTO_TFM_MODE_CFB:
		ops->cit_encrypt = nocrypt;
		ops->cit_decrypt = nocrypt;
		ops->cit_encrypt_iv = nocrypt_iv;
		ops->cit_decrypt_iv = nocrypt_iv;
		break;

	case CRYPTO_TFM_MODE_CTR:
		ops->cit_encrypt = nocrypt;
		ops->cit_decrypt = nocrypt;
		ops->cit_encrypt_iv = nocrypt_iv;
		ops->cit_decrypt_iv = nocrypt_iv;
		break;

	default:
		BUG();
	}

	if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {
		unsigned long align;
		unsigned long addr;

		/* pick the XOR helper matching the block size */
		switch (crypto_tfm_alg_blocksize(tfm)) {
		case 8:
			ops->cit_xor_block = xor_64;
			break;

		case 16:
			ops->cit_xor_block = xor_128;
			break;

		default:
			printk(KERN_WARNING "%s: block size %u not supported\n",
			       crypto_tfm_alg_name(tfm),
			       crypto_tfm_alg_blocksize(tfm));
			ret = -EINVAL;
			goto out;
		}

		/* CBC IV is one block; it is stored immediately after
		   the context area, aligned to the algorithm's mask */
		ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
		align = crypto_tfm_alg_alignmask(tfm) + 1;
		addr = (unsigned long)crypto_tfm_ctx(tfm);
		addr = ALIGN(addr, align);
		addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
		ops->cit_iv = (void *)addr;
	}

out:
	return ret;
}
532 | ||
533 | void crypto_exit_cipher_ops(struct crypto_tfm *tfm) | |
534 | { | |
1da177e4 | 535 | } |