2874c5fd 1/* SPDX-License-Identifier: GPL-2.0-or-later */
1da177e4
LT
2/*
3 * Scatterlist Cryptographic API.
4 *
5 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
6 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
5cb1454b 7 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
1da177e4
LT
8 *
9 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
18735dd8 10 * and Nettle, by Niels Möller.
1da177e4
LT
11 */
12#ifndef _LINUX_CRYPTO_H
13#define _LINUX_CRYPTO_H
14
60063497 15#include <linux/atomic.h>
1da177e4 16#include <linux/kernel.h>
1da177e4 17#include <linux/list.h>
187f1882 18#include <linux/bug.h>
79911102 19#include <linux/slab.h>
1da177e4 20#include <linux/string.h>
79911102 21#include <linux/uaccess.h>
ada69a16 22#include <linux/completion.h>
1da177e4 23
5d26a105
KC
24/*
25 * Autoloaded crypto modules should only use a prefixed name to avoid allowing
26 * arbitrary modules to be loaded. Loading from userspace may still need the
 27 * unprefixed names, so those aliases are retained as well.
28 * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
29 * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
30 * expands twice on the same line. Instead, use a separate base name for the
31 * alias.
32 */
33#define MODULE_ALIAS_CRYPTO(name) \
34 __MODULE_INFO(alias, alias_userspace, name); \
35 __MODULE_INFO(alias, alias_crypto, "crypto-" name)
36
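/*
 * Usage sketch (illustrative addition, not part of the original header): a
 * module implementing a hypothetical "foo" algorithm declares the alias once
 * and thereby provides both the plain "foo" name used for userspace lookups
 * and the autoload-safe "crypto-foo" name:
 *
 *	MODULE_ALIAS_CRYPTO("foo");
 */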
1da177e4
LT
37/*
38 * Algorithm masks and types.
39 */
2825982d 40#define CRYPTO_ALG_TYPE_MASK 0x0000000f
1da177e4 41#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
004a403c
LH
42#define CRYPTO_ALG_TYPE_COMPRESS 0x00000002
43#define CRYPTO_ALG_TYPE_AEAD 0x00000003
055bcee3 44#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004
332f8840 45#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
4e6c3df4 46#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
4e5f2c40 47#define CRYPTO_ALG_TYPE_KPP 0x00000008
2ebda74f 48#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
1ab53a77 49#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b
17f0f4a4 50#define CRYPTO_ALG_TYPE_RNG 0x0000000c
3c339ab8 51#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
63044c4f
GC
52#define CRYPTO_ALG_TYPE_DIGEST 0x0000000e
53#define CRYPTO_ALG_TYPE_HASH 0x0000000e
54#define CRYPTO_ALG_TYPE_SHASH 0x0000000e
55#define CRYPTO_ALG_TYPE_AHASH 0x0000000f
055bcee3
HX
56
57#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
63044c4f 58#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
332f8840 59#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
1ab53a77 60#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e
1da177e4 61
2825982d 62#define CRYPTO_ALG_LARVAL 0x00000010
6bfd4809
HX
63#define CRYPTO_ALG_DEAD 0x00000020
64#define CRYPTO_ALG_DYING 0x00000040
f3f632d6 65#define CRYPTO_ALG_ASYNC 0x00000080
2825982d 66
6010439f
HX
67/*
68 * Set this bit if and only if the algorithm requires another algorithm of
69 * the same type to handle corner cases.
70 */
71#define CRYPTO_ALG_NEED_FALLBACK 0x00000100
72
73d3864a
HX
73/*
74 * Set if the algorithm has passed automated run-time testing. Note that
75 * if there is no run-time testing for a given algorithm it is considered
76 * to have passed.
77 */
78
79#define CRYPTO_ALG_TESTED 0x00000400
80
64a947b1 81/*
864e0981 82 * Set if the algorithm is an instance that is built from templates.
64a947b1
SK
83 */
84#define CRYPTO_ALG_INSTANCE 0x00000800
85
d912bb76
NM
86/* Set this bit if the algorithm provided is hardware accelerated but
 87 * not available to userspace via an instruction set or similar means.
88 */
89#define CRYPTO_ALG_KERN_DRIVER_ONLY 0x00001000
90
06ca7f68
SM
91/*
92 * Mark a cipher as a service implementation only usable by another
93 * cipher and never by a normal user of the kernel crypto API
94 */
95#define CRYPTO_ALG_INTERNAL 0x00002000
96
a208fa8f
EB
97/*
98 * Set if the algorithm has a ->setkey() method but can be used without
99 * calling it first, i.e. there is a default key.
100 */
101#define CRYPTO_ALG_OPTIONAL_KEY 0x00004000
102
e2861fa7
MG
103/*
104 * Don't trigger module loading
105 */
106#define CRYPTO_NOLOAD 0x00008000
107
1da177e4
LT
108/*
109 * Transform masks and values (for crt_flags).
110 */
9fa68f62
EB
111#define CRYPTO_TFM_NEED_KEY 0x00000001
112
1da177e4
LT
113#define CRYPTO_TFM_REQ_MASK 0x000fff00
114#define CRYPTO_TFM_RES_MASK 0xfff00000
115
231baecd 116#define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS 0x00000100
64baf3cf 117#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200
32e3983f 118#define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400
1da177e4
LT
119#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000
120#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000
121#define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000
122#define CRYPTO_TFM_RES_BAD_BLOCK_LEN 0x00800000
123#define CRYPTO_TFM_RES_BAD_FLAGS 0x01000000
124
125/*
126 * Miscellaneous stuff.
127 */
f437a3f4 128#define CRYPTO_MAX_ALG_NAME 128
1da177e4 129
79911102
HX
130/*
131 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
132 * declaration) is used to ensure that the crypto_tfm context structure is
133 * aligned correctly for the given architecture so that there are no alignment
134 * faults for C data types. In particular, this is required on platforms such
135 * as arm where pointers are 32-bit aligned but there are data types such as
136 * u64 which require 64-bit alignment.
137 */
79911102 138#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
79911102 139
79911102 140#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
79911102 141
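/*
 * Illustrative sketch (not part of the original header): because the tfm
 * context area (__crt_ctx below) is declared with CRYPTO_MINALIGN_ATTR, a
 * driver context such as this hypothetical one can hold u64 members without
 * risking alignment faults on 32-bit platforms:
 *
 *	struct example_cipher_ctx {
 *		u64 round_keys[60];
 *		unsigned int key_len;
 *	};
 */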
1da177e4 142struct scatterlist;
32e3983f
HX
143struct crypto_ablkcipher;
144struct crypto_async_request;
5cde0af2 145struct crypto_blkcipher;
40725181 146struct crypto_tfm;
e853c3cf 147struct crypto_type;
40725181 148
32e3983f
HX
149typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
150
0d7f488f
SM
151/**
152 * DOC: Block Cipher Context Data Structures
153 *
154 * These data structures define the operating context for each block cipher
155 * type.
156 */
157
32e3983f
HX
158struct crypto_async_request {
159 struct list_head list;
160 crypto_completion_t complete;
161 void *data;
162 struct crypto_tfm *tfm;
163
164 u32 flags;
165};
166
167struct ablkcipher_request {
168 struct crypto_async_request base;
169
170 unsigned int nbytes;
171
172 void *info;
173
174 struct scatterlist *src;
175 struct scatterlist *dst;
176
177 void *__ctx[] CRYPTO_MINALIGN_ATTR;
178};
179
5cde0af2
HX
180struct blkcipher_desc {
181 struct crypto_blkcipher *tfm;
182 void *info;
183 u32 flags;
184};
185
0d7f488f
SM
186/**
187 * DOC: Block Cipher Algorithm Definitions
188 *
189 * These data structures define modular crypto algorithm implementations,
190 * managed via crypto_register_alg() and crypto_unregister_alg().
191 */
192
193/**
194 * struct ablkcipher_alg - asynchronous block cipher definition
195 * @min_keysize: Minimum key size supported by the transformation. This is the
196 * smallest key length supported by this transformation algorithm.
197 * This must be set to one of the pre-defined values as this is
198 * not hardware specific. Possible values for this field can be
199 * found via git grep "_MIN_KEY_SIZE" include/crypto/
200 * @max_keysize: Maximum key size supported by the transformation. This is the
201 * largest key length supported by this transformation algorithm.
202 * This must be set to one of the pre-defined values as this is
203 * not hardware specific. Possible values for this field can be
204 * found via git grep "_MAX_KEY_SIZE" include/crypto/
205 * @setkey: Set key for the transformation. This function is used to either
206 * program a supplied key into the hardware or store the key in the
207 * transformation context for programming it later. Note that this
208 * function does modify the transformation context. This function can
209 * be called multiple times during the existence of the transformation
210 * object, so one must make sure the key is properly reprogrammed into
211 * the hardware. This function is also responsible for checking the key
212 * length for validity. In case a software fallback was put in place in
213 * the @cra_init call, this function might need to use the fallback if
214 * the algorithm doesn't support all of the key sizes.
215 * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
216 * the supplied scatterlist containing the blocks of data. The crypto
217 * API consumer is responsible for aligning the entries of the
218 * scatterlist properly and making sure the chunks are correctly
219 * sized. In case a software fallback was put in place in the
220 * @cra_init call, this function might need to use the fallback if
221 * the algorithm doesn't support all of the key sizes. In case the
222 * key was stored in transformation context, the key might need to be
223 * re-programmed into the hardware in this function. This function
224 * shall not modify the transformation context, as this function may
225 * be called in parallel with the same transformation object.
226 * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
227 * and the conditions are exactly the same.
0d7f488f
SM
228 * @ivsize: IV size applicable for transformation. The consumer must provide an
229 * IV of exactly that size to perform the encrypt or decrypt operation.
230 *
c79b411e 231 * All fields except @ivsize are mandatory and must be filled.
1da177e4 232 */
b5b7f088
HX
233struct ablkcipher_alg {
234 int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
235 unsigned int keylen);
236 int (*encrypt)(struct ablkcipher_request *req);
237 int (*decrypt)(struct ablkcipher_request *req);
23508e11 238
b5b7f088
HX
239 unsigned int min_keysize;
240 unsigned int max_keysize;
241 unsigned int ivsize;
242};
243
0d7f488f
SM
244/**
245 * struct blkcipher_alg - synchronous block cipher definition
246 * @min_keysize: see struct ablkcipher_alg
247 * @max_keysize: see struct ablkcipher_alg
248 * @setkey: see struct ablkcipher_alg
249 * @encrypt: see struct ablkcipher_alg
250 * @decrypt: see struct ablkcipher_alg
0d7f488f
SM
251 * @ivsize: see struct ablkcipher_alg
252 *
c79b411e 253 * All fields except @ivsize are mandatory and must be filled.
0d7f488f 254 */
5cde0af2
HX
255struct blkcipher_alg {
256 int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
257 unsigned int keylen);
258 int (*encrypt)(struct blkcipher_desc *desc,
259 struct scatterlist *dst, struct scatterlist *src,
260 unsigned int nbytes);
261 int (*decrypt)(struct blkcipher_desc *desc,
262 struct scatterlist *dst, struct scatterlist *src,
263 unsigned int nbytes);
264
265 unsigned int min_keysize;
266 unsigned int max_keysize;
267 unsigned int ivsize;
268};
269
0d7f488f
SM
270/**
271 * struct cipher_alg - single-block symmetric ciphers definition
272 * @cia_min_keysize: Minimum key size supported by the transformation. This is
273 * the smallest key length supported by this transformation
274 * algorithm. This must be set to one of the pre-defined
275 * values as this is not hardware specific. Possible values
276 * for this field can be found via git grep "_MIN_KEY_SIZE"
277 * include/crypto/
278 * @cia_max_keysize: Maximum key size supported by the transformation. This is
279 * the largest key length supported by this transformation
280 * algorithm. This must be set to one of the pre-defined values
281 * as this is not hardware specific. Possible values for this
282 * field can be found via git grep "_MAX_KEY_SIZE"
283 * include/crypto/
284 * @cia_setkey: Set key for the transformation. This function is used to either
285 * program a supplied key into the hardware or store the key in the
286 * transformation context for programming it later. Note that this
287 * function does modify the transformation context. This function
288 * can be called multiple times during the existence of the
289 * transformation object, so one must make sure the key is properly
290 * reprogrammed into the hardware. This function is also
291 * responsible for checking the key length for validity.
292 * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
293 * single block of data, which must be @cra_blocksize big. This
294 * always operates on a full @cra_blocksize and it is not possible
295 * to encrypt a block of smaller size. The supplied buffers must
296 * therefore also be at least of @cra_blocksize size. Both the
297 * input and output buffers are always aligned to @cra_alignmask.
298 * In case either of the input or output buffer supplied by user
299 * of the crypto API is not aligned to @cra_alignmask, the crypto
300 * API will re-align the buffers. The re-alignment means that a
301 * new buffer will be allocated, the data will be copied into the
302 * new buffer, then the processing will happen on the new buffer,
303 * then the data will be copied back into the original buffer and
304 * finally the new buffer will be freed. In case a software
305 * fallback was put in place in the @cra_init call, this function
306 * might need to use the fallback if the algorithm doesn't support
307 * all of the key sizes. In case the key was stored in
308 * transformation context, the key might need to be re-programmed
309 * into the hardware in this function. This function shall not
310 * modify the transformation context, as this function may be
311 * called in parallel with the same transformation object.
312 * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
313 * @cia_encrypt, and the conditions are exactly the same.
314 *
315 * All fields are mandatory and must be filled.
316 */
1da177e4
LT
317struct cipher_alg {
318 unsigned int cia_min_keysize;
319 unsigned int cia_max_keysize;
6c2bb98b 320 int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
560c06ae 321 unsigned int keylen);
6c2bb98b
HX
322 void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
323 void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
1da177e4
LT
324};
325
1da177e4 326struct compress_alg {
6c2bb98b
HX
327 int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
328 unsigned int slen, u8 *dst, unsigned int *dlen);
329 int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
330 unsigned int slen, u8 *dst, unsigned int *dlen);
1da177e4
LT
331};
332
17c18f9e
CL
333#ifdef CONFIG_CRYPTO_STATS
334/*
335 * struct crypto_istat_aead - statistics for AEAD algorithm
336 * @encrypt_cnt: number of encrypt requests
337 * @encrypt_tlen: total data size handled by encrypt requests
338 * @decrypt_cnt: number of decrypt requests
339 * @decrypt_tlen: total data size handled by decrypt requests
44f13133 340 * @err_cnt: number of errors for AEAD requests
17c18f9e
CL
341 */
342struct crypto_istat_aead {
343 atomic64_t encrypt_cnt;
344 atomic64_t encrypt_tlen;
345 atomic64_t decrypt_cnt;
346 atomic64_t decrypt_tlen;
44f13133 347 atomic64_t err_cnt;
17c18f9e
CL
348};
349
350/*
351 * struct crypto_istat_akcipher - statistics for akcipher algorithm
352 * @encrypt_cnt: number of encrypt requests
353 * @encrypt_tlen: total data size handled by encrypt requests
354 * @decrypt_cnt: number of decrypt requests
355 * @decrypt_tlen: total data size handled by decrypt requests
 356 * @verify_cnt: number of verify operations
 357 * @sign_cnt: number of sign requests
44f13133 358 * @err_cnt: number of errors for akcipher requests
17c18f9e
CL
359 */
360struct crypto_istat_akcipher {
361 atomic64_t encrypt_cnt;
362 atomic64_t encrypt_tlen;
363 atomic64_t decrypt_cnt;
364 atomic64_t decrypt_tlen;
365 atomic64_t verify_cnt;
366 atomic64_t sign_cnt;
44f13133 367 atomic64_t err_cnt;
17c18f9e
CL
368};
369
370/*
371 * struct crypto_istat_cipher - statistics for cipher algorithm
372 * @encrypt_cnt: number of encrypt requests
373 * @encrypt_tlen: total data size handled by encrypt requests
374 * @decrypt_cnt: number of decrypt requests
375 * @decrypt_tlen: total data size handled by decrypt requests
44f13133 376 * @err_cnt: number of errors for cipher requests
17c18f9e
CL
377 */
378struct crypto_istat_cipher {
379 atomic64_t encrypt_cnt;
380 atomic64_t encrypt_tlen;
381 atomic64_t decrypt_cnt;
382 atomic64_t decrypt_tlen;
44f13133 383 atomic64_t err_cnt;
17c18f9e
CL
384};
385
386/*
387 * struct crypto_istat_compress - statistics for compress algorithm
388 * @compress_cnt: number of compress requests
389 * @compress_tlen: total data size handled by compress requests
390 * @decompress_cnt: number of decompress requests
391 * @decompress_tlen: total data size handled by decompress requests
44f13133 392 * @err_cnt: number of errors for compress requests
17c18f9e
CL
393 */
394struct crypto_istat_compress {
395 atomic64_t compress_cnt;
396 atomic64_t compress_tlen;
397 atomic64_t decompress_cnt;
398 atomic64_t decompress_tlen;
44f13133 399 atomic64_t err_cnt;
17c18f9e
CL
400};
401
402/*
 403 * struct crypto_istat_hash - statistics for hash algorithm
 404 * @hash_cnt: number of hash requests
 405 * @hash_tlen: total data size hashed
44f13133 406 * @err_cnt: number of errors for hash requests
17c18f9e
CL
407 */
408struct crypto_istat_hash {
409 atomic64_t hash_cnt;
410 atomic64_t hash_tlen;
44f13133 411 atomic64_t err_cnt;
17c18f9e
CL
412};
413
414/*
415 * struct crypto_istat_kpp - statistics for KPP algorithm
 416 * @setsecret_cnt: number of setsecret operations
 417 * @generate_public_key_cnt: number of generate_public_key operations
 418 * @compute_shared_secret_cnt: number of compute_shared_secret operations
44f13133 419 * @err_cnt: number of errors for KPP requests
17c18f9e
CL
420 */
421struct crypto_istat_kpp {
422 atomic64_t setsecret_cnt;
423 atomic64_t generate_public_key_cnt;
424 atomic64_t compute_shared_secret_cnt;
44f13133 425 atomic64_t err_cnt;
17c18f9e
CL
426};
427
428/*
429 * struct crypto_istat_rng: statistics for RNG algorithm
430 * @generate_cnt: number of RNG generate requests
431 * @generate_tlen: total data size of generated data by the RNG
432 * @seed_cnt: number of times the RNG was seeded
44f13133 433 * @err_cnt: number of errors for RNG requests
17c18f9e
CL
434 */
435struct crypto_istat_rng {
436 atomic64_t generate_cnt;
437 atomic64_t generate_tlen;
438 atomic64_t seed_cnt;
44f13133 439 atomic64_t err_cnt;
17c18f9e
CL
440};
441#endif /* CONFIG_CRYPTO_STATS */
17f0f4a4 442
b5b7f088 443#define cra_ablkcipher cra_u.ablkcipher
5cde0af2 444#define cra_blkcipher cra_u.blkcipher
1da177e4 445#define cra_cipher cra_u.cipher
1da177e4
LT
446#define cra_compress cra_u.compress
447
0d7f488f
SM
448/**
 449 * struct crypto_alg - definition of a cryptographic cipher algorithm
450 * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
451 * CRYPTO_ALG_* flags for the flags which go in here. Those are
452 * used for fine-tuning the description of the transformation
453 * algorithm.
454 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
455 * of the smallest possible unit which can be transformed with
456 * this algorithm. The users must respect this value.
457 * In case of HASH transformation, it is possible for a smaller
458 * block than @cra_blocksize to be passed to the crypto API for
 459 * transformation; for any other transformation type, an
460 * error will be returned upon any attempt to transform smaller
461 * than @cra_blocksize chunks.
462 * @cra_ctxsize: Size of the operational context of the transformation. This
463 * value informs the kernel crypto API about the memory size
464 * needed to be allocated for the transformation context.
465 * @cra_alignmask: Alignment mask for the input and output data buffer. The data
466 * buffer containing the input data for the algorithm must be
467 * aligned to this alignment mask. The data buffer for the
468 * output data must be aligned to this alignment mask. Note that
469 * the Crypto API will do the re-alignment in software, but
470 * only under special conditions and there is a performance hit.
471 * The re-alignment happens at these occasions for different
472 * @cra_u types: cipher -- For both input data and output data
473 * buffer; ahash -- For output hash destination buf; shash --
474 * For output hash destination buf.
475 * This is needed on hardware which is flawed by design and
476 * cannot pick data from arbitrary addresses.
477 * @cra_priority: Priority of this transformation implementation. In case
478 * multiple transformations with same @cra_name are available to
479 * the Crypto API, the kernel will use the one with highest
480 * @cra_priority.
481 * @cra_name: Generic name (usable by multiple implementations) of the
482 * transformation algorithm. This is the name of the transformation
483 * itself. This field is used by the kernel when looking up the
484 * providers of particular transformation.
485 * @cra_driver_name: Unique name of the transformation provider. This is the
486 * name of the provider of the transformation. This can be any
487 * arbitrary value, but in the usual case, this contains the
488 * name of the chip or provider and the name of the
489 * transformation algorithm.
490 * @cra_type: Type of the cryptographic transformation. This is a pointer to
491 * struct crypto_type, which implements callbacks common for all
12f7c14a 492 * transformation types. There are multiple options:
0d7f488f 493 * &crypto_blkcipher_type, &crypto_ablkcipher_type,
b0d955ba 494 * &crypto_ahash_type, &crypto_rng_type.
0d7f488f
SM
495 * This field might be empty. In that case, there are no common
496 * callbacks. This is the case for: cipher, compress, shash.
497 * @cra_u: Callbacks implementing the transformation. This is a union of
498 * multiple structures. Depending on the type of transformation selected
499 * by @cra_type and @cra_flags above, the associated structure must be
500 * filled with callbacks. This field might be empty. This is the case
501 * for ahash, shash.
502 * @cra_init: Initialize the cryptographic transformation object. This function
503 * is used to initialize the cryptographic transformation object.
504 * This function is called only once at the instantiation time, right
505 * after the transformation context was allocated. In case the
506 * cryptographic hardware has some special requirements which need to
507 * be handled by software, this function shall check for the precise
508 * requirement of the transformation and put any software fallbacks
509 * in place.
510 * @cra_exit: Deinitialize the cryptographic transformation object. This is a
511 * counterpart to @cra_init, used to remove various changes set in
512 * @cra_init.
0063ec44
GH
513 * @cra_u.ablkcipher: Union member which contains an asynchronous block cipher
514 * definition. See @struct @ablkcipher_alg.
515 * @cra_u.blkcipher: Union member which contains a synchronous block cipher
516 * definition See @struct @blkcipher_alg.
517 * @cra_u.cipher: Union member which contains a single-block symmetric cipher
518 * definition. See @struct @cipher_alg.
519 * @cra_u.compress: Union member which contains a (de)compression algorithm.
520 * See @struct @compress_alg.
0d7f488f
SM
521 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
522 * @cra_list: internally used
523 * @cra_users: internally used
524 * @cra_refcnt: internally used
525 * @cra_destroy: internally used
526 *
17c18f9e 527 * @stats: union of all possible crypto_istat_xxx structures
bfad6cb3
CL
528 * @stats.aead: statistics for AEAD algorithm
529 * @stats.akcipher: statistics for akcipher algorithm
530 * @stats.cipher: statistics for cipher algorithm
531 * @stats.compress: statistics for compress algorithm
532 * @stats.hash: statistics for hash algorithm
533 * @stats.rng: statistics for rng algorithm
534 * @stats.kpp: statistics for KPP algorithm
cac5818c 535 *
0d7f488f
SM
536 * The struct crypto_alg describes a generic Crypto API algorithm and is common
537 * for all of the transformations. Any variable not documented here shall not
538 * be used by a cipher implementation as it is internal to the Crypto API.
539 */
1da177e4
LT
540struct crypto_alg {
541 struct list_head cra_list;
6bfd4809
HX
542 struct list_head cra_users;
543
1da177e4
LT
544 u32 cra_flags;
545 unsigned int cra_blocksize;
546 unsigned int cra_ctxsize;
95477377 547 unsigned int cra_alignmask;
5cb1454b
HX
548
549 int cra_priority;
ce8614a3 550 refcount_t cra_refcnt;
5cb1454b 551
d913ea0d
HX
552 char cra_name[CRYPTO_MAX_ALG_NAME];
553 char cra_driver_name[CRYPTO_MAX_ALG_NAME];
1da177e4 554
e853c3cf
HX
555 const struct crypto_type *cra_type;
556
1da177e4 557 union {
b5b7f088 558 struct ablkcipher_alg ablkcipher;
5cde0af2 559 struct blkcipher_alg blkcipher;
1da177e4 560 struct cipher_alg cipher;
1da177e4
LT
561 struct compress_alg compress;
562 } cra_u;
c7fc0599
HX
563
564 int (*cra_init)(struct crypto_tfm *tfm);
565 void (*cra_exit)(struct crypto_tfm *tfm);
6521f302 566 void (*cra_destroy)(struct crypto_alg *alg);
1da177e4
LT
567
568 struct module *cra_module;
cac5818c 569
2ced2607 570#ifdef CONFIG_CRYPTO_STATS
cac5818c 571 union {
17c18f9e
CL
572 struct crypto_istat_aead aead;
573 struct crypto_istat_akcipher akcipher;
574 struct crypto_istat_cipher cipher;
575 struct crypto_istat_compress compress;
576 struct crypto_istat_hash hash;
577 struct crypto_istat_rng rng;
578 struct crypto_istat_kpp kpp;
579 } stats;
2ced2607 580#endif /* CONFIG_CRYPTO_STATS */
cac5818c 581
edf18b91 582} CRYPTO_MINALIGN_ATTR;
1da177e4 583
f7d76e05 584#ifdef CONFIG_CRYPTO_STATS
1f6669b9 585void crypto_stats_init(struct crypto_alg *alg);
f7d76e05
CL
586void crypto_stats_get(struct crypto_alg *alg);
587void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg);
588void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg);
589void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
590void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
591void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg);
592void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg);
593void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
594void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
595void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg);
596void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg);
597void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg);
598void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg);
599void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret);
600void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret);
601void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret);
602void crypto_stats_rng_seed(struct crypto_alg *alg, int ret);
603void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret);
604void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
605void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
606#else
1f6669b9
CL
607static inline void crypto_stats_init(struct crypto_alg *alg)
608{}
f7d76e05
CL
609static inline void crypto_stats_get(struct crypto_alg *alg)
610{}
611static inline void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg)
612{}
613static inline void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg)
614{}
615static inline void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
616{}
617static inline void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
618{}
619static inline void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg)
620{}
621static inline void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg)
622{}
623static inline void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
624{}
625static inline void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
626{}
627static inline void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg)
628{}
629static inline void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg)
630{}
631static inline void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg)
632{}
633static inline void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg)
634{}
635static inline void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret)
636{}
637static inline void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret)
638{}
639static inline void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret)
640{}
641static inline void crypto_stats_rng_seed(struct crypto_alg *alg, int ret)
642{}
643static inline void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret)
644{}
645static inline void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
646{}
647static inline void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
648{}
649#endif
ada69a16
GBY
650/*
651 * A helper struct for waiting for completion of async crypto ops
652 */
653struct crypto_wait {
654 struct completion completion;
655 int err;
656};
657
658/*
659 * Macro for declaring a crypto op async wait object on stack
660 */
661#define DECLARE_CRYPTO_WAIT(_wait) \
662 struct crypto_wait _wait = { \
663 COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }
664
665/*
 666 * Async ops completion helper functions
667 */
668void crypto_req_done(struct crypto_async_request *req, int err);
669
670static inline int crypto_wait_req(int err, struct crypto_wait *wait)
671{
672 switch (err) {
673 case -EINPROGRESS:
674 case -EBUSY:
675 wait_for_completion(&wait->completion);
676 reinit_completion(&wait->completion);
677 err = wait->err;
678 break;
 679 }
680
681 return err;
682}
683
684static inline void crypto_init_wait(struct crypto_wait *wait)
685{
686 init_completion(&wait->completion);
687}
688
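/*
 * Usage sketch (illustrative, not part of the original header): the wait
 * helpers turn any asynchronous crypto request into a synchronous call.
 * The request callback is set to crypto_req_done() with the wait object as
 * its data, and the submission result is passed through crypto_wait_req(),
 * which sleeps on -EINPROGRESS/-EBUSY and returns the final status:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *					     CRYPTO_TFM_REQ_MAY_SLEEP,
 *					crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_ablkcipher_encrypt(req), &wait);
 */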
1da177e4
LT
689/*
690 * Algorithm registration interface.
691 */
692int crypto_register_alg(struct crypto_alg *alg);
693int crypto_unregister_alg(struct crypto_alg *alg);
4b004346
MB
694int crypto_register_algs(struct crypto_alg *algs, int count);
695int crypto_unregister_algs(struct crypto_alg *algs, int count);
1da177e4
LT
696
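/*
 * Registration sketch (illustrative, not part of the original header): a
 * minimal single-block cipher registers a struct crypto_alg from its module
 * init/exit hooks. All names, sizes and callbacks below are hypothetical.
 *
 *	static struct crypto_alg example_alg = {
 *		.cra_name		= "foo",
 *		.cra_driver_name	= "foo-generic",
 *		.cra_priority		= 100,
 *		.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 *		.cra_blocksize		= 16,
 *		.cra_ctxsize		= sizeof(struct example_cipher_ctx),
 *		.cra_module		= THIS_MODULE,
 *		.cra_u.cipher		= {
 *			.cia_min_keysize	= 16,
 *			.cia_max_keysize	= 32,
 *			.cia_setkey		= example_setkey,
 *			.cia_encrypt		= example_encrypt,
 *			.cia_decrypt		= example_decrypt,
 *		},
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return crypto_register_alg(&example_alg);
 *	}
 *	module_init(example_init);
 *
 *	static void __exit example_exit(void)
 *	{
 *		crypto_unregister_alg(&example_alg);
 *	}
 *	module_exit(example_exit);
 */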
697/*
698 * Algorithm query interface.
699 */
fce32d70 700int crypto_has_alg(const char *name, u32 type, u32 mask);
1da177e4
LT
701
702/*
703 * Transforms: user-instantiated objects which encapsulate algorithms
6d7d684d
HX
704 * and core processing logic. Managed via crypto_alloc_*() and
705 * crypto_free_*(), as well as the various helpers below.
1da177e4 706 */
1da177e4 707
32e3983f
HX
708struct ablkcipher_tfm {
709 int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
710 unsigned int keylen);
711 int (*encrypt)(struct ablkcipher_request *req);
712 int (*decrypt)(struct ablkcipher_request *req);
61da88e2 713
ecfc4329
HX
714 struct crypto_ablkcipher *base;
715
32e3983f
HX
716 unsigned int ivsize;
717 unsigned int reqsize;
718};
719
5cde0af2
HX
720struct blkcipher_tfm {
721 void *iv;
722 int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
723 unsigned int keylen);
724 int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
725 struct scatterlist *src, unsigned int nbytes);
726 int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
727 struct scatterlist *src, unsigned int nbytes);
728};
729
1da177e4 730struct cipher_tfm {
1da177e4
LT
731 int (*cit_setkey)(struct crypto_tfm *tfm,
732 const u8 *key, unsigned int keylen);
f28776a3
HX
733 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
734 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
1da177e4
LT
735};
736
1da177e4
LT
737struct compress_tfm {
738 int (*cot_compress)(struct crypto_tfm *tfm,
739 const u8 *src, unsigned int slen,
740 u8 *dst, unsigned int *dlen);
741 int (*cot_decompress)(struct crypto_tfm *tfm,
742 const u8 *src, unsigned int slen,
743 u8 *dst, unsigned int *dlen);
744};
745
32e3983f 746#define crt_ablkcipher crt_u.ablkcipher
5cde0af2 747#define crt_blkcipher crt_u.blkcipher
1da177e4 748#define crt_cipher crt_u.cipher
1da177e4
LT
749#define crt_compress crt_u.compress
750
751struct crypto_tfm {
752
753 u32 crt_flags;
754
755 union {
32e3983f 756 struct ablkcipher_tfm ablkcipher;
5cde0af2 757 struct blkcipher_tfm blkcipher;
1da177e4 758 struct cipher_tfm cipher;
1da177e4
LT
759 struct compress_tfm compress;
760 } crt_u;
4a779486
HX
761
762 void (*exit)(struct crypto_tfm *tfm);
1da177e4
LT
763
764 struct crypto_alg *__crt_alg;
f10b7897 765
79911102 766 void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
1da177e4
LT
767};
768
32e3983f
HX
769struct crypto_ablkcipher {
770 struct crypto_tfm base;
771};
772
5cde0af2
HX
773struct crypto_blkcipher {
774 struct crypto_tfm base;
775};
776
78a1fe4f
HX
777struct crypto_cipher {
778 struct crypto_tfm base;
779};
780
781struct crypto_comp {
782 struct crypto_tfm base;
783};
784
2b8c19db
HX
785enum {
786 CRYPTOA_UNSPEC,
787 CRYPTOA_ALG,
ebc610e5 788 CRYPTOA_TYPE,
39e1ee01 789 CRYPTOA_U32,
ebc610e5 790 __CRYPTOA_MAX,
2b8c19db
HX
791};
792
ebc610e5
HX
793#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)
794
39e1ee01
HX
795/* Maximum number of (rtattr) parameters for each template. */
796#define CRYPTO_MAX_ATTRS 32
797
2b8c19db
HX
798struct crypto_attr_alg {
799 char name[CRYPTO_MAX_ALG_NAME];
800};
801
ebc610e5
HX
802struct crypto_attr_type {
803 u32 type;
804 u32 mask;
805};
806
39e1ee01
HX
807struct crypto_attr_u32 {
808 u32 num;
809};
810
1da177e4
LT
811/*
812 * Transform user interface.
813 */
814
6d7d684d 815struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
7b2cd92a
HX
816void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);
817
818static inline void crypto_free_tfm(struct crypto_tfm *tfm)
819{
820 return crypto_destroy_tfm(tfm, tfm);
821}
1da177e4 822
da7f033d
HX
823int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
824
1da177e4
LT
825/*
826 * Transform helpers which query the underlying algorithm.
827 */
828static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
829{
830 return tfm->__crt_alg->cra_name;
831}
832
b14cdd67
ML
833static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
834{
835 return tfm->__crt_alg->cra_driver_name;
836}
837
838static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
839{
840 return tfm->__crt_alg->cra_priority;
841}
842
1da177e4
LT
843static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
844{
845 return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
846}
847
1da177e4
LT
848static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
849{
850 return tfm->__crt_alg->cra_blocksize;
851}
852
fbdae9f3
HX
853static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
854{
855 return tfm->__crt_alg->cra_alignmask;
856}
857
f28776a3
HX
858static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
859{
860 return tfm->crt_flags;
861}
862
863static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
864{
865 tfm->crt_flags |= flags;
866}
867
868static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
869{
870 tfm->crt_flags &= ~flags;
871}
872
40725181
HX
873static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
874{
f10b7897
HX
875 return tfm->__crt_ctx;
876}
877
878static inline unsigned int crypto_tfm_ctx_alignment(void)
879{
880 struct crypto_tfm *tfm;
881 return __alignof__(tfm->__crt_ctx);
40725181
HX
882}
883
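/*
 * Usage sketch (illustrative, not part of the original header): an algorithm
 * implementation retrieves its private context from the tfm, typically in its
 * init or setkey callbacks. The context type is hypothetical:
 *
 *	static int example_cra_init(struct crypto_tfm *tfm)
 *	{
 *		struct example_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
 *
 *		memset(ctx, 0, sizeof(*ctx));
 *		return 0;
 *	}
 */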
1da177e4
LT
884/*
885 * API wrappers.
886 */
32e3983f
HX
887static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
888 struct crypto_tfm *tfm)
889{
890 return (struct crypto_ablkcipher *)tfm;
891}
892
378f4f51 893static inline u32 crypto_skcipher_type(u32 type)
32e3983f 894{
c79b411e 895 type &= ~CRYPTO_ALG_TYPE_MASK;
32e3983f 896 type |= CRYPTO_ALG_TYPE_BLKCIPHER;
378f4f51
HX
897 return type;
898}
899
900static inline u32 crypto_skcipher_mask(u32 mask)
901{
c79b411e 902 mask &= ~CRYPTO_ALG_TYPE_MASK;
332f8840 903 mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
378f4f51
HX
904 return mask;
905}
32e3983f 906
f13ec330
SM
907/**
908 * DOC: Asynchronous Block Cipher API
909 *
910 * Asynchronous block cipher API is used with the ciphers of type
911 * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
912 *
913 * Asynchronous cipher operations imply that the function invocation for a
914 * cipher request returns immediately before the completion of the operation.
915 * The cipher request is scheduled as a separate kernel thread and therefore
916 * load-balanced on the different CPUs via the process scheduler. To allow
917 * the kernel crypto API to inform the caller about the completion of a cipher
918 * request, the caller must provide a callback function. That function is
919 * invoked with the cipher handle when the request completes.
920 *
 921 * To support the asynchronous operation, more information than just the
922 * cipher handle must be supplied to the kernel crypto API. That additional
923 * information is given by filling in the ablkcipher_request data structure.
924 *
925 * For the asynchronous block cipher API, the state is maintained with the tfm
926 * cipher handle. A single tfm can be used across multiple calls and in
927 * parallel. For asynchronous block cipher calls, context data supplied and
 928 * only used by the caller can be referenced in the request data structure in
929 * addition to the IV used for the cipher request. The maintenance of such
930 * state information would be important for a crypto driver implementer to
931 * have, because when calling the callback function upon completion of the
932 * cipher operation, that callback function may need some information about
 933 * which operation just finished if it invoked multiple operations in parallel. This
934 * state information is unused by the kernel crypto API.
935 */
936
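/*
 * Flow sketch (illustrative, not part of the original header): a typical
 * asynchronous encryption using the request API documented below, with the
 * completion routed through the crypto_wait helpers defined earlier in this
 * file. Error handling is abbreviated and the tfm, key, scatterlists and IV
 * are assumed to be set up by the caller:
 *
 *	struct ablkcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	if (!req)
 *		return -ENOMEM;
 *
 *	crypto_ablkcipher_setkey(tfm, key, keylen);
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *					crypto_req_done, &wait);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	err = crypto_wait_req(crypto_ablkcipher_encrypt(req), &wait);
 *	ablkcipher_request_free(req);
 */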
32e3983f
HX
937static inline struct crypto_tfm *crypto_ablkcipher_tfm(
938 struct crypto_ablkcipher *tfm)
939{
940 return &tfm->base;
941}
942
f13ec330
SM
943/**
944 * crypto_free_ablkcipher() - zeroize and free cipher handle
945 * @tfm: cipher handle to be freed
946 */
32e3983f
HX
947static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
948{
949 crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
950}
951
f13ec330
SM
952/**
953 * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
954 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
955 * ablkcipher
956 * @type: specifies the type of the cipher
957 * @mask: specifies the mask for the cipher
958 *
959 * Return: true when the ablkcipher is known to the kernel crypto API; false
960 * otherwise
961 */
32e3983f
HX
962static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
963 u32 mask)
964{
378f4f51
HX
965 return crypto_has_alg(alg_name, crypto_skcipher_type(type),
966 crypto_skcipher_mask(mask));
32e3983f
HX
967}
968
969static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
970 struct crypto_ablkcipher *tfm)
971{
972 return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
973}
974
f13ec330
SM
975/**
976 * crypto_ablkcipher_ivsize() - obtain IV size
977 * @tfm: cipher handle
978 *
979 * The size of the IV for the ablkcipher referenced by the cipher handle is
980 * returned. This IV size may be zero if the cipher does not need an IV.
981 *
982 * Return: IV size in bytes
983 */
32e3983f
HX
984static inline unsigned int crypto_ablkcipher_ivsize(
985 struct crypto_ablkcipher *tfm)
986{
987 return crypto_ablkcipher_crt(tfm)->ivsize;
988}
989
f13ec330
SM
990/**
991 * crypto_ablkcipher_blocksize() - obtain block size of cipher
992 * @tfm: cipher handle
993 *
994 * The block size for the ablkcipher referenced with the cipher handle is
995 * returned. The caller may use that information to allocate appropriate
996 * memory for the data returned by the encryption or decryption operation
997 *
998 * Return: block size of cipher
999 */
32e3983f
HX
1000static inline unsigned int crypto_ablkcipher_blocksize(
1001 struct crypto_ablkcipher *tfm)
1002{
1003 return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm));
1004}
1005
1006static inline unsigned int crypto_ablkcipher_alignmask(
1007 struct crypto_ablkcipher *tfm)
1008{
1009 return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm));
1010}
1011
1012static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm)
1013{
1014 return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm));
1015}
1016
1017static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm,
1018 u32 flags)
1019{
1020 crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags);
1021}
1022
1023static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
1024 u32 flags)
1025{
1026 crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
1027}
1028
f13ec330
SM
1029/**
1030 * crypto_ablkcipher_setkey() - set key for cipher
1031 * @tfm: cipher handle
1032 * @key: buffer holding the key
1033 * @keylen: length of the key in bytes
1034 *
1035 * The caller provided key is set for the ablkcipher referenced by the cipher
1036 * handle.
1037 *
1038 * Note, the key length determines the cipher type. Many block ciphers implement
1039 * different cipher modes depending on the key size, such as AES-128 vs AES-192
1040 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
1041 * is performed.
1042 *
1043 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
1044 */
32e3983f
HX
1045static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
1046 const u8 *key, unsigned int keylen)
1047{
ecfc4329
HX
1048 struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm);
1049
1050 return crt->setkey(crt->base, key, keylen);
32e3983f
HX
1051}
1052
f13ec330
SM
1053/**
1054 * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
1055 * @req: ablkcipher_request out of which the cipher handle is to be obtained
1056 *
1057 * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request
1058 * data structure.
1059 *
1060 * Return: crypto_ablkcipher handle
1061 */
32e3983f
HX
1062static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
1063 struct ablkcipher_request *req)
1064{
1065 return __crypto_ablkcipher_cast(req->base.tfm);
1066}
1067
f13ec330
SM
1068/**
1069 * crypto_ablkcipher_encrypt() - encrypt plaintext
1070 * @req: reference to the ablkcipher_request handle that holds all information
1071 * needed to perform the cipher operation
1072 *
1073 * Encrypt plaintext data using the ablkcipher_request handle. That data
1074 * structure and how it is filled with data is discussed with the
1075 * ablkcipher_request_* functions.
1076 *
1077 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1078 */
32e3983f
HX
1079static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
1080{
1081 struct ablkcipher_tfm *crt =
1082 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
f7d76e05
CL
1083 struct crypto_alg *alg = crt->base->base.__crt_alg;
1084 unsigned int nbytes = req->nbytes;
cac5818c
CL
1085 int ret;
1086
f7d76e05 1087 crypto_stats_get(alg);
cac5818c 1088 ret = crt->encrypt(req);
f7d76e05 1089 crypto_stats_ablkcipher_encrypt(nbytes, ret, alg);
cac5818c 1090 return ret;
32e3983f
HX
1091}
1092
f13ec330
SM
1093/**
1094 * crypto_ablkcipher_decrypt() - decrypt ciphertext
1095 * @req: reference to the ablkcipher_request handle that holds all information
1096 * needed to perform the cipher operation
1097 *
1098 * Decrypt ciphertext data using the ablkcipher_request handle. That data
1099 * structure and how it is filled with data is discussed with the
1100 * ablkcipher_request_* functions.
1101 *
1102 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1103 */
32e3983f
HX
1104static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
1105{
1106 struct ablkcipher_tfm *crt =
1107 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
f7d76e05
CL
1108 struct crypto_alg *alg = crt->base->base.__crt_alg;
1109 unsigned int nbytes = req->nbytes;
cac5818c
CL
1110 int ret;
1111
f7d76e05 1112 crypto_stats_get(alg);
cac5818c 1113 ret = crt->decrypt(req);
f7d76e05 1114 crypto_stats_ablkcipher_decrypt(nbytes, ret, alg);
cac5818c 1115 return ret;
32e3983f
HX
1116}
1117
f13ec330
SM
1118/**
1119 * DOC: Asynchronous Cipher Request Handle
1120 *
1121 * The ablkcipher_request data structure contains all pointers to data
1122 * required for the asynchronous cipher operation. This includes the cipher
1123 * handle (which can be used by multiple ablkcipher_request instances), pointer
1124 * to plaintext and ciphertext, asynchronous callback function, etc. It acts
1125 * as a handle to the ablkcipher_request_* API calls in a similar way as
1126 * ablkcipher handle to the crypto_ablkcipher_* API calls.
1127 */
1128
1129/**
1130 * crypto_ablkcipher_reqsize() - obtain size of the request data structure
1131 * @tfm: cipher handle
1132 *
1133 * Return: number of bytes
1134 */
b16c3a2e
HX
1135static inline unsigned int crypto_ablkcipher_reqsize(
1136 struct crypto_ablkcipher *tfm)
32e3983f
HX
1137{
1138 return crypto_ablkcipher_crt(tfm)->reqsize;
1139}
1140
f13ec330
SM
1141/**
1142 * ablkcipher_request_set_tfm() - update cipher handle reference in request
1143 * @req: request handle to be modified
1144 * @tfm: cipher handle that shall be added to the request handle
1145 *
1146 * Allow the caller to replace the existing ablkcipher handle in the request
1147 * data structure with a different one.
1148 */
e196d625
HX
1149static inline void ablkcipher_request_set_tfm(
1150 struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
1151{
ecfc4329 1152 req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base);
e196d625
HX
1153}
1154
b5b7f088
HX
1155static inline struct ablkcipher_request *ablkcipher_request_cast(
1156 struct crypto_async_request *req)
1157{
1158 return container_of(req, struct ablkcipher_request, base);
1159}
1160
f13ec330
SM
1161/**
1162 * ablkcipher_request_alloc() - allocate request data structure
1163 * @tfm: cipher handle to be registered with the request
1164 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
1165 *
1166 * Allocate the request data structure that must be used with the ablkcipher
1167 * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
1168 * handle is registered in the request data structure.
1169 *
6eae29e7 1170 * Return: allocated request handle in case of success, or NULL if out of memory
f13ec330 1171 */
32e3983f
HX
1172static inline struct ablkcipher_request *ablkcipher_request_alloc(
1173 struct crypto_ablkcipher *tfm, gfp_t gfp)
1174{
1175 struct ablkcipher_request *req;
1176
1177 req = kmalloc(sizeof(struct ablkcipher_request) +
1178 crypto_ablkcipher_reqsize(tfm), gfp);
1179
1180 if (likely(req))
e196d625 1181 ablkcipher_request_set_tfm(req, tfm);
32e3983f
HX
1182
1183 return req;
1184}
1185
f13ec330
SM
1186/**
1187 * ablkcipher_request_free() - zeroize and free request data structure
1188 * @req: request data structure cipher handle to be freed
1189 */
32e3983f
HX
1190static inline void ablkcipher_request_free(struct ablkcipher_request *req)
1191{
aef73cfc 1192 kzfree(req);
32e3983f
HX
1193}
1194
f13ec330
SM
1195/**
1196 * ablkcipher_request_set_callback() - set asynchronous callback function
1197 * @req: request handle
1198 * @flags: specify zero or an ORing of the flags
0184cfe7 1199 * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
f13ec330
SM
1200 * increase the wait queue beyond the initial maximum size;
1201 * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
1202 * @compl: callback function pointer to be registered with the request handle
1203 * @data: The data pointer refers to memory that is not used by the kernel
1204 * crypto API, but provided to the callback function for it to use. Here,
1205 * the caller can provide a reference to memory the callback function can
1206 * operate on. As the callback function is invoked asynchronously to the
1207 * related functionality, it may need to access data structures of the
1208 * related functionality which can be referenced using this pointer. The
1209 * callback function can access the memory via the "data" field in the
1210 * crypto_async_request data structure provided to the callback function.
1211 *
1212 * This function allows setting the callback function that is triggered once the
1213 * cipher operation completes.
1214 *
1215 * The callback function is registered with the ablkcipher_request handle and
0184cfe7 1216 * must comply with the following template::
f13ec330
SM
1217 *
1218 * void callback_function(struct crypto_async_request *req, int error)
1219 */
32e3983f
HX
1220static inline void ablkcipher_request_set_callback(
1221 struct ablkcipher_request *req,
3e3dc25f 1222 u32 flags, crypto_completion_t compl, void *data)
32e3983f 1223{
3e3dc25f 1224 req->base.complete = compl;
32e3983f
HX
1225 req->base.data = data;
1226 req->base.flags = flags;
1227}
1228
f13ec330
SM
1229/**
1230 * ablkcipher_request_set_crypt() - set data buffers
1231 * @req: request handle
1232 * @src: source scatter / gather list
1233 * @dst: destination scatter / gather list
1234 * @nbytes: number of bytes to process from @src
1235 * @iv: IV for the cipher operation which must comply with the IV size defined
1236 * by crypto_ablkcipher_ivsize
1237 *
1238 * This function allows setting of the source data and destination data
1239 * scatter / gather lists.
1240 *
1241 * For encryption, the source is treated as the plaintext and the
1242 * destination is the ciphertext. For a decryption operation, the use is
379dcfb4 1243 * reversed - the source is the ciphertext and the destination is the plaintext.
f13ec330 1244 */
32e3983f
HX
1245static inline void ablkcipher_request_set_crypt(
1246 struct ablkcipher_request *req,
1247 struct scatterlist *src, struct scatterlist *dst,
1248 unsigned int nbytes, void *iv)
1249{
1250 req->src = src;
1251 req->dst = dst;
1252 req->nbytes = nbytes;
1253 req->info = iv;
1254}
1255
58284f0d
SM
1256/**
1257 * DOC: Synchronous Block Cipher API
1258 *
1259 * The synchronous block cipher API is used with the ciphers of type
1260 * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto)
1261 *
 1262 * Synchronous calls have a context in the tfm. But since a single tfm can be
1263 * used in multiple calls and in parallel, this info should not be changeable
1264 * (unless a lock is used). This applies, for example, to the symmetric key.
1265 * However, the IV is changeable, so there is an iv field in blkcipher_tfm
 1266 * structure for the synchronous blkcipher API. So, it is the only state info that can
1267 * be kept for synchronous calls without using a big lock across a tfm.
1268 *
1269 * The block cipher API allows the use of a complete cipher, i.e. a cipher
1270 * consisting of a template (a block chaining mode) and a single block cipher
1271 * primitive (e.g. AES).
1272 *
1273 * The plaintext data buffer and the ciphertext data buffer are pointed to
1274 * by using scatter/gather lists. The cipher operation is performed
1275 * on all segments of the provided scatter/gather lists.
1276 *
1277 * The kernel crypto API supports a cipher operation "in-place" which means that
1278 * the caller may provide the same scatter/gather list for the plaintext and
1279 * cipher text. After the completion of the cipher operation, the plaintext
1280 * data is replaced with the ciphertext data in case of an encryption and vice
1281 * versa for a decryption. The caller must ensure that the scatter/gather lists
1282 * for the output data point to sufficiently large buffers, i.e. multiples of
1283 * the block size of the cipher.
1284 */
1285
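/*
 * Flow sketch (illustrative, not part of the original header): a synchronous
 * block cipher call operates through a blkcipher_desc that may live on the
 * stack. Error handling is abbreviated and key, IV and scatterlists are
 * assumed to be prepared by the caller:
 *
 *	struct crypto_blkcipher *tfm;
 *	struct blkcipher_desc desc;
 *
 *	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	crypto_blkcipher_setkey(tfm, key, keylen);
 *	crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));
 *	desc.tfm = tfm;
 *	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 *	err = crypto_blkcipher_encrypt(&desc, dst_sg, src_sg, nbytes);
 *	crypto_free_blkcipher(tfm);
 */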
5cde0af2
HX
1286static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
1287 struct crypto_tfm *tfm)
1288{
1289 return (struct crypto_blkcipher *)tfm;
1290}
1291
1292static inline struct crypto_blkcipher *crypto_blkcipher_cast(
1293 struct crypto_tfm *tfm)
1294{
1295 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER);
1296 return __crypto_blkcipher_cast(tfm);
1297}
1298
58284f0d
SM
1299/**
1300 * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
1301 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1302 * blkcipher cipher
1303 * @type: specifies the type of the cipher
1304 * @mask: specifies the mask for the cipher
1305 *
1306 * Allocate a cipher handle for a block cipher. The returned struct
1307 * crypto_blkcipher is the cipher handle that is required for any subsequent
1308 * API invocation for that block cipher.
1309 *
1310 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1311 * of an error, PTR_ERR() returns the error code.
1312 */
5cde0af2
HX
1313static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
1314 const char *alg_name, u32 type, u32 mask)
1315{
332f8840 1316 type &= ~CRYPTO_ALG_TYPE_MASK;
5cde0af2 1317 type |= CRYPTO_ALG_TYPE_BLKCIPHER;
332f8840 1318 mask |= CRYPTO_ALG_TYPE_MASK;
5cde0af2
HX
1319
1320 return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
1321}
1322
1323static inline struct crypto_tfm *crypto_blkcipher_tfm(
1324 struct crypto_blkcipher *tfm)
1325{
1326 return &tfm->base;
1327}
1328
58284f0d
SM
1329/**
1330 * crypto_free_blkcipher() - zeroize and free the block cipher handle
1331 * @tfm: cipher handle to be freed
1332 */
5cde0af2
HX
1333static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
1334{
1335 crypto_free_tfm(crypto_blkcipher_tfm(tfm));
1336}
1337
58284f0d
SM
1338/**
1339 * crypto_has_blkcipher() - Search for the availability of a block cipher
1340 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1341 * block cipher
1342 * @type: specifies the type of the cipher
1343 * @mask: specifies the mask for the cipher
1344 *
1345 * Return: true when the block cipher is known to the kernel crypto API; false
1346 * otherwise
1347 */
fce32d70
HX
1348static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
1349{
332f8840 1350 type &= ~CRYPTO_ALG_TYPE_MASK;
fce32d70 1351 type |= CRYPTO_ALG_TYPE_BLKCIPHER;
332f8840 1352 mask |= CRYPTO_ALG_TYPE_MASK;
fce32d70
HX
1353
1354 return crypto_has_alg(alg_name, type, mask);
1355}
1356
58284f0d
SM
1357/**
1358 * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
1359 * @tfm: cipher handle
1360 *
1361 * Return: The character string holding the name of the cipher
1362 */
5cde0af2
HX
1363static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
1364{
1365 return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
1366}
1367
1368static inline struct blkcipher_tfm *crypto_blkcipher_crt(
1369 struct crypto_blkcipher *tfm)
1370{
1371 return &crypto_blkcipher_tfm(tfm)->crt_blkcipher;
1372}
1373
1374static inline struct blkcipher_alg *crypto_blkcipher_alg(
1375 struct crypto_blkcipher *tfm)
1376{
1377 return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
1378}
1379
58284f0d
SM
1380/**
1381 * crypto_blkcipher_ivsize() - obtain IV size
1382 * @tfm: cipher handle
1383 *
1384 * The size of the IV for the block cipher referenced by the cipher handle is
1385 * returned. This IV size may be zero if the cipher does not need an IV.
1386 *
1387 * Return: IV size in bytes
1388 */
5cde0af2
HX
1389static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
1390{
1391 return crypto_blkcipher_alg(tfm)->ivsize;
1392}
1393
58284f0d
SM
1394/**
1395 * crypto_blkcipher_blocksize() - obtain block size of cipher
1396 * @tfm: cipher handle
1397 *
1398 * The block size for the block cipher referenced with the cipher handle is
1399 * returned. The caller may use that information to allocate appropriate
1400 * memory for the data returned by the encryption or decryption operation.
1401 *
1402 * Return: block size of cipher
1403 */
5cde0af2
HX
1404static inline unsigned int crypto_blkcipher_blocksize(
1405 struct crypto_blkcipher *tfm)
1406{
1407 return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
1408}
1409
static inline unsigned int crypto_blkcipher_alignmask(
	struct crypto_blkcipher *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
}

static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
}

static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
					      u32 flags)
{
	crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
}

static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
						u32 flags)
{
	crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
}

/**
 * crypto_blkcipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the block cipher referenced by the cipher
 * handle.
 *
 * Note, the key length determines the cipher variant. Many block ciphers
 * implement different variants depending on the key size, such as AES-128 vs.
 * AES-192 vs. AES-256. When providing a 16 byte key for an AES cipher handle,
 * AES-128 is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
					  const u8 *key, unsigned int keylen)
{
	return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
						 key, keylen);
}

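/*
 * Example (sketch): for an AES transform the key length selects the
 * variant, so the 16-byte key below yields AES-128 (24 or 32 bytes would
 * yield AES-192 or AES-256). The key buffer is assumed to be filled by the
 * caller.
 *
 *	u8 key[16];
 *	int err;
 *
 *	err = crypto_blkcipher_setkey(tfm, key, sizeof(key));
 *	if (err)
 *		return err;
 */
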
/**
 * crypto_blkcipher_encrypt() - encrypt plaintext
 * @desc: reference to the block cipher handle with metadata
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	ciphertext
 * @src: scatter/gather list that holds the plaintext
 * @nbytes: number of bytes of the plaintext to encrypt.
 *
 * Encrypt plaintext data using the IV set by the caller with a preceding
 * call of crypto_blkcipher_set_iv.
 *
 * The blkcipher_desc data structure must be filled by the caller and can
 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
 * with the block cipher handle; desc.flags is filled with either
 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
					   struct scatterlist *dst,
					   struct scatterlist *src,
					   unsigned int nbytes)
{
	desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}

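/*
 * Example (illustrative sketch): a complete synchronous encryption
 * sequence using the functions above. The algorithm name "cbc(aes)", the
 * key/IV contents, the 64-byte buffer and the in-place operation are
 * assumptions for this example; sg_init_one() comes from
 * <linux/scatterlist.h> and error handling is abbreviated.
 *
 *	struct crypto_blkcipher *tfm;
 *	struct blkcipher_desc desc;
 *	struct scatterlist sg;
 *	u8 key[16], iv[16], buf[64];
 *	int err;
 *
 *	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	crypto_blkcipher_setkey(tfm, key, sizeof(key));
 *	crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));
 *
 *	desc.tfm = tfm;
 *	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 *
 *	sg_init_one(&sg, buf, sizeof(buf));
 *	err = crypto_blkcipher_encrypt(&desc, &sg, &sg, sizeof(buf));
 *
 *	crypto_free_blkcipher(tfm);
 */
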
/**
 * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
 * @desc: reference to the block cipher handle with metadata
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	ciphertext
 * @src: scatter/gather list that holds the plaintext
 * @nbytes: number of bytes of the plaintext to encrypt.
 *
 * Encrypt plaintext data with the use of an IV that is solely used for this
 * cipher operation. Any previously set IV is not used.
 *
 * The blkcipher_desc data structure must be filled by the caller and can
 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
 * with the block cipher handle; desc.info is filled with the IV to be used for
 * the current operation; desc.flags is filled with either
 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
					      struct scatterlist *dst,
					      struct scatterlist *src,
					      unsigned int nbytes)
{
	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}

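/*
 * Example (sketch, reusing the names from the example above): identical to
 * the crypto_blkcipher_encrypt() sequence, except that the IV for this one
 * request is passed in desc.info rather than taken from the IV previously
 * set on the transform.
 *
 *	desc.tfm = tfm;
 *	desc.flags = 0;
 *	desc.info = iv;
 *
 *	sg_init_one(&sg, buf, sizeof(buf));
 *	err = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, sizeof(buf));
 */
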
/**
 * crypto_blkcipher_decrypt() - decrypt ciphertext
 * @desc: reference to the block cipher handle with metadata
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	plaintext
 * @src: scatter/gather list that holds the ciphertext
 * @nbytes: number of bytes of the ciphertext to decrypt.
 *
 * Decrypt ciphertext data using the IV set by the caller with a preceding
 * call of crypto_blkcipher_set_iv.
 *
 * The blkcipher_desc data structure must be filled by the caller as documented
 * for the crypto_blkcipher_encrypt call above.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
					   struct scatterlist *dst,
					   struct scatterlist *src,
					   unsigned int nbytes)
{
	desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
 * @desc: reference to the block cipher handle with metadata
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	plaintext
 * @src: scatter/gather list that holds the ciphertext
 * @nbytes: number of bytes of the ciphertext to decrypt.
 *
 * Decrypt ciphertext data with the use of an IV that is solely used for this
 * cipher operation. Any previously set IV is not used.
 *
 * The blkcipher_desc data structure must be filled by the caller as documented
 * for the crypto_blkcipher_encrypt_iv call above.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
					      struct scatterlist *dst,
					      struct scatterlist *src,
					      unsigned int nbytes)
{
	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_set_iv() - set IV for cipher
 * @tfm: cipher handle
 * @src: buffer holding the IV
 * @len: length of the IV in bytes
 *
 * The caller provided IV is set for the block cipher referenced by the cipher
 * handle.
 */
static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
					   const u8 *src, unsigned int len)
{
	memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
}

/**
 * crypto_blkcipher_get_iv() - obtain IV from cipher
 * @tfm: cipher handle
 * @dst: buffer filled with the IV
 * @len: length of the buffer dst
 *
 * The caller can obtain the IV set for the block cipher referenced by the
 * cipher handle and store it into the user-provided buffer. If the buffer
 * is too small, the IV is truncated to fit the buffer.
 */
static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
					   u8 *dst, unsigned int len)
{
	memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
}

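/*
 * Example (sketch): save the transform's current IV and restore it later.
 * The "saved_iv" buffer is an assumption; it is checked against the IV size
 * so that neither the copy out nor the copy back can overrun a buffer.
 *
 *	u8 saved_iv[16];
 *	unsigned int ivlen = crypto_blkcipher_ivsize(tfm);
 *
 *	if (WARN_ON(ivlen > sizeof(saved_iv)))
 *		return -EINVAL;
 *	crypto_blkcipher_get_iv(tfm, saved_iv, ivlen);
 *	(cipher operations that advance the IV happen here)
 *	crypto_blkcipher_set_iv(tfm, saved_iv, ivlen);
 */
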
/**
 * DOC: Single Block Cipher API
 *
 * The single block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
 *
 * Using the single block cipher API calls, operations with the basic cipher
 * primitive can be implemented. These cipher primitives exclude any block
 * chaining operations, including IV handling.
 *
 * The purpose of this single block cipher API is to support the implementation
 * of templates or other concepts that only need to perform the cipher
 * operation on one block at a time. Templates invoke the underlying cipher
 * primitive block-wise and process either the input or the output data of
 * these cipher operations.
 */

static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
{
	return (struct crypto_cipher *)tfm;
}

static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
{
	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
	return __crypto_cipher_cast(tfm);
}

/**
 * crypto_alloc_cipher() - allocate single block cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      single block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for a single block cipher. The returned struct
 * crypto_cipher is the cipher handle that is required for any subsequent API
 * invocation for that single block cipher.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *	   of an error, PTR_ERR() returns the error code.
 */
static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
							u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_CIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_cipher() - zeroize and free the single block cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_cipher(struct crypto_cipher *tfm)
{
	crypto_free_tfm(crypto_cipher_tfm(tfm));
}

/**
 * crypto_has_cipher() - Search for the availability of a single block cipher
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      single block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the single block cipher is known to the kernel crypto API;
 *	   false otherwise
 */
static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_CIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
{
	return &crypto_cipher_tfm(tfm)->crt_cipher;
}

/**
 * crypto_cipher_blocksize() - obtain block size for cipher
 * @tfm: cipher handle
 *
 * The block size for the single block cipher referenced with the cipher handle
 * tfm is returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
}

static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
}

static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
{
	return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
}

static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
					   u32 flags)
{
	crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
}

static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
					     u32 flags)
{
	crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
}

/**
 * crypto_cipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the single block cipher referenced by the
 * cipher handle.
 *
 * Note, the key length determines the cipher variant. Many block ciphers
 * implement different variants depending on the key size, such as AES-128 vs.
 * AES-192 vs. AES-256. When providing a 16 byte key for an AES cipher handle,
 * AES-128 is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
						  key, keylen);
}

/**
 * crypto_cipher_encrypt_one() - encrypt one block of plaintext
 * @tfm: cipher handle
 * @dst: points to the buffer that will be filled with the ciphertext
 * @src: buffer holding the plaintext to be encrypted
 *
 * Invoke the encryption operation of one block. The caller must ensure that
 * the plaintext and ciphertext buffers are at least one block in size.
 */
static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
					     u8 *dst, const u8 *src)
{
	crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
						dst, src);
}

/**
 * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
 * @tfm: cipher handle
 * @dst: points to the buffer that will be filled with the plaintext
 * @src: buffer holding the ciphertext to be decrypted
 *
 * Invoke the decryption operation of one block. The caller must ensure that
 * the plaintext and ciphertext buffers are at least one block in size.
 */
static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
					     u8 *dst, const u8 *src)
{
	crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
						dst, src);
}

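/*
 * Example (illustrative sketch): encrypting a single block with the bare
 * cipher primitive. The algorithm name "aes", the key and the 16-byte
 * buffers are assumptions; most users want a chaining mode around the
 * primitive rather than the raw block operation shown here.
 *
 *	struct crypto_cipher *tfm;
 *	u8 key[16], in[16], out[16];
 *	int err;
 *
 *	tfm = crypto_alloc_cipher("aes", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_cipher_setkey(tfm, key, sizeof(key));
 *	if (!err)
 *		crypto_cipher_encrypt_one(tfm, out, in);
 *
 *	crypto_free_cipher(tfm);
 */
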
static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
	return (struct crypto_comp *)tfm;
}

static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
{
	BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
	       CRYPTO_ALG_TYPE_MASK);
	return __crypto_comp_cast(tfm);
}

static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
						    u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_COMPRESS;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
{
	return &tfm->base;
}

static inline void crypto_free_comp(struct crypto_comp *tfm)
{
	crypto_free_tfm(crypto_comp_tfm(tfm));
}

static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_COMPRESS;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

static inline const char *crypto_comp_name(struct crypto_comp *tfm)
{
	return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
}

static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
{
	return &crypto_comp_tfm(tfm)->crt_compress;
}

static inline int crypto_comp_compress(struct crypto_comp *tfm,
					const u8 *src, unsigned int slen,
					u8 *dst, unsigned int *dlen)
{
	return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
						  src, slen, dst, dlen);
}

static inline int crypto_comp_decompress(struct crypto_comp *tfm,
					  const u8 *src, unsigned int slen,
					  u8 *dst, unsigned int *dlen)
{
	return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
						    src, slen, dst, dlen);
}

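/*
 * Example (sketch): the compression helpers follow the same pattern as the
 * cipher helpers above. The algorithm name "deflate" and the src/dst
 * buffers are assumptions; *dlen must be initialised to the size of the
 * destination buffer and is updated with the length actually produced.
 *
 *	struct crypto_comp *tfm;
 *	unsigned int dlen = sizeof(dst);
 *	int err;
 *
 *	tfm = crypto_alloc_comp("deflate", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_comp_compress(tfm, src, sizeof(src), dst, &dlen);
 *
 *	crypto_free_comp(tfm);
 */
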
#endif	/* _LINUX_CRYPTO_H */