/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
 * and Nettle, by Niels Möller.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/*
 * Autoloaded crypto modules should only use a prefixed name to avoid allowing
 * arbitrary modules to be loaded. Loading from userspace may still need the
 * unprefixed names, so retain those aliases as well.
 * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
 * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
 * expands twice on the same line. Instead, use a separate base name for the
 * alias.
 */
#define MODULE_ALIAS_CRYPTO(name)	\
		__MODULE_INFO(alias, alias_userspace, name);	\
		__MODULE_INFO(alias, alias_crypto, "crypto-" name)

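/*
 * Example (editor's illustrative sketch, not part of the original header):
 * a module implementing a hypothetical algorithm "foo" would declare
 *
 *	MODULE_ALIAS_CRYPTO("foo");
 *
 * so that it can be autoloaded both through the legacy unprefixed alias
 * "foo" and through the restricted alias "crypto-foo".
 */
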
/*
 * Algorithm masks and types.
 */
#define CRYPTO_ALG_TYPE_MASK		0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER		0x00000001
#define CRYPTO_ALG_TYPE_COMPRESS	0x00000002
#define CRYPTO_ALG_TYPE_AEAD		0x00000003
#define CRYPTO_ALG_TYPE_BLKCIPHER	0x00000004
#define CRYPTO_ALG_TYPE_ABLKCIPHER	0x00000005
#define CRYPTO_ALG_TYPE_SKCIPHER	0x00000005
#define CRYPTO_ALG_TYPE_GIVCIPHER	0x00000006
#define CRYPTO_ALG_TYPE_KPP		0x00000008
#define CRYPTO_ALG_TYPE_RNG		0x0000000c
#define CRYPTO_ALG_TYPE_AKCIPHER	0x0000000d
#define CRYPTO_ALG_TYPE_DIGEST		0x0000000e
#define CRYPTO_ALG_TYPE_HASH		0x0000000e
#define CRYPTO_ALG_TYPE_SHASH		0x0000000e
#define CRYPTO_ALG_TYPE_AHASH		0x0000000f

#define CRYPTO_ALG_TYPE_HASH_MASK	0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e
#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK	0x0000000c

#define CRYPTO_ALG_LARVAL		0x00000010
#define CRYPTO_ALG_DEAD			0x00000020
#define CRYPTO_ALG_DYING		0x00000040
#define CRYPTO_ALG_ASYNC		0x00000080

/*
 * Set this bit if and only if the algorithm requires another algorithm of
 * the same type to handle corner cases.
 */
#define CRYPTO_ALG_NEED_FALLBACK	0x00000100

/*
 * This bit is set for symmetric key ciphers that have already been wrapped
 * with a generic IV generator to prevent them from being wrapped again.
 */
#define CRYPTO_ALG_GENIV		0x00000200

/*
 * Set if the algorithm has passed automated run-time testing. Note that
 * if there is no run-time testing for a given algorithm it is considered
 * to have passed.
 */
#define CRYPTO_ALG_TESTED		0x00000400

/*
 * Set if the algorithm is an instance that is built from templates.
 */
#define CRYPTO_ALG_INSTANCE		0x00000800

/*
 * Set this bit if the algorithm provided is hardware accelerated but is not
 * available to userspace via an instruction set or similar mechanism.
 */
#define CRYPTO_ALG_KERN_DRIVER_ONLY	0x00001000

/*
 * Mark a cipher as a service implementation only usable by another
 * cipher and never by a normal user of the kernel crypto API
 */
#define CRYPTO_ALG_INTERNAL		0x00002000

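/*
 * Example (editor's illustrative sketch): the type/mask pair passed to the
 * allocation and lookup helpers below selects implementations by these
 * flags; a mask bit demands that the corresponding flag bit of the
 * algorithm match the type argument. Putting CRYPTO_ALG_ASYNC in the mask
 * while leaving it clear in the type therefore requests a synchronous
 * implementation:
 *
 *	struct crypto_blkcipher *tfm;
 *
 *	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
 */
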
/*
 * Transform masks and values (for crt_flags).
 */
#define CRYPTO_TFM_REQ_MASK		0x000fff00
#define CRYPTO_TFM_RES_MASK		0xfff00000

#define CRYPTO_TFM_REQ_WEAK_KEY		0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP	0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG	0x00000400
#define CRYPTO_TFM_RES_WEAK_KEY		0x00100000
#define CRYPTO_TFM_RES_BAD_KEY_LEN	0x00200000
#define CRYPTO_TFM_RES_BAD_KEY_SCHED	0x00400000
#define CRYPTO_TFM_RES_BAD_BLOCK_LEN	0x00800000
#define CRYPTO_TFM_RES_BAD_FLAGS	0x01000000

/*
 * Miscellaneous stuff.
 */
#define CRYPTO_MAX_ALG_NAME		64

/*
 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
 * declaration) is used to ensure that the crypto_tfm context structure is
 * aligned correctly for the given architecture so that there are no alignment
 * faults for C data types. In particular, this is required on platforms such
 * as arm where pointers are 32-bit aligned but there are data types such as
 * u64 which require 64-bit alignment.
 */
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN

#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))

struct scatterlist;
struct crypto_ablkcipher;
struct crypto_async_request;
struct crypto_blkcipher;
struct crypto_tfm;
struct crypto_type;
struct skcipher_givcrypt_request;

typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);

/**
 * DOC: Block Cipher Context Data Structures
 *
 * These data structures define the operating context for each block cipher
 * type.
 */

struct crypto_async_request {
	struct list_head list;
	crypto_completion_t complete;
	void *data;
	struct crypto_tfm *tfm;

	u32 flags;
};

struct ablkcipher_request {
	struct crypto_async_request base;

	unsigned int nbytes;

	void *info;

	struct scatterlist *src;
	struct scatterlist *dst;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct blkcipher_desc {
	struct crypto_blkcipher *tfm;
	void *info;
	u32 flags;
};

struct cipher_desc {
	struct crypto_tfm *tfm;
	void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
			     const u8 *src, unsigned int nbytes);
	void *info;
};

/**
 * DOC: Block Cipher Algorithm Definitions
 *
 * These data structures define modular crypto algorithm implementations,
 * managed via crypto_register_alg() and crypto_unregister_alg().
 */

/**
 * struct ablkcipher_alg - asynchronous block cipher definition
 * @min_keysize: Minimum key size supported by the transformation. This is the
 *		 smallest key length supported by this transformation algorithm.
 *		 This must be set to one of the pre-defined values as this is
 *		 not hardware specific. Possible values for this field can be
 *		 found via git grep "_MIN_KEY_SIZE" include/crypto/
 * @max_keysize: Maximum key size supported by the transformation. This is the
 *		 largest key length supported by this transformation algorithm.
 *		 This must be set to one of the pre-defined values as this is
 *		 not hardware specific. Possible values for this field can be
 *		 found via git grep "_MAX_KEY_SIZE" include/crypto/
 * @setkey: Set key for the transformation. This function is used to either
 *	    program a supplied key into the hardware or store the key in the
 *	    transformation context for programming it later. Note that this
 *	    function does modify the transformation context. This function can
 *	    be called multiple times during the existence of the transformation
 *	    object, so one must make sure the key is properly reprogrammed into
 *	    the hardware. This function is also responsible for checking the key
 *	    length for validity. In case a software fallback was put in place in
 *	    the @cra_init call, this function might need to use the fallback if
 *	    the algorithm doesn't support all of the key sizes.
 * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
 *	     the supplied scatterlist containing the blocks of data. The crypto
 *	     API consumer is responsible for aligning the entries of the
 *	     scatterlist properly and making sure the chunks are correctly
 *	     sized. In case a software fallback was put in place in the
 *	     @cra_init call, this function might need to use the fallback if
 *	     the algorithm doesn't support all of the key sizes. In case the
 *	     key was stored in the transformation context, the key might need
 *	     to be re-programmed into the hardware in this function. This
 *	     function shall not modify the transformation context, as this
 *	     function may be called in parallel with the same transformation
 *	     object.
 * @decrypt: Decrypt a scatterlist of blocks. This is the reverse counterpart
 *	     to @encrypt and the conditions are exactly the same.
 * @givencrypt: Update the IV for encryption. With this function, a cipher
 *		implementation may provide its own way of updating the IV for
 *		encryption.
 * @givdecrypt: Update the IV for decryption. This is the reverse of
 *		@givencrypt.
 * @geniv: The transformation implementation may use an "IV generator" provided
 *	   by the kernel crypto API. Several use cases have a predefined
 *	   approach for how IVs are to be updated. For such use cases, the
 *	   kernel crypto API provides ready-to-use implementations that can be
 *	   referenced with this variable.
 * @ivsize: IV size applicable for the transformation. The consumer must
 *	    provide an IV of exactly that size to perform the encrypt or
 *	    decrypt operation.
 *
 * All fields except @givencrypt, @givdecrypt, @geniv and @ivsize are
 * mandatory and must be filled.
 */
struct ablkcipher_alg {
	int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct ablkcipher_request *req);
	int (*decrypt)(struct ablkcipher_request *req);
	int (*givencrypt)(struct skcipher_givcrypt_request *req);
	int (*givdecrypt)(struct skcipher_givcrypt_request *req);

	const char *geniv;

	unsigned int min_keysize;
	unsigned int max_keysize;
	unsigned int ivsize;
};

/**
 * struct blkcipher_alg - synchronous block cipher definition
 * @min_keysize: see struct ablkcipher_alg
 * @max_keysize: see struct ablkcipher_alg
 * @setkey: see struct ablkcipher_alg
 * @encrypt: see struct ablkcipher_alg
 * @decrypt: see struct ablkcipher_alg
 * @geniv: see struct ablkcipher_alg
 * @ivsize: see struct ablkcipher_alg
 *
 * All fields except @geniv and @ivsize are mandatory and must be filled.
 */
struct blkcipher_alg {
	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes);
	int (*decrypt)(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes);

	const char *geniv;

	unsigned int min_keysize;
	unsigned int max_keysize;
	unsigned int ivsize;
};

/**
 * struct cipher_alg - single-block symmetric cipher definition
 * @cia_min_keysize: Minimum key size supported by the transformation. This is
 *		     the smallest key length supported by this transformation
 *		     algorithm. This must be set to one of the pre-defined
 *		     values as this is not hardware specific. Possible values
 *		     for this field can be found via git grep "_MIN_KEY_SIZE"
 *		     include/crypto/
 * @cia_max_keysize: Maximum key size supported by the transformation. This is
 *		     the largest key length supported by this transformation
 *		     algorithm. This must be set to one of the pre-defined
 *		     values as this is not hardware specific. Possible values
 *		     for this field can be found via git grep "_MAX_KEY_SIZE"
 *		     include/crypto/
 * @cia_setkey: Set key for the transformation. This function is used to either
 *		program a supplied key into the hardware or store the key in the
 *		transformation context for programming it later. Note that this
 *		function does modify the transformation context. This function
 *		can be called multiple times during the existence of the
 *		transformation object, so one must make sure the key is properly
 *		reprogrammed into the hardware. This function is also
 *		responsible for checking the key length for validity.
 * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
 *		 single block of data, which must be @cra_blocksize big. This
 *		 always operates on a full @cra_blocksize and it is not possible
 *		 to encrypt a block of smaller size. The supplied buffers must
 *		 therefore also be at least of @cra_blocksize size. Both the
 *		 input and output buffers are always aligned to @cra_alignmask.
 *		 In case either of the input or output buffer supplied by the
 *		 user of the crypto API is not aligned to @cra_alignmask, the
 *		 crypto API will re-align the buffers. The re-alignment means
 *		 that a new buffer will be allocated, the data will be copied
 *		 into the new buffer, then the processing will happen on the
 *		 new buffer, then the data will be copied back into the
 *		 original buffer and finally the new buffer will be freed. In
 *		 case a software fallback was put in place in the @cra_init
 *		 call, this function might need to use the fallback if the
 *		 algorithm doesn't support all of the key sizes. In case the
 *		 key was stored in the transformation context, the key might
 *		 need to be re-programmed into the hardware in this function.
 *		 This function shall not modify the transformation context, as
 *		 this function may be called in parallel with the same
 *		 transformation object.
 * @cia_decrypt: Decrypt a single block. This is the reverse counterpart to
 *		 @cia_encrypt, and the conditions are exactly the same.
 *
 * All fields are mandatory and must be filled.
 */
struct cipher_alg {
	unsigned int cia_min_keysize;
	unsigned int cia_max_keysize;
	int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen);
	void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};

struct compress_alg {
	int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
			    unsigned int slen, u8 *dst, unsigned int *dlen);
	int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
			      unsigned int slen, u8 *dst, unsigned int *dlen);
};


#define cra_ablkcipher	cra_u.ablkcipher
#define cra_blkcipher	cra_u.blkcipher
#define cra_cipher	cra_u.cipher
#define cra_compress	cra_u.compress

/**
 * struct crypto_alg - definition of a cryptographic cipher algorithm
 * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
 *	       CRYPTO_ALG_* flags for the flags which go in here. Those are
 *	       used for fine-tuning the description of the transformation
 *	       algorithm.
 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
 *		   of the smallest possible unit which can be transformed with
 *		   this algorithm. The users must respect this value.
 *		   In case of a HASH transformation, it is possible for a
 *		   smaller block than @cra_blocksize to be passed to the crypto
 *		   API for transformation; in case of any other transformation
 *		   type, an error will be returned upon any attempt to
 *		   transform smaller than @cra_blocksize chunks.
 * @cra_ctxsize: Size of the operational context of the transformation. This
 *		 value informs the kernel crypto API about the memory size
 *		 needed to be allocated for the transformation context.
 * @cra_alignmask: Alignment mask for the input and output data buffer. The
 *		   data buffer containing the input data for the algorithm must
 *		   be aligned to this alignment mask. The data buffer for the
 *		   output data must be aligned to this alignment mask. Note that
 *		   the Crypto API will do the re-alignment in software, but
 *		   only under special conditions and there is a performance hit.
 *		   The re-alignment happens at these occasions for different
 *		   @cra_u types: cipher -- For both input data and output data
 *		   buffer; ahash -- For output hash destination buf; shash --
 *		   For output hash destination buf.
 *		   This is needed on hardware which is flawed by design and
 *		   cannot pick data from arbitrary addresses.
 * @cra_priority: Priority of this transformation implementation. In case
 *		  multiple transformations with the same @cra_name are
 *		  available to the Crypto API, the kernel will use the one with
 *		  the highest @cra_priority.
 * @cra_name: Generic name (usable by multiple implementations) of the
 *	      transformation algorithm. This is the name of the transformation
 *	      itself. This field is used by the kernel when looking up the
 *	      providers of a particular transformation.
 * @cra_driver_name: Unique name of the transformation provider. This is the
 *		     name of the provider of the transformation. This can be
 *		     any arbitrary value, but in the usual case, this contains
 *		     the name of the chip or provider and the name of the
 *		     transformation algorithm.
 * @cra_type: Type of the cryptographic transformation. This is a pointer to
 *	      struct crypto_type, which implements callbacks common for all
 *	      transformation types. There are multiple options:
 *	      &crypto_blkcipher_type, &crypto_ablkcipher_type,
 *	      &crypto_ahash_type, &crypto_rng_type.
 *	      This field might be empty. In that case, there are no common
 *	      callbacks. This is the case for: cipher, compress, shash.
 * @cra_u: Callbacks implementing the transformation. This is a union of
 *	   multiple structures. Depending on the type of transformation
 *	   selected by @cra_type and @cra_flags above, the associated structure
 *	   must be filled with callbacks. This field might be empty. This is
 *	   the case for ahash, shash.
 * @cra_init: Initialize the cryptographic transformation object. This function
 *	      is used to initialize the cryptographic transformation object.
 *	      This function is called only once at the instantiation time, right
 *	      after the transformation context was allocated. In case the
 *	      cryptographic hardware has some special requirements which need
 *	      to be handled by software, this function shall check for the
 *	      precise requirement of the transformation and put any software
 *	      fallbacks in place.
 * @cra_exit: Deinitialize the cryptographic transformation object. This is a
 *	      counterpart to @cra_init, used to remove various changes set in
 *	      @cra_init.
 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
 * @cra_list: internally used
 * @cra_users: internally used
 * @cra_refcnt: internally used
 * @cra_destroy: internally used
 *
 * The struct crypto_alg describes a generic Crypto API algorithm and is common
 * for all of the transformations. Any variable not documented here shall not
 * be used by a cipher implementation as it is internal to the Crypto API.
 */
struct crypto_alg {
	struct list_head cra_list;
	struct list_head cra_users;

	u32 cra_flags;
	unsigned int cra_blocksize;
	unsigned int cra_ctxsize;
	unsigned int cra_alignmask;

	int cra_priority;
	atomic_t cra_refcnt;

	char cra_name[CRYPTO_MAX_ALG_NAME];
	char cra_driver_name[CRYPTO_MAX_ALG_NAME];

	const struct crypto_type *cra_type;

	union {
		struct ablkcipher_alg ablkcipher;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
	} cra_u;

	int (*cra_init)(struct crypto_tfm *tfm);
	void (*cra_exit)(struct crypto_tfm *tfm);
	void (*cra_destroy)(struct crypto_alg *alg);

	struct module *cra_module;
} CRYPTO_MINALIGN_ATTR;

/*
 * Algorithm registration interface.
 */
int crypto_register_alg(struct crypto_alg *alg);
int crypto_unregister_alg(struct crypto_alg *alg);
int crypto_register_algs(struct crypto_alg *algs, int count);
int crypto_unregister_algs(struct crypto_alg *algs, int count);
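
/*
 * Example (editor's illustrative sketch, hypothetical names): a driver
 * providing a single-block cipher fills a struct crypto_alg and registers
 * it from its module init function:
 *
 *	static struct crypto_alg foo_alg = {
 *		.cra_name		= "foo",
 *		.cra_driver_name	= "foo-generic",
 *		.cra_priority		= 100,
 *		.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 *		.cra_blocksize		= 16,
 *		.cra_ctxsize		= sizeof(struct foo_ctx),
 *		.cra_module		= THIS_MODULE,
 *		.cra_u.cipher		= {
 *			.cia_min_keysize	= 16,
 *			.cia_max_keysize	= 32,
 *			.cia_setkey		= foo_setkey,
 *			.cia_encrypt		= foo_encrypt,
 *			.cia_decrypt		= foo_decrypt,
 *		},
 *	};
 *
 *	ret = crypto_register_alg(&foo_alg);
 *
 * crypto_unregister_alg(&foo_alg) undoes the registration on module exit.
 */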

/*
 * Algorithm query interface.
 */
int crypto_has_alg(const char *name, u32 type, u32 mask);

/*
 * Transforms: user-instantiated objects which encapsulate algorithms
 * and core processing logic. Managed via crypto_alloc_*() and
 * crypto_free_*(), as well as the various helpers below.
 */

struct ablkcipher_tfm {
	int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct ablkcipher_request *req);
	int (*decrypt)(struct ablkcipher_request *req);

	struct crypto_ablkcipher *base;

	unsigned int ivsize;
	unsigned int reqsize;
};

struct blkcipher_tfm {
	void *iv;
	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes);
	int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes);
};

struct cipher_tfm {
	int (*cit_setkey)(struct crypto_tfm *tfm,
			  const u8 *key, unsigned int keylen);
	void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};

struct compress_tfm {
	int (*cot_compress)(struct crypto_tfm *tfm,
			    const u8 *src, unsigned int slen,
			    u8 *dst, unsigned int *dlen);
	int (*cot_decompress)(struct crypto_tfm *tfm,
			      const u8 *src, unsigned int slen,
			      u8 *dst, unsigned int *dlen);
};

#define crt_ablkcipher	crt_u.ablkcipher
#define crt_blkcipher	crt_u.blkcipher
#define crt_cipher	crt_u.cipher
#define crt_compress	crt_u.compress

struct crypto_tfm {

	u32 crt_flags;

	union {
		struct ablkcipher_tfm ablkcipher;
		struct blkcipher_tfm blkcipher;
		struct cipher_tfm cipher;
		struct compress_tfm compress;
	} crt_u;

	void (*exit)(struct crypto_tfm *tfm);

	struct crypto_alg *__crt_alg;

	void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_ablkcipher {
	struct crypto_tfm base;
};

struct crypto_blkcipher {
	struct crypto_tfm base;
};

struct crypto_cipher {
	struct crypto_tfm base;
};

struct crypto_comp {
	struct crypto_tfm base;
};

enum {
	CRYPTOA_UNSPEC,
	CRYPTOA_ALG,
	CRYPTOA_TYPE,
	CRYPTOA_U32,
	__CRYPTOA_MAX,
};

#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)

/* Maximum number of (rtattr) parameters for each template. */
#define CRYPTO_MAX_ATTRS 32

struct crypto_attr_alg {
	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_attr_type {
	u32 type;
	u32 mask;
};

struct crypto_attr_u32 {
	u32 num;
};

/*
 * Transform user interface.
 */

struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);

static inline void crypto_free_tfm(struct crypto_tfm *tfm)
{
	return crypto_destroy_tfm(tfm, tfm);
}

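/*
 * Example (editor's illustrative sketch): crypto_alloc_base() returns an
 * ERR_PTR() encoded error rather than NULL on failure, so callers must
 * check the result with IS_ERR():
 *
 *	struct crypto_tfm *tfm = crypto_alloc_base("sha1", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_tfm(tfm);
 */
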
int alg_test(const char *driver, const char *alg, u32 type, u32 mask);

/*
 * Transform helpers which query the underlying algorithm.
 */
static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_name;
}

static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_driver_name;
}

static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_priority;
}

static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}

static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_blocksize;
}

static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_alignmask;
}

static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
	return tfm->crt_flags;
}

static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags |= flags;
}

static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags &= ~flags;
}

static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
	return tfm->__crt_ctx;
}

static inline unsigned int crypto_tfm_ctx_alignment(void)
{
	struct crypto_tfm *tfm;
	return __alignof__(tfm->__crt_ctx);
}

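/*
 * Example (editor's illustrative sketch, hypothetical context type): an
 * implementation keeps its per-tfm state in the area sized by @cra_ctxsize
 * and reaches it through crypto_tfm_ctx():
 *
 *	struct foo_ctx {
 *		u32 key_enc[60];
 *		u32 key_dec[60];
 *	};
 *
 *	static int foo_init(struct crypto_tfm *tfm)
 *	{
 *		struct foo_ctx *ctx = crypto_tfm_ctx(tfm);
 *
 *		memset(ctx, 0, sizeof(*ctx));
 *		return 0;
 *	}
 *
 * with .cra_ctxsize = sizeof(struct foo_ctx) set in the struct crypto_alg.
 */
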
/*
 * API wrappers.
 */
static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
	struct crypto_tfm *tfm)
{
	return (struct crypto_ablkcipher *)tfm;
}

static inline u32 crypto_skcipher_type(u32 type)
{
	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	return type;
}

static inline u32 crypto_skcipher_mask(u32 mask)
{
	mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
	return mask;
}

/**
 * DOC: Asynchronous Block Cipher API
 *
 * The asynchronous block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
 *
 * Asynchronous cipher operations imply that the function invocation for a
 * cipher request returns immediately before the completion of the operation.
 * The cipher request is scheduled as a separate kernel thread and therefore
 * load-balanced on the different CPUs via the process scheduler. To allow
 * the kernel crypto API to inform the caller about the completion of a cipher
 * request, the caller must provide a callback function. That function is
 * invoked with the cipher handle when the request completes.
 *
 * To support the asynchronous operation, more information than just the
 * cipher handle must be supplied to the kernel crypto API. That additional
 * information is given by filling in the ablkcipher_request data structure.
 *
 * For the asynchronous block cipher API, the state is maintained with the tfm
 * cipher handle. A single tfm can be used across multiple calls and in
 * parallel. For asynchronous block cipher calls, context data supplied and
 * only used by the caller can be referenced in the request data structure in
 * addition to the IV used for the cipher request. The maintenance of such
 * state information would be important for a crypto driver implementer to
 * have, because when calling the callback function upon completion of the
 * cipher operation, that callback function may need some information about
 * which operation just finished if it invoked multiple operations in
 * parallel. This state information is unused by the kernel crypto API.
 */

static inline struct crypto_tfm *crypto_ablkcipher_tfm(
	struct crypto_ablkcipher *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_ablkcipher() - zeroize and free cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
{
	crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
}

/**
 * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      ablkcipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the ablkcipher is known to the kernel crypto API; false
 *	   otherwise
 */
static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_has_alg(alg_name, crypto_skcipher_type(type),
			      crypto_skcipher_mask(mask));
}

static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
	struct crypto_ablkcipher *tfm)
{
	return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
}

/**
 * crypto_ablkcipher_ivsize() - obtain IV size
 * @tfm: cipher handle
 *
 * The size of the IV for the ablkcipher referenced by the cipher handle is
 * returned. This IV size may be zero if the cipher does not need an IV.
 *
 * Return: IV size in bytes
 */
static inline unsigned int crypto_ablkcipher_ivsize(
	struct crypto_ablkcipher *tfm)
{
	return crypto_ablkcipher_crt(tfm)->ivsize;
}

/**
 * crypto_ablkcipher_blocksize() - obtain block size of cipher
 * @tfm: cipher handle
 *
 * The block size for the ablkcipher referenced with the cipher handle is
 * returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_ablkcipher_blocksize(
	struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm));
}

static inline unsigned int crypto_ablkcipher_alignmask(
	struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm));
}

static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm));
}

static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm,
					       u32 flags)
{
	crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags);
}

static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
						 u32 flags)
{
	crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
}

/**
 * crypto_ablkcipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the ablkcipher referenced by the cipher
 * handle.
 *
 * Note, the key length determines the cipher type. Many block ciphers implement
 * different cipher modes depending on the key size, such as AES-128 vs.
 * AES-192 vs. AES-256. When providing a 16 byte key for an AES cipher handle,
 * AES-128 is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm);

	return crt->setkey(crt->base, key, keylen);
}

/**
 * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
 * @req: ablkcipher_request out of which the cipher handle is to be obtained
 *
 * Return the crypto_ablkcipher handle that was registered with an
 * ablkcipher_request data structure.
 *
 * Return: crypto_ablkcipher handle
 */
static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
	struct ablkcipher_request *req)
{
	return __crypto_ablkcipher_cast(req->base.tfm);
}

/**
 * crypto_ablkcipher_encrypt() - encrypt plaintext
 * @req: reference to the ablkcipher_request handle that holds all information
 *	 needed to perform the cipher operation
 *
 * Encrypt plaintext data using the ablkcipher_request handle. That data
 * structure and how it is filled with data is discussed with the
 * ablkcipher_request_* functions.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_tfm *crt =
		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
	return crt->encrypt(req);
}

/**
 * crypto_ablkcipher_decrypt() - decrypt ciphertext
 * @req: reference to the ablkcipher_request handle that holds all information
 *	 needed to perform the cipher operation
 *
 * Decrypt ciphertext data using the ablkcipher_request handle. That data
 * structure and how it is filled with data is discussed with the
 * ablkcipher_request_* functions.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_tfm *crt =
		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
	return crt->decrypt(req);
}

/**
 * DOC: Asynchronous Cipher Request Handle
 *
 * The ablkcipher_request data structure contains all pointers to data
 * required for the asynchronous cipher operation. This includes the cipher
 * handle (which can be used by multiple ablkcipher_request instances), pointers
 * to plaintext and ciphertext, the asynchronous callback function, etc. It
 * acts as a handle to the ablkcipher_request_* API calls in a similar way as
 * the ablkcipher handle to the crypto_ablkcipher_* API calls.
 */

/**
 * crypto_ablkcipher_reqsize() - obtain size of the request data structure
 * @tfm: cipher handle
 *
 * Return: number of bytes
 */
static inline unsigned int crypto_ablkcipher_reqsize(
	struct crypto_ablkcipher *tfm)
{
	return crypto_ablkcipher_crt(tfm)->reqsize;
}

/**
 * ablkcipher_request_set_tfm() - update cipher handle reference in request
 * @req: request handle to be modified
 * @tfm: cipher handle that shall be added to the request handle
 *
 * Allow the caller to replace the existing ablkcipher handle in the request
 * data structure with a different one.
 */
static inline void ablkcipher_request_set_tfm(
	struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
{
	req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base);
}

static inline struct ablkcipher_request *ablkcipher_request_cast(
	struct crypto_async_request *req)
{
	return container_of(req, struct ablkcipher_request, base);
}

/**
 * ablkcipher_request_alloc() - allocate request data structure
 * @tfm: cipher handle to be registered with the request
 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
 *
 * Allocate the request data structure that must be used with the ablkcipher
 * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
 * handle is registered in the request data structure.
 *
 * Return: allocated request handle in case of success, or NULL if out of memory
 */
static inline struct ablkcipher_request *ablkcipher_request_alloc(
	struct crypto_ablkcipher *tfm, gfp_t gfp)
{
	struct ablkcipher_request *req;

	req = kmalloc(sizeof(struct ablkcipher_request) +
		      crypto_ablkcipher_reqsize(tfm), gfp);

	if (likely(req))
		ablkcipher_request_set_tfm(req, tfm);

	return req;
}

/**
 * ablkcipher_request_free() - zeroize and free request data structure
 * @req: request data structure cipher handle to be freed
 */
static inline void ablkcipher_request_free(struct ablkcipher_request *req)
{
	kzfree(req);
}

/**
 * ablkcipher_request_set_callback() - set asynchronous callback function
 * @req: request handle
 * @flags: specify zero or an ORing of the flags
 *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
 *	   increase the wait queue beyond the initial maximum size;
 *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
 * @compl: callback function pointer to be registered with the request handle
 * @data: The data pointer refers to memory that is not used by the kernel
 *	  crypto API, but provided to the callback function for it to use. Here,
 *	  the caller can provide a reference to memory the callback function can
 *	  operate on. As the callback function is invoked asynchronously to the
 *	  related functionality, it may need to access data structures of the
 *	  related functionality which can be referenced using this pointer. The
 *	  callback function can access the memory via the "data" field in the
 *	  crypto_async_request data structure provided to the callback function.
 *
 * This function allows setting the callback function that is triggered once
 * the cipher operation completes.
 *
 * The callback function is registered with the ablkcipher_request handle and
 * must comply with the following template:
 *
 *	void callback_function(struct crypto_async_request *req, int error)
 */
static inline void ablkcipher_request_set_callback(
	struct ablkcipher_request *req,
	u32 flags, crypto_completion_t compl, void *data)
{
	req->base.complete = compl;
	req->base.data = data;
	req->base.flags = flags;
}

/**
 * ablkcipher_request_set_crypt() - set data buffers
 * @req: request handle
 * @src: source scatter / gather list
 * @dst: destination scatter / gather list
 * @nbytes: number of bytes to process from @src
 * @iv: IV for the cipher operation which must comply with the IV size defined
 *	by crypto_ablkcipher_ivsize
 *
 * This function allows setting of the source data and destination data
 * scatter / gather lists.
 *
 * For encryption, the source is treated as the plaintext and the
 * destination is the ciphertext. For a decryption operation, the use is
 * reversed - the source is the ciphertext and the destination is the plaintext.
 */
static inline void ablkcipher_request_set_crypt(
	struct ablkcipher_request *req,
	struct scatterlist *src, struct scatterlist *dst,
	unsigned int nbytes, void *iv)
{
	req->src = src;
	req->dst = dst;
	req->nbytes = nbytes;
	req->info = iv;
}

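/*
 * Example (editor's illustrative sketch): one asynchronous encryption pass
 * over a scatterlist, assuming @tfm is an already-allocated and keyed
 * struct crypto_ablkcipher and @sg, @nbytes and @iv describe the data.
 * Completion is signalled through an ordinary struct completion handed in
 * as the callback data:
 *
 *	static void foo_done(struct crypto_async_request *areq, int err)
 *	{
 *		if (err == -EINPROGRESS)
 *			return;
 *		complete(areq->data);
 *	}
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	struct ablkcipher_request *req;
 *	int ret;
 *
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	if (!req)
 *		return -ENOMEM;
 *
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					foo_done, &done);
 *	ablkcipher_request_set_crypt(req, sg, sg, nbytes, iv);
 *
 *	ret = crypto_ablkcipher_encrypt(req);
 *	if (ret == -EINPROGRESS || ret == -EBUSY) {
 *		wait_for_completion(&done);
 *		ret = 0;
 *	}
 *
 *	ablkcipher_request_free(req);
 */
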
/**
 * DOC: Synchronous Block Cipher API
 *
 * The synchronous block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto)
 *
 * Synchronous calls have a context in the tfm. But since a single tfm can be
 * used in multiple calls and in parallel, this info should not be changeable
 * (unless a lock is used). This applies, for example, to the symmetric key.
 * However, the IV is changeable, so there is an iv field in the blkcipher_tfm
 * structure for the synchronous blkcipher API. So it is the only state
 * information that can be kept for synchronous calls without using a big lock
 * across a tfm.
 *
 * The block cipher API allows the use of a complete cipher, i.e. a cipher
 * consisting of a template (a block chaining mode) and a single block cipher
 * primitive (e.g. AES).
 *
 * The plaintext data buffer and the ciphertext data buffer are pointed to
 * by using scatter/gather lists. The cipher operation is performed
 * on all segments of the provided scatter/gather lists.
 *
 * The kernel crypto API supports a cipher operation "in-place" which means that
 * the caller may provide the same scatter/gather list for the plaintext and
 * cipher text. After the completion of the cipher operation, the plaintext
 * data is replaced with the ciphertext data in case of an encryption and vice
 * versa for a decryption. The caller must ensure that the scatter/gather lists
 * for the output data point to sufficiently large buffers, i.e. multiples of
 * the block size of the cipher.
 */

static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
	struct crypto_tfm *tfm)
{
	return (struct crypto_blkcipher *)tfm;
}

static inline struct crypto_blkcipher *crypto_blkcipher_cast(
	struct crypto_tfm *tfm)
{
	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER);
	return __crypto_blkcipher_cast(tfm);
}

/**
 * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      blkcipher cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for a block cipher. The returned struct
 * crypto_blkcipher is the cipher handle that is required for any subsequent
 * API invocation for that block cipher.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *	   of an error, PTR_ERR() returns the error code.
 */
static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
	const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_blkcipher_tfm(
	struct crypto_blkcipher *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_blkcipher() - zeroize and free the block cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
{
	crypto_free_tfm(crypto_blkcipher_tfm(tfm));
}

/**
 * crypto_has_blkcipher() - Search for the availability of a block cipher
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the block cipher is known to the kernel crypto API; false
 *	   otherwise
 */
static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

/**
 * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
 * @tfm: cipher handle
 *
 * Return: The character string holding the name of the cipher
 */
static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
}

static inline struct blkcipher_tfm *crypto_blkcipher_crt(
	struct crypto_blkcipher *tfm)
{
	return &crypto_blkcipher_tfm(tfm)->crt_blkcipher;
}

static inline struct blkcipher_alg *crypto_blkcipher_alg(
	struct crypto_blkcipher *tfm)
{
	return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
}

/**
 * crypto_blkcipher_ivsize() - obtain IV size
 * @tfm: cipher handle
 *
 * The size of the IV for the block cipher referenced by the cipher handle is
 * returned. This IV size may be zero if the cipher does not need an IV.
 *
 * Return: IV size in bytes
 */
static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
{
	return crypto_blkcipher_alg(tfm)->ivsize;
}

/**
 * crypto_blkcipher_blocksize() - obtain block size of cipher
 * @tfm: cipher handle
 *
 * The block size for the block cipher referenced with the cipher handle is
 * returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_blkcipher_blocksize(
	struct crypto_blkcipher *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
}

static inline unsigned int crypto_blkcipher_alignmask(
	struct crypto_blkcipher *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
}

static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
}

static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
					      u32 flags)
{
	crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
}

static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
						u32 flags)
{
	crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
}

/**
 * crypto_blkcipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the block cipher referenced by the cipher
 * handle.
 *
 * Note, the key length determines the cipher type. Many block ciphers implement
 * different cipher modes depending on the key size, such as AES-128 vs.
 * AES-192 vs. AES-256. When providing a 16 byte key for an AES cipher handle,
 * AES-128 is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
					  const u8 *key, unsigned int keylen)
{
	return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
						 key, keylen);
}

/**
 * crypto_blkcipher_encrypt() - encrypt plaintext
 * @desc: reference to the block cipher handle with meta data
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	 ciphertext
 * @src: scatter/gather list that holds the plaintext
 * @nbytes: number of bytes of the plaintext to encrypt.
 *
 * Encrypt plaintext data using the IV set by the caller with a preceding
 * call of crypto_blkcipher_set_iv.
 *
 * The blkcipher_desc data structure must be filled by the caller and can
 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
 * with the block cipher handle; desc.flags is filled with either
 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
					   struct scatterlist *dst,
					   struct scatterlist *src,
					   unsigned int nbytes)
{
	desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
 * @desc: reference to the block cipher handle with meta data
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	 ciphertext
 * @src: scatter/gather list that holds the plaintext
 * @nbytes: number of bytes of the plaintext to encrypt.
 *
 * Encrypt plaintext data with the use of an IV that is solely used for this
 * cipher operation. Any previously set IV is not used.
 *
 * The blkcipher_desc data structure must be filled by the caller and can
 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
 * with the block cipher handle; desc.info is filled with the IV to be used for
 * the current operation; desc.flags is filled with either
 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
					      struct scatterlist *dst,
					      struct scatterlist *src,
					      unsigned int nbytes)
{
	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_decrypt() - decrypt ciphertext
 * @desc: reference to the block cipher handle with meta data
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	 plaintext
 * @src: scatter/gather list that holds the ciphertext
 * @nbytes: number of bytes of the ciphertext to decrypt.
 *
 * Decrypt ciphertext data using the IV set by the caller with a preceding
 * call of crypto_blkcipher_set_iv.
 *
 * The blkcipher_desc data structure must be filled by the caller as documented
 * for the crypto_blkcipher_encrypt call above.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
					   struct scatterlist *dst,
					   struct scatterlist *src,
					   unsigned int nbytes)
{
	desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
 * @desc: reference to the block cipher handle with meta data
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	 plaintext
 * @src: scatter/gather list that holds the ciphertext
 * @nbytes: number of bytes of the ciphertext to decrypt.
 *
 * Decrypt ciphertext data with the use of an IV that is solely used for this
 * cipher operation. Any previously set IV is not used.
 *
 * The blkcipher_desc data structure must be filled by the caller as documented
 * for the crypto_blkcipher_encrypt_iv call above.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
					      struct scatterlist *dst,
					      struct scatterlist *src,
					      unsigned int nbytes)
{
	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_set_iv() - set IV for cipher
 * @tfm: cipher handle
 * @src: buffer holding the IV
 * @len: length of the IV in bytes
 *
 * The caller provided IV is set for the block cipher referenced by the cipher
 * handle.
 */
static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
					   const u8 *src, unsigned int len)
{
	memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
}

/**
 * crypto_blkcipher_get_iv() - obtain IV from cipher
 * @tfm: cipher handle
 * @dst: buffer filled with the IV
 * @len: length of the buffer dst
 *
 * The caller can obtain the IV set for the block cipher referenced by the
 * cipher handle and store it into the user-provided buffer. If the buffer
 * has insufficient space, the IV is truncated to fit the buffer.
 */
static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
					   u8 *dst, unsigned int len)
{
	memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
}

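/*
 * Example (editor's illustrative sketch): a synchronous CBC-AES encryption
 * over a scatterlist using the API above; key and IV contents are assumed
 * to be provided by the caller:
 *
 *	struct crypto_blkcipher *tfm;
 *	struct blkcipher_desc desc;
 *	int ret;
 *
 *	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	crypto_blkcipher_setkey(tfm, key, 16);
 *	crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));
 *
 *	desc.tfm = tfm;
 *	desc.flags = 0;
 *	ret = crypto_blkcipher_encrypt(&desc, sg, sg, nbytes);
 *
 *	crypto_free_blkcipher(tfm);
 */
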
/**
 * DOC: Single Block Cipher API
 *
 * The single block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
 *
 * Using the single block cipher API calls, operations with the basic cipher
 * primitive can be implemented. These cipher primitives exclude any block
 * chaining operations including IV handling.
 *
 * The purpose of this single block cipher API is to support the implementation
 * of templates or other concepts that only need to perform the cipher operation
 * on one block at a time. Templates invoke the underlying cipher primitive
 * block-wise and process either the input or the output data of these cipher
 * operations.
 */

static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
{
	return (struct crypto_cipher *)tfm;
}

static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
{
	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
	return __crypto_cipher_cast(tfm);
}

/**
 * crypto_alloc_cipher() - allocate single block cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      single block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for a single block cipher. The returned struct
 * crypto_cipher is the cipher handle that is required for any subsequent API
 * invocation for that single block cipher.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *	   of an error, PTR_ERR() returns the error code.
 */
static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
							u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_CIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_cipher() - zeroize and free the single block cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_cipher(struct crypto_cipher *tfm)
{
	crypto_free_tfm(crypto_cipher_tfm(tfm));
}

/**
 * crypto_has_cipher() - Search for the availability of a single block cipher
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      single block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the single block cipher is known to the kernel crypto API;
 *	   false otherwise
 */
static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_CIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
{
	return &crypto_cipher_tfm(tfm)->crt_cipher;
}

/**
 * crypto_cipher_blocksize() - obtain block size for cipher
 * @tfm: cipher handle
 *
 * The block size for the single block cipher referenced with the cipher handle
 * tfm is returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
}

static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
}

static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
{
	return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
}

static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
					   u32 flags)
{
	crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
}

static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
					     u32 flags)
{
	crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
}

/**
 * crypto_cipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the single block cipher referenced by the
 * cipher handle.
 *
 * Note, the key length determines the cipher type. Many block ciphers implement
 * different cipher modes depending on the key size, such as AES-128 vs.
 * AES-192 vs. AES-256. When providing a 16 byte key for an AES cipher handle,
 * AES-128 is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
						  key, keylen);
}

/**
 * crypto_cipher_encrypt_one() - encrypt one block of plaintext
 * @tfm: cipher handle
 * @dst: points to the buffer that will be filled with the ciphertext
 * @src: buffer holding the plaintext to be encrypted
 *
 * Invoke the encryption operation of one block. The caller must ensure that
 * the plaintext and ciphertext buffers are at least one block in size.
 */
static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
					     u8 *dst, const u8 *src)
{
	crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
						dst, src);
}

/**
 * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
 * @tfm: cipher handle
 * @dst: points to the buffer that will be filled with the plaintext
 * @src: buffer holding the ciphertext to be decrypted
 *
 * Invoke the decryption operation of one block. The caller must ensure that
 * the plaintext and ciphertext buffers are at least one block in size.
 */
static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
					     u8 *dst, const u8 *src)
{
	crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
						dst, src);
}

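/*
 * Example (editor's illustrative sketch): encrypting exactly one AES block
 * with the single block cipher API; this is the pattern templates use to
 * drive the "aes" primitive one block at a time:
 *
 *	struct crypto_cipher *tfm;
 *	u8 block[16];
 *
 *	tfm = crypto_alloc_cipher("aes", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	crypto_cipher_setkey(tfm, key, 16);
 *	crypto_cipher_encrypt_one(tfm, block, block);
 *
 *	crypto_free_cipher(tfm);
 */
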
static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
	return (struct crypto_comp *)tfm;
}

static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
{
	BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
	       CRYPTO_ALG_TYPE_MASK);
	return __crypto_comp_cast(tfm);
}

static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
						    u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_COMPRESS;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
{
	return &tfm->base;
}

static inline void crypto_free_comp(struct crypto_comp *tfm)
{
	crypto_free_tfm(crypto_comp_tfm(tfm));
}

static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_COMPRESS;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

static inline const char *crypto_comp_name(struct crypto_comp *tfm)
{
	return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
}

static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
{
	return &crypto_comp_tfm(tfm)->crt_compress;
}

static inline int crypto_comp_compress(struct crypto_comp *tfm,
				       const u8 *src, unsigned int slen,
				       u8 *dst, unsigned int *dlen)
{
	return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
						  src, slen, dst, dlen);
}

static inline int crypto_comp_decompress(struct crypto_comp *tfm,
					 const u8 *src, unsigned int slen,
					 u8 *dst, unsigned int *dlen)
{
	return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
						    src, slen, dst, dlen);
}

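/*
 * Example (editor's illustrative sketch): compressing a buffer with the
 * "deflate" algorithm via the compression interface; @dlen must be
 * initialized to the size of the destination buffer and is updated to the
 * number of bytes actually produced:
 *
 *	struct crypto_comp *tfm;
 *	unsigned int dlen = dst_size;
 *	int ret;
 *
 *	tfm = crypto_alloc_comp("deflate", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	ret = crypto_comp_compress(tfm, src, slen, dst, &dlen);
 *
 *	crypto_free_comp(tfm);
 */
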
#endif	/* _LINUX_CRYPTO_H */