1/*
2 * Scatterlist Cryptographic API.
3 *
4 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
5 * Copyright (c) 2002 David S. Miller (davem@redhat.com)
5cb1454b 6 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
7 *
8 * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
18735dd8 9 * and Nettle, by Niels Möller.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the Free
13 * Software Foundation; either version 2 of the License, or (at your option)
14 * any later version.
15 *
16 */
17#ifndef _LINUX_CRYPTO_H
18#define _LINUX_CRYPTO_H
19
60063497 20#include <linux/atomic.h>
1da177e4 21#include <linux/kernel.h>
1da177e4 22#include <linux/list.h>
187f1882 23#include <linux/bug.h>
79911102 24#include <linux/slab.h>
1da177e4 25#include <linux/string.h>
79911102 26#include <linux/uaccess.h>
ada69a16 27#include <linux/completion.h>
1da177e4 28
5d26a105
KC
29/*
30 * Autoloaded crypto modules should only use a prefixed name to avoid allowing
31 * arbitrary modules to be loaded. Loading from userspace may still need the
32 * unprefixed names, so those aliases are retained as well.
33 * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
34 * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
35 * expands twice on the same line. Instead, use a separate base name for the
36 * alias.
37 */
38#define MODULE_ALIAS_CRYPTO(name) \
39 __MODULE_INFO(alias, alias_userspace, name); \
40 __MODULE_INFO(alias, alias_crypto, "crypto-" name)
41
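/*
 * Example (editor's sketch, not part of the original header): a module
 * implementing a hypothetical "foo" algorithm declares the alias once; the
 * macro emits both the prefixed "crypto-foo" alias used by the crypto API's
 * module autoloading and the plain "foo" alias for userspace requests:
 *
 *	MODULE_ALIAS_CRYPTO("foo");
 */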
1da177e4
LT
42/*
43 * Algorithm masks and types.
44 */
2825982d 45#define CRYPTO_ALG_TYPE_MASK 0x0000000f
1da177e4 46#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
47#define CRYPTO_ALG_TYPE_COMPRESS 0x00000002
48#define CRYPTO_ALG_TYPE_AEAD 0x00000003
055bcee3 49#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004
332f8840 50#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
4e6c3df4 51#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
61da88e2 52#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
4e5f2c40 53#define CRYPTO_ALG_TYPE_KPP 0x00000008
2ebda74f 54#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
1ab53a77 55#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b
17f0f4a4 56#define CRYPTO_ALG_TYPE_RNG 0x0000000c
3c339ab8 57#define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
58#define CRYPTO_ALG_TYPE_DIGEST 0x0000000e
59#define CRYPTO_ALG_TYPE_HASH 0x0000000e
60#define CRYPTO_ALG_TYPE_SHASH 0x0000000e
61#define CRYPTO_ALG_TYPE_AHASH 0x0000000f
62
63#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
63044c4f 64#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
332f8840 65#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
1ab53a77 66#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e
1da177e4 67
2825982d 68#define CRYPTO_ALG_LARVAL 0x00000010
69#define CRYPTO_ALG_DEAD 0x00000020
70#define CRYPTO_ALG_DYING 0x00000040
f3f632d6 71#define CRYPTO_ALG_ASYNC 0x00000080
2825982d 72
6010439f
HX
73/*
74 * Set this bit if and only if the algorithm requires another algorithm of
75 * the same type to handle corner cases.
76 */
77#define CRYPTO_ALG_NEED_FALLBACK 0x00000100
78
ecfc4329
HX
79/*
80 * This bit is set for symmetric key ciphers that have already been wrapped
81 * with a generic IV generator to prevent them from being wrapped again.
82 */
83#define CRYPTO_ALG_GENIV 0x00000200
84
73d3864a
HX
85/*
86 * Set if the algorithm has passed automated run-time testing. Note that
87 * if there is no run-time testing for a given algorithm it is considered
88 * to have passed.
89 */
90
91#define CRYPTO_ALG_TESTED 0x00000400
92
64a947b1 93/*
864e0981 94 * Set if the algorithm is an instance that is built from templates.
64a947b1
SK
95 */
96#define CRYPTO_ALG_INSTANCE 0x00000800
97
d912bb76
NM
98/* Set this bit if the algorithm provided is hardware accelerated but
99 * not available to userspace via an instruction set or similar.
100 */
101#define CRYPTO_ALG_KERN_DRIVER_ONLY 0x00001000
102
06ca7f68
SM
103/*
104 * Mark a cipher as a service implementation only usable by another
105 * cipher and never by a normal user of the kernel crypto API
106 */
107#define CRYPTO_ALG_INTERNAL 0x00002000
108
a208fa8f
EB
109/*
110 * Set if the algorithm has a ->setkey() method but can be used without
111 * calling it first, i.e. there is a default key.
112 */
113#define CRYPTO_ALG_OPTIONAL_KEY 0x00004000
114
1da177e4
LT
115/*
116 * Transform masks and values (for crt_flags).
117 */
9fa68f62
EB
118#define CRYPTO_TFM_NEED_KEY 0x00000001
119
1da177e4
LT
120#define CRYPTO_TFM_REQ_MASK 0x000fff00
121#define CRYPTO_TFM_RES_MASK 0xfff00000
122
1da177e4 123#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100
64baf3cf 124#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200
32e3983f 125#define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400
126#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000
127#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000
128#define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000
129#define CRYPTO_TFM_RES_BAD_BLOCK_LEN 0x00800000
130#define CRYPTO_TFM_RES_BAD_FLAGS 0x01000000
131
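/*
 * Example (editor's sketch, not from the original source): a setkey
 * implementation conventionally reports an unsupported key length by setting
 * a CRYPTO_TFM_RES_* flag before returning -EINVAL, so the caller can
 * inspect the reason with crypto_tfm_get_flags(). "example_setkey" and the
 * key lengths are purely illustrative:
 *
 *	static int example_setkey(struct crypto_ablkcipher *tfm,
 *				  const u8 *key, unsigned int keylen)
 *	{
 *		if (keylen != 16 && keylen != 24 && keylen != 32) {
 *			crypto_ablkcipher_set_flags(tfm,
 *						    CRYPTO_TFM_RES_BAD_KEY_LEN);
 *			return -EINVAL;
 *		}
 *		...
 *		return 0;
 *	}
 */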
132/*
133 * Miscellaneous stuff.
134 */
f437a3f4 135#define CRYPTO_MAX_ALG_NAME 128
1da177e4 136
79911102
HX
137/*
138 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
139 * declaration) is used to ensure that the crypto_tfm context structure is
140 * aligned correctly for the given architecture so that there are no alignment
141 * faults for C data types. In particular, this is required on platforms such
142 * as arm where pointers are 32-bit aligned but there are data types such as
143 * u64 which require 64-bit alignment.
144 */
79911102 145#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN
79911102 146
79911102 147#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
79911102 148
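/*
 * Sketch (editor's addition, not from the original source): the per-transform
 * context placed after struct crypto_tfm is obtained with crypto_tfm_ctx(),
 * declared later in this file, and is already CRYPTO_MINALIGN-aligned, so
 * u64 members may be used directly. The context struct is hypothetical:
 *
 *	struct example_ctx {
 *		u64 counter;
 *		u8 key[32];
 *	};
 *
 *	static int example_setkey(struct crypto_tfm *tfm, const u8 *key,
 *				  unsigned int keylen)
 *	{
 *		struct example_ctx *ctx = crypto_tfm_ctx(tfm);
 *		...
 *	}
 */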
1da177e4 149struct scatterlist;
150struct crypto_ablkcipher;
151struct crypto_async_request;
5cde0af2 152struct crypto_blkcipher;
40725181 153struct crypto_tfm;
e853c3cf 154struct crypto_type;
61da88e2 155struct skcipher_givcrypt_request;
40725181 156
32e3983f
HX
157typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
158
0d7f488f
SM
159/**
160 * DOC: Block Cipher Context Data Structures
161 *
162 * These data structures define the operating context for each block cipher
163 * type.
164 */
165
32e3983f
HX
166struct crypto_async_request {
167 struct list_head list;
168 crypto_completion_t complete;
169 void *data;
170 struct crypto_tfm *tfm;
171
172 u32 flags;
173};
174
175struct ablkcipher_request {
176 struct crypto_async_request base;
177
178 unsigned int nbytes;
179
180 void *info;
181
182 struct scatterlist *src;
183 struct scatterlist *dst;
184
185 void *__ctx[] CRYPTO_MINALIGN_ATTR;
186};
187
5cde0af2
HX
188struct blkcipher_desc {
189 struct crypto_blkcipher *tfm;
190 void *info;
191 u32 flags;
192};
193
40725181
HX
194struct cipher_desc {
195 struct crypto_tfm *tfm;
6c2bb98b 196 void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
197 unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
198 const u8 *src, unsigned int nbytes);
199 void *info;
200};
1da177e4 201
0d7f488f
SM
202/**
203 * DOC: Block Cipher Algorithm Definitions
204 *
205 * These data structures define modular crypto algorithm implementations,
206 * managed via crypto_register_alg() and crypto_unregister_alg().
207 */
208
209/**
210 * struct ablkcipher_alg - asynchronous block cipher definition
211 * @min_keysize: Minimum key size supported by the transformation. This is the
212 * smallest key length supported by this transformation algorithm.
213 * This must be set to one of the pre-defined values as this is
214 * not hardware specific. Possible values for this field can be
215 * found via git grep "_MIN_KEY_SIZE" include/crypto/
216 * @max_keysize: Maximum key size supported by the transformation. This is the
217 * largest key length supported by this transformation algorithm.
218 * This must be set to one of the pre-defined values as this is
219 * not hardware specific. Possible values for this field can be
220 * found via git grep "_MAX_KEY_SIZE" include/crypto/
221 * @setkey: Set key for the transformation. This function is used to either
222 * program a supplied key into the hardware or store the key in the
223 * transformation context for programming it later. Note that this
224 * function does modify the transformation context. This function can
225 * be called multiple times during the existence of the transformation
226 * object, so one must make sure the key is properly reprogrammed into
227 * the hardware. This function is also responsible for checking the key
228 * length for validity. In case a software fallback was put in place in
229 * the @cra_init call, this function might need to use the fallback if
230 * the algorithm doesn't support all of the key sizes.
231 * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
232 * the supplied scatterlist containing the blocks of data. The crypto
233 * API consumer is responsible for aligning the entries of the
234 * scatterlist properly and making sure the chunks are correctly
235 * sized. In case a software fallback was put in place in the
236 * @cra_init call, this function might need to use the fallback if
237 * the algorithm doesn't support all of the key sizes. In case the
238 * key was stored in transformation context, the key might need to be
239 * re-programmed into the hardware in this function. This function
240 * shall not modify the transformation context, as this function may
241 * be called in parallel with the same transformation object.
242 * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
243 * and the conditions are exactly the same.
244 * @givencrypt: Update the IV for encryption. With this function, a cipher
245 * implementation may provide the function on how to update the IV
246 * for encryption.
247 * @givdecrypt: Update the IV for decryption. This is the reverse of
248 * @givencrypt .
249 * @geniv: The transformation implementation may use an "IV generator" provided
250 * by the kernel crypto API. Several use cases have a predefined
251 * approach how IVs are to be updated. For such use cases, the kernel
252 * crypto API provides ready-to-use implementations that can be
253 * referenced with this variable.
254 * @ivsize: IV size applicable for transformation. The consumer must provide an
255 * IV of exactly that size to perform the encrypt or decrypt operation.
256 *
257 * All fields except @givencrypt, @givdecrypt, @geniv and @ivsize are
258 * mandatory and must be filled.
1da177e4 259 */
b5b7f088
HX
260struct ablkcipher_alg {
261 int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
262 unsigned int keylen);
263 int (*encrypt)(struct ablkcipher_request *req);
264 int (*decrypt)(struct ablkcipher_request *req);
265 int (*givencrypt)(struct skcipher_givcrypt_request *req);
266 int (*givdecrypt)(struct skcipher_givcrypt_request *req);
b5b7f088 267
268 const char *geniv;
269
270 unsigned int min_keysize;
271 unsigned int max_keysize;
272 unsigned int ivsize;
273};
274
0d7f488f
SM
275/**
276 * struct blkcipher_alg - synchronous block cipher definition
277 * @min_keysize: see struct ablkcipher_alg
278 * @max_keysize: see struct ablkcipher_alg
279 * @setkey: see struct ablkcipher_alg
280 * @encrypt: see struct ablkcipher_alg
281 * @decrypt: see struct ablkcipher_alg
282 * @geniv: see struct ablkcipher_alg
283 * @ivsize: see struct ablkcipher_alg
284 *
285 * All fields except @geniv and @ivsize are mandatory and must be filled.
286 */
5cde0af2
HX
287struct blkcipher_alg {
288 int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
289 unsigned int keylen);
290 int (*encrypt)(struct blkcipher_desc *desc,
291 struct scatterlist *dst, struct scatterlist *src,
292 unsigned int nbytes);
293 int (*decrypt)(struct blkcipher_desc *desc,
294 struct scatterlist *dst, struct scatterlist *src,
295 unsigned int nbytes);
296
297 const char *geniv;
298
299 unsigned int min_keysize;
300 unsigned int max_keysize;
301 unsigned int ivsize;
302};
303
0d7f488f
SM
304/**
305 * struct cipher_alg - single-block symmetric ciphers definition
306 * @cia_min_keysize: Minimum key size supported by the transformation. This is
307 * the smallest key length supported by this transformation
308 * algorithm. This must be set to one of the pre-defined
309 * values as this is not hardware specific. Possible values
310 * for this field can be found via git grep "_MIN_KEY_SIZE"
311 * include/crypto/
312 * @cia_max_keysize: Maximum key size supported by the transformation. This is
313 * the largest key length supported by this transformation
314 * algorithm. This must be set to one of the pre-defined values
315 * as this is not hardware specific. Possible values for this
316 * field can be found via git grep "_MAX_KEY_SIZE"
317 * include/crypto/
318 * @cia_setkey: Set key for the transformation. This function is used to either
319 * program a supplied key into the hardware or store the key in the
320 * transformation context for programming it later. Note that this
321 * function does modify the transformation context. This function
322 * can be called multiple times during the existence of the
323 * transformation object, so one must make sure the key is properly
324 * reprogrammed into the hardware. This function is also
325 * responsible for checking the key length for validity.
326 * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
327 * single block of data, which must be @cra_blocksize big. This
328 * always operates on a full @cra_blocksize and it is not possible
329 * to encrypt a block of smaller size. The supplied buffers must
330 * therefore also be at least of @cra_blocksize size. Both the
331 * input and output buffers are always aligned to @cra_alignmask.
332 * In case either of the input or output buffer supplied by user
333 * of the crypto API is not aligned to @cra_alignmask, the crypto
334 * API will re-align the buffers. The re-alignment means that a
335 * new buffer will be allocated, the data will be copied into the
336 * new buffer, then the processing will happen on the new buffer,
337 * then the data will be copied back into the original buffer and
338 * finally the new buffer will be freed. In case a software
339 * fallback was put in place in the @cra_init call, this function
340 * might need to use the fallback if the algorithm doesn't support
341 * all of the key sizes. In case the key was stored in
342 * transformation context, the key might need to be re-programmed
343 * into the hardware in this function. This function shall not
344 * modify the transformation context, as this function may be
345 * called in parallel with the same transformation object.
346 * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
347 * @cia_encrypt, and the conditions are exactly the same.
348 *
349 * All fields are mandatory and must be filled.
350 */
1da177e4
LT
351struct cipher_alg {
352 unsigned int cia_min_keysize;
353 unsigned int cia_max_keysize;
6c2bb98b 354 int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
560c06ae 355 unsigned int keylen);
356 void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
357 void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
358};
359
1da177e4 360struct compress_alg {
361 int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
362 unsigned int slen, u8 *dst, unsigned int *dlen);
363 int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
364 unsigned int slen, u8 *dst, unsigned int *dlen);
365};
366
17f0f4a4 367
b5b7f088 368#define cra_ablkcipher cra_u.ablkcipher
5cde0af2 369#define cra_blkcipher cra_u.blkcipher
1da177e4 370#define cra_cipher cra_u.cipher
1da177e4
LT
371#define cra_compress cra_u.compress
372
0d7f488f
SM
373/**
374 * struct crypto_alg - definition of a cryptographic cipher algorithm
375 * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
376 * CRYPTO_ALG_* flags for the flags which go in here. Those are
377 * used for fine-tuning the description of the transformation
378 * algorithm.
379 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
380 * of the smallest possible unit which can be transformed with
381 * this algorithm. The users must respect this value.
382 * In case of HASH transformation, it is possible for a smaller
383 * block than @cra_blocksize to be passed to the crypto API for
384 * transformation, in case of any other transformation type, an
385 * error will be returned upon any attempt to transform smaller
386 * than @cra_blocksize chunks.
387 * @cra_ctxsize: Size of the operational context of the transformation. This
388 * value informs the kernel crypto API about the memory size
389 * needed to be allocated for the transformation context.
390 * @cra_alignmask: Alignment mask for the input and output data buffer. The data
391 * buffer containing the input data for the algorithm must be
392 * aligned to this alignment mask. The data buffer for the
393 * output data must be aligned to this alignment mask. Note that
394 * the Crypto API will do the re-alignment in software, but
395 * only under special conditions and there is a performance hit.
396 * The re-alignment happens at these occasions for different
397 * @cra_u types: cipher -- For both input data and output data
398 * buffer; ahash -- For output hash destination buf; shash --
399 * For output hash destination buf.
400 * This is needed on hardware which is flawed by design and
401 * cannot pick data from arbitrary addresses.
402 * @cra_priority: Priority of this transformation implementation. In case
403 * multiple transformations with same @cra_name are available to
404 * the Crypto API, the kernel will use the one with highest
405 * @cra_priority.
406 * @cra_name: Generic name (usable by multiple implementations) of the
407 * transformation algorithm. This is the name of the transformation
408 * itself. This field is used by the kernel when looking up the
409 * providers of particular transformation.
410 * @cra_driver_name: Unique name of the transformation provider. This is the
411 * name of the provider of the transformation. This can be any
412 * arbitrary value, but in the usual case, this contains the
413 * name of the chip or provider and the name of the
414 * transformation algorithm.
415 * @cra_type: Type of the cryptographic transformation. This is a pointer to
416 * struct crypto_type, which implements callbacks common for all
12f7c14a 417 * transformation types. There are multiple options:
0d7f488f 418 * &crypto_blkcipher_type, &crypto_ablkcipher_type,
b0d955ba 419 * &crypto_ahash_type, &crypto_rng_type.
420 * This field might be empty. In that case, there are no common
421 * callbacks. This is the case for: cipher, compress, shash.
422 * @cra_u: Callbacks implementing the transformation. This is a union of
423 * multiple structures. Depending on the type of transformation selected
424 * by @cra_type and @cra_flags above, the associated structure must be
425 * filled with callbacks. This field might be empty. This is the case
426 * for ahash, shash.
427 * @cra_init: Initialize the cryptographic transformation object. This function
428 * is used to initialize the cryptographic transformation object.
429 * This function is called only once at the instantiation time, right
430 * after the transformation context was allocated. In case the
431 * cryptographic hardware has some special requirements which need to
432 * be handled by software, this function shall check for the precise
433 * requirement of the transformation and put any software fallbacks
434 * in place.
435 * @cra_exit: Deinitialize the cryptographic transformation object. This is a
436 * counterpart to @cra_init, used to remove various changes set in
437 * @cra_init.
438 * @cra_u.ablkcipher: Union member which contains an asynchronous block cipher
439 * definition. See @struct @ablkcipher_alg.
440 * @cra_u.blkcipher: Union member which contains a synchronous block cipher
441 * definition. See @struct @blkcipher_alg.
442 * @cra_u.cipher: Union member which contains a single-block symmetric cipher
443 * definition. See @struct @cipher_alg.
444 * @cra_u.compress: Union member which contains a (de)compression algorithm.
445 * See @struct @compress_alg.
446 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
447 * @cra_list: internally used
448 * @cra_users: internally used
449 * @cra_refcnt: internally used
450 * @cra_destroy: internally used
451 *
452 * The struct crypto_alg describes a generic Crypto API algorithm and is common
453 * for all of the transformations. Any variable not documented here shall not
454 * be used by a cipher implementation as it is internal to the Crypto API.
455 */
1da177e4
LT
456struct crypto_alg {
457 struct list_head cra_list;
458 struct list_head cra_users;
459
460 u32 cra_flags;
461 unsigned int cra_blocksize;
462 unsigned int cra_ctxsize;
95477377 463 unsigned int cra_alignmask;
464
465 int cra_priority;
ce8614a3 466 refcount_t cra_refcnt;
5cb1454b 467
468 char cra_name[CRYPTO_MAX_ALG_NAME];
469 char cra_driver_name[CRYPTO_MAX_ALG_NAME];
1da177e4 470
471 const struct crypto_type *cra_type;
472
1da177e4 473 union {
b5b7f088 474 struct ablkcipher_alg ablkcipher;
5cde0af2 475 struct blkcipher_alg blkcipher;
1da177e4 476 struct cipher_alg cipher;
477 struct compress_alg compress;
478 } cra_u;
479
480 int (*cra_init)(struct crypto_tfm *tfm);
481 void (*cra_exit)(struct crypto_tfm *tfm);
6521f302 482 void (*cra_destroy)(struct crypto_alg *alg);
483
484 struct module *cra_module;
edf18b91 485} CRYPTO_MINALIGN_ATTR;
1da177e4 486
ada69a16
GBY
487/*
488 * A helper struct for waiting for completion of async crypto ops
489 */
490struct crypto_wait {
491 struct completion completion;
492 int err;
493};
494
495/*
496 * Macro for declaring a crypto op async wait object on stack
497 */
498#define DECLARE_CRYPTO_WAIT(_wait) \
499 struct crypto_wait _wait = { \
500 COMPLETION_INITIALIZER_ONSTACK((_wait).completion), 0 }
501
502/*
503 * Async ops completion helper functions
504 */
505void crypto_req_done(struct crypto_async_request *req, int err);
506
507static inline int crypto_wait_req(int err, struct crypto_wait *wait)
508{
509 switch (err) {
510 case -EINPROGRESS:
511 case -EBUSY:
512 wait_for_completion(&wait->completion);
513 reinit_completion(&wait->completion);
514 err = wait->err;
515 break;
516 }
517
518 return err;
519}
520
521static inline void crypto_init_wait(struct crypto_wait *wait)
522{
523 init_completion(&wait->completion);
524}
525
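/*
 * Example (editor's sketch, not from the original source): synchronously
 * waiting for an asynchronous request with the helpers above, for an
 * ablkcipher_request "req" that has already been prepared with the request
 * helpers later in this file:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *					CRYPTO_TFM_REQ_MAY_SLEEP,
 *					crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_ablkcipher_encrypt(req), &wait);
 */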
1da177e4
LT
526/*
527 * Algorithm registration interface.
528 */
529int crypto_register_alg(struct crypto_alg *alg);
530int crypto_unregister_alg(struct crypto_alg *alg);
4b004346
MB
531int crypto_register_algs(struct crypto_alg *algs, int count);
532int crypto_unregister_algs(struct crypto_alg *algs, int count);
1da177e4
LT
533
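/*
 * Example (editor's sketch, not from the original source): a minimal,
 * hypothetical single-block cipher registration. All names and values are
 * illustrative only:
 *
 *	static struct crypto_alg example_alg = {
 *		.cra_name		= "example",
 *		.cra_driver_name	= "example-generic",
 *		.cra_priority		= 100,
 *		.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 *		.cra_blocksize		= 16,
 *		.cra_ctxsize		= sizeof(struct example_ctx),
 *		.cra_module		= THIS_MODULE,
 *		.cra_u.cipher		= {
 *			.cia_min_keysize	= 16,
 *			.cia_max_keysize	= 32,
 *			.cia_setkey		= example_setkey,
 *			.cia_encrypt		= example_encrypt,
 *			.cia_decrypt		= example_decrypt,
 *		},
 *	};
 *
 *	static int __init example_mod_init(void)
 *	{
 *		return crypto_register_alg(&example_alg);
 *	}
 *
 *	static void __exit example_mod_exit(void)
 *	{
 *		crypto_unregister_alg(&example_alg);
 *	}
 */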
534/*
535 * Algorithm query interface.
536 */
fce32d70 537int crypto_has_alg(const char *name, u32 type, u32 mask);
1da177e4
LT
538
539/*
540 * Transforms: user-instantiated objects which encapsulate algorithms
6d7d684d
HX
541 * and core processing logic. Managed via crypto_alloc_*() and
542 * crypto_free_*(), as well as the various helpers below.
1da177e4 543 */
1da177e4 544
32e3983f
HX
545struct ablkcipher_tfm {
546 int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
547 unsigned int keylen);
548 int (*encrypt)(struct ablkcipher_request *req);
549 int (*decrypt)(struct ablkcipher_request *req);
61da88e2 550
551 struct crypto_ablkcipher *base;
552
553 unsigned int ivsize;
554 unsigned int reqsize;
555};
556
5cde0af2
HX
557struct blkcipher_tfm {
558 void *iv;
559 int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
560 unsigned int keylen);
561 int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
562 struct scatterlist *src, unsigned int nbytes);
563 int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
564 struct scatterlist *src, unsigned int nbytes);
565};
566
1da177e4 567struct cipher_tfm {
568 int (*cit_setkey)(struct crypto_tfm *tfm,
569 const u8 *key, unsigned int keylen);
570 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
571 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
572};
573
1da177e4
LT
574struct compress_tfm {
575 int (*cot_compress)(struct crypto_tfm *tfm,
576 const u8 *src, unsigned int slen,
577 u8 *dst, unsigned int *dlen);
578 int (*cot_decompress)(struct crypto_tfm *tfm,
579 const u8 *src, unsigned int slen,
580 u8 *dst, unsigned int *dlen);
581};
582
32e3983f 583#define crt_ablkcipher crt_u.ablkcipher
5cde0af2 584#define crt_blkcipher crt_u.blkcipher
1da177e4 585#define crt_cipher crt_u.cipher
586#define crt_compress crt_u.compress
587
588struct crypto_tfm {
589
590 u32 crt_flags;
591
592 union {
32e3983f 593 struct ablkcipher_tfm ablkcipher;
5cde0af2 594 struct blkcipher_tfm blkcipher;
1da177e4 595 struct cipher_tfm cipher;
596 struct compress_tfm compress;
597 } crt_u;
598
599 void (*exit)(struct crypto_tfm *tfm);
600
601 struct crypto_alg *__crt_alg;
f10b7897 602
79911102 603 void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
604};
605
32e3983f
HX
606struct crypto_ablkcipher {
607 struct crypto_tfm base;
608};
609
5cde0af2
HX
610struct crypto_blkcipher {
611 struct crypto_tfm base;
612};
613
78a1fe4f
HX
614struct crypto_cipher {
615 struct crypto_tfm base;
616};
617
618struct crypto_comp {
619 struct crypto_tfm base;
620};
621
2b8c19db
HX
622enum {
623 CRYPTOA_UNSPEC,
624 CRYPTOA_ALG,
ebc610e5 625 CRYPTOA_TYPE,
39e1ee01 626 CRYPTOA_U32,
ebc610e5 627 __CRYPTOA_MAX,
628};
629
ebc610e5
HX
630#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)
631
39e1ee01
HX
632/* Maximum number of (rtattr) parameters for each template. */
633#define CRYPTO_MAX_ATTRS 32
634
2b8c19db
HX
635struct crypto_attr_alg {
636 char name[CRYPTO_MAX_ALG_NAME];
637};
638
ebc610e5
HX
639struct crypto_attr_type {
640 u32 type;
641 u32 mask;
642};
643
39e1ee01
HX
644struct crypto_attr_u32 {
645 u32 num;
646};
647
1da177e4
LT
648/*
649 * Transform user interface.
650 */
651
6d7d684d 652struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
7b2cd92a
HX
653void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);
654
655static inline void crypto_free_tfm(struct crypto_tfm *tfm)
656{
657 return crypto_destroy_tfm(tfm, tfm);
658}
1da177e4 659
da7f033d
HX
660int alg_test(const char *driver, const char *alg, u32 type, u32 mask);
661
1da177e4
LT
662/*
663 * Transform helpers which query the underlying algorithm.
664 */
665static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
666{
667 return tfm->__crt_alg->cra_name;
668}
669
b14cdd67
ML
670static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
671{
672 return tfm->__crt_alg->cra_driver_name;
673}
674
675static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
676{
677 return tfm->__crt_alg->cra_priority;
678}
679
1da177e4
LT
680static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
681{
682 return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
683}
684
1da177e4
LT
685static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
686{
687 return tfm->__crt_alg->cra_blocksize;
688}
689
fbdae9f3
HX
690static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
691{
692 return tfm->__crt_alg->cra_alignmask;
693}
694
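/*
 * Example (editor's sketch, not from the original source): allocating a bare
 * transform by name and querying which implementation the API selected.
 * "sha1" is only an illustration:
 *
 *	struct crypto_tfm *tfm;
 *
 *	tfm = crypto_alloc_base("sha1", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	pr_info("using %s (priority %d)\n",
 *		crypto_tfm_alg_driver_name(tfm),
 *		crypto_tfm_alg_priority(tfm));
 *	crypto_free_tfm(tfm);
 */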
f28776a3
HX
695static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
696{
697 return tfm->crt_flags;
698}
699
700static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
701{
702 tfm->crt_flags |= flags;
703}
704
705static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
706{
707 tfm->crt_flags &= ~flags;
708}
709
40725181
HX
710static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
711{
712 return tfm->__crt_ctx;
713}
714
715static inline unsigned int crypto_tfm_ctx_alignment(void)
716{
717 struct crypto_tfm *tfm;
718 return __alignof__(tfm->__crt_ctx);
719}
720
1da177e4
LT
721/*
722 * API wrappers.
723 */
32e3983f
HX
724static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
725 struct crypto_tfm *tfm)
726{
727 return (struct crypto_ablkcipher *)tfm;
728}
729
378f4f51 730static inline u32 crypto_skcipher_type(u32 type)
32e3983f 731{
ecfc4329 732 type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
32e3983f 733 type |= CRYPTO_ALG_TYPE_BLKCIPHER;
734 return type;
735}
736
737static inline u32 crypto_skcipher_mask(u32 mask)
738{
ecfc4329 739 mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
332f8840 740 mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
741 return mask;
742}
32e3983f 743
f13ec330
SM
744/**
745 * DOC: Asynchronous Block Cipher API
746 *
747 * Asynchronous block cipher API is used with the ciphers of type
748 * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
749 *
750 * Asynchronous cipher operations imply that the function invocation for a
751 * cipher request returns immediately before the completion of the operation.
752 * The cipher request is scheduled as a separate kernel thread and therefore
753 * load-balanced on the different CPUs via the process scheduler. To allow
754 * the kernel crypto API to inform the caller about the completion of a cipher
755 * request, the caller must provide a callback function. That function is
756 * invoked with the cipher handle when the request completes.
757 *
758 * To support the asynchronous operation, additional information than just the
759 * cipher handle must be supplied to the kernel crypto API. That additional
760 * information is given by filling in the ablkcipher_request data structure.
761 *
762 * For the asynchronous block cipher API, the state is maintained with the tfm
763 * cipher handle. A single tfm can be used across multiple calls and in
764 * parallel. For asynchronous block cipher calls, context data supplied and
765 * only used by the caller can be referenced in the request data structure in
766 * addition to the IV used for the cipher request. The maintenance of such
767 * state information would be important for a crypto driver implementer to
768 * have, because when calling the callback function upon completion of the
769 * cipher operation, that callback function may need some information about
770 * which operation just finished if it invoked multiple requests in parallel. This
771 * state information is unused by the kernel crypto API.
772 */
773
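/*
 * Sketch (editor's addition, not from the original source) of a completion
 * callback as described above. The per-request state "struct example_op" is
 * hypothetical and is passed via the request's data pointer; with
 * CRYPTO_TFM_REQ_MAY_BACKLOG a backlogged request first notifies the caller
 * with -EINPROGRESS when it starts executing:
 *
 *	static void example_done(struct crypto_async_request *req, int error)
 *	{
 *		struct example_op *op = req->data;
 *
 *		if (error == -EINPROGRESS)
 *			return;
 *		op->err = error;
 *		complete(&op->done);
 *	}
 */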
32e3983f
HX
774static inline struct crypto_tfm *crypto_ablkcipher_tfm(
775 struct crypto_ablkcipher *tfm)
776{
777 return &tfm->base;
778}
779
f13ec330
SM
780/**
781 * crypto_free_ablkcipher() - zeroize and free cipher handle
782 * @tfm: cipher handle to be freed
783 */
32e3983f
HX
784static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
785{
786 crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
787}
788
f13ec330
SM
789/**
790 * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
791 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
792 * ablkcipher
793 * @type: specifies the type of the cipher
794 * @mask: specifies the mask for the cipher
795 *
796 * Return: true when the ablkcipher is known to the kernel crypto API; false
797 * otherwise
798 */
32e3983f
HX
799static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
800 u32 mask)
801{
802 return crypto_has_alg(alg_name, crypto_skcipher_type(type),
803 crypto_skcipher_mask(mask));
804}
805
806static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
807 struct crypto_ablkcipher *tfm)
808{
809 return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
810}
811
f13ec330
SM
812/**
813 * crypto_ablkcipher_ivsize() - obtain IV size
814 * @tfm: cipher handle
815 *
816 * The size of the IV for the ablkcipher referenced by the cipher handle is
817 * returned. This IV size may be zero if the cipher does not need an IV.
818 *
819 * Return: IV size in bytes
820 */
32e3983f
HX
821static inline unsigned int crypto_ablkcipher_ivsize(
822 struct crypto_ablkcipher *tfm)
823{
824 return crypto_ablkcipher_crt(tfm)->ivsize;
825}
826
f13ec330
SM
827/**
828 * crypto_ablkcipher_blocksize() - obtain block size of cipher
829 * @tfm: cipher handle
830 *
831 * The block size for the ablkcipher referenced with the cipher handle is
832 * returned. The caller may use that information to allocate appropriate
833 * memory for the data returned by the encryption or decryption operation
834 *
835 * Return: block size of cipher
836 */
32e3983f
HX
837static inline unsigned int crypto_ablkcipher_blocksize(
838 struct crypto_ablkcipher *tfm)
839{
840 return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm));
841}
842
843static inline unsigned int crypto_ablkcipher_alignmask(
844 struct crypto_ablkcipher *tfm)
845{
846 return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm));
847}
848
849static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm)
850{
851 return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm));
852}
853
854static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm,
855 u32 flags)
856{
857 crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags);
858}
859
860static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
861 u32 flags)
862{
863 crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
864}
865
f13ec330
SM
866/**
867 * crypto_ablkcipher_setkey() - set key for cipher
868 * @tfm: cipher handle
869 * @key: buffer holding the key
870 * @keylen: length of the key in bytes
871 *
872 * The caller provided key is set for the ablkcipher referenced by the cipher
873 * handle.
874 *
875 * Note, the key length determines the cipher type. Many block ciphers implement
876 * different cipher variants depending on the key size, such as AES-128 vs AES-192
877 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
878 * is performed.
879 *
880 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
881 */
32e3983f
HX
882static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
883 const u8 *key, unsigned int keylen)
884{
885 struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm);
886
887 return crt->setkey(crt->base, key, keylen);
888}
889
f13ec330
SM
890/**
891 * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
892 * @req: ablkcipher_request out of which the cipher handle is to be obtained
893 *
894 * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request
895 * data structure.
896 *
897 * Return: crypto_ablkcipher handle
898 */
32e3983f
HX
899static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
900 struct ablkcipher_request *req)
901{
902 return __crypto_ablkcipher_cast(req->base.tfm);
903}
904
f13ec330
SM
905/**
906 * crypto_ablkcipher_encrypt() - encrypt plaintext
907 * @req: reference to the ablkcipher_request handle that holds all information
908 * needed to perform the cipher operation
909 *
910 * Encrypt plaintext data using the ablkcipher_request handle. That data
911 * structure and how it is filled with data is discussed with the
912 * ablkcipher_request_* functions.
913 *
914 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
915 */
32e3983f
HX
916static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
917{
918 struct ablkcipher_tfm *crt =
919 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
920 return crt->encrypt(req);
921}
922
f13ec330
SM
923/**
924 * crypto_ablkcipher_decrypt() - decrypt ciphertext
925 * @req: reference to the ablkcipher_request handle that holds all information
926 * needed to perform the cipher operation
927 *
928 * Decrypt ciphertext data using the ablkcipher_request handle. That data
929 * structure and how it is filled with data is discussed with the
930 * ablkcipher_request_* functions.
931 *
932 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
933 */
32e3983f
HX
934static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
935{
936 struct ablkcipher_tfm *crt =
937 crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
938 return crt->decrypt(req);
939}
940
f13ec330
SM
941/**
942 * DOC: Asynchronous Cipher Request Handle
943 *
944 * The ablkcipher_request data structure contains all pointers to data
945 * required for the asynchronous cipher operation. This includes the cipher
946 * handle (which can be used by multiple ablkcipher_request instances), pointer
947 * to plaintext and ciphertext, asynchronous callback function, etc. It acts
948 * as a handle to the ablkcipher_request_* API calls in a similar way as
949 * the ablkcipher handle does to the crypto_ablkcipher_* API calls.
950 */
951
952/**
953 * crypto_ablkcipher_reqsize() - obtain size of the request data structure
954 * @tfm: cipher handle
955 *
956 * Return: number of bytes
957 */
b16c3a2e
HX
958static inline unsigned int crypto_ablkcipher_reqsize(
959 struct crypto_ablkcipher *tfm)
960{
961 return crypto_ablkcipher_crt(tfm)->reqsize;
962}
963
f13ec330
SM
964/**
965 * ablkcipher_request_set_tfm() - update cipher handle reference in request
966 * @req: request handle to be modified
967 * @tfm: cipher handle that shall be added to the request handle
968 *
969 * Allow the caller to replace the existing ablkcipher handle in the request
970 * data structure with a different one.
971 */
e196d625
HX
972static inline void ablkcipher_request_set_tfm(
973 struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
974{
ecfc4329 975 req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base);
976}
977
b5b7f088
HX
978static inline struct ablkcipher_request *ablkcipher_request_cast(
979 struct crypto_async_request *req)
980{
981 return container_of(req, struct ablkcipher_request, base);
982}
983
f13ec330
SM
984/**
985 * ablkcipher_request_alloc() - allocate request data structure
986 * @tfm: cipher handle to be registered with the request
987 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
988 *
989 * Allocate the request data structure that must be used with the ablkcipher
990 * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
991 * handle is registered in the request data structure.
992 *
6eae29e7 993 * Return: allocated request handle in case of success, or NULL if out of memory
f13ec330 994 */
32e3983f
HX
995static inline struct ablkcipher_request *ablkcipher_request_alloc(
996 struct crypto_ablkcipher *tfm, gfp_t gfp)
997{
998 struct ablkcipher_request *req;
999
1000 req = kmalloc(sizeof(struct ablkcipher_request) +
1001 crypto_ablkcipher_reqsize(tfm), gfp);
1002
1003 if (likely(req))
e196d625 1004 ablkcipher_request_set_tfm(req, tfm);
1005
1006 return req;
1007}
1008
f13ec330
SM
1009/**
1010 * ablkcipher_request_free() - zeroize and free request data structure
1011 * @req: request data structure cipher handle to be freed
1012 */
32e3983f
HX
1013static inline void ablkcipher_request_free(struct ablkcipher_request *req)
1014{
aef73cfc 1015 kzfree(req);
1016}
1017
f13ec330
SM
1018/**
1019 * ablkcipher_request_set_callback() - set asynchronous callback function
1020 * @req: request handle
1021 * @flags: specify zero or an ORing of the flags
0184cfe7 1022 * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
1023 * increase the wait queue beyond the initial maximum size;
1024 * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
1025 * @compl: callback function pointer to be registered with the request handle
1026 * @data: The data pointer refers to memory that is not used by the kernel
1027 * crypto API, but provided to the callback function for it to use. Here,
1028 * the caller can provide a reference to memory the callback function can
1029 * operate on. As the callback function is invoked asynchronously to the
1030 * related functionality, it may need to access data structures of the
1031 * related functionality which can be referenced using this pointer. The
1032 * callback function can access the memory via the "data" field in the
1033 * crypto_async_request data structure provided to the callback function.
1034 *
1035 * This function allows setting the callback function that is triggered once the
1036 * cipher operation completes.
1037 *
1038 * The callback function is registered with the ablkcipher_request handle and
0184cfe7 1039 * must comply with the following template::
f13ec330
SM
1040 *
1041 * void callback_function(struct crypto_async_request *req, int error)
1042 */
32e3983f
HX
1043static inline void ablkcipher_request_set_callback(
1044 struct ablkcipher_request *req,
3e3dc25f 1045 u32 flags, crypto_completion_t compl, void *data)
32e3983f 1046{
3e3dc25f 1047 req->base.complete = compl;
1048 req->base.data = data;
1049 req->base.flags = flags;
1050}
1051
f13ec330
SM
1052/**
1053 * ablkcipher_request_set_crypt() - set data buffers
1054 * @req: request handle
1055 * @src: source scatter / gather list
1056 * @dst: destination scatter / gather list
1057 * @nbytes: number of bytes to process from @src
1058 * @iv: IV for the cipher operation which must comply with the IV size defined
1059 * by crypto_ablkcipher_ivsize
1060 *
1061 * This function allows setting of the source data and destination data
1062 * scatter / gather lists.
1063 *
1064 * For encryption, the source is treated as the plaintext and the
1065 * destination is the ciphertext. For a decryption operation, the use is
379dcfb4 1066 * reversed - the source is the ciphertext and the destination is the plaintext.
f13ec330 1067 */
32e3983f
HX
1068static inline void ablkcipher_request_set_crypt(
1069 struct ablkcipher_request *req,
1070 struct scatterlist *src, struct scatterlist *dst,
1071 unsigned int nbytes, void *iv)
1072{
1073 req->src = src;
1074 req->dst = dst;
1075 req->nbytes = nbytes;
1076 req->info = iv;
1077}
1078
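/*
 * Example (editor's sketch, not from the original source): a complete request
 * flow for an already-allocated ablkcipher handle "tfm", using the crypto_wait
 * helpers declared earlier in this file. "sg", "iv", "key", "keylen" and
 * "nbytes" are assumed to have been set up by the caller:
 *
 *	struct ablkcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	err = crypto_ablkcipher_setkey(tfm, key, keylen);
 *	if (err)
 *		return err;
 *
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	if (!req)
 *		return -ENOMEM;
 *
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *					CRYPTO_TFM_REQ_MAY_SLEEP,
 *					crypto_req_done, &wait);
 *	ablkcipher_request_set_crypt(req, sg, sg, nbytes, iv);
 *
 *	err = crypto_wait_req(crypto_ablkcipher_encrypt(req), &wait);
 *	ablkcipher_request_free(req);
 *	return err;
 */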
58284f0d
SM
1079/**
1080 * DOC: Synchronous Block Cipher API
1081 *
1082 * The synchronous block cipher API is used with the ciphers of type
1083 * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto)
1084 *
1085 * Synchronous calls have a context in the tfm. But since a single tfm can be
1086 * used in multiple calls and in parallel, this info should not be changeable
1087 * (unless a lock is used). This applies, for example, to the symmetric key.
1088 * However, the IV is changeable, so there is an iv field in blkcipher_tfm
1089 * structure for the synchronous blkcipher API. So, it's the only state info that can
1090 * be kept for synchronous calls without using a big lock across a tfm.
1091 *
1092 * The block cipher API allows the use of a complete cipher, i.e. a cipher
1093 * consisting of a template (a block chaining mode) and a single block cipher
1094 * primitive (e.g. AES).
1095 *
1096 * The plaintext data buffer and the ciphertext data buffer are pointed to
1097 * by using scatter/gather lists. The cipher operation is performed
1098 * on all segments of the provided scatter/gather lists.
1099 *
1100 * The kernel crypto API supports a cipher operation "in-place" which means that
1101 * the caller may provide the same scatter/gather list for the plaintext and
1102 * cipher text. After the completion of the cipher operation, the plaintext
1103 * data is replaced with the ciphertext data in case of an encryption and vice
1104 * versa for a decryption. The caller must ensure that the scatter/gather lists
1105 * for the output data point to sufficiently large buffers, i.e. multiples of
1106 * the block size of the cipher.
1107 */
1108
5cde0af2
HX
1109static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
1110 struct crypto_tfm *tfm)
1111{
1112 return (struct crypto_blkcipher *)tfm;
1113}
1114
1115static inline struct crypto_blkcipher *crypto_blkcipher_cast(
1116 struct crypto_tfm *tfm)
1117{
1118 BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER);
1119 return __crypto_blkcipher_cast(tfm);
1120}
1121
58284f0d
SM
1122/**
1123 * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
1124 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1125 * blkcipher cipher
1126 * @type: specifies the type of the cipher
1127 * @mask: specifies the mask for the cipher
1128 *
1129 * Allocate a cipher handle for a block cipher. The returned struct
1130 * crypto_blkcipher is the cipher handle that is required for any subsequent
1131 * API invocation for that block cipher.
1132 *
1133 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1134 * of an error, PTR_ERR() returns the error code.
1135 */
5cde0af2
HX
1136static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
1137 const char *alg_name, u32 type, u32 mask)
1138{
332f8840 1139 type &= ~CRYPTO_ALG_TYPE_MASK;
5cde0af2 1140 type |= CRYPTO_ALG_TYPE_BLKCIPHER;
332f8840 1141 mask |= CRYPTO_ALG_TYPE_MASK;
1142
1143 return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
1144}
1145
1146static inline struct crypto_tfm *crypto_blkcipher_tfm(
1147 struct crypto_blkcipher *tfm)
1148{
1149 return &tfm->base;
1150}
1151
58284f0d
SM
1152/**
1153 * crypto_free_blkcipher() - zeroize and free the block cipher handle
1154 * @tfm: cipher handle to be freed
1155 */
5cde0af2
HX
1156static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
1157{
1158 crypto_free_tfm(crypto_blkcipher_tfm(tfm));
1159}
1160
58284f0d
SM
1161/**
1162 * crypto_has_blkcipher() - Search for the availability of a block cipher
1163 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1164 * block cipher
1165 * @type: specifies the type of the cipher
1166 * @mask: specifies the mask for the cipher
1167 *
1168 * Return: true when the block cipher is known to the kernel crypto API; false
1169 * otherwise
1170 */
fce32d70
HX
1171static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
1172{
332f8840 1173 type &= ~CRYPTO_ALG_TYPE_MASK;
fce32d70 1174 type |= CRYPTO_ALG_TYPE_BLKCIPHER;
332f8840 1175 mask |= CRYPTO_ALG_TYPE_MASK;
1176
1177 return crypto_has_alg(alg_name, type, mask);
1178}
1179
58284f0d
SM
1180/**
1181 * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
1182 * @tfm: cipher handle
1183 *
1184 * Return: The character string holding the name of the cipher
1185 */
5cde0af2
HX
1186static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
1187{
1188 return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
1189}
1190
1191static inline struct blkcipher_tfm *crypto_blkcipher_crt(
1192 struct crypto_blkcipher *tfm)
1193{
1194 return &crypto_blkcipher_tfm(tfm)->crt_blkcipher;
1195}
1196
1197static inline struct blkcipher_alg *crypto_blkcipher_alg(
1198 struct crypto_blkcipher *tfm)
1199{
1200 return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
1201}
1202
58284f0d
SM
1203/**
1204 * crypto_blkcipher_ivsize() - obtain IV size
1205 * @tfm: cipher handle
1206 *
1207 * The size of the IV for the block cipher referenced by the cipher handle is
1208 * returned. This IV size may be zero if the cipher does not need an IV.
1209 *
1210 * Return: IV size in bytes
1211 */
5cde0af2
HX
1212static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
1213{
1214 return crypto_blkcipher_alg(tfm)->ivsize;
1215}
1216
58284f0d
SM
1217/**
1218 * crypto_blkcipher_blocksize() - obtain block size of cipher
1219 * @tfm: cipher handle
1220 *
1221 * The block size for the block cipher referenced with the cipher handle is
1222 * returned. The caller may use that information to allocate appropriate
1223 * memory for the data returned by the encryption or decryption operation.
1224 *
1225 * Return: block size of cipher
1226 */
5cde0af2
HX
1227static inline unsigned int crypto_blkcipher_blocksize(
1228 struct crypto_blkcipher *tfm)
1229{
1230 return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
1231}
1232
1233static inline unsigned int crypto_blkcipher_alignmask(
1234 struct crypto_blkcipher *tfm)
1235{
1236 return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
1237}
1238
1239static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
1240{
1241 return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
1242}
1243
1244static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
1245 u32 flags)
1246{
1247 crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
1248}
1249
1250static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
1251 u32 flags)
1252{
1253 crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
1254}
1255
58284f0d
SM
1256/**
1257 * crypto_blkcipher_setkey() - set key for cipher
1258 * @tfm: cipher handle
1259 * @key: buffer holding the key
1260 * @keylen: length of the key in bytes
1261 *
1262 * The caller provided key is set for the block cipher referenced by the cipher
1263 * handle.
1264 *
1265 * Note, the key length determines the cipher type. Many block ciphers implement
1266 * different cipher variants depending on the key size, such as AES-128 vs AES-192
1267 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
1268 * is performed.
1269 *
1270 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
1271 */
5cde0af2
HX
1272static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
1273 const u8 *key, unsigned int keylen)
1274{
1275 return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
1276 key, keylen);
1277}
1278
58284f0d
SM
1279/**
1280 * crypto_blkcipher_encrypt() - encrypt plaintext
1281 * @desc: reference to the block cipher handle with meta data
1282 * @dst: scatter/gather list that is filled by the cipher operation with the
1283 * ciphertext
1284 * @src: scatter/gather list that holds the plaintext
1285 * @nbytes: number of bytes of the plaintext to encrypt.
1286 *
1287 * Encrypt plaintext data using the IV set by the caller with a preceding
1288 * call of crypto_blkcipher_set_iv.
1289 *
1290 * The blkcipher_desc data structure must be filled by the caller and can
1291 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
1292 * with the block cipher handle; desc.flags is filled with either
1293 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
1294 *
1295 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1296 */
5cde0af2
HX
1297static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
1298 struct scatterlist *dst,
1299 struct scatterlist *src,
1300 unsigned int nbytes)
1301{
1302 desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
1303 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
1304}
1305
58284f0d
SM
1306/**
1307 * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
1308 * @desc: reference to the block cipher handle with meta data
1309 * @dst: scatter/gather list that is filled by the cipher operation with the
1310 * ciphertext
1311 * @src: scatter/gather list that holds the plaintext
1312 * @nbytes: number of bytes of the plaintext to encrypt.
1313 *
1314 * Encrypt plaintext data with the use of an IV that is solely used for this
1315 * cipher operation. Any previously set IV is not used.
1316 *
1317 * The blkcipher_desc data structure must be filled by the caller and can
1318 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
1319 * with the block cipher handle; desc.info is filled with the IV to be used for
1320 * the current operation; desc.flags is filled with either
1321 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
1322 *
1323 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1324 */
5cde0af2
HX
1325static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
1326 struct scatterlist *dst,
1327 struct scatterlist *src,
1328 unsigned int nbytes)
1329{
1330 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
1331}
1332
58284f0d
SM
1333/**
1334 * crypto_blkcipher_decrypt() - decrypt ciphertext
1335 * @desc: reference to the block cipher handle with meta data
1336 * @dst: scatter/gather list that is filled by the cipher operation with the
1337 * plaintext
1338 * @src: scatter/gather list that holds the ciphertext
1339 * @nbytes: number of bytes of the ciphertext to decrypt.
1340 *
1341 * Decrypt ciphertext data using the IV set by the caller with a preceding
1342 * call of crypto_blkcipher_set_iv.
1343 *
1344 * The blkcipher_desc data structure must be filled by the caller as documented
1345 * for the crypto_blkcipher_encrypt call above.
1346 *
1347 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1348 *
1349 */
5cde0af2
HX
1350static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
1351 struct scatterlist *dst,
1352 struct scatterlist *src,
1353 unsigned int nbytes)
1354{
1355 desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
1356 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
1357}
1358
58284f0d
SM
1359/**
1360 * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
1361 * @desc: reference to the block cipher handle with meta data
1362 * @dst: scatter/gather list that is filled by the cipher operation with the
1363 * plaintext
1364 * @src: scatter/gather list that holds the ciphertext
1365 * @nbytes: number of bytes of the ciphertext to decrypt.
1366 *
1367 * Decrypt ciphertext data with the use of an IV that is solely used for this
1368 * cipher operation. Any previously set IV is not used.
1369 *
1370 * The blkcipher_desc data structure must be filled by the caller as documented
1371 * for the crypto_blkcipher_encrypt_iv call above.
1372 *
1373 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1374 */
5cde0af2
HX
1375static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
1376 struct scatterlist *dst,
1377 struct scatterlist *src,
1378 unsigned int nbytes)
1379{
1380 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
1381}
1382
58284f0d
SM
1383/**
1384 * crypto_blkcipher_set_iv() - set IV for cipher
1385 * @tfm: cipher handle
1386 * @src: buffer holding the IV
1387 * @len: length of the IV in bytes
1388 *
1389 * The caller provided IV is set for the block cipher referenced by the cipher
1390 * handle.
1391 */
5cde0af2
HX
1392static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
1393 const u8 *src, unsigned int len)
1394{
1395 memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
1396}
1397
58284f0d
SM
1398/**
1399 * crypto_blkcipher_get_iv() - obtain IV from cipher
1400 * @tfm: cipher handle
1401 * @dst: buffer filled with the IV
1402 * @len: length of the buffer dst
1403 *
1404 * The caller can obtain the IV set for the block cipher referenced by the
1405 * cipher handle and store it into the user-provided buffer. If the buffer
1406 * has insufficient space, the IV is truncated to fit the buffer.
1407 */
5cde0af2
HX
1408static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
1409 u8 *dst, unsigned int len)
1410{
1411 memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
1412}
1413
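/*
 * Example (editor's sketch, not from the original source): one-shot CBC
 * encryption with the synchronous API. "cbc(aes)" is only an illustration;
 * the data must be a multiple of the block size, and "sg", "key", "keylen",
 * "iv" and "nbytes" are assumed to be set up by the caller:
 *
 *	struct crypto_blkcipher *tfm;
 *	struct blkcipher_desc desc;
 *	int err;
 *
 *	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_blkcipher_setkey(tfm, key, keylen);
 *	if (!err) {
 *		crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));
 *		desc.tfm = tfm;
 *		desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 *		err = crypto_blkcipher_encrypt(&desc, sg, sg, nbytes);
 *	}
 *	crypto_free_blkcipher(tfm);
 *	return err;
 */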
16e61030
SM
1414/**
1415 * DOC: Single Block Cipher API
1416 *
1417 * The single block cipher API is used with the ciphers of type
1418 * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
1419 *
1420 * Using the single block cipher API calls, operations with the basic cipher
1421 * primitive can be implemented. These cipher primitives exclude any block
1422 * chaining operations including IV handling.
1423 *
1424 * The purpose of this single block cipher API is to support the implementation
1425 * of templates or other concepts that only need to perform the cipher operation
1426 * on one block at a time. Templates invoke the underlying cipher primitive
1427 * block-wise and process either the input or the output data of these cipher
1428 * operations.
1429 */
1430
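/*
 * Example (editor's sketch, not from the original source): encrypting one
 * block with the single block cipher API, much as a template implementation
 * would. crypto_cipher_setkey() and crypto_cipher_encrypt_one() are declared
 * further down in this header; "aes" and the buffer size are illustrative:
 *
 *	struct crypto_cipher *tfm;
 *	u8 block[16];
 *
 *	tfm = crypto_alloc_cipher("aes", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	if (crypto_cipher_setkey(tfm, key, keylen) == 0)
 *		crypto_cipher_encrypt_one(tfm, block, block);
 *	crypto_free_cipher(tfm);
 */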
static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
{
	return (struct crypto_cipher *)tfm;
}

static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
{
	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
	return __crypto_cipher_cast(tfm);
}

/**
 * crypto_alloc_cipher() - allocate single block cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      single block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for a single block cipher. The returned struct
 * crypto_cipher is the cipher handle that is required for any subsequent API
 * invocation for that single block cipher.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *	   of an error, PTR_ERR() returns the error code.
 */
static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
							u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_CIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_cipher() - zeroize and free the single block cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_cipher(struct crypto_cipher *tfm)
{
	crypto_free_tfm(crypto_cipher_tfm(tfm));
}

/**
 * crypto_has_cipher() - Search for the availability of a single block cipher
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      single block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the single block cipher is known to the kernel crypto API;
 *	   false otherwise
 */
static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_CIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

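/*
 * Illustrative sketch (not part of this header): check whether a plain "aes"
 * single block cipher is available before depending on it. The algorithm name
 * and the -ENOENT error code are example assumptions.
 */
#if 0
static int example_need_aes_cipher(void)
{
	/* type == 0 and mask == 0 accept any implementation of "aes" */
	if (!crypto_has_cipher("aes", 0, 0))
		return -ENOENT;
	return 0;
}
#endif
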
static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
{
	return &crypto_cipher_tfm(tfm)->crt_cipher;
}

/**
 * crypto_cipher_blocksize() - obtain block size for cipher
 * @tfm: cipher handle
 *
 * The block size for the single block cipher referenced with the cipher handle
 * tfm is returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
}

static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
}

static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
{
	return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
}

static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
					   u32 flags)
{
	crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
}

static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
					     u32 flags)
{
	crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
}

/**
 * crypto_cipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the single block cipher referenced by the
 * cipher handle.
 *
 * Note, the key length determines the cipher variant. Many block ciphers
 * support several key sizes, such as AES-128 vs. AES-192 vs. AES-256. When
 * providing a 16 byte key for an AES cipher handle, AES-128 is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
						  key, keylen);
}

/**
 * crypto_cipher_encrypt_one() - encrypt one block of plaintext
 * @tfm: cipher handle
 * @dst: points to the buffer that will be filled with the ciphertext
 * @src: buffer holding the plaintext to be encrypted
 *
 * Invoke the encryption operation of one block. The caller must ensure that
 * the plaintext and ciphertext buffers are at least one block in size.
 */
static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
					     u8 *dst, const u8 *src)
{
	crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
						dst, src);
}

/**
 * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
 * @tfm: cipher handle
 * @dst: points to the buffer that will be filled with the plaintext
 * @src: buffer holding the ciphertext to be decrypted
 *
 * Invoke the decryption operation of one block. The caller must ensure that
 * the plaintext and ciphertext buffers are at least one block in size.
 */
static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
					     u8 *dst, const u8 *src)
{
	crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
						dst, src);
}

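/*
 * Illustrative usage sketch (not part of this header): run the single block
 * cipher primitive once in each direction. The "aes" algorithm name and the
 * round-trip through a single buffer are example assumptions; a real caller
 * also needs <linux/err.h>, and src/dst must each be at least
 * crypto_cipher_blocksize() bytes long.
 */
#if 0
static int example_cipher_one_block(const u8 *key, unsigned int keylen,
				    const u8 *in, u8 *out)
{
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* keylen selects the variant: 16 -> AES-128, 24 -> AES-192, 32 -> AES-256 */
	err = crypto_cipher_setkey(tfm, key, keylen);
	if (!err) {
		crypto_cipher_encrypt_one(tfm, out, in);	/* one block only */
		crypto_cipher_decrypt_one(tfm, out, out);	/* round trip */
	}

	crypto_free_cipher(tfm);
	return err;
}
#endif
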
/*
 * Synchronous compression API, used with algorithms of type
 * CRYPTO_ALG_TYPE_COMPRESS.
 */
static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
	return (struct crypto_comp *)tfm;
}

static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
{
	BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
	       CRYPTO_ALG_TYPE_MASK);
	return __crypto_comp_cast(tfm);
}

static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
						    u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_COMPRESS;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
{
	return &tfm->base;
}

static inline void crypto_free_comp(struct crypto_comp *tfm)
{
	crypto_free_tfm(crypto_comp_tfm(tfm));
}

static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_COMPRESS;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

static inline const char *crypto_comp_name(struct crypto_comp *tfm)
{
	return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
}

static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
{
	return &crypto_comp_tfm(tfm)->crt_compress;
}

static inline int crypto_comp_compress(struct crypto_comp *tfm,
					const u8 *src, unsigned int slen,
					u8 *dst, unsigned int *dlen)
{
	return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
						  src, slen, dst, dlen);
}

static inline int crypto_comp_decompress(struct crypto_comp *tfm,
					 const u8 *src, unsigned int slen,
					 u8 *dst, unsigned int *dlen)
{
	return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
						    src, slen, dst, dlen);
}

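/*
 * Illustrative usage sketch (not part of this header): compress a buffer and
 * decompress it again with the synchronous compression API. The "deflate"
 * algorithm name and the caller-supplied buffer sizes are example
 * assumptions; clen/dlen carry the available output sizes in and the produced
 * lengths out, and a real caller also needs <linux/err.h>.
 */
#if 0
static int example_comp_roundtrip(const u8 *src, unsigned int slen,
				  u8 *comp, unsigned int comp_size,
				  u8 *out, unsigned int out_size)
{
	struct crypto_comp *tfm;
	unsigned int clen = comp_size;
	unsigned int dlen = out_size;
	int err;

	tfm = crypto_alloc_comp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* on success clen is updated to the compressed length */
	err = crypto_comp_compress(tfm, src, slen, comp, &clen);
	if (!err)
		err = crypto_comp_decompress(tfm, comp, clen, out, &dlen);

	crypto_free_comp(tfm);
	return err;
}
#endif
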
#endif	/* _LINUX_CRYPTO_H */