fs/crypto/crypto.c
/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;

/**
 * fscrypt_release_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & FS_CTX_HAS_BOUNCE_BUFFER_FL && ctx->w.bounce_page) {
		mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
		ctx->w.bounce_page = NULL;
	}
	ctx->w.control_page = NULL;
	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(fscrypt_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	}
}
EXPORT_SYMBOL(fscrypt_release_ctx);

/**
 * fscrypt_get_ctx() - Gets an encryption context
 * @inode: The inode for which we are doing the crypto
 * @gfp_flags: The gfp flags for memory allocation
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; an
 * ERR_PTR() value on failure (this function never returns NULL).
 */
struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx = NULL;
	struct fscrypt_info *ci = inode->i_crypt_info;
	unsigned long flags;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
					struct fscrypt_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~FS_CTX_HAS_BOUNCE_BUFFER_FL;
	return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);

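/*
 * Typical get/release pairing, as an illustrative sketch (no call site in
 * this file does exactly this):
 *
 *	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	...encrypt into ctx->w.bounce_page...
 *	fscrypt_release_ctx(ctx);
 */
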
int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
			   u64 lblk_num, struct page *src_page,
			   struct page *dest_page, unsigned int len,
			   unsigned int offs, gfp_t gfp_flags)
{
	struct {
		__le64 index;
		u8 padding[FS_IV_SIZE - sizeof(__le64)];
	} iv;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	BUG_ON(len == 0);

	BUILD_BUG_ON(sizeof(iv) != FS_IV_SIZE);
	BUILD_BUG_ON(AES_BLOCK_SIZE != FS_IV_SIZE);
	iv.index = cpu_to_le64(lblk_num);
	memset(iv.padding, 0, sizeof(iv.padding));

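	/*
	 * The IV is the 64-bit logical block number, little-endian,
	 * zero-padded to FS_IV_SIZE (16) bytes: for lblk_num == 5 that is
	 * 05 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00. If an ESSIV tfm
	 * was set up for this inode, the plain IV is additionally encrypted
	 * below so that IVs are not predictable.
	 */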
	if (ci->ci_essiv_tfm != NULL) {
		crypto_cipher_encrypt_one(ci->ci_essiv_tfm, (u8 *)&iv,
					  (u8 *)&iv);
	}

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		crypto_req_done, &wait);

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
	if (rw == FS_DECRYPT)
		res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	else
		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(KERN_ERR
			"%s: crypto_skcipher_encrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}

struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
				       gfp_t gfp_flags)
{
	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= FS_CTX_HAS_BOUNCE_BUFFER_FL;
	return ctx->w.bounce_page;
}

/**
 * fscrypt_encrypt_page() - Encrypts a page
 * @inode: The inode for which the encryption should take place
 * @page: The page to encrypt. Must be locked for bounce-page
 *	encryption.
 * @len: Length of data to encrypt in @page and encrypted
 *	data in returned page.
 * @offs: Offset of data within @page and returned
 *	page holding encrypted data.
 * @lblk_num: Logical block number. This must be unique for multiple
 *	calls with same inode, except when overwriting
 *	previously written data.
 * @gfp_flags: The gfp flags for memory allocation
 *
 * Encrypts @page, either in-place or into a newly allocated bounce page.
 * Called on the page write path.
 *
 * Bounce page allocation is the default.
 * In this case, the contents of @page are encrypted and stored in an
 * allocated bounce page. @page has to be locked and the caller must call
 * fscrypt_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * In-place encryption is used by setting the FS_CFLG_OWN_PAGES flag in
 * fscrypt_operations. Here, the input page is returned with its content
 * encrypted.
 *
 * Return: A page containing the encrypted content on success, or an
 * ERR_PTR() value on failure.
 */
struct page *fscrypt_encrypt_page(const struct inode *inode,
				  struct page *page,
				  unsigned int len,
				  unsigned int offs,
				  u64 lblk_num, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = page;
	int err;

	BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);

	if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
		/* with inplace-encryption we just encrypt the page */
		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
					     ciphertext_page, len, offs,
					     gfp_flags);
		if (err)
			return ERR_PTR(err);

		return ciphertext_page;
	}

	BUG_ON(!PageLocked(page));

	ctx = fscrypt_get_ctx(inode, gfp_flags);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags);
	if (IS_ERR(ciphertext_page))
		goto errout;

	ctx->w.control_page = page;
	err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num,
				     page, ciphertext_page, len, offs,
				     gfp_flags);
	if (err) {
		ciphertext_page = ERR_PTR(err);
		goto errout;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;

errout:
	fscrypt_release_ctx(ctx);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);

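/*
 * A sketch of the writeback-path usage, assuming a filesystem whose block
 * size equals PAGE_SIZE (the variable names here are illustrative, not an
 * API from this file):
 *
 *	bounce = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
 *				      page->index, GFP_NOFS);
 *	if (IS_ERR(bounce))
 *		return PTR_ERR(bounce);
 *	...submit the bio against bounce instead of page...
 *	fscrypt_restore_control_page(bounce);	// after the I/O completes
 */
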
/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @inode: The corresponding inode for the page to decrypt.
 * @page: The page to decrypt. Must be locked in case
 *	it is a writeback page (FS_CFLG_OWN_PAGES unset).
 * @len: Number of bytes in @page to be decrypted.
 * @offs: Start of data in @page.
 * @lblk_num: Logical block number.
 *
 * Decrypts @page in-place.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
			 unsigned int len, unsigned int offs, u64 lblk_num)
{
	if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
		BUG_ON(!PageLocked(page));

	return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
				      len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);

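/*
 * A sketch of the read-completion usage (illustrative; decryption is done
 * from fscrypt_read_workqueue rather than in the bio endio itself, since
 * it may sleep):
 *
 *	if (fscrypt_decrypt_page(inode, page, PAGE_SIZE, 0, page->index))
 *		SetPageError(page);
 *	else
 *		SetPageUptodate(page);
 *	unlock_page(page);
 */
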
/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	int dir_has_key, cached_with_key;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	if (!IS_ENCRYPTED(d_inode(dir))) {
		dput(dir);
		return 0;
	}

	spin_lock(&dentry->d_lock);
	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
	spin_unlock(&dentry->d_lock);
	dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
	dput(dir);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name. We can't check
	 * if the key has since been made available due to locking
	 * reasons, so we fail the validation so ext4_lookup() can do
	 * this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
	    (!cached_with_key && dir_has_key) ||
	    (cached_with_key && !dir_has_key))
		return 0;
	return 1;
}

const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};

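/*
 * A sketch of how these ops get attached: a filesystem's lookup path
 * (in this era, roughly the fscrypt lookup hook) does
 *
 *	d_set_d_op(dentry, &fscrypt_d_ops);
 *
 * for dentries under encrypted directories, so names cached without the
 * key are revalidated once the key becomes available.
 */
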
void fscrypt_restore_control_page(struct page *page)
{
	struct fscrypt_ctx *ctx;

	ctx = (struct fscrypt_ctx *)page_private(page);
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	unlock_page(page);
	fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);

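/*
 * Free the pre-allocated encryption contexts and the bounce page pool.
 * Called from fscrypt_exit() and from the fscrypt_initialize() failure
 * path.
 */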
static void fscrypt_destroy(void)
{
	struct fscrypt_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
		kmem_cache_free(fscrypt_ctx_cachep, pos);
	INIT_LIST_HEAD(&fscrypt_free_ctxs);
	mempool_destroy(fscrypt_bounce_page_pool);
	fscrypt_bounce_page_pool = NULL;
}

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 * @cop_flags: fscrypt operations flags
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_initialize(unsigned int cop_flags)
{
	int i, res = -ENOMEM;

	/* No need to allocate a bounce page pool if this FS won't use it. */
	if (cop_flags & FS_CFLG_OWN_PAGES)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct fscrypt_ctx *ctx;

		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
	}

	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&fscrypt_init_mutex);
	return 0;
fail:
	fscrypt_destroy();
	mutex_unlock(&fscrypt_init_mutex);
	return res;
}

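/*
 * Note: nothing in this file calls fscrypt_initialize(); it is expected
 * to be reached from the per-inode key setup path (e.g.
 * fscrypt_get_encryption_info()) the first time an encrypted file is
 * accessed.
 */
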
/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	/*
	 * Use an unbound workqueue to allow bios to be decrypted in parallel
	 * even when they happen to complete on the same CPU. This sacrifices
	 * locality, but it's worthwhile since decryption is CPU-intensive.
	 *
	 * Also use a high-priority workqueue to prioritize decryption work,
	 * which blocks reads from completing, over regular application tasks.
	 */
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
						 WQ_UNBOUND | WQ_HIGHPRI,
						 num_online_cpus());
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
module_init(fscrypt_init)

/**
 * fscrypt_exit() - Shutdown the fs encryption system
 */
static void __exit fscrypt_exit(void)
{
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);

	fscrypt_essiv_cleanup();
}
module_exit(fscrypt_exit);

MODULE_LICENSE("GPL");