/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/bio.h>
#include <linux/dcache.h>
#include <linux/fscrypto.h>
#include <linux/ecryptfs.h>

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;

/**
 * fscrypt_release_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & FS_WRITE_PATH_FL && ctx->w.bounce_page) {
		mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
		ctx->w.bounce_page = NULL;
	}
	ctx->w.control_page = NULL;
	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(fscrypt_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	}
}
EXPORT_SYMBOL(fscrypt_release_ctx);

/**
 * fscrypt_get_ctx() - Gets an encryption context
 * @inode: The inode for which we are doing the crypto
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; an
 * ERR_PTR() value on failure.
 */
struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode)
{
	struct fscrypt_ctx *ctx = NULL;
	struct fscrypt_info *ci = inode->i_crypt_info;
	unsigned long flags;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
					struct fscrypt_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~FS_WRITE_PATH_FL;
	return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);

/**
 * fscrypt_complete() - The completion callback for page encryption
 * @req: The asynchronous encryption request context
 * @res: The result of the encryption operation
 */
static void fscrypt_complete(struct crypto_async_request *req, int res)
{
	struct fscrypt_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}

typedef enum {
	FS_DECRYPT = 0,
	FS_ENCRYPT,
} fscrypt_direction_t;

static int do_page_crypto(struct inode *inode,
			fscrypt_direction_t rw, pgoff_t index,
			struct page *src_page, struct page *dest_page)
{
	u8 xts_tweak[FS_XTS_TWEAK_SIZE];
	struct skcipher_request *req = NULL;
	DECLARE_FS_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = skcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(KERN_ERR
				"%s: skcipher_request_alloc() failed\n",
				__func__);
		return -ENOMEM;
	}

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		fscrypt_complete, &ecr);

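	/*
	 * Construct the XTS tweak (the IV) for this page: the raw bytes of
	 * the page index are copied into the start of the tweak buffer (in
	 * native byte order) and the remainder is zero-filled up to
	 * FS_XTS_TWEAK_SIZE.  Each page of the file therefore gets a
	 * distinct tweak under the same key.
	 */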
	BUILD_BUG_ON(FS_XTS_TWEAK_SIZE < sizeof(index));
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
			FS_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_SIZE, 0);
	skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
					xts_tweak);
	if (rw == FS_DECRYPT)
		res = crypto_skcipher_decrypt(req);
	else
		res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(KERN_ERR
			"%s: crypto_skcipher_encrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}

static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx)
{
	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool,
						GFP_NOWAIT);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= FS_WRITE_PATH_FL;
	return ctx->w.bounce_page;
}

/**
 * fscrypt_encrypt_page() - Encrypts a page
 * @inode: The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the
 * ctx encryption context.
 *
 * Called on the page write path. The caller must call
 * fscrypt_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success; an
 * ERR_PTR() value otherwise.
 */
struct page *fscrypt_encrypt_page(struct inode *inode,
				struct page *plaintext_page)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	int err;

	BUG_ON(!PageLocked(plaintext_page));

	ctx = fscrypt_get_ctx(inode);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page))
		goto errout;

	ctx->w.control_page = plaintext_page;
	err = do_page_crypto(inode, FS_ENCRYPT, plaintext_page->index,
					plaintext_page, ciphertext_page);
	if (err) {
		ciphertext_page = ERR_PTR(err);
		goto errout;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;

errout:
	fscrypt_release_ctx(ctx);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);
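
/*
 * Illustrative sketch (not part of the original file): how a filesystem's
 * writeback path might pair fscrypt_encrypt_page() with
 * fscrypt_restore_control_page().  The names example_writepage() and
 * example_write_page_sync() are hypothetical stand-ins for the
 * filesystem's own code, and the I/O is assumed to be synchronous; with
 * asynchronous I/O the bounce page must only be restored from the write
 * completion path (see fscrypt_pullback_bio_page() below).
 *
 *	static int example_writepage(struct page *page)
 *	{
 *		struct inode *inode = page->mapping->host;
 *		struct page *bounce_page;
 *		int err;
 *
 *		bounce_page = fscrypt_encrypt_page(inode, page);
 *		if (IS_ERR(bounce_page))
 *			return PTR_ERR(bounce_page);
 *
 *		err = example_write_page_sync(bounce_page);
 *
 *		fscrypt_restore_control_page(bounce_page);
 *		return err;
 *	}
 */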

/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_decrypt_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	return do_page_crypto(page->mapping->host,
			FS_DECRYPT, page->index, page, page);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);

int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
				sector_t pblk, unsigned int len)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	int ret, err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);

	ctx = fscrypt_get_ctx(inode);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = do_page_crypto(inode, FS_ENCRYPT, lblk,
					ZERO_PAGE(0), ciphertext_page);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_KERNEL, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		ret = bio_add_page(bio, ciphertext_page,
					inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		if ((err == 0) && bio->bi_error)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	fscrypt_release_ctx(ctx);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);
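
/*
 * Illustrative note (not part of the original file): a filesystem cannot
 * zero blocks of an encrypted file by simply writing a zeroed page, since
 * the on-disk contents must be valid ciphertext.  Instead it would call
 * something like:
 *
 *	err = fscrypt_zeroout_range(inode, lblk, pblk, nr_blocks);
 *
 * where lblk and pblk are the starting logical and physical block numbers
 * and nr_blocks (the "len" argument above) is the number of blocks to
 * overwrite with the encryption of zeroes.
 */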

/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	struct fscrypt_info *ci;
	int dir_has_key, cached_with_key;

	dir = dget_parent(dentry);
	if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
		dput(dir);
		return 0;
	}

	ci = d_inode(dir)->i_crypt_info;
	if (ci && ci->ci_keyring_key &&
	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
					  (1 << KEY_FLAG_REVOKED) |
					  (1 << KEY_FLAG_DEAD))))
		ci = NULL;

	/* this should eventually be a flag in d_flags */
	spin_lock(&dentry->d_lock);
	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
	spin_unlock(&dentry->d_lock);
	dir_has_key = (ci != NULL);
	dput(dir);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name.  We can't check
	 * if the key has since been made available due to locking
	 * reasons, so we fail the validation so ext4_lookup() can do
	 * this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
	    (!cached_with_key && dir_has_key) ||
	    (cached_with_key && !dir_has_key))
		return 0;
	return 1;
}

const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};
EXPORT_SYMBOL(fscrypt_d_ops);
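
/*
 * Illustrative sketch (not part of the original file): a filesystem would
 * typically attach fscrypt_d_ops in its ->lookup() for dentries created
 * under an encrypted directory, e.g.:
 *
 *	if (dir_is_encrypted)
 *		d_set_d_op(dentry, &fscrypt_d_ops);
 *
 * so that fscrypt_d_revalidate() above can drop stale negative or no-key
 * dentries once the directory's key is added or removed.  Here
 * "dir_is_encrypted" stands in for the filesystem's own check, such as
 * its ->is_encrypted() hook in s_cop.
 */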

/*
 * Call fscrypt_decrypt_page on every single page, reusing the encryption
 * context.
 */
static void completion_pages(struct work_struct *work)
{
	struct fscrypt_ctx *ctx =
		container_of(work, struct fscrypt_ctx, r.work);
	struct bio *bio = ctx->r.bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		int ret = fscrypt_decrypt_page(page);

		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	fscrypt_release_ctx(ctx);
	bio_put(bio);
}

void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
{
	INIT_WORK(&ctx->r.work, completion_pages);
	ctx->r.bio = bio;
	queue_work(fscrypt_read_workqueue, &ctx->r.work);
}
EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);
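
/*
 * Illustrative sketch (not part of the original file): how a filesystem's
 * buffered read path might combine fscrypt_get_ctx() with
 * fscrypt_decrypt_bio_pages().  A context is allocated when the read bio
 * is built and stashed where the completion handler can find it; on I/O
 * completion the bio is handed off so its pages are decrypted on the
 * read workqueue.  The name example_read_end_io() and the use of
 * bi_private are hypothetical (real callers keep the context in their own
 * completion state), and per-page error handling is omitted.
 *
 *	At submission time:
 *
 *		ctx = fscrypt_get_ctx(inode);
 *		if (IS_ERR(ctx))
 *			return PTR_ERR(ctx);
 *		bio->bi_private = ctx;
 *		submit_bio(READ, bio);
 *
 *	In the bio completion handler:
 *
 *		static void example_read_end_io(struct bio *bio)
 *		{
 *			struct fscrypt_ctx *ctx = bio->bi_private;
 *
 *			if (bio->bi_error)
 *				fscrypt_release_ctx(ctx);
 *			else
 *				fscrypt_decrypt_bio_pages(ctx, bio);
 *		}
 */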

void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
	struct fscrypt_ctx *ctx;
	struct page *bounce_page;

	/* The bounce data pages are unmapped. */
	if ((*page)->mapping)
		return;

	/* The bounce data page is unmapped. */
	bounce_page = *page;
	ctx = (struct fscrypt_ctx *)page_private(bounce_page);

	/* restore control page */
	*page = ctx->w.control_page;

	if (restore)
		fscrypt_restore_control_page(bounce_page);
}
EXPORT_SYMBOL(fscrypt_pullback_bio_page);

void fscrypt_restore_control_page(struct page *page)
{
	struct fscrypt_ctx *ctx;

	ctx = (struct fscrypt_ctx *)page_private(page);
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	unlock_page(page);
	fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);

static void fscrypt_destroy(void)
{
	struct fscrypt_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
		kmem_cache_free(fscrypt_ctx_cachep, pos);
	INIT_LIST_HEAD(&fscrypt_free_ctxs);
	mempool_destroy(fscrypt_bounce_page_pool);
	fscrypt_bounce_page_pool = NULL;
}

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_initialize(void)
{
	int i, res = -ENOMEM;

	if (fscrypt_bounce_page_pool)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct fscrypt_ctx *ctx;

		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
	}

	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&fscrypt_init_mutex);
	return 0;
fail:
	fscrypt_destroy();
	mutex_unlock(&fscrypt_init_mutex);
	return res;
}
EXPORT_SYMBOL(fscrypt_initialize);

/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
							WQ_HIGHPRI, 0);
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
module_init(fscrypt_init)

/**
 * fscrypt_exit() - Shutdown the fs encryption system
 */
static void __exit fscrypt_exit(void)
{
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);
}
module_exit(fscrypt_exit);

MODULE_LICENSE("GPL");