/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *      Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *      Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *      Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/crypto.h>
#include <linux/ecryptfs.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/bio.h>
#include <linux/dcache.h>
#include <linux/fscrypto.h>

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
                "Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
                "Number of crypto contexts to preallocate");

static mempool_t *fscrypt_bounce_page_pool = NULL;

static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

static struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;

/**
 * fscrypt_release_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
        unsigned long flags;

        if (ctx->flags & FS_WRITE_PATH_FL && ctx->w.bounce_page) {
                mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
                ctx->w.bounce_page = NULL;
        }
        ctx->w.control_page = NULL;
        if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
                kmem_cache_free(fscrypt_ctx_cachep, ctx);
        } else {
                spin_lock_irqsave(&fscrypt_ctx_lock, flags);
                list_add(&ctx->free_list, &fscrypt_free_ctxs);
                spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
        }
}
EXPORT_SYMBOL(fscrypt_release_ctx);

/**
 * fscrypt_get_ctx() - Gets an encryption context
 * @inode: The inode for which we are doing the crypto
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; error
 * value or NULL otherwise.
 */
struct fscrypt_ctx *fscrypt_get_ctx(struct inode *inode)
{
        struct fscrypt_ctx *ctx = NULL;
        struct fscrypt_info *ci = inode->i_crypt_info;
        unsigned long flags;

        if (ci == NULL)
                return ERR_PTR(-ENOKEY);

        /*
         * We first try getting the ctx from a free list because in
         * the common case the ctx will have an allocated and
         * initialized crypto tfm, so it's probably a worthwhile
         * optimization. For the bounce page, we first try getting it
         * from the kernel allocator because that's just about as fast
         * as getting it from a list and because a cache of free pages
         * should generally be a "last resort" option for a filesystem
         * to be able to do its job.
         */
        spin_lock_irqsave(&fscrypt_ctx_lock, flags);
        ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
                                        struct fscrypt_ctx, free_list);
        if (ctx)
                list_del(&ctx->free_list);
        spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
        if (!ctx) {
                ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
                if (!ctx)
                        return ERR_PTR(-ENOMEM);
                ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
        } else {
                ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
        }
        ctx->flags &= ~FS_WRITE_PATH_FL;
        return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);

/**
 * fscrypt_complete() - The completion callback for page encryption
 * @req: The asynchronous encryption request context
 * @res: The result of the encryption operation
 */
static void fscrypt_complete(struct crypto_async_request *req, int res)
{
        struct fscrypt_completion_result *ecr = req->data;

        if (res == -EINPROGRESS)
                return;
        ecr->res = res;
        complete(&ecr->completion);
}

typedef enum {
        FS_DECRYPT = 0,
        FS_ENCRYPT,
} fscrypt_direction_t;

static int do_page_crypto(struct inode *inode,
                        fscrypt_direction_t rw, pgoff_t index,
                        struct page *src_page, struct page *dest_page)
{
        u8 xts_tweak[FS_XTS_TWEAK_SIZE];
        struct ablkcipher_request *req = NULL;
        DECLARE_FS_COMPLETION_RESULT(ecr);
        struct scatterlist dst, src;
        struct fscrypt_info *ci = inode->i_crypt_info;
        struct crypto_ablkcipher *tfm = ci->ci_ctfm;
        int res = 0;

        req = ablkcipher_request_alloc(tfm, GFP_NOFS);
        if (!req) {
                printk_ratelimited(KERN_ERR
                                "%s: crypto_request_alloc() failed\n",
                                __func__);
                return -ENOMEM;
        }

        ablkcipher_request_set_callback(
                req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                fscrypt_complete, &ecr);

        /* The XTS tweak is the page index, zero-padded out to FS_XTS_TWEAK_SIZE. */
        BUILD_BUG_ON(FS_XTS_TWEAK_SIZE < sizeof(index));
        memcpy(xts_tweak, &index, sizeof(index));
        memset(&xts_tweak[sizeof(index)], 0,
                        FS_XTS_TWEAK_SIZE - sizeof(index));

        sg_init_table(&dst, 1);
        sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
        sg_init_table(&src, 1);
        sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
        ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
                                        xts_tweak);
        if (rw == FS_DECRYPT)
                res = crypto_ablkcipher_decrypt(req);
        else
                res = crypto_ablkcipher_encrypt(req);
        if (res == -EINPROGRESS || res == -EBUSY) {
                BUG_ON(req->base.data != &ecr);
                wait_for_completion(&ecr.completion);
                res = ecr.res;
        }
        ablkcipher_request_free(req);
        if (res) {
                printk_ratelimited(KERN_ERR
                        "%s: crypto_ablkcipher_encrypt() returned %d\n",
                        __func__, res);
                return res;
        }
        return 0;
}

static struct page *alloc_bounce_page(struct fscrypt_ctx *ctx)
{
        ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool,
                                                GFP_NOWAIT);
        if (ctx->w.bounce_page == NULL)
                return ERR_PTR(-ENOMEM);
        ctx->flags |= FS_WRITE_PATH_FL;
        return ctx->w.bounce_page;
}

/**
 * fscrypt_encrypt_page() - Encrypts a page
 * @inode: The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
 * encryption context.
 *
 * Called on the page write path. The caller must call
 * fscrypt_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success. Else, an
 * error value or NULL.
 */
struct page *fscrypt_encrypt_page(struct inode *inode,
                                struct page *plaintext_page)
{
        struct fscrypt_ctx *ctx;
        struct page *ciphertext_page = NULL;
        int err;

        BUG_ON(!PageLocked(plaintext_page));

        ctx = fscrypt_get_ctx(inode);
        if (IS_ERR(ctx))
                return (struct page *)ctx;

        /* The encryption operation will require a bounce page. */
        ciphertext_page = alloc_bounce_page(ctx);
        if (IS_ERR(ciphertext_page))
                goto errout;

        ctx->w.control_page = plaintext_page;
        err = do_page_crypto(inode, FS_ENCRYPT, plaintext_page->index,
                                        plaintext_page, ciphertext_page);
        if (err) {
                ciphertext_page = ERR_PTR(err);
                goto errout;
        }
        SetPagePrivate(ciphertext_page);
        set_page_private(ciphertext_page, (unsigned long)ctx);
        lock_page(ciphertext_page);
        return ciphertext_page;

errout:
        fscrypt_release_ctx(ctx);
        return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);
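
/*
 * Illustrative sketch, not part of the original file: one way a filesystem's
 * write path could pair fscrypt_encrypt_page() with
 * fscrypt_restore_control_page(), as required by the kernel-doc above. The
 * helper name example_encrypt_and_write() is hypothetical and the actual I/O
 * submission is left as a placeholder.
 */
static int __maybe_unused example_encrypt_and_write(struct inode *inode,
                                                struct page *plaintext_page)
{
        struct page *ciphertext_page;
        int err = 0;

        /* plaintext_page must already be locked by the caller. */
        ciphertext_page = fscrypt_encrypt_page(inode, plaintext_page);
        if (IS_ERR(ciphertext_page))
                return PTR_ERR(ciphertext_page);

        /*
         * Submit ciphertext_page for write-out here (filesystem specific,
         * omitted). Once the I/O has completed ...
         */

        /* ... release the bounce page and the encryption context. */
        fscrypt_restore_control_page(ciphertext_page);
        return err;
}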

/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_decrypt_page(struct page *page)
{
        BUG_ON(!PageLocked(page));

        return do_page_crypto(page->mapping->host,
                        FS_DECRYPT, page->index, page, page);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);

int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
                                sector_t pblk, unsigned int len)
{
        struct fscrypt_ctx *ctx;
        struct page *ciphertext_page = NULL;
        struct bio *bio;
        int ret, err = 0;

        BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE);

        ctx = fscrypt_get_ctx(inode);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ciphertext_page = alloc_bounce_page(ctx);
        if (IS_ERR(ciphertext_page)) {
                err = PTR_ERR(ciphertext_page);
                goto errout;
        }

        while (len--) {
                err = do_page_crypto(inode, FS_ENCRYPT, lblk,
                                        ZERO_PAGE(0), ciphertext_page);
                if (err)
                        goto errout;

                bio = bio_alloc(GFP_KERNEL, 1);
                if (!bio) {
                        err = -ENOMEM;
                        goto errout;
                }
                bio->bi_bdev = inode->i_sb->s_bdev;
                bio->bi_iter.bi_sector =
                        pblk << (inode->i_sb->s_blocksize_bits - 9);
                ret = bio_add_page(bio, ciphertext_page,
                                        inode->i_sb->s_blocksize, 0);
                if (ret != inode->i_sb->s_blocksize) {
                        /* should never happen! */
                        WARN_ON(1);
                        bio_put(bio);
                        err = -EIO;
                        goto errout;
                }
                err = submit_bio_wait(WRITE, bio);
                if ((err == 0) && bio->bi_error)
                        err = -EIO;
                bio_put(bio);
                if (err)
                        goto errout;
                lblk++;
                pblk++;
        }
        err = 0;
errout:
        fscrypt_release_ctx(ctx);
        return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);

/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
        struct inode *dir = d_inode(dentry->d_parent);
        struct fscrypt_info *ci = dir->i_crypt_info;
        int dir_has_key, cached_with_key;

        if (!dir->i_sb->s_cop->is_encrypted(dir))
                return 0;

        if (ci && ci->ci_keyring_key &&
            (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
                                          (1 << KEY_FLAG_REVOKED) |
                                          (1 << KEY_FLAG_DEAD))))
                ci = NULL;

        /* this should eventually be a flag in d_flags */
        spin_lock(&dentry->d_lock);
        cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
        spin_unlock(&dentry->d_lock);
        dir_has_key = (ci != NULL);

        /*
         * If the dentry was cached without the key, and it is a
         * negative dentry, it might be a valid name. We can't check
         * if the key has since been made available due to locking
         * reasons, so we fail the validation so ext4_lookup() can do
         * this check.
         *
         * We also fail the validation if the dentry was created with
         * the key present, but we no longer have the key, or vice versa.
         */
        if ((!cached_with_key && d_is_negative(dentry)) ||
            (!cached_with_key && dir_has_key) ||
            (cached_with_key && !dir_has_key))
                return 0;
        return 1;
}

const struct dentry_operations fscrypt_d_ops = {
        .d_revalidate = fscrypt_d_revalidate,
};
EXPORT_SYMBOL(fscrypt_d_ops);
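
/*
 * Illustrative sketch, not part of the original file: a filesystem's
 * ->lookup() typically attaches fscrypt_d_ops to dentries in an encrypted
 * directory so that fscrypt_d_revalidate() above runs on later lookups.
 * example_set_dentry_ops() is a hypothetical helper; ext4 and f2fs do
 * something along these lines in their lookup paths.
 */
static void __maybe_unused example_set_dentry_ops(struct inode *dir,
                                                struct dentry *dentry)
{
        /* Only encrypted directories need the revalidation hook. */
        if (dir->i_sb->s_cop->is_encrypted(dir))
                d_set_d_op(dentry, &fscrypt_d_ops);
}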

/*
 * Call fscrypt_decrypt_page on every single page, reusing the encryption
 * context.
 */
static void completion_pages(struct work_struct *work)
{
        struct fscrypt_ctx *ctx =
                container_of(work, struct fscrypt_ctx, r.work);
        struct bio *bio = ctx->r.bio;
        struct bio_vec *bv;
        int i;

        bio_for_each_segment_all(bv, bio, i) {
                struct page *page = bv->bv_page;
                int ret = fscrypt_decrypt_page(page);

                if (ret) {
                        WARN_ON_ONCE(1);
                        SetPageError(page);
                } else {
                        SetPageUptodate(page);
                }
                unlock_page(page);
        }
        fscrypt_release_ctx(ctx);
        bio_put(bio);
}

void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
{
        INIT_WORK(&ctx->r.work, completion_pages);
        ctx->r.bio = bio;
        queue_work(fscrypt_read_workqueue, &ctx->r.work);
}
EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);
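
/*
 * Illustrative sketch, not part of the original file: a read completion
 * handler might hand an encrypted bio to fscrypt_decrypt_bio_pages(), which
 * decrypts each page on fscrypt_read_workqueue and then unlocks it. The
 * context is assumed to have been obtained with fscrypt_get_ctx() and stashed
 * in bio->bi_private before submission; example_read_end_io() is a
 * hypothetical helper and error handling is omitted.
 */
static void __maybe_unused example_read_end_io(struct bio *bio)
{
        struct fscrypt_ctx *ctx = bio->bi_private;

        /*
         * On I/O errors the pages and the bio would need filesystem-specific
         * cleanup, omitted here.
         */
        if (!bio->bi_error)
                fscrypt_decrypt_bio_pages(ctx, bio);
}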

void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
        struct fscrypt_ctx *ctx;
        struct page *bounce_page;

        /* The bounce data pages are unmapped. */
        if ((*page)->mapping)
                return;

        /* The bounce data page is unmapped. */
        bounce_page = *page;
        ctx = (struct fscrypt_ctx *)page_private(bounce_page);

        /* restore control page */
        *page = ctx->w.control_page;

        if (restore)
                fscrypt_restore_control_page(bounce_page);
}
EXPORT_SYMBOL(fscrypt_pullback_bio_page);

void fscrypt_restore_control_page(struct page *page)
{
        struct fscrypt_ctx *ctx;

        ctx = (struct fscrypt_ctx *)page_private(page);
        set_page_private(page, (unsigned long)NULL);
        ClearPagePrivate(page);
        unlock_page(page);
        fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);

static void fscrypt_destroy(void)
{
        struct fscrypt_ctx *pos, *n;

        list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
                kmem_cache_free(fscrypt_ctx_cachep, pos);
        INIT_LIST_HEAD(&fscrypt_free_ctxs);
        mempool_destroy(fscrypt_bounce_page_pool);
        fscrypt_bounce_page_pool = NULL;
}

/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_initialize(void)
{
        int i, res = -ENOMEM;

        if (fscrypt_bounce_page_pool)
                return 0;

        mutex_lock(&fscrypt_init_mutex);
        if (fscrypt_bounce_page_pool)
                goto already_initialized;

        for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
                struct fscrypt_ctx *ctx;

                ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
                if (!ctx)
                        goto fail;
                list_add(&ctx->free_list, &fscrypt_free_ctxs);
        }

        fscrypt_bounce_page_pool =
                mempool_create_page_pool(num_prealloc_crypto_pages, 0);
        if (!fscrypt_bounce_page_pool)
                goto fail;

already_initialized:
        mutex_unlock(&fscrypt_init_mutex);
        return 0;
fail:
        fscrypt_destroy();
        mutex_unlock(&fscrypt_init_mutex);
        return res;
}
EXPORT_SYMBOL(fscrypt_initialize);
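
/*
 * Illustrative sketch, not part of the original file: per the kernel-doc
 * above, fscrypt_initialize() is called once encrypted files start being
 * accessed, e.g. from the key setup path, so the ctx list and bounce page
 * pool exist before any page is encrypted or decrypted.
 * example_prepare_crypt() is a hypothetical helper.
 */
static int __maybe_unused example_prepare_crypt(struct inode *inode)
{
        int res = fscrypt_initialize();

        if (res)
                return res;

        /* Key derivation and inode->i_crypt_info setup happen elsewhere. */
        return 0;
}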

/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
        fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
                                                        WQ_HIGHPRI, 0);
        if (!fscrypt_read_workqueue)
                goto fail;

        fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
        if (!fscrypt_ctx_cachep)
                goto fail_free_queue;

        fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
        if (!fscrypt_info_cachep)
                goto fail_free_ctx;

        return 0;

fail_free_ctx:
        kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
        destroy_workqueue(fscrypt_read_workqueue);
fail:
        return -ENOMEM;
}
module_init(fscrypt_init)

/**
 * fscrypt_exit() - Shutdown the fs encryption system
 */
static void __exit fscrypt_exit(void)
{
        fscrypt_destroy();

        if (fscrypt_read_workqueue)
                destroy_workqueue(fscrypt_read_workqueue);
        kmem_cache_destroy(fscrypt_ctx_cachep);
        kmem_cache_destroy(fscrypt_info_cachep);
}
module_exit(fscrypt_exit);

MODULE_LICENSE("GPL");