btrfs: initial fsverity support
fs/btrfs/verity.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/rwsem.h>
#include <linux/xattr.h>
#include <linux/security.h>
#include <linux/posix_acl_xattr.h>
#include <linux/iversion.h>
#include <linux/fsverity.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "btrfs_inode.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"

/*
 * Implementation of the interface defined in struct fsverity_operations.
 *
 * The main question is how and where to store the verity descriptor and the
 * Merkle tree. We store both in dedicated btree items in the filesystem tree,
 * together with the rest of the inode metadata. This means we'll need to do
 * extra work to encrypt them once encryption is supported in btrfs, but btrfs
 * has a lot of careful code around i_size and it seems better to make a new key
 * type than try and adjust all of our expectations for i_size.
 *
 * Note that this differs from the implementation in ext4 and f2fs, where
 * this data is stored as if it were in the file, but past EOF. However, btrfs
 * does not have a widespread mechanism for caching opaque metadata pages, so we
 * do pretend that the Merkle tree pages themselves are past EOF for the
 * purposes of caching them (as opposed to creating a virtual inode).
 *
 * fs verity items are stored under two different key types on disk.
 * The descriptor items:
 * [ inode objectid, BTRFS_VERITY_DESC_ITEM_KEY, offset ]
 *
 * At offset 0, we store a btrfs_verity_descriptor_item which tracks the
 * size of the descriptor item and some extra data for encryption.
 * Starting at offset 1, these hold the generic fs verity descriptor.
 * The latter are opaque to btrfs, we just read and write them as a blob for
 * the higher level verity code. The most common descriptor size is 256 bytes.
 *
 * The merkle tree items:
 * [ inode objectid, BTRFS_VERITY_MERKLE_ITEM_KEY, offset ]
 *
 * These also start at offset 0, and correspond to the merkle tree bytes.
 * So when fsverity asks for page 0 of the merkle tree, we pull up one page
 * starting at offset 0 for this key type. These are also opaque to btrfs,
 * we're blindly storing whatever fsverity sends down.
 */
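
/*
 * Illustrative layout example (added for exposition, not part of the original
 * change; the inode number and sizes are hypothetical). For an inode with
 * objectid 257, a 256 byte descriptor and an 8K Merkle tree, the items end up
 * roughly as:
 *
 *   (257, BTRFS_VERITY_DESC_ITEM_KEY, 0)      -> struct btrfs_verity_descriptor_item
 *   (257, BTRFS_VERITY_DESC_ITEM_KEY, 1)      -> 256 byte fsverity descriptor blob
 *   (257, BTRFS_VERITY_MERKLE_ITEM_KEY, 0)    -> first 2K of Merkle tree data
 *   (257, BTRFS_VERITY_MERKLE_ITEM_KEY, 2048) -> next 2K
 *   (257, BTRFS_VERITY_MERKLE_ITEM_KEY, 4096) -> next 2K
 *   (257, BTRFS_VERITY_MERKLE_ITEM_KEY, 6144) -> last 2K
 *
 * The 2K chunking comes from write_key_bytes() below.
 */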

#define MERKLE_START_ALIGN 65536

/*
 * Compute the logical file offset where we cache the Merkle tree.
 *
 * @inode: inode of the verity file
 *
 * For the purposes of caching the Merkle tree pages, as required by
 * fs-verity, it is convenient to do size computations in terms of a file
 * offset, rather than in terms of page indices.
 *
 * Use 64K to be sure it's past the last page in the file, even with 64K pages.
 * That rounding operation itself can overflow loff_t, so we do it in u64 and
 * check.
 *
 * Returns the file offset on success, negative error code on failure.
 */
static loff_t merkle_file_pos(const struct inode *inode)
{
	u64 sz = inode->i_size;
	u64 rounded = round_up(sz, MERKLE_START_ALIGN);

	if (rounded > inode->i_sb->s_maxbytes)
		return -EFBIG;

	return rounded;
}
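
/*
 * Worked example (added for exposition, values are hypothetical): with 4K
 * pages and i_size == 70000, round_up(70000, 65536) == 131072, so the Merkle
 * tree is cached starting at file offset 128K, i.e. page index 32.
 */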

/*
 * Drop all the items for this inode with this key_type.
 *
 * @inode: inode to drop items for
 * @key_type: type of items to drop (BTRFS_VERITY_DESC_ITEM or
 *            BTRFS_VERITY_MERKLE_ITEM)
 *
 * Before doing a verity enable we clean up any existing verity items.
 * This is also used to clean up if a verity enable failed halfway through.
 *
 * Returns number of dropped items on success, negative error code on failure.
 */
static int drop_verity_items(struct btrfs_inode *inode, u8 key_type)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = inode->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int count = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		/* 1 for the item being dropped */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			goto out;
		}

		/*
		 * Walk backwards through all the items until we find one that
		 * isn't from our key type or objectid
		 */
		key.objectid = btrfs_ino(inode);
		key.type = key_type;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			ret = 0;
			/* No more keys of this type, we're done */
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			btrfs_end_transaction(trans);
			goto out;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		/* No more keys of this type, we're done */
		if (key.objectid != btrfs_ino(inode) || key.type != key_type)
			break;

		/*
		 * This shouldn't be a performance sensitive function because
		 * it's not used as part of truncate. If it ever becomes
		 * perf sensitive, change this to walk forward and bulk delete
		 * items
		 */
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
		if (ret) {
			btrfs_end_transaction(trans);
			goto out;
		}
		count++;
		btrfs_release_path(path);
		btrfs_end_transaction(trans);
	}
	ret = count;
	btrfs_end_transaction(trans);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Drop all verity items
 *
 * @inode: inode to drop verity items for
 *
 * In most contexts where we are dropping verity items, we want to do it for all
 * the types of verity items, not a particular one.
 *
 * Returns: 0 on success, negative error code on failure.
 */
int btrfs_drop_verity_items(struct btrfs_inode *inode)
{
	int ret;

	ret = drop_verity_items(inode, BTRFS_VERITY_DESC_ITEM_KEY);
	if (ret < 0)
		return ret;
	ret = drop_verity_items(inode, BTRFS_VERITY_MERKLE_ITEM_KEY);
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * Insert and write inode items with a given key type and offset.
 *
 * @inode: inode to insert for
 * @key_type: key type to insert
 * @offset: item offset to insert at
 * @src: source data to write
 * @len: length of source data to write
 *
 * Write len bytes from src into items of up to 2K length.
 * The inserted items will have key (ino, key_type, offset + off) where off is
 * consecutively increasing from 0 up to the last item ending at offset + len.
 *
 * Returns 0 on success and a negative error code on failure.
 */
static int write_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
			   const char *src, u64 len)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long copy_bytes;
	unsigned long src_offset = 0;
	void *data;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (len > 0) {
		/* 1 for the new item being inserted */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}

		key.objectid = btrfs_ino(inode);
		key.type = key_type;
		key.offset = offset;

		/*
		 * Insert 2K at a time mostly to be friendly for smaller leaf
		 * size filesystems
		 */
		copy_bytes = min_t(u64, len, 2048);

		ret = btrfs_insert_empty_item(trans, root, path, &key, copy_bytes);
		if (ret) {
			btrfs_end_transaction(trans);
			break;
		}

		leaf = path->nodes[0];

		data = btrfs_item_ptr(leaf, path->slots[0], void);
		write_extent_buffer(leaf, src + src_offset,
				    (unsigned long)data, copy_bytes);
		offset += copy_bytes;
		src_offset += copy_bytes;
		len -= copy_bytes;

		btrfs_release_path(path);
		btrfs_end_transaction(trans);
	}

	btrfs_free_path(path);
	return ret;
}
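
/*
 * Worked example (added for exposition, the length is hypothetical): a call
 * write_key_bytes(inode, BTRFS_VERITY_MERKLE_ITEM_KEY, 0, src, 5000) inserts
 * three items with keys (ino, BTRFS_VERITY_MERKLE_ITEM_KEY, 0 / 2048 / 4096)
 * of 2048, 2048 and 904 bytes respectively, each in its own transaction.
 */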

/*
 * Read inode items of the given key type and offset from the btree.
 *
 * @inode: inode to read items of
 * @key_type: key type to read
 * @offset: item offset to read from
 * @dest: Buffer to read into. This parameter has slightly tricky
 *        semantics. If it is NULL, the function will not do any copying
 *        and will just return the size of all the items up to len bytes.
 *        If dest_page is passed, then the function will kmap_local the
 *        page and ignore dest, but it must still be non-NULL to avoid the
 *        counting-only behavior.
 * @len: length in bytes to read
 * @dest_page: copy into this page instead of the dest buffer
 *
 * Helper function to read items from the btree. This returns the number of
 * bytes read or < 0 for errors. We can return short reads if the items don't
 * exist on disk or aren't big enough to fill the desired length. Supports
 * reading into a provided buffer (dest) or into the page cache.
 *
 * Returns number of bytes read or a negative error code on failure.
 */
static int read_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
			  char *dest, u64 len, struct page *dest_page)
{
	struct btrfs_path *path;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 item_end;
	u64 copy_end;
	int copied = 0;
	u32 copy_offset;
	unsigned long copy_bytes;
	unsigned long dest_offset = 0;
	void *data;
	char *kaddr = dest;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (dest_page)
		path->reada = READA_FORWARD;

	key.objectid = btrfs_ino(inode);
	key.type = key_type;
	key.offset = offset;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		ret = 0;
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}

	while (len > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != btrfs_ino(inode) || key.type != key_type)
			break;

		item_end = btrfs_item_size_nr(leaf, path->slots[0]) + key.offset;

		if (copied > 0) {
			/*
			 * Once we've copied something, we want all of the items
			 * to be sequential
			 */
			if (key.offset != offset)
				break;
		} else {
			/*
			 * Our initial offset might be in the middle of an
			 * item. Make sure it all makes sense.
			 */
			if (key.offset > offset)
				break;
			if (item_end <= offset)
				break;
		}

		/* dest == NULL means we just sum all the item lengths */
		if (!dest)
			copy_end = item_end;
		else
			copy_end = min(offset + len, item_end);

		/* Number of bytes in this item we want to copy */
		copy_bytes = copy_end - offset;

		/* Offset from the start of item for copying */
		copy_offset = offset - key.offset;

		if (dest) {
			if (dest_page)
				kaddr = kmap_local_page(dest_page);

			data = btrfs_item_ptr(leaf, path->slots[0], void);
			read_extent_buffer(leaf, kaddr + dest_offset,
					   (unsigned long)data + copy_offset,
					   copy_bytes);

			if (dest_page)
				kunmap_local(kaddr);
		}

		offset += copy_bytes;
		dest_offset += copy_bytes;
		len -= copy_bytes;
		copied += copy_bytes;

		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			/*
			 * We've reached the last slot in this leaf and we need
			 * to go to the next leaf.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				break;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
		}
	}
out:
	btrfs_free_path(path);
	if (!ret)
		ret = copied;
	return ret;
}
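
/*
 * Summary of the calling conventions above (added for exposition):
 *
 *   read_key_bytes(inode, type, off, NULL, len, NULL)  - only count the bytes
 *                                                        present, no copying
 *   read_key_bytes(inode, type, off, buf, len, NULL)   - copy into buf
 *   read_key_bytes(inode, type, off, page_address(p),
 *                  len, p)                             - copy into page p via
 *                                                        kmap_local
 */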

/*
 * Rollback in-progress verity if we encounter an error.
 *
 * @inode: inode verity had an error for
 *
 * We try to handle recoverable errors while enabling verity by rolling it back
 * and just failing the operation, rather than having an fs level error no
 * matter what. However, any error in rollback is unrecoverable.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int rollback_verity(struct btrfs_inode *inode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = inode->root;
	int ret;

	ASSERT(inode_is_locked(&inode->vfs_inode));
	truncate_inode_pages(inode->vfs_inode.i_mapping, inode->vfs_inode.i_size);
	clear_bit(BTRFS_INODE_VERITY_IN_PROGRESS, &inode->runtime_flags);
	ret = btrfs_drop_verity_items(inode);
	if (ret) {
		btrfs_handle_fs_error(root->fs_info, ret,
				"failed to drop verity items in rollback %llu",
				(u64)inode->vfs_inode.i_ino);
		goto out;
	}

	/* 1 for updating the inode flag */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_handle_fs_error(root->fs_info, ret,
				"failed to start transaction in verity rollback %llu",
				(u64)inode->vfs_inode.i_ino);
		goto out;
	}
	inode->ro_flags &= ~BTRFS_INODE_RO_VERITY;
	btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
	ret = btrfs_update_inode(trans, root, inode);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	btrfs_end_transaction(trans);
out:
	return ret;
}

/*
 * Finalize making the file a valid verity file
 *
 * @inode: inode to be marked as verity
 * @desc: contents of the verity descriptor to write (not NULL)
 * @desc_size: size of the verity descriptor
 *
 * Do the actual work of finalizing verity after successfully writing the Merkle
 * tree:
 *
 * - write out the descriptor items
 * - mark the inode with the verity flag
 * - mark the ro compat bit
 * - clear the in progress bit
 *
 * Returns 0 on success, negative error code on failure.
 */
static int finish_verity(struct btrfs_inode *inode, const void *desc,
			 size_t desc_size)
{
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *root = inode->root;
	struct btrfs_verity_descriptor_item item;
	int ret;

	/* Write out the descriptor item */
	memset(&item, 0, sizeof(item));
	btrfs_set_stack_verity_descriptor_size(&item, desc_size);
	ret = write_key_bytes(inode, BTRFS_VERITY_DESC_ITEM_KEY, 0,
			      (const char *)&item, sizeof(item));
	if (ret)
		goto out;

	/* Write out the descriptor itself */
	ret = write_key_bytes(inode, BTRFS_VERITY_DESC_ITEM_KEY, 1,
			      desc, desc_size);
	if (ret)
		goto out;

	/* 1 for updating the inode flag */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}
	inode->ro_flags |= BTRFS_INODE_RO_VERITY;
	btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
	ret = btrfs_update_inode(trans, root, inode);
	if (ret)
		goto end_trans;
	clear_bit(BTRFS_INODE_VERITY_IN_PROGRESS, &inode->runtime_flags);
	btrfs_set_fs_compat_ro(root->fs_info, VERITY);
end_trans:
	btrfs_end_transaction(trans);
out:
	return ret;
}

/*
 * fsverity op that begins enabling verity.
 *
 * @filp: file to enable verity on
 *
 * Begin enabling fsverity for the file. We drop any existing verity items
 * and set the in progress bit.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int btrfs_begin_enable_verity(struct file *filp)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(filp));
	int ret;

	ASSERT(inode_is_locked(file_inode(filp)));

	if (test_bit(BTRFS_INODE_VERITY_IN_PROGRESS, &inode->runtime_flags))
		return -EBUSY;

	ret = btrfs_drop_verity_items(inode);
	if (ret)
		return ret;

	set_bit(BTRFS_INODE_VERITY_IN_PROGRESS, &inode->runtime_flags);

	return 0;
}

/*
 * fsverity op that ends enabling verity.
 *
 * @filp: file we are finishing enabling verity on
 * @desc: verity descriptor to write out (NULL in error conditions)
 * @desc_size: size of the verity descriptor (variable with signatures)
 * @merkle_tree_size: size of the merkle tree in bytes
 *
 * If desc is NULL, then VFS is signaling an error occurred during verity
 * enable, and we should try to rollback. Otherwise, attempt to finish verity.
 *
 * Returns 0 on success, negative error code on error.
 */
static int btrfs_end_enable_verity(struct file *filp, const void *desc,
				   size_t desc_size, u64 merkle_tree_size)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(filp));
	int ret = 0;
	int rollback_ret;

	ASSERT(inode_is_locked(file_inode(filp)));

	if (desc == NULL)
		goto rollback;

	ret = finish_verity(inode, desc, desc_size);
	if (ret)
		goto rollback;
	return ret;

rollback:
	rollback_ret = rollback_verity(inode);
	if (rollback_ret)
		btrfs_err(inode->root->fs_info,
			  "failed to rollback verity items: %d", rollback_ret);
	return ret;
}

/*
 * fsverity op that gets the struct fsverity_descriptor.
 *
 * @inode: inode to get the descriptor of
 * @buf: output buffer for the descriptor contents
 * @buf_size: size of the output buffer. 0 to query the size
 *
 * fsverity does a two-pass setup for reading the descriptor: in the first pass
 * it calls with buf_size = 0 to query the size of the descriptor, and in the
 * second pass it actually reads the descriptor off disk.
 *
 * Returns the size on success or a negative error code on failure.
 */
static int btrfs_get_verity_descriptor(struct inode *inode, void *buf,
				       size_t buf_size)
{
	u64 true_size;
	int ret = 0;
	struct btrfs_verity_descriptor_item item;

	memset(&item, 0, sizeof(item));
	ret = read_key_bytes(BTRFS_I(inode), BTRFS_VERITY_DESC_ITEM_KEY, 0,
			     (char *)&item, sizeof(item), NULL);
	if (ret < 0)
		return ret;

	if (item.reserved[0] != 0 || item.reserved[1] != 0)
		return -EUCLEAN;

	true_size = btrfs_stack_verity_descriptor_size(&item);
	if (true_size > INT_MAX)
		return -EUCLEAN;

	if (buf_size == 0)
		return true_size;
	if (buf_size < true_size)
		return -ERANGE;

	ret = read_key_bytes(BTRFS_I(inode), BTRFS_VERITY_DESC_ITEM_KEY, 1,
			     buf, buf_size, NULL);
	if (ret < 0)
		return ret;
	if (ret != true_size)
		return -EIO;

	return true_size;
}

/*
 * fsverity op that reads and caches a merkle tree page.
 *
 * @inode: inode to read a merkle tree page for
 * @index: page index relative to the start of the merkle tree
 * @num_ra_pages: number of pages to readahead. Optional, we ignore it
 *
 * The Merkle tree is stored in the filesystem btree, but its pages are cached
 * with a logical position past EOF in the inode's mapping.
 *
 * Returns the page we read, or an ERR_PTR on error.
 */
static struct page *btrfs_read_merkle_tree_page(struct inode *inode,
						pgoff_t index,
						unsigned long num_ra_pages)
{
	struct page *page;
	u64 off = (u64)index << PAGE_SHIFT;
	loff_t merkle_pos = merkle_file_pos(inode);
	int ret;

	if (merkle_pos < 0)
		return ERR_PTR(merkle_pos);
	if (merkle_pos > inode->i_sb->s_maxbytes - off - PAGE_SIZE)
		return ERR_PTR(-EFBIG);
	index += merkle_pos >> PAGE_SHIFT;
again:
	page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
	if (page) {
		if (PageUptodate(page))
			return page;

		lock_page(page);
		/*
		 * We only insert uptodate pages, so !Uptodate has to be
		 * an error
		 */
		if (!PageUptodate(page)) {
			unlock_page(page);
			put_page(page);
			return ERR_PTR(-EIO);
		}
		unlock_page(page);
		return page;
	}

	page = __page_cache_alloc(mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
	if (!page)
		return ERR_PTR(-ENOMEM);

	/*
	 * Merkle item keys are indexed from byte 0 in the merkle tree.
	 * They have the form:
	 *
	 * [ inode objectid, BTRFS_VERITY_MERKLE_ITEM_KEY, offset in bytes ]
	 */
	ret = read_key_bytes(BTRFS_I(inode), BTRFS_VERITY_MERKLE_ITEM_KEY, off,
			     page_address(page), PAGE_SIZE, page);
	if (ret < 0) {
		put_page(page);
		return ERR_PTR(ret);
	}
	if (ret < PAGE_SIZE)
		memzero_page(page, ret, PAGE_SIZE - ret);

	SetPageUptodate(page);
	ret = add_to_page_cache_lru(page, inode->i_mapping, index, GFP_NOFS);

	if (!ret) {
		/* Inserted and ready for fsverity */
		unlock_page(page);
	} else {
		put_page(page);
		/* Did someone race us into inserting this page? */
		if (ret == -EEXIST)
			goto again;
		page = ERR_PTR(ret);
	}
	return page;
}
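
/*
 * Worked example (added for exposition): with 4K pages and
 * merkle_file_pos() == 64K, a request for Merkle tree page index 3 reads
 * bytes [12288, 16384) of the tree from the btree items but caches the
 * resulting page at index 16 + 3 = 19 in the inode's mapping.
 */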

/*
 * fsverity op that writes a Merkle tree block into the btree.
 *
 * @inode: inode to write a Merkle tree block for
 * @buf: Merkle tree data block to write
 * @index: index of the block in the Merkle tree
 * @log_blocksize: log base 2 of the Merkle tree block size
 *
 * Note that the block size could be different from the page size, so it is not
 * safe to assume that index is a page index.
 *
 * Returns 0 on success or negative error code on failure
 */
static int btrfs_write_merkle_tree_block(struct inode *inode, const void *buf,
					 u64 index, int log_blocksize)
{
	u64 off = index << log_blocksize;
	u64 len = 1ULL << log_blocksize;
	loff_t merkle_pos = merkle_file_pos(inode);

	if (merkle_pos < 0)
		return merkle_pos;
	if (merkle_pos > inode->i_sb->s_maxbytes - off - len)
		return -EFBIG;

	return write_key_bytes(BTRFS_I(inode), BTRFS_VERITY_MERKLE_ITEM_KEY,
			       off, buf, len);
}
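
/*
 * Worked example (added for exposition): with log_blocksize == 12 (4K Merkle
 * tree blocks), block index 3 is written as 4096 bytes at Merkle item offset
 * 12288, which write_key_bytes() splits into two 2K items.
 */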

const struct fsverity_operations btrfs_verityops = {
	.begin_enable_verity = btrfs_begin_enable_verity,
	.end_enable_verity = btrfs_end_enable_verity,
	.get_verity_descriptor = btrfs_get_verity_descriptor,
	.read_merkle_tree_page = btrfs_read_merkle_tree_page,
	.write_merkle_tree_block = btrfs_write_merkle_tree_block,
};
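
/*
 * Usage sketch (added for exposition, user space, not part of this file):
 * once the VERITY ro-compat support is wired up, verity is enabled through
 * the generic fs-verity ioctl, which in turn drives the ops above
 * (btrfs_begin_enable_verity(), the Merkle tree writes, then
 * btrfs_end_enable_verity()):
 *
 *	struct fsverity_enable_arg arg = {
 *		.version = 1,
 *		.hash_algorithm = FS_VERITY_HASH_ALG_SHA256,
 *		.block_size = 4096,
 *	};
 *	int fd = open("file", O_RDONLY);
 *
 *	ioctl(fd, FS_IOC_ENABLE_VERITY, &arg);
 */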