// SPDX-License-Identifier: GPL-2.0
/*
 * Data verification functions, i.e. hooks for ->readahead()
 *
 * Copyright 2019 Google LLC
 */

#include "fsverity_private.h"

#include <crypto/hash.h>
#include <linux/bio.h>

static struct workqueue_struct *fsverity_read_workqueue;

/*
 * Returns true if the hash block with index @hblock_idx in the tree, located in
 * @hpage, has already been verified.
 */
static bool is_hash_block_verified(struct fsverity_info *vi, struct page *hpage,
				   unsigned long hblock_idx)
{
	bool verified;
	unsigned int blocks_per_page;
	unsigned int i;

	/*
	 * When the Merkle tree block size and page size are the same, then the
	 * ->hash_block_verified bitmap isn't allocated, and we use PG_checked
	 * to directly indicate whether the page's block has been verified.
	 *
	 * Using PG_checked also guarantees that we re-verify hash pages that
	 * get evicted and re-instantiated from the backing storage, as new
	 * pages always start out with PG_checked cleared.
	 */
	if (!vi->hash_block_verified)
		return PageChecked(hpage);

	/*
	 * When the Merkle tree block size and page size differ, we use a bitmap
	 * to indicate whether each hash block has been verified.
	 *
	 * However, we still need to ensure that hash pages that get evicted and
	 * re-instantiated from the backing storage are re-verified.  To do
	 * this, we use PG_checked again, but now it doesn't really mean
	 * "checked".  Instead, now it just serves as an indicator for whether
	 * the hash page is newly instantiated or not.
	 *
	 * The first thread that sees PG_checked=0 must clear the corresponding
	 * bitmap bits, then set PG_checked=1.  This requires a spinlock.  To
	 * avoid having to take this spinlock in the common case of
	 * PG_checked=1, we start with an opportunistic lockless read.
	 */
	if (PageChecked(hpage)) {
		/*
		 * A read memory barrier is needed here to give ACQUIRE
		 * semantics to the above PageChecked() test.
		 */
		smp_rmb();
		return test_bit(hblock_idx, vi->hash_block_verified);
	}
	spin_lock(&vi->hash_page_init_lock);
	if (PageChecked(hpage)) {
		verified = test_bit(hblock_idx, vi->hash_block_verified);
	} else {
		blocks_per_page = vi->tree_params.blocks_per_page;
		hblock_idx = round_down(hblock_idx, blocks_per_page);
		for (i = 0; i < blocks_per_page; i++)
			clear_bit(hblock_idx + i, vi->hash_block_verified);
		/*
		 * A write memory barrier is needed here to give RELEASE
		 * semantics to the below SetPageChecked() operation.
		 */
		smp_wmb();
		SetPageChecked(hpage);
		verified = false;
	}
	spin_unlock(&vi->hash_page_init_lock);
	return verified;
}
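
/*
 * Illustrative note (not part of the original file): the smp_wmb()/smp_rmb()
 * pair above acts like a publish/subscribe handshake.  For hash block 0 of a
 * freshly instantiated page:
 *
 *	initializing thread			lockless fast path
 *	-------------------			------------------
 *	clear_bit(0, bitmap);
 *	smp_wmb();				if (PageChecked(hpage)) {
 *	SetPageChecked(hpage);				smp_rmb();
 *							test_bit(0, bitmap);
 *						}
 *
 * If the fast path observes PG_checked=1, the barrier pair guarantees it also
 * observes the earlier clear_bit(), so it can never read a stale "verified"
 * bit left over from a previous incarnation of the page.
 */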

/*
 * Verify a single data block against the file's Merkle tree.
 *
 * In principle, we need to verify the entire path to the root node.  However,
 * for efficiency the filesystem may cache the hash blocks.  Therefore we need
 * only ascend the tree until an already-verified hash block is seen, and then
 * verify the path to that block.
 *
 * Return: %true if the data block is valid, else %false.
 */
static bool
verify_data_block(struct inode *inode, struct fsverity_info *vi,
		  const void *data, u64 data_pos, unsigned long max_ra_pages)
{
	const struct merkle_tree_params *params = &vi->tree_params;
	const unsigned int hsize = params->digest_size;
	int level;
	u8 _want_hash[FS_VERITY_MAX_DIGEST_SIZE];
	const u8 *want_hash;
	u8 real_hash[FS_VERITY_MAX_DIGEST_SIZE];
	/* The hash blocks that are traversed, indexed by level */
	struct {
		/* Page containing the hash block */
		struct page *page;
		/* Mapped address of the hash block (will be within @page) */
		const void *addr;
		/* Index of the hash block in the tree overall */
		unsigned long index;
		/* Byte offset of the wanted hash relative to @addr */
		unsigned int hoffset;
	} hblocks[FS_VERITY_MAX_LEVELS];
	/*
	 * The index of the previous level's block within that level; also the
	 * index of that block's hash within the current level.
	 */
	u64 hidx = data_pos >> params->log_blocksize;

	/* Up to 1 + FS_VERITY_MAX_LEVELS pages may be mapped at once */
	BUILD_BUG_ON(1 + FS_VERITY_MAX_LEVELS > KM_MAX_IDX);

	if (unlikely(data_pos >= inode->i_size)) {
		/*
		 * This can happen in the data page spanning EOF when the Merkle
		 * tree block size is less than the page size.  The Merkle tree
		 * doesn't cover data blocks fully past EOF.  But the entire
		 * page spanning EOF can be visible to userspace via a mmap, and
		 * any part past EOF should be all zeroes.  Therefore, we need
		 * to verify that any data blocks fully past EOF are all zeroes.
		 */
		if (memchr_inv(data, 0, params->block_size)) {
			fsverity_err(inode,
				     "FILE CORRUPTED! Data past EOF is not zeroed");
			return false;
		}
		return true;
	}

	/*
	 * Starting at the leaf level, ascend the tree saving hash blocks along
	 * the way until we find a hash block that has already been verified, or
	 * until we reach the root.
	 */
	for (level = 0; level < params->num_levels; level++) {
		unsigned long next_hidx;
		unsigned long hblock_idx;
		pgoff_t hpage_idx;
		unsigned int hblock_offset_in_page;
		unsigned int hoffset;
		struct page *hpage;
		const void *haddr;

		/*
		 * The index of the block in the current level; also the index
		 * of that block's hash within the next level.
		 */
		next_hidx = hidx >> params->log_arity;

		/* Index of the hash block in the tree overall */
		hblock_idx = params->level_start[level] + next_hidx;

		/* Index of the hash page in the tree overall */
		hpage_idx = hblock_idx >> params->log_blocks_per_page;

		/* Byte offset of the hash block within the page */
		hblock_offset_in_page =
			(hblock_idx << params->log_blocksize) & ~PAGE_MASK;

		/* Byte offset of the hash within the block */
		hoffset = (hidx << params->log_digestsize) &
			  (params->block_size - 1);
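
		/*
		 * Worked example (illustrative, not from the original file):
		 * with SHA-256 and 4K Merkle tree blocks (log_blocksize=12,
		 * log_digestsize=5, hence log_arity=7, i.e. 128 hashes per
		 * block), hidx=300 selects hash 300 & 127 = 44 within block
		 * level_start[level] + (300 >> 7) = level_start[level] + 2,
		 * at byte offset 44 * 32 = 1408 into that block.
		 */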

		hpage = inode->i_sb->s_vop->read_merkle_tree_page(inode,
				hpage_idx, level == 0 ? min(max_ra_pages,
					params->tree_pages - hpage_idx) : 0);
		if (IS_ERR(hpage)) {
			fsverity_err(inode,
				     "Error %ld reading Merkle tree page %lu",
				     PTR_ERR(hpage), hpage_idx);
			goto error;
		}
		haddr = kmap_local_page(hpage) + hblock_offset_in_page;
		if (is_hash_block_verified(vi, hpage, hblock_idx)) {
			memcpy(_want_hash, haddr + hoffset, hsize);
			want_hash = _want_hash;
			kunmap_local(haddr);
			put_page(hpage);
			goto descend;
		}
		hblocks[level].page = hpage;
		hblocks[level].addr = haddr;
		hblocks[level].index = hblock_idx;
		hblocks[level].hoffset = hoffset;
		hidx = next_hidx;
	}

	want_hash = vi->root_hash;
descend:
	/* Descend the tree verifying hash blocks. */
	for (; level > 0; level--) {
		struct page *hpage = hblocks[level - 1].page;
		const void *haddr = hblocks[level - 1].addr;
		unsigned long hblock_idx = hblocks[level - 1].index;
		unsigned int hoffset = hblocks[level - 1].hoffset;

		if (fsverity_hash_block(params, inode, haddr, real_hash) != 0)
			goto error;
		if (memcmp(want_hash, real_hash, hsize) != 0)
			goto corrupted;
		/*
		 * Mark the hash block as verified.  This must be atomic and
		 * idempotent, as the same hash block might be verified by
		 * multiple threads concurrently.
		 */
		if (vi->hash_block_verified)
			set_bit(hblock_idx, vi->hash_block_verified);
		else
			SetPageChecked(hpage);
		memcpy(_want_hash, haddr + hoffset, hsize);
		want_hash = _want_hash;
		kunmap_local(haddr);
		put_page(hpage);
	}

	/* Finally, verify the data block. */
	if (fsverity_hash_block(params, inode, data, real_hash) != 0)
		goto error;
	if (memcmp(want_hash, real_hash, hsize) != 0)
		goto corrupted;
	return true;

corrupted:
	fsverity_err(inode,
		     "FILE CORRUPTED! pos=%llu, level=%d, want_hash=%s:%*phN, real_hash=%s:%*phN",
		     data_pos, level - 1,
		     params->hash_alg->name, hsize, want_hash,
		     params->hash_alg->name, hsize, real_hash);
error:
	for (; level > 0; level--) {
		kunmap_local(hblocks[level - 1].addr);
		put_page(hblocks[level - 1].page);
	}
	return false;
}
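
/*
 * Illustrative walk-through (an assumption about a typical case, not code from
 * this file): with a 3-level tree, if the leaf-level hash block is found
 * already verified, the ascend loop exits at level 0, want_hash is taken from
 * that cached block, and only the data block itself is hashed.  If nothing is
 * cached, blocks for levels 0..2 are saved, want_hash starts as the root hash,
 * and the descend loop verifies levels 2, 1, 0 and finally the data block,
 * each verified block supplying the want_hash for the next step down.
 */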

static bool
verify_data_blocks(struct folio *data_folio, size_t len, size_t offset,
		   unsigned long max_ra_pages)
{
	struct inode *inode = data_folio->mapping->host;
	struct fsverity_info *vi = inode->i_verity_info;
	const unsigned int block_size = vi->tree_params.block_size;
	u64 pos = (u64)data_folio->index << PAGE_SHIFT;

	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offset, block_size)))
		return false;
	if (WARN_ON_ONCE(!folio_test_locked(data_folio) ||
			 folio_test_uptodate(data_folio)))
		return false;

	do {
		void *data;
		bool valid;

		data = kmap_local_folio(data_folio, offset);
		valid = verify_data_block(inode, vi, data, pos + offset,
					  max_ra_pages);
		kunmap_local(data);
		if (!valid)
			return false;
		offset += block_size;
		len -= block_size;
	} while (len);
	return true;
}

/**
 * fsverity_verify_blocks() - verify data in a folio
 * @folio: the folio containing the data to verify
 * @len: the length of the data to verify in the folio
 * @offset: the offset of the data to verify in the folio
 *
 * Verify data that has just been read from a verity file.  The data must be
 * located in a pagecache folio that is still locked and not yet uptodate.  The
 * length and offset of the data must be Merkle tree block size aligned.
 *
 * Return: %true if the data is valid, else %false.
 */
bool fsverity_verify_blocks(struct folio *folio, size_t len, size_t offset)
{
	return verify_data_blocks(folio, len, offset, 0);
}
EXPORT_SYMBOL_GPL(fsverity_verify_blocks);
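
/*
 * Usage sketch (hypothetical, not from this file): a filesystem that fills the
 * page cache without bios might verify each locked, not-yet-uptodate folio
 * before marking it uptodate.  example_fill_folio() is a made-up helper;
 * fsverity_active() is the real inode check from linux/fsverity.h.
 *
 *	static int example_read_folio(struct file *file, struct folio *folio)
 *	{
 *		int err = example_fill_folio(folio);
 *
 *		if (!err && fsverity_active(folio->mapping->host) &&
 *		    !fsverity_verify_blocks(folio, folio_size(folio), 0))
 *			err = -EIO;
 *		if (!err)
 *			folio_mark_uptodate(folio);
 *		folio_unlock(folio);
 *		return err;
 *	}
 */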

#ifdef CONFIG_BLOCK
/**
 * fsverity_verify_bio() - verify a 'read' bio that has just completed
 * @bio: the bio to verify
 *
 * Verify the bio's data against the file's Merkle tree.  All bio data segments
 * must be aligned to the file's Merkle tree block size.  If any data fails
 * verification, then bio->bi_status is set to an error status.
 *
 * This is a helper function for use by the ->readahead() method of filesystems
 * that issue bios to read data directly into the page cache.  Filesystems that
 * populate the page cache without issuing bios (e.g. non block-based
 * filesystems) must instead call fsverity_verify_page() directly on each page.
 * All filesystems must also call fsverity_verify_page() on holes.
 */
void fsverity_verify_bio(struct bio *bio)
{
	struct folio_iter fi;
	unsigned long max_ra_pages = 0;

	if (bio->bi_opf & REQ_RAHEAD) {
		/*
		 * If this bio is for data readahead, then we also do readahead
		 * of the first (largest) level of the Merkle tree.  Namely,
		 * when a Merkle tree page is read, we also try to piggy-back on
		 * some additional pages -- up to 1/4 the number of data pages.
		 *
		 * This improves sequential read performance, as it greatly
		 * reduces the number of I/O requests made to the Merkle tree.
		 */
		max_ra_pages = bio->bi_iter.bi_size >> (PAGE_SHIFT + 2);
	}
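
	/*
	 * Worked example (illustrative): a 1 MiB readahead bio spans 256
	 * data pages of 4 KiB, so max_ra_pages = 1 MiB >> (12 + 2) = 64
	 * pages of Merkle tree readahead -- 1/4 of the data pages.
	 */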

	bio_for_each_folio_all(fi, bio) {
		if (!verify_data_blocks(fi.folio, fi.length, fi.offset,
					max_ra_pages)) {
			bio->bi_status = BLK_STS_IOERR;
			break;
		}
	}
}
EXPORT_SYMBOL_GPL(fsverity_verify_bio);
#endif /* CONFIG_BLOCK */

/**
 * fsverity_enqueue_verify_work() - enqueue work on the fs-verity workqueue
 * @work: the work to enqueue
 *
 * Enqueue verification work for asynchronous processing.
 */
void fsverity_enqueue_verify_work(struct work_struct *work)
{
	queue_work(fsverity_read_workqueue, work);
}
EXPORT_SYMBOL_GPL(fsverity_enqueue_verify_work);
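
/*
 * Usage sketch (hypothetical, not from this file): filesystems typically defer
 * verification out of the bio completion context by embedding a work_struct in
 * their per-bio state, roughly as ext4 and f2fs do.  The example_* names and
 * struct are made up.
 *
 *	static void example_verify_work(struct work_struct *work)
 *	{
 *		struct example_bio_ctx *ctx =
 *			container_of(work, struct example_bio_ctx, work);
 *
 *		fsverity_verify_bio(ctx->bio);
 *		example_complete_read(ctx->bio);	// unlock/finish pages
 *	}
 *
 *	static void example_read_end_io(struct bio *bio)
 *	{
 *		struct example_bio_ctx *ctx = bio->bi_private;
 *
 *		INIT_WORK(&ctx->work, example_verify_work);
 *		fsverity_enqueue_verify_work(&ctx->work);
 *	}
 */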

void __init fsverity_init_workqueue(void)
{
	/*
	 * Use a high-priority workqueue to prioritize verification work, which
	 * blocks reads from completing, over regular application tasks.
	 *
	 * For performance reasons, don't use an unbound workqueue.  Using an
	 * unbound workqueue for crypto operations causes excessive scheduler
	 * latency on ARM64.
	 */
	fsverity_read_workqueue = alloc_workqueue("fsverity_read_queue",
						  WQ_HIGHPRI,
						  num_online_cpus());
	if (!fsverity_read_workqueue)
		panic("failed to allocate fsverity_read_queue");
}