Commit | Line | Data |
---|---|---|
a1d312de | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
aa0b42b7 | 2 | /* |
1da177e4 | 3 | * aops.c - NTFS kernel address space operations and page cache handling. |
1da177e4 | 4 | * |
ce1bafa0 | 5 | * Copyright (c) 2001-2014 Anton Altaparmakov and Tuxera Inc. |
1da177e4 | 6 | * Copyright (c) 2002 Richard Russon |
1da177e4 LT | 7 | */ |
8 | ||
9 | #include <linux/errno.h> | |
78264bd9 | 10 | #include <linux/fs.h> |
5a0e3ad6 | 11 | #include <linux/gfp.h> |
1da177e4 LT | 12 | #include <linux/mm.h> |
13 | #include <linux/pagemap.h> | |
14 | #include <linux/swap.h> | |
15 | #include <linux/buffer_head.h> | |
16 | #include <linux/writeback.h> | |
b4012a98 | 17 | #include <linux/bit_spinlock.h> |
be297968 | 18 | #include <linux/bio.h> |
1da177e4 LT | 19 | |
20 | #include "aops.h" | |
21 | #include "attrib.h" | |
22 | #include "debug.h" | |
23 | #include "inode.h" | |
24 | #include "mft.h" | |
25 | #include "runlist.h" | |
26 | #include "types.h" | |
27 | #include "ntfs.h" | |
28 | ||
29 | /** | |
30 | * ntfs_end_buffer_async_read - async io completion for reading attributes | |
31 | * @bh: buffer head on which io is completed | |
32 | * @uptodate: whether @bh is now uptodate or not | |
33 | * | |
34 | * Asynchronous I/O completion handler for reading pages belonging to the | |
35 | * attribute address space of an inode. The inodes can either be files or | |
36 | * directories or they can be fake inodes describing some attribute. | |
37 | * | |
38 | * If NInoMstProtected(), perform the post read mst fixups when all IO on the | |
39 | * page has been completed and mark the page uptodate or set the error bit on | |
40 | * the page. To determine the size of the records that need fixing up, we | |
41 | * cheat a little bit by setting the index_block_size in ntfs_inode to the ntfs | |
42 | * record size, and index_block_size_bits, to the log(base 2) of the ntfs | |
43 | * record size. | |
44 | */ | |
45 | static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) | |
46 | { | |
1da177e4 | 47 | unsigned long flags; |
e604635c | 48 | struct buffer_head *first, *tmp; |
1da177e4 | 49 | struct page *page; |
f6098cf4 | 50 | struct inode *vi; |
1da177e4 LT | 51 | ntfs_inode *ni; |
52 | int page_uptodate = 1; | |
53 | ||
54 | page = bh->b_page; | |
f6098cf4 AA | 55 | vi = page->mapping->host; |
56 | ni = NTFS_I(vi); | |
1da177e4 LT | 57 | |
58 | if (likely(uptodate)) { | |
f6098cf4 AA | 59 | loff_t i_size; |
60 | s64 file_ofs, init_size; | |
1da177e4 LT | 61 | |
62 | set_buffer_uptodate(bh); | |
63 | ||
09cbfeaf | 64 | file_ofs = ((s64)page->index << PAGE_SHIFT) + |
1da177e4 | 65 | bh_offset(bh); |
07a4e2da | 66 | read_lock_irqsave(&ni->size_lock, flags); |
f6098cf4 AA | 67 | init_size = ni->initialized_size; |
68 | i_size = i_size_read(vi); | |
07a4e2da | 69 | read_unlock_irqrestore(&ni->size_lock, flags); |
f6098cf4 AA | 70 | if (unlikely(init_size > i_size)) { |
71 | /* Race with shrinking truncate. */ | |
72 | init_size = i_size; | |
73 | } | |
1da177e4 | 74 | /* Check for the current buffer head overflowing. */ |
f6098cf4 | 75 | if (unlikely(file_ofs + bh->b_size > init_size)) { |
f6098cf4 | 76 | int ofs; |
eebd2aa3 | 77 | void *kaddr; |
f6098cf4 AA | 78 | |
79 | ofs = 0; | |
80 | if (file_ofs < init_size) | |
81 | ofs = init_size - file_ofs; | |
a3ac1414 | 82 | kaddr = kmap_atomic(page); |
eebd2aa3 CL | 83 | memset(kaddr + bh_offset(bh) + ofs, 0, |
84 | bh->b_size - ofs); | |
85 | flush_dcache_page(page); | |
a3ac1414 | 86 | kunmap_atomic(kaddr); |
1da177e4 LT | 87 | } |
88 | } else { | |
89 | clear_buffer_uptodate(bh); | |
e604635c | 90 | SetPageError(page); |
f6098cf4 AA | 91 | ntfs_error(ni->vol->sb, "Buffer I/O error, logical block " |
92 | "0x%llx.", (unsigned long long)bh->b_blocknr); | |
1da177e4 | 93 | } |
e604635c | 94 | first = page_buffers(page); |
f1e67e35 | 95 | spin_lock_irqsave(&first->b_uptodate_lock, flags); |
1da177e4 LT | 96 | clear_buffer_async_read(bh); |
97 | unlock_buffer(bh); | |
98 | tmp = bh; | |
99 | do { | |
100 | if (!buffer_uptodate(tmp)) | |
101 | page_uptodate = 0; | |
102 | if (buffer_async_read(tmp)) { | |
103 | if (likely(buffer_locked(tmp))) | |
104 | goto still_busy; | |
105 | /* Async buffers must be locked. */ | |
106 | BUG(); | |
107 | } | |
108 | tmp = tmp->b_this_page; | |
109 | } while (tmp != bh); | |
f1e67e35 | 110 | spin_unlock_irqrestore(&first->b_uptodate_lock, flags); |
1da177e4 LT | 111 | /* |
112 | * If none of the buffers had errors then we can set the page uptodate, | |
113 | * but we first have to perform the post read mst fixups, if the | |
114 | * attribute is mst protected, i.e. if NInoMstProtected(ni) is true. | |
115 | * Note we ignore fixup errors as those are detected when | |
116 | * map_mft_record() is called which gives us per record granularity | |
117 | * rather than per page granularity. | |
118 | */ | |
119 | if (!NInoMstProtected(ni)) { | |
120 | if (likely(page_uptodate && !PageError(page))) | |
121 | SetPageUptodate(page); | |
122 | } else { | |
f6098cf4 | 123 | u8 *kaddr; |
1da177e4 LT | 124 | unsigned int i, recs; |
125 | u32 rec_size; | |
126 | ||
127 | rec_size = ni->itype.index.block_size; | |
09cbfeaf | 128 | recs = PAGE_SIZE / rec_size; |
1da177e4 LT | 129 | /* Should have been verified before we got here... */ |
130 | BUG_ON(!recs); | |
a3ac1414 | 131 | kaddr = kmap_atomic(page); |
1da177e4 | 132 | for (i = 0; i < recs; i++) |
f6098cf4 | 133 | post_read_mst_fixup((NTFS_RECORD*)(kaddr + |
1da177e4 | 134 | i * rec_size), rec_size); |
a3ac1414 | 135 | kunmap_atomic(kaddr); |
1da177e4 | 136 | flush_dcache_page(page); |
b6ad6c52 | 137 | if (likely(page_uptodate && !PageError(page))) |
1da177e4 LT | 138 | SetPageUptodate(page); |
139 | } | |
140 | unlock_page(page); | |
141 | return; | |
142 | still_busy: | |
f1e67e35 | 143 | spin_unlock_irqrestore(&first->b_uptodate_lock, flags); |
1da177e4 LT | 144 | return; |
145 | } | |
146 | ||
147 | /** | |
148 | * ntfs_read_block - fill a @page of an address space with data | |
149 | * @page: page cache page to fill with data | |
150 | * | |
151 | * Fill the page @page of the address space belonging to the @page->host inode. | |
152 | * We read each buffer asynchronously and when all buffers are read in, our io | |
153 | * completion handler ntfs_end_buffer_async_read(), if required, automatically | |
154 | * applies the mst fixups to the page before finally marking it uptodate and | |
155 | * unlocking it. | |
156 | * | |
157 | * We only enforce allocated_size limit because i_size is checked for in | |
158 | * generic_file_read(). | |
159 | * | |
160 | * Return 0 on success and -errno on error. | |
161 | * | |
933906f8 | 162 | * Contains an adapted version of fs/buffer.c::block_read_full_folio(). |
1da177e4 LT | 163 | */ |
164 | static int ntfs_read_block(struct page *page) | |
165 | { | |
f6098cf4 | 166 | loff_t i_size; |
1da177e4 LT | 167 | VCN vcn; |
168 | LCN lcn; | |
f6098cf4 AA | 169 | s64 init_size; |
170 | struct inode *vi; | |
1da177e4 LT | 171 | ntfs_inode *ni; |
172 | ntfs_volume *vol; | |
173 | runlist_element *rl; | |
174 | struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE]; | |
175 | sector_t iblock, lblock, zblock; | |
07a4e2da | 176 | unsigned long flags; |
1da177e4 LT | 177 | unsigned int blocksize, vcn_ofs; |
178 | int i, nr; | |
179 | unsigned char blocksize_bits; | |
180 | ||
f6098cf4 AA | 181 | vi = page->mapping->host; |
182 | ni = NTFS_I(vi); | |
1da177e4 LT | 183 | vol = ni->vol; |
184 | ||
185 | /* $MFT/$DATA must have its complete runlist in memory at all times. */ | |
186 | BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni)); | |
187 | ||
78af34f0 AA | 188 | blocksize = vol->sb->s_blocksize; |
189 | blocksize_bits = vol->sb->s_blocksize_bits; | |
1da177e4 | 190 | |
a01ac532 | 191 | if (!page_has_buffers(page)) { |
1da177e4 | 192 | create_empty_buffers(page, blocksize, 0); |
a01ac532 AA | 193 | if (unlikely(!page_has_buffers(page))) { |
194 | unlock_page(page); | |
195 | return -ENOMEM; | |
196 | } | |
1da177e4 | 197 | } |
a01ac532 AA | 198 | bh = head = page_buffers(page); |
199 | BUG_ON(!bh); | |
1da177e4 | 200 | |
f6098cf4 AA | 201 | /* |
202 | * We may be racing with truncate. To avoid some of the problems we | |
203 | * now take a snapshot of the various sizes and use those for the whole | |
204 | * of the function. In case of an extending truncate it just means we | |
205 | * may leave some buffers unmapped which are now allocated. This is | |
206 | * not a problem since these buffers will just get mapped when a write | |
207 | * occurs. In case of a shrinking truncate, we will detect this later | |
208 | * on due to the runlist being incomplete and if the page is being | |
209 | * fully truncated, truncate will throw it away as soon as we unlock | |
210 | * it so no need to worry what we do with it. | |
211 | */ | |
09cbfeaf | 212 | iblock = (s64)page->index << (PAGE_SHIFT - blocksize_bits); |
07a4e2da | 213 | read_lock_irqsave(&ni->size_lock, flags); |
1da177e4 | 214 | lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits; |
f6098cf4 AA | 215 | init_size = ni->initialized_size; |
216 | i_size = i_size_read(vi); | |
07a4e2da | 217 | read_unlock_irqrestore(&ni->size_lock, flags); |
f6098cf4 AA | 218 | if (unlikely(init_size > i_size)) { |
219 | /* Race with shrinking truncate. */ | |
220 | init_size = i_size; | |
221 | } | |
222 | zblock = (init_size + blocksize - 1) >> blocksize_bits; | |
1da177e4 LT | 223 | |
224 | /* Loop through all the buffers in the page. */ | |
225 | rl = NULL; | |
226 | nr = i = 0; | |
227 | do { | |
e3bf460f | 228 | int err = 0; |
1da177e4 LT | 229 | |
230 | if (unlikely(buffer_uptodate(bh))) | |
231 | continue; | |
232 | if (unlikely(buffer_mapped(bh))) { | |
233 | arr[nr++] = bh; | |
234 | continue; | |
235 | } | |
236 | bh->b_bdev = vol->sb->s_bdev; | |
237 | /* Is the block within the allowed limits? */ | |
238 | if (iblock < lblock) { | |
c49c3111 | 239 | bool is_retry = false; |
1da177e4 LT | 240 | |
241 | /* Convert iblock into corresponding vcn and offset. */ | |
242 | vcn = (VCN)iblock << blocksize_bits >> | |
243 | vol->cluster_size_bits; | |
244 | vcn_ofs = ((VCN)iblock << blocksize_bits) & | |
245 | vol->cluster_size_mask; | |
246 | if (!rl) { | |
247 | lock_retry_remap: | |
248 | down_read(&ni->runlist.lock); | |
249 | rl = ni->runlist.rl; | |
250 | } | |
251 | if (likely(rl != NULL)) { | |
252 | /* Seek to element containing target vcn. */ | |
253 | while (rl->length && rl[1].vcn <= vcn) | |
254 | rl++; | |
255 | lcn = ntfs_rl_vcn_to_lcn(rl, vcn); | |
256 | } else | |
257 | lcn = LCN_RL_NOT_MAPPED; | |
258 | /* Successful remap. */ | |
259 | if (lcn >= 0) { | |
260 | /* Setup buffer head to correct block. */ | |
261 | bh->b_blocknr = ((lcn << vol->cluster_size_bits) | |
262 | + vcn_ofs) >> blocksize_bits; | |
263 | set_buffer_mapped(bh); | |
264 | /* Only read initialized data blocks. */ | |
265 | if (iblock < zblock) { | |
266 | arr[nr++] = bh; | |
267 | continue; | |
268 | } | |
269 | /* Fully non-initialized data block, zero it. */ | |
270 | goto handle_zblock; | |
271 | } | |
272 | /* It is a hole, need to zero it. */ | |
273 | if (lcn == LCN_HOLE) | |
274 | goto handle_hole; | |
275 | /* If first try and runlist unmapped, map and retry. */ | |
276 | if (!is_retry && lcn == LCN_RL_NOT_MAPPED) { | |
c49c3111 | 277 | is_retry = true; |
1da177e4 LT | 278 | /* |
279 | * Attempt to map runlist, dropping lock for | |
280 | * the duration. | |
281 | */ | |
282 | up_read(&ni->runlist.lock); | |
283 | err = ntfs_map_runlist(ni, vcn); | |
284 | if (likely(!err)) | |
285 | goto lock_retry_remap; | |
286 | rl = NULL; | |
9f993fe4 AA | 287 | } else if (!rl) |
288 | up_read(&ni->runlist.lock); | |
8273d5d4 AA | 289 | /* |
290 | * If buffer is outside the runlist, treat it as a | |
291 | * hole. This can happen due to concurrent truncate | |
292 | * for example. | |
293 | */ | |
294 | if (err == -ENOENT || lcn == LCN_ENOENT) { | |
295 | err = 0; | |
296 | goto handle_hole; | |
297 | } | |
1da177e4 | 298 | /* Hard error, zero out region. */ |
8273d5d4 AA | 299 | if (!err) |
300 | err = -EIO; | |
1da177e4 LT | 301 | bh->b_blocknr = -1; |
302 | SetPageError(page); | |
303 | ntfs_error(vol->sb, "Failed to read from inode 0x%lx, " | |
304 | "attribute type 0x%x, vcn 0x%llx, " | |
305 | "offset 0x%x because its location on " | |
306 | "disk could not be determined%s " | |
8273d5d4 | 307 | "(error code %i).", ni->mft_no, |
1da177e4 LT | 308 | ni->type, (unsigned long long)vcn, |
309 | vcn_ofs, is_retry ? " even after " | |
8273d5d4 | 310 | "retrying" : "", err); |
1da177e4 LT | 311 | } |
312 | /* | |
313 | * Either iblock was outside lblock limits or | |
314 | * ntfs_rl_vcn_to_lcn() returned error. Just zero that portion | |
315 | * of the page and set the buffer uptodate. | |
316 | */ | |
317 | handle_hole: | |
318 | bh->b_blocknr = -1UL; | |
319 | clear_buffer_mapped(bh); | |
320 | handle_zblock: | |
eebd2aa3 | 321 | zero_user(page, i * blocksize, blocksize); |
8273d5d4 AA | 322 | if (likely(!err)) |
323 | set_buffer_uptodate(bh); | |
1da177e4 LT | 324 | } while (i++, iblock++, (bh = bh->b_this_page) != head); |
325 | ||
326 | /* Release the lock if we took it. */ | |
327 | if (rl) | |
328 | up_read(&ni->runlist.lock); | |
329 | ||
330 | /* Check we have at least one buffer ready for i/o. */ | |
331 | if (nr) { | |
332 | struct buffer_head *tbh; | |
333 | ||
334 | /* Lock the buffers. */ | |
335 | for (i = 0; i < nr; i++) { | |
336 | tbh = arr[i]; | |
337 | lock_buffer(tbh); | |
338 | tbh->b_end_io = ntfs_end_buffer_async_read; | |
339 | set_buffer_async_read(tbh); | |
340 | } | |
341 | /* Finally, start i/o on the buffers. */ | |
342 | for (i = 0; i < nr; i++) { | |
343 | tbh = arr[i]; | |
344 | if (likely(!buffer_uptodate(tbh))) | |
1420c4a5 | 345 | submit_bh(REQ_OP_READ, tbh); |
1da177e4 LT | 346 | else |
347 | ntfs_end_buffer_async_read(tbh, 1); | |
348 | } | |
349 | return 0; | |
350 | } | |
351 | /* No i/o was scheduled on any of the buffers. */ | |
352 | if (likely(!PageError(page))) | |
353 | SetPageUptodate(page); | |
354 | else /* Signal synchronous i/o error. */ | |
355 | nr = -EIO; | |
356 | unlock_page(page); | |
357 | return nr; | |
358 | } | |
359 | ||
360 | /** | |
933906f8 MWO | 361 | * ntfs_read_folio - fill a @folio of a @file with data from the device |
362 | * @file: open file to which the folio @folio belongs or NULL | |
363 | * @folio: page cache folio to fill with data | |
1da177e4 | 364 | * |
933906f8 MWO | 365 | * For non-resident attributes, ntfs_read_folio() fills the @folio of the open |
366 | * file @file by calling the ntfs version of the generic block_read_full_folio() | |
1da177e4 | 367 | * function, ntfs_read_block(), which in turn creates and reads in the buffers |
933906f8 | 368 | * associated with the folio asynchronously. |
1da177e4 | 369 | * |
933906f8 | 370 | * For resident attributes, OTOH, ntfs_read_folio() fills @folio by copying the |
1da177e4 LT | 371 | * data from the mft record (which at this stage is most likely in memory) and |
372 | * fills the remainder with zeroes. Thus, in this case, I/O is synchronous, as | |
373 | * even if the mft record is not cached at this point in time, we need to wait | |
374 | * for it to be read in before we can do the copy. | |
375 | * | |
376 | * Return 0 on success and -errno on error. | |
377 | */ | |
933906f8 | 378 | static int ntfs_read_folio(struct file *file, struct folio *folio) |
1da177e4 | 379 | { |
933906f8 | 380 | struct page *page = &folio->page; |
f6098cf4 AA | 381 | loff_t i_size; |
382 | struct inode *vi; | |
1da177e4 | 383 | ntfs_inode *ni, *base_ni; |
bfab36e8 | 384 | u8 *addr; |
1da177e4 LT | 385 | ntfs_attr_search_ctx *ctx; |
386 | MFT_RECORD *mrec; | |
b6ad6c52 | 387 | unsigned long flags; |
1da177e4 LT | 388 | u32 attr_len; |
389 | int err = 0; | |
390 | ||
905685f6 | 391 | retry_readpage: |
1da177e4 | 392 | BUG_ON(!PageLocked(page)); |
ebab8990 AA | 393 | vi = page->mapping->host; |
394 | i_size = i_size_read(vi); | |
395 | /* Is the page fully outside i_size? (truncate in progress) */ | |
09cbfeaf KS | 396 | if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >> |
397 | PAGE_SHIFT)) { | |
398 | zero_user(page, 0, PAGE_SIZE); | |
ebab8990 AA | 399 | ntfs_debug("Read outside i_size - truncated?"); |
400 | goto done; | |
401 | } | |
1da177e4 LT | 402 | /* |
403 | * This can potentially happen because we clear PageUptodate() during | |
404 | * ntfs_writepage() of MstProtected() attributes. | |
405 | */ | |
406 | if (PageUptodate(page)) { | |
407 | unlock_page(page); | |
408 | return 0; | |
409 | } | |
f6098cf4 | 410 | ni = NTFS_I(vi); |
311120ec AA | 411 | /* |
412 | * Only $DATA attributes can be encrypted and only unnamed $DATA | |
413 | * attributes can be compressed. Index root can have the flags set but | |
414 | * this means to create compressed/encrypted files, not that the | |
4e64c886 AA | 415 | * attribute is compressed/encrypted. Note we need to check for |
416 | * AT_INDEX_ALLOCATION since this is the type of both directory and | |
417 | * index inodes. | |
311120ec | 418 | */ |
4e64c886 | 419 | if (ni->type != AT_INDEX_ALLOCATION) { |
311120ec AA | 420 | /* If attribute is encrypted, deny access, just like NT4. */ |
421 | if (NInoEncrypted(ni)) { | |
422 | BUG_ON(ni->type != AT_DATA); | |
423 | err = -EACCES; | |
424 | goto err_out; | |
425 | } | |
426 | /* Compressed data streams are handled in compress.c. */ | |
427 | if (NInoNonResident(ni) && NInoCompressed(ni)) { | |
428 | BUG_ON(ni->type != AT_DATA); | |
429 | BUG_ON(ni->name_len); | |
430 | return ntfs_read_compressed_block(page); | |
431 | } | |
432 | } | |
1da177e4 LT | 433 | /* NInoNonResident() == NInoIndexAllocPresent() */ |
434 | if (NInoNonResident(ni)) { | |
311120ec | 435 | /* Normal, non-resident data stream. */ |
1da177e4 LT | 436 | return ntfs_read_block(page); |
437 | } | |
438 | /* | |
439 | * Attribute is resident, implying it is not compressed or encrypted. | |
440 | * This also means the attribute is smaller than an mft record and | |
441 | * hence smaller than a page, so can simply zero out any pages with | |
311120ec AA | 442 | * index above 0. Note the attribute can actually be marked compressed |
443 | * but if it is resident the actual data is not compressed so we are | |
444 | * ok to ignore the compressed flag here. | |
1da177e4 | 445 | */ |
b6ad6c52 | 446 | if (unlikely(page->index > 0)) { |
09cbfeaf | 447 | zero_user(page, 0, PAGE_SIZE); |
1da177e4 LT | 448 | goto done; |
449 | } | |
450 | if (!NInoAttr(ni)) | |
451 | base_ni = ni; | |
452 | else | |
453 | base_ni = ni->ext.base_ntfs_ino; | |
454 | /* Map, pin, and lock the mft record. */ | |
455 | mrec = map_mft_record(base_ni); | |
456 | if (IS_ERR(mrec)) { | |
457 | err = PTR_ERR(mrec); | |
458 | goto err_out; | |
459 | } | |
905685f6 AA | 460 | /* |
461 | * If a parallel write made the attribute non-resident, drop the mft | |
933906f8 | 462 | * record and retry the read_folio. |
905685f6 AA | 463 | */ |
464 | if (unlikely(NInoNonResident(ni))) { | |
465 | unmap_mft_record(base_ni); | |
466 | goto retry_readpage; | |
467 | } | |
1da177e4 LT | 468 | ctx = ntfs_attr_get_search_ctx(base_ni, mrec); |
469 | if (unlikely(!ctx)) { | |
470 | err = -ENOMEM; | |
471 | goto unm_err_out; | |
472 | } | |
473 | err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len, | |
474 | CASE_SENSITIVE, 0, NULL, 0, ctx); | |
475 | if (unlikely(err)) | |
476 | goto put_unm_err_out; | |
477 | attr_len = le32_to_cpu(ctx->attr->data.resident.value_length); | |
b6ad6c52 AA | 478 | read_lock_irqsave(&ni->size_lock, flags); |
479 | if (unlikely(attr_len > ni->initialized_size)) | |
480 | attr_len = ni->initialized_size; | |
f6098cf4 | 481 | i_size = i_size_read(vi); |
b6ad6c52 | 482 | read_unlock_irqrestore(&ni->size_lock, flags); |
f6098cf4 AA | 483 | if (unlikely(attr_len > i_size)) { |
484 | /* Race with shrinking truncate. */ | |
485 | attr_len = i_size; | |
486 | } | |
a3ac1414 | 487 | addr = kmap_atomic(page); |
1da177e4 | 488 | /* Copy the data to the page. */ |
bfab36e8 | 489 | memcpy(addr, (u8*)ctx->attr + |
1da177e4 LT | 490 | le16_to_cpu(ctx->attr->data.resident.value_offset), |
491 | attr_len); | |
492 | /* Zero the remainder of the page. */ | |
09cbfeaf | 493 | memset(addr + attr_len, 0, PAGE_SIZE - attr_len); |
1da177e4 | 494 | flush_dcache_page(page); |
a3ac1414 | 495 | kunmap_atomic(addr); |
1da177e4 LT | 496 | put_unm_err_out: |
497 | ntfs_attr_put_search_ctx(ctx); | |
498 | unm_err_out: | |
499 | unmap_mft_record(base_ni); | |
500 | done: | |
501 | SetPageUptodate(page); | |
502 | err_out: | |
503 | unlock_page(page); | |
504 | return err; | |
505 | } | |
506 | ||
507 | #ifdef NTFS_RW | |
508 | ||
509 | /** | |
510 | * ntfs_write_block - write a @page to the backing store | |
511 | * @page: page cache page to write out | |
512 | * @wbc: writeback control structure | |
513 | * | |
514 | * This function is for writing pages belonging to non-resident, non-mst | |
515 | * protected attributes to their backing store. | |
516 | * | |
517 | * For a page with buffers, map and write the dirty buffers asynchronously | |
518 | * under page writeback. For a page without buffers, create buffers for the | |
519 | * page, then proceed as above. | |
520 | * | |
521 | * If a page doesn't have buffers the page dirty state is definitive. If a page | |
522 | * does have buffers, the page dirty state is just a hint, and the buffer dirty | |
523 | * state is definitive. (A hint which has rules: dirty buffers against a clean | |
524 | * page is illegal. Other combinations are legal and need to be handled. In | |
525 | * particular a dirty page containing clean buffers for example.) | |
526 | * | |
527 | * Return 0 on success and -errno on error. | |
528 | * | |
529 | * Based on ntfs_read_block() and __block_write_full_page(). | |
530 | */ | |
531 | static int ntfs_write_block(struct page *page, struct writeback_control *wbc) | |
532 | { | |
533 | VCN vcn; | |
534 | LCN lcn; | |
07a4e2da AA | 535 | s64 initialized_size; |
536 | loff_t i_size; | |
1da177e4 LT | 537 | sector_t block, dblock, iblock; |
538 | struct inode *vi; | |
539 | ntfs_inode *ni; | |
540 | ntfs_volume *vol; | |
541 | runlist_element *rl; | |
542 | struct buffer_head *bh, *head; | |
07a4e2da | 543 | unsigned long flags; |
1da177e4 LT | 544 | unsigned int blocksize, vcn_ofs; |
545 | int err; | |
c49c3111 | 546 | bool need_end_writeback; |
1da177e4 LT | 547 | unsigned char blocksize_bits; |
548 | ||
549 | vi = page->mapping->host; | |
550 | ni = NTFS_I(vi); | |
551 | vol = ni->vol; | |
552 | ||
553 | ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index " | |
554 | "0x%lx.", ni->mft_no, ni->type, page->index); | |
555 | ||
556 | BUG_ON(!NInoNonResident(ni)); | |
557 | BUG_ON(NInoMstProtected(ni)); | |
78af34f0 AA | 558 | blocksize = vol->sb->s_blocksize; |
559 | blocksize_bits = vol->sb->s_blocksize_bits; | |
1da177e4 LT | 560 | if (!page_has_buffers(page)) { |
561 | BUG_ON(!PageUptodate(page)); | |
562 | create_empty_buffers(page, blocksize, | |
563 | (1 << BH_Uptodate) | (1 << BH_Dirty)); | |
a01ac532 AA | 564 | if (unlikely(!page_has_buffers(page))) { |
565 | ntfs_warning(vol->sb, "Error allocating page " | |
566 | "buffers. Redirtying page so we try " | |
567 | "again later."); | |
568 | /* | |
569 | * Put the page back on mapping->dirty_pages, but leave | |
570 | * its buffers' dirty state as-is. | |
571 | */ | |
572 | redirty_page_for_writepage(wbc, page); | |
573 | unlock_page(page); | |
574 | return 0; | |
575 | } | |
1da177e4 LT | 576 | } |
577 | bh = head = page_buffers(page); | |
a01ac532 | 578 | BUG_ON(!bh); |
1da177e4 LT | 579 | |
580 | /* NOTE: Different naming scheme to ntfs_read_block()! */ | |
581 | ||
582 | /* The first block in the page. */ | |
09cbfeaf | 583 | block = (s64)page->index << (PAGE_SHIFT - blocksize_bits); |
1da177e4 | 584 | |
07a4e2da AA | 585 | read_lock_irqsave(&ni->size_lock, flags); |
586 | i_size = i_size_read(vi); | |
587 | initialized_size = ni->initialized_size; | |
588 | read_unlock_irqrestore(&ni->size_lock, flags); | |
589 | ||
1da177e4 | 590 | /* The first out of bounds block for the data size. */ |
07a4e2da | 591 | dblock = (i_size + blocksize - 1) >> blocksize_bits; |
1da177e4 LT | 592 | |
593 | /* The last (fully or partially) initialized block. */ | |
07a4e2da | 594 | iblock = initialized_size >> blocksize_bits; |
1da177e4 LT | 595 | |
596 | /* | |
e621900a | 597 | * Be very careful. We have no exclusion from block_dirty_folio |
1da177e4 LT | 598 | * here, and the (potentially unmapped) buffers may become dirty at |
599 | * any time. If a buffer becomes dirty here after we've inspected it | |
600 | * then we just miss that fact, and the page stays dirty. | |
601 | * | |
e621900a | 602 | * Buffers outside i_size may be dirtied by block_dirty_folio; |
1da177e4 LT | 603 | * handle that here by just cleaning them. |
604 | */ | |
605 | ||
606 | /* | |
607 | * Loop through all the buffers in the page, mapping all the dirty | |
608 | * buffers to disk addresses and handling any aliases from the | |
609 | * underlying block device's mapping. | |
610 | */ | |
611 | rl = NULL; | |
612 | err = 0; | |
613 | do { | |
c49c3111 | 614 | bool is_retry = false; |
1da177e4 LT | 615 | |
616 | if (unlikely(block >= dblock)) { | |
617 | /* | |
618 | * Mapped buffers outside i_size will occur, because | |
619 | * this page can be outside i_size when there is a | |
620 | * truncate in progress. The contents of such buffers | |
621 | * were zeroed by ntfs_writepage(). | |
622 | * | |
623 | * FIXME: What about the small race window where | |
624 | * ntfs_writepage() has not done any clearing because | |
625 | * the page was within i_size but before we get here, | |
626 | * vmtruncate() modifies i_size? | |
627 | */ | |
628 | clear_buffer_dirty(bh); | |
629 | set_buffer_uptodate(bh); | |
630 | continue; | |
631 | } | |
632 | ||
633 | /* Clean buffers are not written out, so no need to map them. */ | |
634 | if (!buffer_dirty(bh)) | |
635 | continue; | |
636 | ||
637 | /* Make sure we have enough initialized size. */ | |
638 | if (unlikely((block >= iblock) && | |
07a4e2da | 639 | (initialized_size < i_size))) { |
1da177e4 | 640 | /* |
933906f8 MWO | 641 | * If this page is fully outside initialized |
642 | * size, zero out all pages between the current | |
643 | * initialized size and the current page. Just | |
644 | * use ntfs_read_folio() to do the zeroing | |
645 | * transparently. | |
1da177e4 LT | 646 | */ |
647 | if (block > iblock) { | |
648 | // TODO: | |
649 | // For each page do: | |
650 | // - read_cache_page() | |
651 | // Again for each page do: | |
652 | // - wait_on_page_locked() | |
653 | // - Check (PageUptodate(page) && | |
654 | // !PageError(page)) | |
655 | // Update initialized size in the attribute and | |
656 | // in the inode. | |
657 | // Again, for each page do: | |
e621900a | 658 | // block_dirty_folio(); |
ea1754a0 | 659 | // put_page() |
1da177e4 LT | 660 | // We don't need to wait on the writes. |
661 | // Update iblock. | |
662 | } | |
663 | /* | |
664 | * The current page straddles initialized size. Zero | |
665 | * all non-uptodate buffers and set them uptodate (and | |
666 | * dirty?). Note, there aren't any non-uptodate buffers | |
667 | * if the page is uptodate. | |
668 | * FIXME: For an uptodate page, the buffers may need to | |
669 | * be written out because they were not initialized on | |
670 | * disk before. | |
671 | */ | |
672 | if (!PageUptodate(page)) { | |
673 | // TODO: | |
674 | // Zero any non-uptodate buffers up to i_size. | |
675 | // Set them uptodate and dirty. | |
676 | } | |
677 | // TODO: | |
678 | // Update initialized size in the attribute and in the | |
679 | // inode (up to i_size). | |
680 | // Update iblock. | |
681 | // FIXME: This is inefficient. Try to batch the two | |
682 | // size changes to happen in one go. | |
683 | ntfs_error(vol->sb, "Writing beyond initialized size " | |
684 | "is not supported yet. Sorry."); | |
685 | err = -EOPNOTSUPP; | |
686 | break; | |
687 | // Do NOT set_buffer_new() BUT DO clear buffer range | |
688 | // outside write request range. | |
689 | // set_buffer_uptodate() on complete buffers as well as | |
690 | // set_buffer_dirty(). | |
691 | } | |
692 | ||
693 | /* No need to map buffers that are already mapped. */ | |
694 | if (buffer_mapped(bh)) | |
695 | continue; | |
696 | ||
697 | /* Unmapped, dirty buffer. Need to map it. */ | |
698 | bh->b_bdev = vol->sb->s_bdev; | |
699 | ||
700 | /* Convert block into corresponding vcn and offset. */ | |
701 | vcn = (VCN)block << blocksize_bits; | |
702 | vcn_ofs = vcn & vol->cluster_size_mask; | |
703 | vcn >>= vol->cluster_size_bits; | |
704 | if (!rl) { | |
705 | lock_retry_remap: | |
706 | down_read(&ni->runlist.lock); | |
707 | rl = ni->runlist.rl; | |
708 | } | |
709 | if (likely(rl != NULL)) { | |
710 | /* Seek to element containing target vcn. */ | |
711 | while (rl->length && rl[1].vcn <= vcn) | |
712 | rl++; | |
713 | lcn = ntfs_rl_vcn_to_lcn(rl, vcn); | |
714 | } else | |
715 | lcn = LCN_RL_NOT_MAPPED; | |
716 | /* Successful remap. */ | |
717 | if (lcn >= 0) { | |
718 | /* Setup buffer head to point to correct block. */ | |
719 | bh->b_blocknr = ((lcn << vol->cluster_size_bits) + | |
720 | vcn_ofs) >> blocksize_bits; | |
721 | set_buffer_mapped(bh); | |
722 | continue; | |
723 | } | |
724 | /* It is a hole, need to instantiate it. */ | |
725 | if (lcn == LCN_HOLE) { | |
8dcdebaf AA | 726 | u8 *kaddr; |
727 | unsigned long *bpos, *bend; | |
728 | ||
729 | /* Check if the buffer is zero. */ | |
a3ac1414 | 730 | kaddr = kmap_atomic(page); |
8dcdebaf AA | 731 | bpos = (unsigned long *)(kaddr + bh_offset(bh)); |
732 | bend = (unsigned long *)((u8*)bpos + blocksize); | |
733 | do { | |
734 | if (unlikely(*bpos)) | |
735 | break; | |
736 | } while (likely(++bpos < bend)); | |
a3ac1414 | 737 | kunmap_atomic(kaddr); |
8dcdebaf AA | 738 | if (bpos == bend) { |
739 | /* | |
740 | * Buffer is zero and sparse, no need to write | |
741 | * it. | |
742 | */ | |
743 | bh->b_blocknr = -1; | |
744 | clear_buffer_dirty(bh); | |
745 | continue; | |
746 | } | |
1da177e4 LT | 747 | // TODO: Instantiate the hole. |
748 | // clear_buffer_new(bh); | |
e64855c6 | 749 | // clean_bdev_bh_alias(bh); |
1da177e4 LT | 750 | ntfs_error(vol->sb, "Writing into sparse regions is " |
751 | "not supported yet. Sorry."); | |
752 | err = -EOPNOTSUPP; | |
753 | break; | |
754 | } | |
755 | /* If first try and runlist unmapped, map and retry. */ | |
756 | if (!is_retry && lcn == LCN_RL_NOT_MAPPED) { | |
c49c3111 | 757 | is_retry = true; |
1da177e4 LT | 758 | /* |
759 | * Attempt to map runlist, dropping lock for | |
760 | * the duration. | |
761 | */ | |
762 | up_read(&ni->runlist.lock); | |
763 | err = ntfs_map_runlist(ni, vcn); | |
764 | if (likely(!err)) | |
765 | goto lock_retry_remap; | |
766 | rl = NULL; | |
9f993fe4 AA | 767 | } else if (!rl) |
768 | up_read(&ni->runlist.lock); | |
8273d5d4 AA | 769 | /* |
770 | * If buffer is outside the runlist, truncate has cut it out | |
771 | * of the runlist. Just clean and clear the buffer and set it | |
772 | * uptodate so it can get discarded by the VM. | |
773 | */ | |
774 | if (err == -ENOENT || lcn == LCN_ENOENT) { | |
8273d5d4 AA | 775 | bh->b_blocknr = -1; |
776 | clear_buffer_dirty(bh); | |
eebd2aa3 | 777 | zero_user(page, bh_offset(bh), blocksize); |
8273d5d4 AA | 778 | set_buffer_uptodate(bh); |
779 | err = 0; | |
780 | continue; | |
781 | } | |
1da177e4 | 782 | /* Failed to map the buffer, even after retrying. */ |
8273d5d4 AA | 783 | if (!err) |
784 | err = -EIO; | |
1da177e4 LT | 785 | bh->b_blocknr = -1; |
786 | ntfs_error(vol->sb, "Failed to write to inode 0x%lx, " | |
787 | "attribute type 0x%x, vcn 0x%llx, offset 0x%x " | |
788 | "because its location on disk could not be " | |
8273d5d4 | 789 | "determined%s (error code %i).", ni->mft_no, |
1da177e4 LT | 790 | ni->type, (unsigned long long)vcn, |
791 | vcn_ofs, is_retry ? " even after " | |
8273d5d4 | 792 | "retrying" : "", err); |
1da177e4 LT | 793 | break; |
794 | } while (block++, (bh = bh->b_this_page) != head); | |
795 | ||
796 | /* Release the lock if we took it. */ | |
797 | if (rl) | |
798 | up_read(&ni->runlist.lock); | |
799 | ||
800 | /* For the error case, need to reset bh to the beginning. */ | |
801 | bh = head; | |
802 | ||
933906f8 | 803 | /* Just an optimization, so ->read_folio() is not called later. */ |
1da177e4 LT | 804 | if (unlikely(!PageUptodate(page))) { |
805 | int uptodate = 1; | |
806 | do { | |
807 | if (!buffer_uptodate(bh)) { | |
808 | uptodate = 0; | |
809 | bh = head; | |
810 | break; | |
811 | } | |
812 | } while ((bh = bh->b_this_page) != head); | |
813 | if (uptodate) | |
814 | SetPageUptodate(page); | |
815 | } | |
816 | ||
817 | /* Setup all mapped, dirty buffers for async write i/o. */ | |
818 | do { | |
1da177e4 LT | 819 | if (buffer_mapped(bh) && buffer_dirty(bh)) { |
820 | lock_buffer(bh); | |
821 | if (test_clear_buffer_dirty(bh)) { | |
822 | BUG_ON(!buffer_uptodate(bh)); | |
823 | mark_buffer_async_write(bh); | |
824 | } else | |
825 | unlock_buffer(bh); | |
826 | } else if (unlikely(err)) { | |
827 | /* | |
828 | * For the error case. The buffer may have been set | |
829 | * dirty during attachment to a dirty page. | |
830 | */ | |
831 | if (err != -ENOMEM) | |
832 | clear_buffer_dirty(bh); | |
833 | } | |
834 | } while ((bh = bh->b_this_page) != head); | |
835 | ||
836 | if (unlikely(err)) { | |
837 | // TODO: Remove the -EOPNOTSUPP check later on... | |
838 | if (unlikely(err == -EOPNOTSUPP)) | |
839 | err = 0; | |
840 | else if (err == -ENOMEM) { | |
841 | ntfs_warning(vol->sb, "Error allocating memory. " | |
842 | "Redirtying page so we try again " | |
843 | "later."); | |
844 | /* | |
845 | * Put the page back on mapping->dirty_pages, but | |
846 | * leave its buffer's dirty state as-is. | |
847 | */ | |
848 | redirty_page_for_writepage(wbc, page); | |
849 | err = 0; | |
850 | } else | |
851 | SetPageError(page); | |
852 | } | |
853 | ||
854 | BUG_ON(PageWriteback(page)); | |
855 | set_page_writeback(page); /* Keeps try_to_free_buffers() away. */ | |
1da177e4 | 856 | |
54b02eb0 | 857 | /* Submit the prepared buffers for i/o. */ |
c49c3111 | 858 | need_end_writeback = true; |
1da177e4 LT | 859 | do { |
860 | struct buffer_head *next = bh->b_this_page; | |
861 | if (buffer_async_write(bh)) { | |
1420c4a5 | 862 | submit_bh(REQ_OP_WRITE, bh); |
c49c3111 | 863 | need_end_writeback = false; |
1da177e4 | 864 | } |
1da177e4 LT | 865 | bh = next; |
866 | } while (bh != head); | |
54b02eb0 | 867 | unlock_page(page); |
1da177e4 LT | 868 | |
869 | /* If no i/o was started, need to end_page_writeback(). */ | |
870 | if (unlikely(need_end_writeback)) | |
871 | end_page_writeback(page); | |
872 | ||
873 | ntfs_debug("Done."); | |
874 | return err; | |
875 | } | |
876 | ||
877 | /** | |
878 | * ntfs_write_mst_block - write a @page to the backing store | |
879 | * @page: page cache page to write out | |
880 | * @wbc: writeback control structure | |
881 | * | |
882 | * This function is for writing pages belonging to non-resident, mst protected | |
883 | * attributes to their backing store. The only supported attributes are index | |
884 | * allocation and $MFT/$DATA. Both directory inodes and index inodes are | |
885 | * supported for the index allocation case. | |
886 | * | |
887 | * The page must remain locked for the duration of the write because we apply | |
888 | * the mst fixups, write, and then undo the fixups, so if we were to unlock the | |
889 | * page before undoing the fixups, any other user of the page will see the | |
890 | * page contents as corrupt. | |
891 | * | |
892 | * We clear the page uptodate flag for the duration of the function to ensure | |
893 | * exclusion for the $MFT/$DATA case against someone mapping an mft record we | |
894 | * are about to apply the mst fixups to. | |
895 | * | |
896 | * Return 0 on success and -errno on error. | |
897 | * | |
898 | * Based on ntfs_write_block(), ntfs_mft_writepage(), and | |
899 | * write_mft_record_nolock(). | |
900 | */ | |
901 | static int ntfs_write_mst_block(struct page *page, | |
902 | struct writeback_control *wbc) | |
903 | { | |
904 | sector_t block, dblock, rec_block; | |
905 | struct inode *vi = page->mapping->host; | |
906 | ntfs_inode *ni = NTFS_I(vi); | |
907 | ntfs_volume *vol = ni->vol; | |
908 | u8 *kaddr; | |
1da177e4 | 909 | unsigned int rec_size = ni->itype.index.block_size; |
ac4ecf96 | 910 | ntfs_inode *locked_nis[PAGE_SIZE / NTFS_BLOCK_SIZE]; |
1da177e4 | 911 | struct buffer_head *bh, *head, *tbh, *rec_start_bh; |
d53ee322 | 912 | struct buffer_head *bhs[MAX_BUF_PER_PAGE]; |
1da177e4 | 913 | runlist_element *rl; |
d53ee322 AA | 914 | int i, nr_locked_nis, nr_recs, nr_bhs, max_bhs, bhs_per_rec, err, err2; |
915 | unsigned bh_size, rec_size_bits; | |
c49c3111 | 916 | bool sync, is_mft, page_is_dirty, rec_is_dirty; |
d53ee322 | 917 | unsigned char bh_size_bits; |
1da177e4 | 918 | |
ac4ecf96 KC | 919 | if (WARN_ON(rec_size < NTFS_BLOCK_SIZE)) |
920 | return -EINVAL; | |
921 | ||
1da177e4 LT | 922 | ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index " |
923 | "0x%lx.", vi->i_ino, ni->type, page->index); | |
924 | BUG_ON(!NInoNonResident(ni)); | |
925 | BUG_ON(!NInoMstProtected(ni)); | |
926 | is_mft = (S_ISREG(vi->i_mode) && !vi->i_ino); | |
927 | /* | |
928 | * NOTE: ntfs_write_mst_block() would be called for $MFTMirr if a page | |
929 | * in its page cache were to be marked dirty. However this should | |
930 | * never happen with the current driver and considering we do not | |
931 | * handle this case here we do want to BUG(), at least for now. | |
932 | */ | |
933 | BUG_ON(!(is_mft || S_ISDIR(vi->i_mode) || | |
934 | (NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION))); | |
78af34f0 AA | 935 | bh_size = vol->sb->s_blocksize; |
936 | bh_size_bits = vol->sb->s_blocksize_bits; | |
09cbfeaf | 937 | max_bhs = PAGE_SIZE / bh_size; |
1da177e4 | 938 | BUG_ON(!max_bhs); |
d53ee322 | 939 | BUG_ON(max_bhs > MAX_BUF_PER_PAGE); |
1da177e4 LT | 940 | |
941 | /* Were we called for sync purposes? */ | |
942 | sync = (wbc->sync_mode == WB_SYNC_ALL); | |
943 | ||
944 | /* Make sure we have mapped buffers. */ | |
1da177e4 LT | 945 | bh = head = page_buffers(page); |
946 | BUG_ON(!bh); | |
947 | ||
948 | rec_size_bits = ni->itype.index.block_size_bits; | |
09cbfeaf | 949 | BUG_ON(!(PAGE_SIZE >> rec_size_bits)); |
1da177e4 LT | 950 | bhs_per_rec = rec_size >> bh_size_bits; |
951 | BUG_ON(!bhs_per_rec); | |
952 | ||
953 | /* The first block in the page. */ | |
954 | rec_block = block = (sector_t)page->index << | |
09cbfeaf | 955 | (PAGE_SHIFT - bh_size_bits); |
1da177e4 LT | 956 | |
957 | /* The first out of bounds block for the data size. */ | |
07a4e2da | 958 | dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits; |
1da177e4 LT | 959 | |
960 | rl = NULL; | |
961 | err = err2 = nr_bhs = nr_recs = nr_locked_nis = 0; | |
c49c3111 | 962 | page_is_dirty = rec_is_dirty = false; |
1da177e4 LT | 963 | rec_start_bh = NULL; |
964 | do { | |
c49c3111 | 965 | bool is_retry = false; |
1da177e4 LT | 966 | |
967 | if (likely(block < rec_block)) { | |
968 | if (unlikely(block >= dblock)) { | |
969 | clear_buffer_dirty(bh); | |
946929d8 | 970 | set_buffer_uptodate(bh); |
1da177e4 LT | 971 | continue; |
972 | } | |
973 | /* | |
974 | * This block is not the first one in the record. We | |
975 | * ignore the buffer's dirty state because we could | |
976 | * have raced with a parallel mark_ntfs_record_dirty(). | |
977 | */ | |
978 | if (!rec_is_dirty) | |
979 | continue; | |
980 | if (unlikely(err2)) { | |
981 | if (err2 != -ENOMEM) | |
982 | clear_buffer_dirty(bh); | |
983 | continue; | |
984 | } | |
985 | } else /* if (block == rec_block) */ { | |
986 | BUG_ON(block > rec_block); | |
987 | /* This block is the first one in the record. */ | |
988 | rec_block += bhs_per_rec; | |
989 | err2 = 0; | |
990 | if (unlikely(block >= dblock)) { | |
991 | clear_buffer_dirty(bh); | |
992 | continue; | |
993 | } | |
994 | if (!buffer_dirty(bh)) { | |
995 | /* Clean records are not written out. */ | |
c49c3111 | 996 | rec_is_dirty = false; |
1da177e4 LT | 997 | continue; |
998 | } | |
c49c3111 | 999 | rec_is_dirty = true; |
1da177e4 LT | 1000 | rec_start_bh = bh; |
1001 | } | |
1002 | /* Need to map the buffer if it is not mapped already. */ | |
1003 | if (unlikely(!buffer_mapped(bh))) { | |
1004 | VCN vcn; | |
1005 | LCN lcn; | |
1006 | unsigned int vcn_ofs; | |
1007 | ||
481d0374 | 1008 | bh->b_bdev = vol->sb->s_bdev; |
1da177e4 LT | 1009 | /* Obtain the vcn and offset of the current block. */ |
1010 | vcn = (VCN)block << bh_size_bits; | |
1011 | vcn_ofs = vcn & vol->cluster_size_mask; | |
1012 | vcn >>= vol->cluster_size_bits; | |
1013 | if (!rl) { | |
1014 | lock_retry_remap: | |
1015 | down_read(&ni->runlist.lock); | |
1016 | rl = ni->runlist.rl; | |
1017 | } | |
1018 | if (likely(rl != NULL)) { | |
1019 | /* Seek to element containing target vcn. */ | |
1020 | while (rl->length && rl[1].vcn <= vcn) | |
1021 | rl++; | |
1022 | lcn = ntfs_rl_vcn_to_lcn(rl, vcn); | |
1023 | } else | |
1024 | lcn = LCN_RL_NOT_MAPPED; | |
1025 | /* Successful remap. */ | |
1026 | if (likely(lcn >= 0)) { | |
1027 | /* Setup buffer head to correct block. */ | |
1028 | bh->b_blocknr = ((lcn << | |
1029 | vol->cluster_size_bits) + | |
1030 | vcn_ofs) >> bh_size_bits; | |
1031 | set_buffer_mapped(bh); | |
1032 | } else { | |
1033 | /* | |
1034 | * Remap failed. Retry to map the runlist once | |
1035 | * unless we are working on $MFT which always | |
1036 | * has the whole of its runlist in memory. | |
1037 | */ | |
1038 | if (!is_mft && !is_retry && | |
1039 | lcn == LCN_RL_NOT_MAPPED) { | |
c49c3111 | 1040 | is_retry = true; |
1da177e4 LT | 1041 | /* |
1042 | * Attempt to map runlist, dropping | |
1043 | * lock for the duration. | |
1044 | */ | |
1045 | up_read(&ni->runlist.lock); | |
1046 | err2 = ntfs_map_runlist(ni, vcn); | |
1047 | if (likely(!err2)) | |
1048 | goto lock_retry_remap; | |
1049 | if (err2 == -ENOMEM) | |
c49c3111 | 1050 | page_is_dirty = true; |
1da177e4 | 1051 | lcn = err2; |
9f993fe4 | 1052 | } else { |
1da177e4 | 1053 | err2 = -EIO; |
9f993fe4 AA | 1054 | if (!rl) |
1055 | up_read(&ni->runlist.lock); | |
1056 | } | |
1da177e4 LT | 1057 | /* Hard error. Abort writing this record. */ |
1058 | if (!err || err == -ENOMEM) | |
1059 | err = err2; | |
1060 | bh->b_blocknr = -1; | |
1061 | ntfs_error(vol->sb, "Cannot write ntfs record " | |
1062 | "0x%llx (inode 0x%lx, " | |
1063 | "attribute type 0x%x) because " | |
1064 | "its location on disk could " | |
1065 | "not be determined (error " | |
8907547d RD | 1066 | "code %lli).", |
1067 | (long long)block << | |
1da177e4 LT | 1068 | bh_size_bits >> |
1069 | vol->mft_record_size_bits, | |
1070 | ni->mft_no, ni->type, | |
1071 | (long long)lcn); | |
1072 | /* | |
1073 | * If this is not the first buffer, remove the | |
1074 | * buffers in this record from the list of | |
1075 | * buffers to write and clear their dirty bit | |
1076 | * if not error -ENOMEM. | |
1077 | */ | |
1078 | if (rec_start_bh != bh) { | |
1079 | while (bhs[--nr_bhs] != rec_start_bh) | |
1080 | ; | |
1081 | if (err2 != -ENOMEM) { | |
1082 | do { | |
1083 | clear_buffer_dirty( | |
1084 | rec_start_bh); | |
1085 | } while ((rec_start_bh = | |
1086 | rec_start_bh-> | |
1087 | b_this_page) != | |
1088 | bh); | |
1089 | } | |
1090 | } | |
1091 | continue; | |
1092 | } | |
1093 | } | |
1094 | BUG_ON(!buffer_uptodate(bh)); | |
1095 | BUG_ON(nr_bhs >= max_bhs); | |
1096 | bhs[nr_bhs++] = bh; | |
1097 | } while (block++, (bh = bh->b_this_page) != head); | |
1098 | if (unlikely(rl)) | |
1099 | up_read(&ni->runlist.lock); | |
1100 | /* If there were no dirty buffers, we are done. */ | |
1101 | if (!nr_bhs) | |
1102 | goto done; | |
1103 | /* Map the page so we can access its contents. */ | |
1104 | kaddr = kmap(page); | |
1105 | /* Clear the page uptodate flag whilst the mst fixups are applied. */ | |
1106 | BUG_ON(!PageUptodate(page)); | |
1107 | ClearPageUptodate(page); | |
1108 | for (i = 0; i < nr_bhs; i++) { | |
1109 | unsigned int ofs; | |
1110 | ||
1111 | /* Skip buffers which are not at the beginning of records. */ | |
1112 | if (i % bhs_per_rec) | |
1113 | continue; | |
1114 | tbh = bhs[i]; | |
1115 | ofs = bh_offset(tbh); | |
1116 | if (is_mft) { | |
1117 | ntfs_inode *tni; | |
1118 | unsigned long mft_no; | |
1119 | ||
1120 | /* Get the mft record number. */ | |
09cbfeaf | 1121 | mft_no = (((s64)page->index << PAGE_SHIFT) + ofs) |
1da177e4 LT | 1122 | >> rec_size_bits; |
1123 | /* Check whether to write this mft record. */ | |
1124 | tni = NULL; | |
1125 | if (!ntfs_may_write_mft_record(vol, mft_no, | |
1126 | (MFT_RECORD*)(kaddr + ofs), &tni)) { | |
1127 | /* | |
1128 | * The record should not be written. This | |
1129 | * means we need to redirty the page before | |
1130 | * returning. | |
1131 | */ | |
c49c3111 | 1132 | page_is_dirty = true; |
1da177e4 LT | 1133 | /* |
1134 | * Remove the buffers in this mft record from | |
1135 | * the list of buffers to write. | |
1136 | */ | |
1137 | do { | |
1138 | bhs[i] = NULL; | |
1139 | } while (++i % bhs_per_rec); | |
1140 | continue; | |
1141 | } | |
1142 | /* | |
1143 | * The record should be written. If a locked ntfs | |
1144 | * inode was returned, add it to the array of locked | |
1145 | * ntfs inodes. | |
1146 | */ | |
1147 | if (tni) | |
1148 | locked_nis[nr_locked_nis++] = tni; | |
1149 | } | |
1150 | /* Apply the mst protection fixups. */ | |
1151 | err2 = pre_write_mst_fixup((NTFS_RECORD*)(kaddr + ofs), | |
1152 | rec_size); | |
1153 | if (unlikely(err2)) { | |
1154 | if (!err || err == -ENOMEM) | |
1155 | err = -EIO; | |
1156 | ntfs_error(vol->sb, "Failed to apply mst fixups " | |
1157 | "(inode 0x%lx, attribute type 0x%x, " | |
1158 | "page index 0x%lx, page offset 0x%x)!" | |
1159 | " Unmount and run chkdsk.", vi->i_ino, | |
1160 | ni->type, page->index, ofs); | |
1161 | /* | |
1162 | * Mark all the buffers in this record clean as we do | |
1163 | * not want to write corrupt data to disk. | |
1164 | */ | |
1165 | do { | |
1166 | clear_buffer_dirty(bhs[i]); | |
1167 | bhs[i] = NULL; | |
1168 | } while (++i % bhs_per_rec); | |
1169 | continue; | |
1170 | } | |
1171 | nr_recs++; | |
1172 | } | |
1173 | /* If no records are to be written out, we are done. */ | |
1174 | if (!nr_recs) | |
1175 | goto unm_done; | |
1176 | flush_dcache_page(page); | |
1177 | /* Lock buffers and start synchronous write i/o on them. */ | |
1178 | for (i = 0; i < nr_bhs; i++) { | |
1179 | tbh = bhs[i]; | |
1180 | if (!tbh) | |
1181 | continue; | |
ca5de404 | 1182 | if (!trylock_buffer(tbh)) |
1da177e4 LT | 1183 | BUG(); |
1184 | /* The buffer dirty state is now irrelevant, just clean it. */ | |
1185 | clear_buffer_dirty(tbh); | |
1186 | BUG_ON(!buffer_uptodate(tbh)); | |
1187 | BUG_ON(!buffer_mapped(tbh)); | |
1188 | get_bh(tbh); | |
1189 | tbh->b_end_io = end_buffer_write_sync; | |
1420c4a5 | 1190 | submit_bh(REQ_OP_WRITE, tbh); |
1da177e4 LT | 1191 | } |
1192 | /* Synchronize the mft mirror now if not @sync. */ | |
1193 | if (is_mft && !sync) | |
1194 | goto do_mirror; | |
1195 | do_wait: | |
1196 | /* Wait on i/o completion of buffers. */ | |
1197 | for (i = 0; i < nr_bhs; i++) { | |
1198 | tbh = bhs[i]; | |
1199 | if (!tbh) | |
1200 | continue; | |
1201 | wait_on_buffer(tbh); | |
1202 | if (unlikely(!buffer_uptodate(tbh))) { | |
1203 | ntfs_error(vol->sb, "I/O error while writing ntfs " | |
1204 | "record buffer (inode 0x%lx, " | |
1205 | "attribute type 0x%x, page index " | |
1206 | "0x%lx, page offset 0x%lx)! Unmount " | |
1207 | "and run chkdsk.", vi->i_ino, ni->type, | |
1208 | page->index, bh_offset(tbh)); | |
1209 | if (!err || err == -ENOMEM) | |
1210 | err = -EIO; | |
1211 | /* | |
1212 | * Set the buffer uptodate so the page and buffer | |
1213 | * states do not become out of sync. | |
1214 | */ | |
1215 | set_buffer_uptodate(tbh); | |
1216 | } | |
1217 | } | |
1218 | /* If @sync, now synchronize the mft mirror. */ | |
1219 | if (is_mft && sync) { | |
1220 | do_mirror: | |
1221 | for (i = 0; i < nr_bhs; i++) { | |
1222 | unsigned long mft_no; | |
1223 | unsigned int ofs; | |
1224 | ||
1225 | /* | |
1226 | * Skip buffers which are not at the beginning of | |
1227 | * records. | |
1228 | */ | |
1229 | if (i % bhs_per_rec) | |
1230 | continue; | |
1231 | tbh = bhs[i]; | |
1232 | /* Skip removed buffers (and hence records). */ | |
1233 | if (!tbh) | |
1234 | continue; | |
1235 | ofs = bh_offset(tbh); | |
1236 | /* Get the mft record number. */ | |
09cbfeaf | 1237 | mft_no = (((s64)page->index << PAGE_SHIFT) + ofs) |
1da177e4 LT | 1238 | >> rec_size_bits; |
1239 | if (mft_no < vol->mftmirr_size) | |
1240 | ntfs_sync_mft_mirror(vol, mft_no, | |
1241 | (MFT_RECORD*)(kaddr + ofs), | |
1242 | sync); | |
1243 | } | |
1244 | if (!sync) | |
1245 | goto do_wait; | |
1246 | } | |
1247 | /* Remove the mst protection fixups again. */ | |
1248 | for (i = 0; i < nr_bhs; i++) { | |
1249 | if (!(i % bhs_per_rec)) { | |
1250 | tbh = bhs[i]; | |
1251 | if (!tbh) | |
1252 | continue; | |
1253 | post_write_mst_fixup((NTFS_RECORD*)(kaddr + | |
1254 | bh_offset(tbh))); | |
1255 | } | |
1256 | } | |
1257 | flush_dcache_page(page); | |
1258 | unm_done: | |
1259 | /* Unlock any locked inodes. */ | |
1260 | while (nr_locked_nis-- > 0) { | |
1261 | ntfs_inode *tni, *base_tni; | |
1262 | ||
1263 | tni = locked_nis[nr_locked_nis]; | |
1264 | /* Get the base inode. */ | |
4e5e529a | 1265 | mutex_lock(&tni->extent_lock); |
1da177e4 LT | 1266 | if (tni->nr_extents >= 0) |
1267 | base_tni = tni; | |
1268 | else { | |
1269 | base_tni = tni->ext.base_ntfs_ino; | |
1270 | BUG_ON(!base_tni); | |
1271 | } | |
4e5e529a | 1272 | mutex_unlock(&tni->extent_lock); |
1da177e4 LT | 1273 | ntfs_debug("Unlocking %s inode 0x%lx.", |
1274 | tni == base_tni ? "base" : "extent", | |
1275 | tni->mft_no); | |
4e5e529a | 1276 | mutex_unlock(&tni->mrec_lock); |
1da177e4 LT | 1277 | atomic_dec(&tni->count); |
1278 | iput(VFS_I(base_tni)); | |
1279 | } | |
1280 | SetPageUptodate(page); | |
1281 | kunmap(page); | |
1282 | done: | |
1283 | if (unlikely(err && err != -ENOMEM)) { | |
1284 | /* | |
1285 | * Set page error if there is only one ntfs record in the page. | |
1286 | * Otherwise we would lose per-record granularity. | |
1287 | */ | |
09cbfeaf | 1288 | if (ni->itype.index.block_size == PAGE_SIZE) |
1da177e4 LT | 1289 | SetPageError(page); |
1290 | NVolSetErrors(vol); | |
1291 | } | |
1292 | if (page_is_dirty) { | |
1293 | ntfs_debug("Page still contains one or more dirty ntfs " | |
1294 | "records. Redirtying the page starting at " | |
1295 | "record 0x%lx.", page->index << | |
09cbfeaf | 1296 | (PAGE_SHIFT - rec_size_bits)); |
1da177e4 LT | 1297 | redirty_page_for_writepage(wbc, page); |
1298 | unlock_page(page); | |
1299 | } else { | |
1300 | /* | |
1301 | * Keep the VM happy. This must be done otherwise the | |
1302 | * radix-tree tag PAGECACHE_TAG_DIRTY remains set even though | |
1303 | * the page is clean. | |
1304 | */ | |
1305 | BUG_ON(PageWriteback(page)); | |
1306 | set_page_writeback(page); | |
1307 | unlock_page(page); | |
1308 | end_page_writeback(page); | |
1309 | } | |
1310 | if (likely(!err)) | |
1311 | ntfs_debug("Done."); | |
1312 | return err; | |
1313 | } | |
1314 | ||
1315 | /** | |
1316 | * ntfs_writepage - write a @page to the backing store | |
1317 | * @page: page cache page to write out | |
1318 | * @wbc: writeback control structure | |
1319 | * | |
1320 | * This is called from the VM when it wants to have a dirty ntfs page cache | |
1321 | * page cleaned. The VM has already locked the page and marked it clean. | |
1322 | * | |
1323 | * For non-resident attributes, ntfs_writepage() writes the @page by calling | |
1324 | * the ntfs version of the generic block_write_full_page() function, | |
1325 | * ntfs_write_block(), which in turn if necessary creates and writes the | |
1326 | * buffers associated with the page asynchronously. | |
1327 | * | |
1328 | * For resident attributes, OTOH, ntfs_writepage() writes the @page by copying | |
1329 | * the data to the mft record (which at this stage is most likely in memory). | |
1330 | * The mft record is then marked dirty and written out asynchronously via the | |
1331 | * vfs inode dirty code path for the inode the mft record belongs to or via the | |
1332 | * vm page dirty code path for the page the mft record is in. | |
1333 | * | |
933906f8 | 1334 | * Based on ntfs_read_folio() and fs/buffer.c::block_write_full_page(). |
1da177e4 LT | 1335 | * |
1336 | * Return 0 on success and -errno on error. | |
1337 | */ | |
1338 | static int ntfs_writepage(struct page *page, struct writeback_control *wbc) | |
1339 | { | |
1340 | loff_t i_size; | |
149f0c52 AA | 1341 | struct inode *vi = page->mapping->host; |
1342 | ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi); | |
bfab36e8 | 1343 | char *addr; |
149f0c52 AA | 1344 | ntfs_attr_search_ctx *ctx = NULL; |
1345 | MFT_RECORD *m = NULL; | |
1da177e4 LT | 1346 | u32 attr_len; |
1347 | int err; | |
1348 | ||
905685f6 | 1349 | retry_writepage: |
1da177e4 | 1350 | BUG_ON(!PageLocked(page)); |
1da177e4 | 1351 | i_size = i_size_read(vi); |
1da177e4 | 1352 | /* Is the page fully outside i_size? (truncate in progress) */ |
09cbfeaf KS | 1353 | if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >> |
1354 | PAGE_SHIFT)) { | |
7ba13abb | 1355 | struct folio *folio = page_folio(page); |
1da177e4 LT | 1356 | /* |
1357 | * The page may have dirty, unmapped buffers. Make them | |
1358 | * freeable here, so the page does not leak. | |
1359 | */ | |
7ba13abb MWO | 1360 | block_invalidate_folio(folio, 0, folio_size(folio)); |
1361 | folio_unlock(folio); | |
1da177e4 LT | 1362 | ntfs_debug("Write outside i_size - truncated?"); |
1363 | return 0; | |
1364 | } | |
bd45fdd2 AA | 1365 | /* |
1366 | * Only $DATA attributes can be encrypted and only unnamed $DATA | |
1367 | * attributes can be compressed. Index root can have the flags set but | |
1368 | * this means to create compressed/encrypted files, not that the | |
4e64c886 AA | 1369 | * attribute is compressed/encrypted. Note we need to check for |
1370 | * AT_INDEX_ALLOCATION since this is the type of both directory and | |
1371 | * index inodes. | |
bd45fdd2 | 1372 | */ |
4e64c886 | 1373 | if (ni->type != AT_INDEX_ALLOCATION) { |
bd45fdd2 AA | 1374 | /* If file is encrypted, deny access, just like NT4. */ |
1375 | if (NInoEncrypted(ni)) { | |
1376 | unlock_page(page); | |
1377 | BUG_ON(ni->type != AT_DATA); | |
7d0ffdb2 | 1378 | ntfs_debug("Denying write access to encrypted file."); |
bd45fdd2 AA | 1379 | return -EACCES; |
1380 | } | |
1381 | /* Compressed data streams are handled in compress.c. */ | |
1382 | if (NInoNonResident(ni) && NInoCompressed(ni)) { | |
1383 | BUG_ON(ni->type != AT_DATA); | |
1384 | BUG_ON(ni->name_len); | |
1385 | // TODO: Implement and replace this with | |
1386 | // return ntfs_write_compressed_block(page); | |
1387 | unlock_page(page); | |
1388 | ntfs_error(vi->i_sb, "Writing to compressed files is " | |
1389 | "not supported yet. Sorry."); | |
1390 | return -EOPNOTSUPP; | |
1391 | } | |
1392 | // TODO: Implement and remove this check. | |
1393 | if (NInoNonResident(ni) && NInoSparse(ni)) { | |
1394 | unlock_page(page); | |
1395 | ntfs_error(vi->i_sb, "Writing to sparse files is not " | |
1396 | "supported yet. Sorry."); | |
1397 | return -EOPNOTSUPP; | |
1398 | } | |
1399 | } | |
1da177e4 LT | 1400 | /* NInoNonResident() == NInoIndexAllocPresent() */ |
1401 | if (NInoNonResident(ni)) { | |
1da177e4 | 1402 | /* We have to zero every time due to mmap-at-end-of-file. */ |
09cbfeaf | 1403 | if (page->index >= (i_size >> PAGE_SHIFT)) { |
1da177e4 | 1404 | /* The page straddles i_size. */ |
09cbfeaf KS | 1405 | unsigned int ofs = i_size & ~PAGE_MASK; |
1406 | zero_user_segment(page, ofs, PAGE_SIZE); | |
1da177e4 LT | 1407 | } |
1408 | /* Handle mst protected attributes. */ | |
1409 | if (NInoMstProtected(ni)) | |
1410 | return ntfs_write_mst_block(page, wbc); | |
bd45fdd2 | 1411 | /* Normal, non-resident data stream. */ |
1da177e4 LT | 1412 | return ntfs_write_block(page, wbc); |
1413 | } | |
1414 | /* | |
bd45fdd2 AA | 1415 | * Attribute is resident, implying it is not compressed, encrypted, or |
1416 | * mst protected. This also means the attribute is smaller than an mft | |
1417 | * record and hence smaller than a page, so can simply return error on | |
1418 | * any pages with index above 0. Note the attribute can actually be | |
1419 | * marked compressed but if it is resident the actual data is not | |
1420 | * compressed so we are ok to ignore the compressed flag here. | |
1da177e4 LT | 1421 | */ |
1422 | BUG_ON(page_has_buffers(page)); | |
1423 | BUG_ON(!PageUptodate(page)); | |
1424 | if (unlikely(page->index > 0)) { | |
1425 | ntfs_error(vi->i_sb, "BUG()! page->index (0x%lx) > 0. " | |
1426 | "Aborting write.", page->index); | |
1427 | BUG_ON(PageWriteback(page)); | |
1428 | set_page_writeback(page); | |
1429 | unlock_page(page); | |
1430 | end_page_writeback(page); | |
1431 | return -EIO; | |
1432 | } | |
1433 | if (!NInoAttr(ni)) | |
1434 | base_ni = ni; | |
1435 | else | |
1436 | base_ni = ni->ext.base_ntfs_ino; | |
1437 | /* Map, pin, and lock the mft record. */ | |
1438 | m = map_mft_record(base_ni); | |
1439 | if (IS_ERR(m)) { | |
1440 | err = PTR_ERR(m); | |
1441 | m = NULL; | |
1442 | ctx = NULL; | |
1443 | goto err_out; | |
1444 | } | |
905685f6 AA |
1445 | /* |
1446 | * If a parallel write made the attribute non-resident, drop the mft | |
1447 | * record and retry the writepage. | |
1448 | */ | |
1449 | if (unlikely(NInoNonResident(ni))) { | |
1450 | unmap_mft_record(base_ni); | |
1451 | goto retry_writepage; | |
1452 | } | |
1da177e4 LT |
1453 | ctx = ntfs_attr_get_search_ctx(base_ni, m); |
1454 | if (unlikely(!ctx)) { | |
1455 | err = -ENOMEM; | |
1456 | goto err_out; | |
1457 | } | |
1458 | err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len, | |
1459 | CASE_SENSITIVE, 0, NULL, 0, ctx); | |
1460 | if (unlikely(err)) | |
1461 | goto err_out; | |
1462 | /* | |
1463 | * Keep the VM happy. This must be done, otherwise the radix-tree tag |
1464 | * PAGECACHE_TAG_DIRTY remains set even though the page is clean. | |
1465 | */ | |
1466 | BUG_ON(PageWriteback(page)); | |
1467 | set_page_writeback(page); | |
1468 | unlock_page(page); | |
1da177e4 | 1469 | attr_len = le32_to_cpu(ctx->attr->data.resident.value_length); |
07a4e2da | 1470 | i_size = i_size_read(vi); |
1da177e4 | 1471 | if (unlikely(attr_len > i_size)) { |
f6098cf4 | 1472 | /* Race with shrinking truncate or a failed truncate. */ |
1da177e4 | 1473 | attr_len = i_size; |
f6098cf4 AA |
1474 | /* |
1475 | * If the truncate failed, fix it up now. If this is a concurrent |
1476 | * truncate, we do its job for it, so it does not have to do anything. |
1477 | */ | |
1478 | err = ntfs_resident_attr_value_resize(ctx->mrec, ctx->attr, | |
1479 | attr_len); | |
1480 | /* Shrinking cannot fail. */ | |
1481 | BUG_ON(err); | |
1da177e4 | 1482 | } |
a3ac1414 | 1483 | addr = kmap_atomic(page); |
1da177e4 LT |
1484 | /* Copy the data from the page to the mft record. */ |
1485 | memcpy((u8*)ctx->attr + | |
1486 | le16_to_cpu(ctx->attr->data.resident.value_offset), | |
bfab36e8 | 1487 | addr, attr_len); |
1da177e4 | 1488 | /* Zero out of bounds area in the page cache page. */ |
09cbfeaf | 1489 | memset(addr + attr_len, 0, PAGE_SIZE - attr_len); |
a3ac1414 | 1490 | kunmap_atomic(addr); |
f6098cf4 | 1491 | flush_dcache_page(page); |
7d0ffdb2 | 1492 | flush_dcache_mft_record_page(ctx->ntfs_ino); |
f6098cf4 | 1493 | /* We are done with the page. */ |
1da177e4 | 1494 | end_page_writeback(page); |
f6098cf4 | 1495 | /* Finally, mark the mft record dirty, so it gets written back. */ |
1da177e4 LT |
1496 | mark_mft_record_dirty(ctx->ntfs_ino); |
1497 | ntfs_attr_put_search_ctx(ctx); | |
1498 | unmap_mft_record(base_ni); | |
1499 | return 0; | |
1500 | err_out: | |
1501 | if (err == -ENOMEM) { | |
1502 | ntfs_warning(vi->i_sb, "Error allocating memory. Redirtying " | |
1503 | "page so we try again later."); | |
1504 | /* | |
1505 | * Put the page back on mapping->dirty_pages, but leave its | |
1506 | * buffers' dirty state as-is. | |
1507 | */ | |
1508 | redirty_page_for_writepage(wbc, page); | |
1509 | err = 0; | |
1510 | } else { | |
1511 | ntfs_error(vi->i_sb, "Resident attribute write failed with " | |
149f0c52 | 1512 | "error %i.", err); |
1da177e4 | 1513 | SetPageError(page); |
149f0c52 | 1514 | NVolSetErrors(ni->vol); |
1da177e4 LT |
1515 | } |
1516 | unlock_page(page); | |
1517 | if (ctx) | |
1518 | ntfs_attr_put_search_ctx(ctx); | |
1519 | if (m) | |
1520 | unmap_mft_record(base_ni); | |
1521 | return err; | |
1522 | } | |
1523 | ||
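/*
 * Editorial sketch (not part of the driver): the resident write-out in
 * ntfs_writepage() in miniature.  The helper name, the 4096-byte page
 * size and the 300-byte attribute length are all hypothetical; the point
 * is simply that the attribute value inside the mft record is overwritten
 * from the start of the page and the page tail beyond the value is zeroed
 * so no stale bytes survive past the attribute.
 */
#if 0	/* illustrative only, never compiled */
static void example_resident_writeout(u8 *attr_value, u8 *page_addr,
		u32 attr_len)
{
	/* e.g. attr_len == 300: bytes 0..299 go into the mft record... */
	memcpy(attr_value, page_addr, attr_len);
	/* ...and bytes 300..4095 of the page cache page are cleared. */
	memset(page_addr + attr_len, 0, 4096 - attr_len);
}
#endif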
1da177e4 LT |
1524 | #endif /* NTFS_RW */ |
1525 | ||
3f7fc6f2 AA |
1526 | /** |
1527 | * ntfs_bmap - map logical file block to physical device block | |
1528 | * @mapping: address space mapping to which the block to be mapped belongs | |
1529 | * @block: logical block to map to its physical device block | |
1530 | * | |
1531 | * For regular, non-resident files (i.e. not compressed and not encrypted), map | |
1532 | * the logical @block belonging to the file described by the address space | |
1533 | * mapping @mapping to its physical device block. | |
1534 | * | |
1535 | * The size of the block is equal to the @s_blocksize field of the super block | |
1536 | * of the mounted file system, which is guaranteed to be smaller than or equal |
1537 | * to the cluster size. Thus the block is guaranteed to fit entirely inside the |
1538 | * cluster, which means we do not need to care how many contiguous bytes are |
1539 | * available after the beginning of the block. | |
1540 | * | |
1541 | * Return the physical device block if the mapping succeeded or 0 if the block | |
1542 | * is sparse or there was an error. | |
1543 | * | |
1544 | * Note: This is a problem if someone tries to run bmap() on the $Boot system |
1545 | * file as that really is in block zero but there is nothing we can do. bmap() is |
1546 | * just broken in that respect (just like it cannot distinguish sparse from | |
1547 | * not available or error). | |
1548 | */ | |
1549 | static sector_t ntfs_bmap(struct address_space *mapping, sector_t block) | |
1550 | { | |
1551 | s64 ofs, size; | |
1552 | loff_t i_size; | |
1553 | LCN lcn; | |
1554 | unsigned long blocksize, flags; | |
1555 | ntfs_inode *ni = NTFS_I(mapping->host); | |
1556 | ntfs_volume *vol = ni->vol; | |
1557 | unsigned delta; | |
1558 | unsigned char blocksize_bits, cluster_size_shift; | |
1559 | ||
1560 | ntfs_debug("Entering for mft_no 0x%lx, logical block 0x%llx.", | |
1561 | ni->mft_no, (unsigned long long)block); | |
1562 | if (ni->type != AT_DATA || !NInoNonResident(ni) || NInoEncrypted(ni)) { | |
1563 | ntfs_error(vol->sb, "BMAP does not make sense for %s " | |
1564 | "attributes, returning 0.", | |
1565 | (ni->type != AT_DATA) ? "non-data" : | |
1566 | (!NInoNonResident(ni) ? "resident" : | |
1567 | "encrypted")); | |
1568 | return 0; | |
1569 | } | |
1570 | /* None of these can happen. */ | |
1571 | BUG_ON(NInoCompressed(ni)); | |
1572 | BUG_ON(NInoMstProtected(ni)); | |
1573 | blocksize = vol->sb->s_blocksize; | |
1574 | blocksize_bits = vol->sb->s_blocksize_bits; | |
1575 | ofs = (s64)block << blocksize_bits; | |
1576 | read_lock_irqsave(&ni->size_lock, flags); | |
1577 | size = ni->initialized_size; | |
1578 | i_size = i_size_read(VFS_I(ni)); | |
1579 | read_unlock_irqrestore(&ni->size_lock, flags); | |
1580 | /* | |
1581 | * If the offset is outside the initialized size or the block straddles | |
1582 | * the initialized size then pretend it is a hole unless the | |
1583 | * initialized size equals the file size. | |
1584 | */ | |
1585 | if (unlikely(ofs >= size || (ofs + blocksize > size && size < i_size))) | |
1586 | goto hole; | |
1587 | cluster_size_shift = vol->cluster_size_bits; | |
1588 | down_read(&ni->runlist.lock); | |
1589 | lcn = ntfs_attr_vcn_to_lcn_nolock(ni, ofs >> cluster_size_shift, false); | |
1590 | up_read(&ni->runlist.lock); | |
1591 | if (unlikely(lcn < LCN_HOLE)) { | |
1592 | /* | |
1593 | * Step down to an integer to avoid gcc doing a long long | |
1594 | * comparison in the switch when we know @lcn is between |
1595 | * LCN_HOLE and LCN_EIO (i.e. -1 to -5). | |
1596 | * | |
1597 | * Otherwise older gcc (at least on some architectures) will | |
1598 | * try to use __cmpdi2() which is of course not available in | |
1599 | * the kernel. | |
1600 | */ | |
1601 | switch ((int)lcn) { | |
1602 | case LCN_ENOENT: | |
1603 | /* | |
1604 | * If the offset is out of bounds then pretend it is a | |
1605 | * hole. | |
1606 | */ | |
1607 | goto hole; | |
1608 | case LCN_ENOMEM: | |
1609 | ntfs_error(vol->sb, "Not enough memory to complete " | |
1610 | "mapping for inode 0x%lx. " | |
1611 | "Returning 0.", ni->mft_no); | |
1612 | break; | |
1613 | default: | |
1614 | ntfs_error(vol->sb, "Failed to complete mapping for " | |
1615 | "inode 0x%lx. Run chkdsk. " | |
1616 | "Returning 0.", ni->mft_no); | |
1617 | break; | |
1618 | } | |
1619 | return 0; | |
1620 | } | |
1621 | if (lcn < 0) { | |
1622 | /* It is a hole. */ | |
1623 | hole: | |
1624 | ntfs_debug("Done (returning hole)."); | |
1625 | return 0; | |
1626 | } | |
1627 | /* | |
1628 | * The block is really allocated and fulfils all our criteria. |
1629 | * Convert the cluster to units of block size and return the result. | |
1630 | */ | |
1631 | delta = ofs & vol->cluster_size_mask; | |
1632 | if (unlikely(sizeof(block) < sizeof(lcn))) { | |
1633 | block = lcn = ((lcn << cluster_size_shift) + delta) >> | |
1634 | blocksize_bits; | |
1635 | /* If the block number was truncated return 0. */ | |
1636 | if (unlikely(block != lcn)) { | |
1637 | ntfs_error(vol->sb, "Physical block 0x%llx is too " | |
1638 | "large to be returned, returning 0.", | |
1639 | (long long)lcn); | |
1640 | return 0; | |
1641 | } | |
1642 | } else | |
1643 | block = ((lcn << cluster_size_shift) + delta) >> | |
1644 | blocksize_bits; | |
1645 | ntfs_debug("Done (returning block 0x%llx).", (unsigned long long)block); |
1646 | return block; | |
1647 | } | |
1648 | ||
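/*
 * Editorial sketch (not part of the driver): the cluster-to-device-block
 * conversion ntfs_bmap() performs, shown with example numbers.  The
 * 4096-byte cluster (shift 12), 512-byte device block (shift 9), lcn and
 * ofs values are all hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static sector_t example_lcn_to_block(void)
{
	s64 lcn = 100;			/* cluster found by the runlist lookup */
	s64 ofs = 12816;		/* byte offset of the block in the file */
	unsigned delta = ofs & 4095;	/* 528: offset within the cluster */

	/* The VCN looked up is ofs >> 12 == 3. */
	/* ((100 << 12) + 528) >> 9 == 801: the device block to return. */
	return ((lcn << 12) + delta) >> 9;
}
#endif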
aa0b42b7 | 1649 | /* |
ce1bafa0 AA |
1650 | * ntfs_normal_aops - address space operations for normal inodes and attributes |
1651 | * | |
1652 | * Note these are not used for compressed or mst protected inodes and | |
1653 | * attributes. | |
1da177e4 | 1654 | */ |
ce1bafa0 | 1655 | const struct address_space_operations ntfs_normal_aops = { |
933906f8 | 1656 | .read_folio = ntfs_read_folio, |
1da177e4 | 1657 | #ifdef NTFS_RW |
ce1bafa0 | 1658 | .writepage = ntfs_writepage, |
e621900a | 1659 | .dirty_folio = block_dirty_folio, |
ce1bafa0 | 1660 | #endif /* NTFS_RW */ |
3f7fc6f2 | 1661 | .bmap = ntfs_bmap, |
67235182 | 1662 | .migrate_folio = buffer_migrate_folio, |
ce1bafa0 AA |
1663 | .is_partially_uptodate = block_is_partially_uptodate, |
1664 | .error_remove_page = generic_error_remove_page, | |
1665 | }; | |
1666 | ||
aa0b42b7 | 1667 | /* |
ce1bafa0 AA |
1668 | * ntfs_compressed_aops - address space operations for compressed inodes |
1669 | */ | |
1670 | const struct address_space_operations ntfs_compressed_aops = { | |
933906f8 | 1671 | .read_folio = ntfs_read_folio, |
ce1bafa0 AA |
1672 | #ifdef NTFS_RW |
1673 | .writepage = ntfs_writepage, | |
e621900a | 1674 | .dirty_folio = block_dirty_folio, |
1da177e4 | 1675 | #endif /* NTFS_RW */ |
67235182 | 1676 | .migrate_folio = buffer_migrate_folio, |
ce1bafa0 | 1677 | .is_partially_uptodate = block_is_partially_uptodate, |
aa261f54 | 1678 | .error_remove_page = generic_error_remove_page, |
1da177e4 LT |
1679 | }; |
1680 | ||
aa0b42b7 | 1681 | /* |
1da177e4 | 1682 | * ntfs_mst_aops - general address space operations for mst protecteed inodes |
aa0b42b7 | 1683 | * and attributes |
1da177e4 | 1684 | */ |
f5e54d6e | 1685 | const struct address_space_operations ntfs_mst_aops = { |
933906f8 | 1686 | .read_folio = ntfs_read_folio, /* Fill page with data. */ |
1da177e4 LT |
1687 | #ifdef NTFS_RW |
1688 | .writepage = ntfs_writepage, /* Write dirty page to disk. */ | |
187c82cb | 1689 | .dirty_folio = filemap_dirty_folio, |
1da177e4 | 1690 | #endif /* NTFS_RW */ |
67235182 | 1691 | .migrate_folio = buffer_migrate_folio, |
ce1bafa0 | 1692 | .is_partially_uptodate = block_is_partially_uptodate, |
aa261f54 | 1693 | .error_remove_page = generic_error_remove_page, |
1da177e4 LT |
1694 | }; |
1695 | ||
1696 | #ifdef NTFS_RW | |
1697 | ||
1698 | /** | |
1699 | * mark_ntfs_record_dirty - mark an ntfs record dirty | |
1700 | * @page: page containing the ntfs record to mark dirty | |
1701 | * @ofs: byte offset within @page at which the ntfs record begins | |
1702 | * | |
1703 | * Set the buffers and the page in which the ntfs record is located dirty. | |
1704 | * | |
1705 | * The latter also marks the vfs inode the ntfs record belongs to dirty | |
1706 | * (I_DIRTY_PAGES only). | |
1707 | * | |
1708 | * If the page does not have buffers, we create them and set them uptodate. | |
1709 | * The page may not be locked which is why we need to handle the buffers under | |
1710 | * the mapping->private_lock. Once the buffers are marked dirty we no longer | |
1711 | * need the lock since try_to_free_buffers() does not free dirty buffers. | |
1712 | */ | |
1713 | void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) { | |
1714 | struct address_space *mapping = page->mapping; | |
1715 | ntfs_inode *ni = NTFS_I(mapping->host); | |
1716 | struct buffer_head *bh, *head, *buffers_to_free = NULL; | |
1717 | unsigned int end, bh_size, bh_ofs; | |
1718 | ||
1719 | BUG_ON(!PageUptodate(page)); | |
1720 | end = ofs + ni->itype.index.block_size; | |
78af34f0 | 1721 | bh_size = VFS_I(ni)->i_sb->s_blocksize; |
1da177e4 LT |
1722 | spin_lock(&mapping->private_lock); |
1723 | if (unlikely(!page_has_buffers(page))) { | |
1724 | spin_unlock(&mapping->private_lock); | |
640ab98f | 1725 | bh = head = alloc_page_buffers(page, bh_size, true); |
1da177e4 LT |
1726 | spin_lock(&mapping->private_lock); |
1727 | if (likely(!page_has_buffers(page))) { | |
1728 | struct buffer_head *tail; | |
1729 | ||
1730 | do { | |
1731 | set_buffer_uptodate(bh); | |
1732 | tail = bh; | |
1733 | bh = bh->b_this_page; | |
1734 | } while (bh); | |
1735 | tail->b_this_page = head; | |
14ed109e | 1736 | attach_page_private(page, head); |
1da177e4 LT |
1737 | } else |
1738 | buffers_to_free = bh; | |
1739 | } | |
1740 | bh = head = page_buffers(page); | |
a01ac532 | 1741 | BUG_ON(!bh); |
1da177e4 LT |
1742 | do { |
1743 | bh_ofs = bh_offset(bh); | |
1744 | if (bh_ofs + bh_size <= ofs) | |
1745 | continue; | |
1746 | if (unlikely(bh_ofs >= end)) | |
1747 | break; | |
1748 | set_buffer_dirty(bh); | |
1749 | } while ((bh = bh->b_this_page) != head); | |
1750 | spin_unlock(&mapping->private_lock); | |
c3773130 | 1751 | filemap_dirty_folio(mapping, page_folio(page)); |
1da177e4 LT |
1752 | if (unlikely(buffers_to_free)) { |
1753 | do { | |
1754 | bh = buffers_to_free->b_this_page; | |
1755 | free_buffer_head(buffers_to_free); | |
1756 | buffers_to_free = bh; | |
1757 | } while (buffers_to_free); | |
1758 | } | |
1759 | } | |
1760 | ||
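/*
 * Editorial sketch (not part of the driver): which buffer heads the loop
 * in mark_ntfs_record_dirty() ends up dirtying.  The 4096-byte page,
 * 512-byte buffers and the 2048-byte record at offset 1024 are all
 * hypothetical.
 */
#if 0	/* illustrative only, never compiled */
static void example_record_dirty_range(void)
{
	unsigned int bh_size = 512, ofs = 1024, end = ofs + 2048, bh_ofs;

	for (bh_ofs = 0; bh_ofs < 4096; bh_ofs += bh_size) {
		if (bh_ofs + bh_size <= ofs)
			continue;	/* buffers at 0 and 512 lie before the record */
		if (bh_ofs >= end)
			break;		/* buffers at 3072 and beyond lie after the record */
		/* buffers at 1024, 1536, 2048 and 2560 overlap and get dirtied */
	}
}
#endif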
1761 | #endif /* NTFS_RW */ |