// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 * TODO: Merge attr_set_size/attr_data_get_block/attr_allocate_frame?
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/kernel.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

/*
 * NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP can be defined externally
 * to tune the preallocation algorithm.
 */
#ifndef NTFS_MIN_LOG2_OF_CLUMP
#define NTFS_MIN_LOG2_OF_CLUMP 16
#endif

#ifndef NTFS_MAX_LOG2_OF_CLUMP
#define NTFS_MAX_LOG2_OF_CLUMP 26
#endif

// 16M
#define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
// 16G
#define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))

static inline u64 get_pre_allocated(u64 size)
{
	u32 clump;
	u8 align_shift;
	u64 ret;

	if (size <= NTFS_CLUMP_MIN) {
		clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
		align_shift = NTFS_MIN_LOG2_OF_CLUMP;
	} else if (size >= NTFS_CLUMP_MAX) {
		clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
		align_shift = NTFS_MAX_LOG2_OF_CLUMP;
	} else {
		align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
			      __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
		clump = 1u << align_shift;
	}

	ret = (((size + clump - 1) >> align_shift)) << align_shift;

	return ret;
}
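
/*
 * Illustrative example (not from the original source): for a requested
 * size of 100 MiB + 1 the middle branch above is taken, because
 * 16 MiB < size < 16 GiB:
 *	size >> (8 + 16) = 6, __ffs(6) = 1,
 *	align_shift = 16 - 1 + 1 = 16, clump = 64 KiB,
 * so get_pre_allocated() rounds the size up to the next 64 KiB boundary,
 * i.e. 100 MiB + 64 KiB.
 */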

/*
 * attr_load_runs - Load all runs stored in @attr.
 */
static int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
			  struct runs_tree *run, const CLST *vcn)
{
	int err;
	CLST svcn = le64_to_cpu(attr->nres.svcn);
	CLST evcn = le64_to_cpu(attr->nres.evcn);
	u32 asize;
	u16 run_off;

	if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
		return 0;

	if (vcn && (evcn < *vcn || *vcn < svcn))
		return -EINVAL;

	asize = le32_to_cpu(attr->size);
	run_off = le16_to_cpu(attr->nres.run_off);

	if (run_off > asize)
		return -EINVAL;

	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
			    vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
			    asize - run_off);
	if (err < 0)
		return err;

	return 0;
}

/*
 * run_deallocate_ex - Deallocate clusters.
 */
static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
			     CLST vcn, CLST len, CLST *done, bool trim)
{
	int err = 0;
	CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
	size_t idx;

	if (!len)
		goto out;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
failed:
		run_truncate(run, vcn0);
		err = -EINVAL;
		goto out;
	}

	for (;;) {
		if (clen > len)
			clen = len;

		if (!clen) {
			err = -EINVAL;
			goto out;
		}

		if (lcn != SPARSE_LCN) {
			if (sbi) {
				/* Mark bitmap range [lcn, lcn + clen) as free and trim clusters. */
				mark_as_free_ex(sbi, lcn, clen, trim);
			}
			dn += clen;
		}

		len -= clen;
		if (!len)
			break;

		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn != vcn_next) {
			/* Save memory - don't load entire run. */
			goto failed;
		}
	}

out:
	if (done)
		*done += dn;

	return err;
}

/*
 * attr_allocate_clusters - Find free space, mark it as used and store in @run.
 */
int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
			   CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
			   enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
			   CLST *new_lcn, CLST *new_len)
{
	int err;
	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
	size_t cnt = run->count;

	for (;;) {
		err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
					       opt);

		if (err == -ENOSPC && pre) {
			pre = 0;
			if (*pre_alloc)
				*pre_alloc = 0;
			continue;
		}

		if (err)
			goto out;

		if (vcn == vcn0) {
			/* Return the first fragment. */
			if (new_lcn)
				*new_lcn = lcn;
			if (new_len)
				*new_len = flen;
		}

		/* Add new fragment into run storage. */
		if (!run_add_entry(run, vcn, lcn, flen, opt & ALLOCATE_MFT)) {
			/* Undo last 'ntfs_look_for_free_space'. */
			mark_as_free_ex(sbi, lcn, len, false);
			err = -ENOMEM;
			goto out;
		}

		if (opt & ALLOCATE_ZERO) {
			u8 shift = sbi->cluster_bits - SECTOR_SHIFT;

			err = blkdev_issue_zeroout(sbi->sb->s_bdev,
						   (sector_t)lcn << shift,
						   (sector_t)flen << shift,
						   GFP_NOFS, 0);
			if (err)
				goto out;
		}

		vcn += flen;

		if (flen >= len || (opt & ALLOCATE_MFT) ||
		    (fr && run->count - cnt >= fr)) {
			*alen = vcn - vcn0;
			return 0;
		}

		len -= flen;
	}

out:
	/* Undo 'ntfs_look_for_free_space'. */
	if (vcn - vcn0) {
		run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
		run_truncate(run, vcn0);
	}

	return err;
}
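
/*
 * Usage sketch (illustrative, mirrors the call in attr_make_nonresident()
 * below): a caller that needs @len clusters starting at @vcn, with no
 * preallocation hint and no fragment limit, does roughly:
 *
 *	CLST alen;
 *	err = attr_allocate_clusters(sbi, run, vcn, 0, len, NULL,
 *				     ALLOCATE_DEF, &alen, 0, NULL, NULL);
 *
 * On success [vcn, vcn + alen) is mapped in @run and marked used in the
 * volume bitmap; on failure partially allocated clusters are rolled back.
 */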

/*
 * attr_make_nonresident
 *
 * If @page is not NULL, it already contains resident data
 * and is locked (called from ni_write_frame()).
 */
int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
			  struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			  u64 new_size, struct runs_tree *run,
			  struct ATTRIB **ins_attr, struct page *page)
{
	struct ntfs_sb_info *sbi;
	struct ATTRIB *attr_s;
	struct MFT_REC *rec;
	u32 used, asize, rsize, aoff, align;
	bool is_data;
	CLST len, alen;
	char *next;
	int err;

	if (attr->non_res) {
		*ins_attr = attr;
		return 0;
	}

	sbi = mi->sbi;
	rec = mi->mrec;
	attr_s = NULL;
	used = le32_to_cpu(rec->used);
	asize = le32_to_cpu(attr->size);
	next = Add2Ptr(attr, asize);
	aoff = PtrOffset(rec, attr);
	rsize = le32_to_cpu(attr->res.data_size);
	is_data = attr->type == ATTR_DATA && !attr->name_len;

	align = sbi->cluster_size;
	if (is_attr_compressed(attr))
		align <<= COMPRESSION_UNIT;
	len = (rsize + align - 1) >> sbi->cluster_bits;

	run_init(run);

	/* Make a copy of original attribute. */
	attr_s = kmemdup(attr, asize, GFP_NOFS);
	if (!attr_s) {
		err = -ENOMEM;
		goto out;
	}

	if (!len) {
		/* Empty resident -> Empty nonresident. */
		alen = 0;
	} else {
		const char *data = resident_data(attr);

		err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
					     ALLOCATE_DEF, &alen, 0, NULL,
					     NULL);
		if (err)
			goto out1;

		if (!rsize) {
			/* Empty resident -> Non empty nonresident. */
		} else if (!is_data) {
			err = ntfs_sb_write_run(sbi, run, 0, data, rsize, 0);
			if (err)
				goto out2;
		} else if (!page) {
			char *kaddr;

			page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
			if (!page) {
				err = -ENOMEM;
				goto out2;
			}
			kaddr = kmap_atomic(page);
			memcpy(kaddr, data, rsize);
			memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
			kunmap_atomic(kaddr);
			flush_dcache_page(page);
			SetPageUptodate(page);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}

	/* Remove original attribute. */
	used -= asize;
	memmove(attr, Add2Ptr(attr, asize), used - aoff);
	rec->used = cpu_to_le32(used);
	mi->dirty = true;
	if (le)
		al_remove_le(ni, le);

	err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
				    attr_s->name_len, run, 0, alen,
				    attr_s->flags, &attr, NULL, NULL);
	if (err)
		goto out3;

	kfree(attr_s);
	attr->nres.data_size = cpu_to_le64(rsize);
	attr->nres.valid_size = attr->nres.data_size;

	*ins_attr = attr;

	if (is_data)
		ni->ni_flags &= ~NI_FLAG_RESIDENT;

	/* Resident attribute becomes non resident. */
	return 0;

out3:
	attr = Add2Ptr(rec, aoff);
	memmove(next, attr, used - aoff);
	memcpy(attr, attr_s, asize);
	rec->used = cpu_to_le32(used + asize);
	mi->dirty = true;
out2:
	/* Undo: do not trim new allocated clusters. */
	run_deallocate(sbi, run, false);
	run_close(run);
out1:
	kfree(attr_s);
out:
	return err;
}

/*
 * attr_set_size_res - Helper for attr_set_size().
 */
static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
			     struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
			     u64 new_size, struct runs_tree *run,
			     struct ATTRIB **ins_attr)
{
	struct ntfs_sb_info *sbi = mi->sbi;
	struct MFT_REC *rec = mi->mrec;
	u32 used = le32_to_cpu(rec->used);
	u32 asize = le32_to_cpu(attr->size);
	u32 aoff = PtrOffset(rec, attr);
	u32 rsize = le32_to_cpu(attr->res.data_size);
	u32 tail = used - aoff - asize;
	char *next = Add2Ptr(attr, asize);
	s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);

	if (dsize < 0) {
		memmove(next + dsize, next, tail);
	} else if (dsize > 0) {
		if (used + dsize > sbi->max_bytes_per_attr)
			return attr_make_nonresident(ni, attr, le, mi, new_size,
						     run, ins_attr, NULL);

		memmove(next + dsize, next, tail);
		memset(next, 0, dsize);
	}

	if (new_size > rsize)
		memset(Add2Ptr(resident_data(attr), rsize), 0,
		       new_size - rsize);

	rec->used = cpu_to_le32(used + dsize);
	attr->size = cpu_to_le32(asize + dsize);
	attr->res.data_size = cpu_to_le32(new_size);
	mi->dirty = true;
	*ins_attr = attr;

	return 0;
}
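
/*
 * Illustrative example (not from the original source): growing a resident
 * attribute from rsize = 10 to new_size = 25 gives
 * dsize = ALIGN(25, 8) - ALIGN(10, 8) = 32 - 16 = 16, so the record tail
 * is shifted 16 bytes up and the new bytes are zeroed; if the record would
 * exceed sbi->max_bytes_per_attr, the attribute is converted to
 * nonresident via attr_make_nonresident() instead.
 */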

/*
 * attr_set_size - Change the size of an attribute.
 *
 * Extend:
 *   - Sparse/compressed: No allocated clusters.
 *   - Normal: Append allocated and preallocated new clusters.
 * Shrink:
 *   - No deallocate if @keep_prealloc is set.
 */
int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
		  const __le16 *name, u8 name_len, struct runs_tree *run,
		  u64 new_size, const u64 *new_valid, bool keep_prealloc,
		  struct ATTRIB **ret)
{
	int err = 0;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	bool is_mft =
		ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len;
	u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
	CLST next_svcn, pre_alloc = -1, done = 0;
	bool is_ext, is_bad = false;
	bool dirty = false;
	u32 align;
	struct MFT_REC *rec;

again:
	alen = 0;
	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
			      &mi_b);
	if (!attr_b) {
		err = -ENOENT;
		goto bad_inode;
	}

	if (!attr_b->non_res) {
		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
					&attr_b);
		if (err)
			return err;

		/* Return if file is still resident. */
		if (!attr_b->non_res) {
			dirty = true;
			goto ok1;
		}

		/* Layout of records may be changed, so do a full search. */
		goto again;
	}

	is_ext = is_attr_ext(attr_b);
	align = sbi->cluster_size;
	if (is_ext)
		align <<= attr_b->nres.c_unit;

	old_valid = le64_to_cpu(attr_b->nres.valid_size);
	old_size = le64_to_cpu(attr_b->nres.data_size);
	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);

again_1:
	old_alen = old_alloc >> cluster_bits;

	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
	new_alen = new_alloc >> cluster_bits;

	if (keep_prealloc && new_size < old_size) {
		attr_b->nres.data_size = cpu_to_le64(new_size);
		mi_b->dirty = dirty = true;
		goto ok;
	}

	vcn = old_alen - 1;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn = le64_to_cpu(attr_b->nres.evcn);

	if (svcn <= vcn && vcn <= evcn) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}

next_le_1:
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);
	}
	/*
	 * Here we have:
	 * attr,mi,le - last attribute segment (containing 'vcn').
	 * attr_b,mi_b,le_b - base (primary) attribute segment.
	 */
next_le:
	rec = mi->mrec;
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (new_size > old_size) {
		CLST to_allocate;
		size_t free;

		if (new_alloc <= old_alloc) {
			attr_b->nres.data_size = cpu_to_le64(new_size);
			mi_b->dirty = dirty = true;
			goto ok;
		}

		/*
		 * Add clusters. In simple case we have to:
		 * - allocate space (vcn, lcn, len)
		 * - update packed run in 'mi'
		 * - update attr->nres.evcn
		 * - update attr_b->nres.data_size/attr_b->nres.alloc_size
		 */
		to_allocate = new_alen - old_alen;
add_alloc_in_same_attr_seg:
		lcn = 0;
		if (is_mft) {
			/* MFT allocates clusters from MFT zone. */
			pre_alloc = 0;
		} else if (is_ext) {
			/* No preallocation for sparse/compressed. */
			pre_alloc = 0;
		} else if (pre_alloc == -1) {
			pre_alloc = 0;
			if (type == ATTR_DATA && !name_len &&
			    sbi->options->prealloc) {
				pre_alloc =
					bytes_to_cluster(
						sbi,
						get_pre_allocated(new_size)) -
					new_alen;
			}

			/* Get the last LCN to allocate from. */
			if (old_alen &&
			    !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
				lcn = SPARSE_LCN;
			}

			if (lcn == SPARSE_LCN)
				lcn = 0;
			else if (lcn)
				lcn += 1;

			free = wnd_zeroes(&sbi->used.bitmap);
			if (to_allocate > free) {
				err = -ENOSPC;
				goto out;
			}

			if (pre_alloc && to_allocate + pre_alloc > free)
				pre_alloc = 0;
		}

		vcn = old_alen;

		if (is_ext) {
			if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
					   false)) {
				err = -ENOMEM;
				goto out;
			}
			alen = to_allocate;
		} else {
			/* ~3 bytes per fragment. */
			err = attr_allocate_clusters(
				sbi, run, vcn, lcn, to_allocate, &pre_alloc,
				is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen,
				is_mft ? 0 :
					 (sbi->record_size -
					  le32_to_cpu(rec->used) + 8) / 3 + 1,
				NULL, NULL);
			if (err)
				goto out;
		}

		done += alen;
		vcn += alen;
		if (to_allocate > alen)
			to_allocate -= alen;
		else
			to_allocate = 0;

pack_runs:
		err = mi_pack_runs(mi, attr, run, vcn - svcn);
		if (err)
			goto undo_1;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
		new_alloc_tmp = (u64)next_svcn << cluster_bits;
		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
		mi_b->dirty = dirty = true;

		if (next_svcn >= vcn && !to_allocate) {
			/* Normal way. Update attribute and exit. */
			attr_b->nres.data_size = cpu_to_le64(new_size);
			goto ok;
		}

		/* At least two MFT records to avoid a recursive loop. */
		if (is_mft && next_svcn == vcn &&
		    ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
			new_size = new_alloc_tmp;
			attr_b->nres.data_size = attr_b->nres.alloc_size;
			goto ok;
		}

		if (le32_to_cpu(rec->used) < sbi->record_size) {
			old_alen = next_svcn;
			evcn = old_alen - 1;
			goto add_alloc_in_same_attr_seg;
		}

		attr_b->nres.data_size = attr_b->nres.alloc_size;
		if (new_alloc_tmp < old_valid)
			attr_b->nres.valid_size = attr_b->nres.data_size;

		if (type == ATTR_LIST) {
			err = ni_expand_list(ni);
			if (err)
				goto undo_2;
			if (next_svcn < vcn)
				goto pack_runs;

			/* Layout of records is changed. */
			goto again;
		}

		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			/* In case of error layout of records is not changed. */
			if (err)
				goto undo_2;
			/* Layout of records is changed. */
		}

		if (next_svcn >= vcn) {
			/* This is MFT data, repeat. */
			goto again;
		}

		/* Insert new attribute segment. */
		err = ni_insert_nonresident(ni, type, name, name_len, run,
					    next_svcn, vcn - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);

		/*
		 * Layout of records may be changed.
		 * Find base attribute to update.
		 */
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
				      NULL, &mi_b);
		if (!attr_b) {
			err = -EINVAL;
			goto bad_inode;
		}

		if (err) {
			/* ni_insert_nonresident failed. */
			attr = NULL;
			goto undo_2;
		}

		if (!is_mft)
			run_truncate_head(run, evcn + 1);

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);

		/*
		 * Attribute is in a consistent state.
		 * Save this point to restore to if next steps fail.
		 */
		old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
		attr_b->nres.valid_size = attr_b->nres.data_size =
			attr_b->nres.alloc_size = cpu_to_le64(old_size);
		mi_b->dirty = dirty = true;
		goto again_1;
	}

	if (new_size != old_size ||
	    (new_alloc != old_alloc && !keep_prealloc)) {
		/*
		 * Truncate clusters. In simple case we have to:
		 * - update packed run in 'mi'
		 * - update attr->nres.evcn
		 * - update attr_b->nres.data_size/attr_b->nres.alloc_size
		 * - mark and trim clusters as free (vcn, lcn, len)
		 */
		CLST dlen = 0;

		vcn = max(svcn, new_alen);
		new_alloc_tmp = (u64)vcn << cluster_bits;

		if (vcn > svcn) {
			err = mi_pack_runs(mi, attr, run, vcn - svcn);
			if (err)
				goto out;
		} else if (le && le->vcn) {
			u16 le_sz = le16_to_cpu(le->size);

			/*
			 * NOTE: List entries for one attribute are always
			 * the same size. We deal with last entry (vcn==0)
			 * and it is not first in entries array
			 * (list entry for std attribute always first).
			 * So it is safe to step back.
			 */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto bad_inode;
			}

			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
		} else {
			attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
			mi->dirty = true;
		}

		attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);

		if (vcn == new_alen) {
			attr_b->nres.data_size = cpu_to_le64(new_size);
			if (new_size < old_valid)
				attr_b->nres.valid_size =
					attr_b->nres.data_size;
		} else {
			if (new_alloc_tmp <=
			    le64_to_cpu(attr_b->nres.data_size))
				attr_b->nres.data_size =
					attr_b->nres.alloc_size;
			if (new_alloc_tmp <
			    le64_to_cpu(attr_b->nres.valid_size))
				attr_b->nres.valid_size =
					attr_b->nres.alloc_size;
		}
		mi_b->dirty = dirty = true;

		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
					true);
		if (err)
			goto out;

		if (is_ext) {
			/* dlen - really deallocated clusters. */
			le64_sub_cpu(&attr_b->nres.total_size,
				     ((u64)dlen << cluster_bits));
		}

		run_truncate(run, vcn);

		if (new_alloc_tmp <= new_alloc)
			goto ok;

		old_size = new_alloc_tmp;
		vcn = svcn - 1;

		if (le == le_b) {
			attr = attr_b;
			mi = mi_b;
			evcn = svcn - 1;
			svcn = 0;
			goto next_le;
		}

		if (le->type != type || le->name_len != name_len ||
		    memcmp(le_name(le), name, name_len * sizeof(short))) {
			err = -EINVAL;
			goto bad_inode;
		}

		err = ni_load_mi(ni, le, &mi);
		if (err)
			goto out;

		attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
		if (!attr) {
			err = -EINVAL;
			goto bad_inode;
		}
		goto next_le_1;
	}

ok:
	if (new_valid) {
		__le64 valid = cpu_to_le64(min(*new_valid, new_size));

		if (attr_b->nres.valid_size != valid) {
			attr_b->nres.valid_size = valid;
			mi_b->dirty = true;
		}
	}

ok1:
	if (ret)
		*ret = attr_b;

	if (((type == ATTR_DATA && !name_len) ||
	     (type == ATTR_ALLOC && name == I30_NAME))) {
		/* Update inode_set_bytes. */
		if (attr_b->non_res) {
			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
				inode_set_bytes(&ni->vfs_inode, new_alloc);
				dirty = true;
			}
		}

		/* Don't forget to update duplicate information in parent. */
		if (dirty) {
			ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
			mark_inode_dirty(&ni->vfs_inode);
		}
	}

	return 0;

undo_2:
	vcn -= alen;
	attr_b->nres.data_size = cpu_to_le64(old_size);
	attr_b->nres.valid_size = cpu_to_le64(old_valid);
	attr_b->nres.alloc_size = cpu_to_le64(old_alloc);

	/* Restore 'attr' and 'mi'. */
	if (attr)
		goto restore_run;

	if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
	    svcn <= le64_to_cpu(attr_b->nres.evcn)) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto bad_inode;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
				    &svcn, &mi);
		if (!attr)
			goto bad_inode;
	}

restore_run:
	if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
		is_bad = true;

undo_1:
	run_deallocate_ex(sbi, run, vcn, alen, NULL, false);

	run_truncate(run, vcn);
out:
	if (is_bad) {
bad_inode:
		_ntfs_bad_inode(&ni->vfs_inode);
	}
	return err;
}
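
/*
 * Usage sketch (illustrative): truncating the unnamed data attribute to
 * @vbo bytes while keeping preallocated tail clusters, as done by
 * attr_collapse_range() below:
 *
 *	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
 *			    &new_valid, true, NULL);
 */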

/*
 * attr_data_get_block - Returns 'lcn' and 'len' for given 'vcn'.
 *
 * @new == NULL means just to get the current mapping for 'vcn'
 * @new != NULL means allocate a real cluster if 'vcn' maps to a hole
 * @zero - zero out newly allocated clusters
 *
 * NOTE:
 * - @new != NULL is called only for sparsed or compressed attributes.
 * - newly allocated clusters are zeroed via blkdev_issue_zeroout.
 */
int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
			CLST *len, bool *new, bool zero)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi;
	u8 cluster_bits;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen;
	CLST alloc, evcn;
	unsigned fr;
	u64 total_size, total_size0;
	int step = 0;

	if (new)
		*new = false;

	/* Try to find in cache. */
	down_read(&ni->file.run_lock);
	if (!run_lookup_entry(run, vcn, lcn, len, NULL))
		*len = 0;
	up_read(&ni->file.run_lock);

	if (*len) {
		if (*lcn != SPARSE_LCN || !new)
			return 0; /* Fast normal way without allocation. */
		else if (clen > *len)
			clen = *len;
	}

	/* No cluster in cache or we need to allocate a cluster in a hole. */
	sbi = ni->mi.sbi;
	cluster_bits = sbi->cluster_bits;

	ni_lock(ni);
	down_write(&ni->file.run_lock);

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b) {
		err = -ENOENT;
		goto out;
	}

	if (!attr_b->non_res) {
		*lcn = RESIDENT_LCN;
		*len = 1;
		goto out;
	}

	asize = le64_to_cpu(attr_b->nres.alloc_size) >> cluster_bits;
	if (vcn >= asize) {
		if (new) {
			err = -EINVAL;
		} else {
			*len = 1;
			*lcn = SPARSE_LCN;
		}
		goto out;
	}

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	attr = attr_b;
	le = le_b;
	mi = mi_b;

	if (le_b && (vcn < svcn || evcn1 <= vcn)) {
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	/* Load the actual mapping into the cache. */
	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	if (!*len) {
		if (run_lookup_entry(run, vcn, lcn, len, NULL)) {
			if (*lcn != SPARSE_LCN || !new)
				goto ok; /* Slow normal way without allocation. */

			if (clen > *len)
				clen = *len;
		} else if (!new) {
			/*
			 * Here we may return -ENOENT.
			 * In any case caller gets zero length.
			 */
			goto ok;
		}
	}

	if (!is_attr_ext(attr_b)) {
		/* The code below is only for sparsed or compressed attributes. */
		err = -EINVAL;
		goto out;
	}

	vcn0 = vcn;
	to_alloc = clen;
	fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1;
	/*
	 * Allocate frame aligned clusters.
	 * ntfs.sys usually uses 16 clusters per frame for sparsed or compressed.
	 * ntfs3 uses 1 cluster per frame for newly created sparsed files.
	 */
	if (attr_b->nres.c_unit) {
		CLST clst_per_frame = 1u << attr_b->nres.c_unit;
		CLST cmask = ~(clst_per_frame - 1);

		/* Get frame aligned vcn and to_alloc. */
		vcn = vcn0 & cmask;
		to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn;
		if (fr < clst_per_frame)
			fr = clst_per_frame;
		zero = true;

		/* Check if 'vcn' and 'vcn0' are in different attribute segments. */
		if (vcn < svcn || evcn1 <= vcn) {
			/* Load attribute for truncated vcn. */
			attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0,
					    &vcn, &mi);
			if (!attr) {
				err = -EINVAL;
				goto out;
			}
			svcn = le64_to_cpu(attr->nres.svcn);
			evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
			err = attr_load_runs(attr, ni, run, NULL);
			if (err)
				goto out;
		}
	}

	if (vcn + to_alloc > asize)
		to_alloc = asize - vcn;

	/* Get the last LCN to allocate from. */
	hint = 0;

	if (vcn > evcn1) {
		if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
				   false)) {
			err = -ENOMEM;
			goto out;
		}
	} else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
		hint = -1;
	}

	/* Allocate and zero out new clusters. */
	err = attr_allocate_clusters(sbi, run, vcn, hint + 1, to_alloc, NULL,
				     zero ? ALLOCATE_ZERO : ALLOCATE_DEF, &alen,
				     fr, lcn, len);
	if (err)
		goto out;
	*new = true;
	step = 1;

	end = vcn + alen;
	/* Save 'total_size0' to restore if error. */
	total_size0 = le64_to_cpu(attr_b->nres.total_size);
	total_size = total_size0 + ((u64)alen << cluster_bits);

	if (vcn != vcn0) {
		if (!run_lookup_entry(run, vcn0, lcn, len, NULL)) {
			err = -EINVAL;
			goto out;
		}
		if (*lcn == SPARSE_LCN) {
			/* Internal error. Should not happen. */
			WARN_ON(1);
			err = -EINVAL;
			goto out;
		}
		/* Check case when vcn0 + len overlaps new allocated clusters. */
		if (vcn0 + *len > end)
			*len = end - vcn0;
	}

repack:
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
	if (err)
		goto out;

	attr_b->nres.total_size = cpu_to_le64(total_size);
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;

	mi_b->dirty = true;
	mark_inode_dirty(&ni->vfs_inode);

	/* Stored [vcn : next_svcn) from [vcn : end). */
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (end <= evcn1) {
		if (next_svcn == evcn1) {
			/* Normal way. Update attribute and exit. */
			goto ok;
		}
		/* Add new segment [next_svcn : evcn1 - next_svcn). */
		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			if (err)
				goto undo1;
			/* Layout of records is changed. */
			le_b = NULL;
			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
					      0, NULL, &mi_b);
			if (!attr_b) {
				err = -ENOENT;
				goto out;
			}

			attr = attr_b;
			le = le_b;
			mi = mi_b;
			goto repack;
		}
	}

	/*
	 * The code below may require an additional cluster (to extend the
	 * attribute list) and/or one MFT record.
	 * It is too complex to undo operations if -ENOSPC occurs deep inside
	 * 'ni_insert_nonresident'.
	 * Return -ENOSPC here in advance if there is no free cluster and no
	 * free MFT record.
	 */
	if (!ntfs_check_for_free_space(sbi, 1, 1)) {
		/* Undo step 1. */
		err = -ENOSPC;
		goto undo1;
	}

	step = 2;
	svcn = evcn1;

	/* Estimate next attribute. */
	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

	if (!attr) {
		/* Insert new attribute segment. */
		goto ins_ext;
	}

	/* Try to update an existing attribute segment. */
	alloc = bytes_to_cluster(sbi, le64_to_cpu(attr_b->nres.alloc_size));
	evcn = le64_to_cpu(attr->nres.evcn);

	if (end < next_svcn)
		end = next_svcn;
	while (end > evcn) {
		/* Remove segment [svcn : evcn). */
		mi_remove_attr(NULL, mi, attr);

		if (!al_remove_le(ni, le)) {
			err = -EINVAL;
			goto out;
		}

		if (evcn + 1 >= alloc) {
			/* Last attribute segment. */
			evcn1 = evcn + 1;
			goto ins_ext;
		}

		if (ni_load_mi(ni, le, &mi)) {
			attr = NULL;
			goto out;
		}

		attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, &le->id);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn = le64_to_cpu(attr->nres.evcn);
	}

	if (end < svcn)
		end = svcn;

	err = attr_load_runs(attr, ni, run, &end);
	if (err)
		goto out;

	evcn1 = evcn + 1;
	attr->nres.svcn = cpu_to_le64(next_svcn);
	err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
	if (err)
		goto out;

	le->vcn = cpu_to_le64(next_svcn);
	ni->attr_list.dirty = true;
	mi->dirty = true;
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

ins_ext:
	if (evcn1 > next_svcn) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);
		if (err)
			goto out;
	}
ok:
	run_truncate_around(run, vcn);
out:
	if (err && step > 1) {
		/* Too complex to restore. */
		_ntfs_bad_inode(&ni->vfs_inode);
	}
	up_write(&ni->file.run_lock);
	ni_unlock(ni);

	return err;

undo1:
	/* Undo step 1. */
	attr_b->nres.total_size = cpu_to_le64(total_size0);
	inode_set_bytes(&ni->vfs_inode, total_size0);

	if (run_deallocate_ex(sbi, run, vcn, alen, NULL, false) ||
	    !run_add_entry(run, vcn, SPARSE_LCN, alen, false) ||
	    mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn)) {
		_ntfs_bad_inode(&ni->vfs_inode);
	}
	goto out;
}
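
/*
 * Usage sketch (illustrative): mapping one cluster for a write, allocating
 * it if 'vcn' currently falls into a sparse hole:
 *
 *	CLST lcn, len;
 *	bool new;
 *	err = attr_data_get_block(ni, vcn, 1, &lcn, &len, &new, true);
 *
 * On success 'lcn'/'len' describe the mapping and 'new' reports whether
 * clusters were actually allocated (and zeroed, since @zero is true).
 */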

int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
{
	u64 vbo;
	struct ATTRIB *attr;
	u32 data_size;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
	if (!attr)
		return -EINVAL;

	if (attr->non_res)
		return E_NTFS_NONRESIDENT;

	vbo = page->index << PAGE_SHIFT;
	data_size = le32_to_cpu(attr->res.data_size);
	if (vbo < data_size) {
		const char *data = resident_data(attr);
		char *kaddr = kmap_atomic(page);
		u32 use = data_size - vbo;

		if (use > PAGE_SIZE)
			use = PAGE_SIZE;

		memcpy(kaddr, data + vbo, use);
		memset(kaddr + use, 0, PAGE_SIZE - use);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);
		SetPageUptodate(page);
	} else if (!PageUptodate(page)) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	}

	return 0;
}

int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
{
	u64 vbo;
	struct mft_inode *mi;
	struct ATTRIB *attr;
	u32 data_size;

	attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
	if (!attr)
		return -EINVAL;

	if (attr->non_res) {
		/* Return special error code to check this case. */
		return E_NTFS_NONRESIDENT;
	}

	vbo = page->index << PAGE_SHIFT;
	data_size = le32_to_cpu(attr->res.data_size);
	if (vbo < data_size) {
		char *data = resident_data(attr);
		char *kaddr = kmap_atomic(page);
		u32 use = data_size - vbo;

		if (use > PAGE_SIZE)
			use = PAGE_SIZE;
		memcpy(data + vbo, kaddr, use);
		kunmap_atomic(kaddr);
		mi->dirty = true;
	}
	ni->i_valid = data_size;

	return 0;
}

/*
 * attr_load_runs_vcn - Load runs with VCN.
 */
int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
		       const __le16 *name, u8 name_len, struct runs_tree *run,
		       CLST vcn)
{
	struct ATTRIB *attr;
	int err;
	CLST svcn, evcn;
	u16 ro;

	if (!ni) {
		/* Is record corrupted? */
		return -ENOENT;
	}

	attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
	if (!attr) {
		/* Is record corrupted? */
		return -ENOENT;
	}

	svcn = le64_to_cpu(attr->nres.svcn);
	evcn = le64_to_cpu(attr->nres.evcn);

	if (evcn < vcn || vcn < svcn) {
		/* Is record corrupted? */
		return -EINVAL;
	}

	ro = le16_to_cpu(attr->nres.run_off);

	if (ro > le32_to_cpu(attr->size))
		return -EINVAL;

	err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
			    Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
	if (err < 0)
		return err;
	return 0;
}

/*
 * attr_load_runs_range - Load runs for given range [from, to).
 */
int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
			 const __le16 *name, u8 name_len, struct runs_tree *run,
			 u64 from, u64 to)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn;
	CLST vcn_last = (to - 1) >> cluster_bits;
	CLST lcn, clen;
	int err;

	for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
		if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
			err = attr_load_runs_vcn(ni, type, name, name_len, run,
						 vcn);
			if (err)
				return err;
			clen = 0; /* Next run_lookup_entry(vcn) must succeed. */
		}
	}

	return 0;
}

#ifdef CONFIG_NTFS3_LZX_XPRESS
/*
 * attr_wof_frame_info
 *
 * Read header of Xpress/LZX file to get info about frame.
 */
int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
			struct runs_tree *run, u64 frame, u64 frames,
			u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
{
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	u64 vbo[2], off[2], wof_size;
	u32 voff;
	u8 bytes_per_off;
	char *addr;
	struct page *page;
	int i, err;
	__le32 *off32;
	__le64 *off64;

	if (ni->vfs_inode.i_size < 0x100000000ull) {
		/* File starts with array of 32 bit offsets. */
		bytes_per_off = sizeof(__le32);
		vbo[1] = frame << 2;
		*vbo_data = frames << 2;
	} else {
		/* File starts with array of 64 bit offsets. */
		bytes_per_off = sizeof(__le64);
		vbo[1] = frame << 3;
		*vbo_data = frames << 3;
	}

	/*
	 * Read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts.
	 * Read 4/8 bytes at [vbo] == offset where compressed frame ends.
	 */
	if (!attr->non_res) {
		if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
			ntfs_inode_err(&ni->vfs_inode, "is corrupted");
			return -EINVAL;
		}
		addr = resident_data(attr);

		if (bytes_per_off == sizeof(__le32)) {
			off32 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
			off[1] = le32_to_cpu(off32[0]);
		} else {
			off64 = Add2Ptr(addr, vbo[1]);
			off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
			off[1] = le64_to_cpu(off64[0]);
		}

		*vbo_data += off[0];
		*ondisk_size = off[1] - off[0];
		return 0;
	}

	wof_size = le64_to_cpu(attr->nres.data_size);
	down_write(&ni->file.run_lock);
	page = ni->file.offs_page;
	if (!page) {
		page = alloc_page(GFP_KERNEL);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		page->index = -1;
		ni->file.offs_page = page;
	}
	lock_page(page);
	addr = page_address(page);

	if (vbo[1]) {
		voff = vbo[1] & (PAGE_SIZE - 1);
		vbo[0] = vbo[1] - bytes_per_off;
		i = 0;
	} else {
		voff = 0;
		vbo[0] = 0;
		off[0] = 0;
		i = 1;
	}

	do {
		pgoff_t index = vbo[i] >> PAGE_SHIFT;

		if (index != page->index) {
			u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
			u64 to = min(from + PAGE_SIZE, wof_size);

			err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
						   ARRAY_SIZE(WOF_NAME), run,
						   from, to);
			if (err)
				goto out1;

			err = ntfs_bio_pages(sbi, run, &page, 1, from,
					     to - from, REQ_OP_READ);
			if (err) {
				page->index = -1;
				goto out1;
			}
			page->index = index;
		}

		if (i) {
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, voff);
				off[1] = le32_to_cpu(*off32);
			} else {
				off64 = Add2Ptr(addr, voff);
				off[1] = le64_to_cpu(*off64);
			}
		} else if (!voff) {
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
				off[0] = le32_to_cpu(*off32);
			} else {
				off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
				off[0] = le64_to_cpu(*off64);
			}
		} else {
			/* Two values in one page. */
			if (bytes_per_off == sizeof(__le32)) {
				off32 = Add2Ptr(addr, voff);
				off[0] = le32_to_cpu(off32[-1]);
				off[1] = le32_to_cpu(off32[0]);
			} else {
				off64 = Add2Ptr(addr, voff);
				off[0] = le64_to_cpu(off64[-1]);
				off[1] = le64_to_cpu(off64[0]);
			}
			break;
		}
	} while (++i < 2);

	*vbo_data += off[0];
	*ondisk_size = off[1] - off[0];

out1:
	unlock_page(page);
out:
	up_write(&ni->file.run_lock);
	return err;
}
#endif
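
/*
 * Illustrative layout note (an assumption drawn from the code above): the
 * WOF stream begins with an array of little-endian frame end offsets (32 or
 * 64 bit, depending on the file size). For frame N the compressed bytes
 * occupy [offs[N - 1], offs[N]) counted from the end of that array, with
 * the start offset of frame 0 implicitly 0; *vbo_data is therefore the
 * array size plus off[0].
 */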

/*
 * attr_is_frame_compressed - Used to detect compressed frame.
 */
int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
			     CLST frame, CLST *clst_data)
{
	int err;
	u32 clst_frame;
	CLST clen, lcn, vcn, alen, slen, vcn_next;
	size_t idx;
	struct runs_tree *run;

	*clst_data = 0;

	if (!is_attr_compressed(attr))
		return 0;

	if (!attr->non_res)
		return 0;

	clst_frame = 1u << attr->nres.c_unit;
	vcn = frame * clst_frame;
	run = &ni->file.run;

	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
		err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
					 attr->name_len, run, vcn);
		if (err)
			return err;

		if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
			return -EINVAL;
	}

	if (lcn == SPARSE_LCN) {
		/* Sparsed frame. */
		return 0;
	}

	if (clen >= clst_frame) {
		/*
		 * The frame is not compressed because
		 * it does not contain any sparse clusters.
		 */
		*clst_data = clst_frame;
		return 0;
	}

	alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
	slen = 0;
	*clst_data = clen;

	/*
	 * The frame is compressed if *clst_data + slen >= clst_frame.
	 * Check next fragments.
	 */
	while ((vcn += clen) < alen) {
		vcn_next = vcn;

		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		    vcn_next != vcn) {
			err = attr_load_runs_vcn(ni, attr->type,
						 attr_name(attr),
						 attr->name_len, run, vcn_next);
			if (err)
				return err;
			vcn = vcn_next;

			if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
				return -EINVAL;
		}

		if (lcn == SPARSE_LCN) {
			slen += clen;
		} else {
			if (slen) {
				/*
				 * Data clusters follow sparse ones while
				 * data + sparse is still less than a whole
				 * frame: the layout is invalid.
				 */
				return -EINVAL;
			}
			*clst_data += clen;
		}

		if (*clst_data + slen >= clst_frame) {
			if (!slen) {
				/*
				 * There are no sparsed clusters in this frame,
				 * so it is not compressed.
				 */
				*clst_data = clst_frame;
			} else {
				/* Frame is compressed. */
			}
			break;
		}
	}

	return 0;
}
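
/*
 * Illustrative example (not from the original source): with c_unit = 4 a
 * frame spans 16 clusters. If the run shows 10 data clusters followed by
 * 6 sparse clusters, then *clst_data + slen = 16 >= clst_frame with
 * slen != 0, so the frame is compressed and *clst_data = 10 is its on-disk
 * size in clusters; 16 data clusters and no sparse ones means "not
 * compressed".
 */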

/*
 * attr_allocate_frame - Allocate/free clusters for @frame.
 *
 * Assumed: down_write(&ni->file.run_lock);
 */
int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
			u64 new_valid)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, next_svcn, len;
	CLST vcn, end, clst_data;
	u64 total_size, valid_size, data_size;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!is_attr_ext(attr_b))
		return -EINVAL;

	vcn = frame << NTFS_LZNT_CUNIT;
	total_size = le64_to_cpu(attr_b->nres.total_size);

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
	data_size = le64_to_cpu(attr_b->nres.data_size);

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	err = attr_load_runs(attr, ni, run, NULL);
	if (err)
		goto out;

	err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
	if (err)
		goto out;

	total_size -= (u64)clst_data << sbi->cluster_bits;

	len = bytes_to_cluster(sbi, compr_size);

	if (len == clst_data)
		goto out;

	if (len < clst_data) {
		err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
					NULL, true);
		if (err)
			goto out;

		if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
				   false)) {
			err = -ENOMEM;
			goto out;
		}
		end = vcn + clst_data;
		/* Run contains updated range [vcn + len : end). */
	} else {
		CLST alen, hint = 0;
		/* Get the last LCN to allocate from. */
		if (vcn + clst_data &&
		    !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
				      NULL)) {
			hint = -1;
		}

		err = attr_allocate_clusters(sbi, run, vcn + clst_data,
					     hint + 1, len - clst_data, NULL,
					     ALLOCATE_DEF, &alen, 0, NULL,
					     NULL);
		if (err)
			goto out;

		end = vcn + len;
		/* Run contains updated range [vcn + clst_data : end). */
	}

	total_size += (u64)len << sbi->cluster_bits;

repack:
	err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
	if (err)
		goto out;

	attr_b->nres.total_size = cpu_to_le64(total_size);
	inode_set_bytes(&ni->vfs_inode, total_size);

	mi_b->dirty = true;
	mark_inode_dirty(&ni->vfs_inode);

	/* Stored [vcn : next_svcn) from [vcn : end). */
	next_svcn = le64_to_cpu(attr->nres.evcn) + 1;

	if (end <= evcn1) {
		if (next_svcn == evcn1) {
			/* Normal way. Update attribute and exit. */
			goto ok;
		}
		/* Add new segment [next_svcn : evcn1 - next_svcn). */
		if (!ni->attr_list.size) {
			err = ni_create_attr_list(ni);
			if (err)
				goto out;
			/* Layout of records is changed. */
			le_b = NULL;
			attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
					      0, NULL, &mi_b);
			if (!attr_b) {
				err = -ENOENT;
				goto out;
			}

			attr = attr_b;
			le = le_b;
			mi = mi_b;
			goto repack;
		}
	}

	svcn = evcn1;

	/* Estimate next attribute. */
	attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);

	if (attr) {
		CLST alloc = bytes_to_cluster(
			sbi, le64_to_cpu(attr_b->nres.alloc_size));
		CLST evcn = le64_to_cpu(attr->nres.evcn);

		if (end < next_svcn)
			end = next_svcn;
		while (end > evcn) {
			/* Remove segment [svcn : evcn). */
			mi_remove_attr(NULL, mi, attr);

			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn + 1 >= alloc) {
				/* Last attribute segment. */
				evcn1 = evcn + 1;
				goto ins_ext;
			}

			if (ni_load_mi(ni, le, &mi)) {
				attr = NULL;
				goto out;
			}

			attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
					    &le->id);
			if (!attr) {
				err = -EINVAL;
				goto out;
			}
			svcn = le64_to_cpu(attr->nres.svcn);
			evcn = le64_to_cpu(attr->nres.evcn);
		}

		if (end < svcn)
			end = svcn;

		err = attr_load_runs(attr, ni, run, &end);
		if (err)
			goto out;

		evcn1 = evcn + 1;
		attr->nres.svcn = cpu_to_le64(next_svcn);
		err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
		if (err)
			goto out;

		le->vcn = cpu_to_le64(next_svcn);
		ni->attr_list.dirty = true;
		mi->dirty = true;

		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
	}
ins_ext:
	if (evcn1 > next_svcn) {
		err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
					    next_svcn, evcn1 - next_svcn,
					    attr_b->flags, &attr, &mi, NULL);
		if (err)
			goto out;
	}
ok:
	run_truncate_around(run, vcn);
out:
	if (new_valid > data_size)
		new_valid = data_size;

	valid_size = le64_to_cpu(attr_b->nres.valid_size);
	if (new_valid != valid_size) {
		attr_b->nres.valid_size = cpu_to_le64(new_valid);
		mi_b->dirty = true;
	}

	return err;
}
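
/*
 * Usage note (illustrative): this is the write-side counterpart of
 * attr_is_frame_compressed() above - after compressing a frame the writer
 * calls attr_allocate_frame() with the compressed size, so the frame keeps
 * exactly bytes_to_cluster(sbi, compr_size) data clusters and the rest of
 * the frame becomes sparse.
 */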

/*
 * attr_collapse_range - Collapse range in file.
 */
int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
{
	int err = 0;
	struct runs_tree *run = &ni->file.run;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct ATTRIB *attr = NULL, *attr_b;
	struct ATTR_LIST_ENTRY *le, *le_b;
	struct mft_inode *mi, *mi_b;
	CLST svcn, evcn1, len, dealloc, alen;
	CLST vcn, end;
	u64 valid_size, data_size, alloc_size, total_size;
	u32 mask;
	__le16 a_flags;

	if (!bytes)
		return 0;

	le_b = NULL;
	attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
	if (!attr_b)
		return -ENOENT;

	if (!attr_b->non_res) {
		/* Attribute is resident. Nothing to do? */
		return 0;
	}

	data_size = le64_to_cpu(attr_b->nres.data_size);
	alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
	a_flags = attr_b->flags;

	if (is_attr_ext(attr_b)) {
		total_size = le64_to_cpu(attr_b->nres.total_size);
		mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
	} else {
		total_size = alloc_size;
		mask = sbi->cluster_mask;
	}

	if ((vbo & mask) || (bytes & mask)) {
		/* Allow to collapse only cluster aligned ranges. */
		return -EINVAL;
	}

	if (vbo > data_size)
		return -EINVAL;

	down_write(&ni->file.run_lock);

	if (vbo + bytes >= data_size) {
		u64 new_valid = min(ni->i_valid, vbo);

		/* Simple truncate file at 'vbo'. */
		truncate_setsize(&ni->vfs_inode, vbo);
		err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
				    &new_valid, true, NULL);

		if (!err && new_valid < ni->i_valid)
			ni->i_valid = new_valid;

		goto out;
	}

	/*
	 * Enumerate all attribute segments and collapse.
	 */
	alen = alloc_size >> sbi->cluster_bits;
	vcn = vbo >> sbi->cluster_bits;
	len = bytes >> sbi->cluster_bits;
	end = vcn + len;
	dealloc = 0;

	svcn = le64_to_cpu(attr_b->nres.svcn);
	evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;

	if (svcn <= vcn && vcn < evcn1) {
		attr = attr_b;
		le = le_b;
		mi = mi_b;
	} else if (!le_b) {
		err = -EINVAL;
		goto out;
	} else {
		le = le_b;
		attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
				    &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	for (;;) {
		if (svcn >= end) {
			/* Shift VCN. */
			attr->nres.svcn = cpu_to_le64(svcn - len);
			attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
			if (le) {
				le->vcn = attr->nres.svcn;
				ni->attr_list.dirty = true;
			}
			mi->dirty = true;
		} else if (svcn < vcn || end < evcn1) {
			CLST vcn1, eat, next_svcn;

			/* Collapse a part of this attribute segment. */
			err = attr_load_runs(attr, ni, run, &svcn);
			if (err)
				goto out;
			vcn1 = max(vcn, svcn);
			eat = min(end, evcn1) - vcn1;

			err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
						true);
			if (err)
				goto out;

			if (!run_collapse_range(run, vcn1, eat)) {
				err = -ENOMEM;
				goto out;
			}

			if (svcn >= vcn) {
				/* Shift VCN. */
				attr->nres.svcn = cpu_to_le64(vcn);
				if (le) {
					le->vcn = attr->nres.svcn;
					ni->attr_list.dirty = true;
				}
			}

			err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
			if (err)
				goto out;

			next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
			if (next_svcn + eat < evcn1) {
				err = ni_insert_nonresident(
					ni, ATTR_DATA, NULL, 0, run, next_svcn,
					evcn1 - eat - next_svcn, a_flags, &attr,
					&mi, &le);
				if (err)
					goto out;

				/* Layout of records may be changed. */
				attr_b = NULL;
			}

			/* Free all allocated memory. */
			run_truncate(run, 0);
		} else {
			u16 le_sz;
			u16 roff = le16_to_cpu(attr->nres.run_off);

			if (roff > le32_to_cpu(attr->size)) {
				err = -EINVAL;
				goto out;
			}

			run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
				      evcn1 - 1, svcn, Add2Ptr(attr, roff),
				      le32_to_cpu(attr->size) - roff);

			/* Delete this attribute segment. */
			mi_remove_attr(NULL, mi, attr);
			if (!le)
				break;

			le_sz = le16_to_cpu(le->size);
			if (!al_remove_le(ni, le)) {
				err = -EINVAL;
				goto out;
			}

			if (evcn1 >= alen)
				break;

			if (!svcn) {
				/* Load next record that contains this attribute. */
				if (ni_load_mi(ni, le, &mi)) {
					err = -EINVAL;
					goto out;
				}

				/* Look for required attribute. */
				attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
						    0, &le->id);
				if (!attr) {
					err = -EINVAL;
					goto out;
				}
				goto next_attr;
			}
			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
		}

		if (evcn1 >= alen)
			break;

		attr = ni_enum_attr_ex(ni, attr, &le, &mi);
		if (!attr) {
			err = -EINVAL;
			goto out;
		}

next_attr:
		svcn = le64_to_cpu(attr->nres.svcn);
		evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
	}

	if (!attr_b) {
		le_b = NULL;
		attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
				      &mi_b);
		if (!attr_b) {
			err = -ENOENT;
			goto out;
		}
	}

	data_size -= bytes;
	valid_size = ni->i_valid;
	if (vbo + bytes <= valid_size)
		valid_size -= bytes;
	else if (vbo < valid_size)
		valid_size = vbo;

	attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
	attr_b->nres.data_size = cpu_to_le64(data_size);
	attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
	total_size -= (u64)dealloc << sbi->cluster_bits;
	if (is_attr_ext(attr_b))
		attr_b->nres.total_size = cpu_to_le64(total_size);
	mi_b->dirty = true;

	/* Update inode size. */
	ni->i_valid = valid_size;
	ni->vfs_inode.i_size = data_size;
	inode_set_bytes(&ni->vfs_inode, total_size);
	ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
	mark_inode_dirty(&ni->vfs_inode);

out:
	up_write(&ni->file.run_lock);
	if (err)
		_ntfs_bad_inode(&ni->vfs_inode);

	return err;
}
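
/*
 * Illustrative example (not from the original source): on a volume with
 * 4 KiB clusters a non-compressed file has mask = 0xfff, so
 * attr_collapse_range(ni, 8192, 4096) removes exactly one cluster; an
 * unaligned request such as vbo = 100, bytes = 50 fails with -EINVAL.
 */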
2091
e8b8e97f
KA
2092/*
2093 * attr_punch_hole
2094 *
2095 * Not for normal files.
2096 */
be71b5cb
KK
2097int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
2098{
2099 int err = 0;
2100 struct runs_tree *run = &ni->file.run;
2101 struct ntfs_sb_info *sbi = ni->mi.sbi;
2102 struct ATTRIB *attr = NULL, *attr_b;
2103 struct ATTR_LIST_ENTRY *le, *le_b;
2104 struct mft_inode *mi, *mi_b;
20abc64f 2105 CLST svcn, evcn1, vcn, len, end, alen, hole, next_svcn;
be71b5cb
KK
2106 u64 total_size, alloc_size;
2107 u32 mask;
c1e0ab37 2108 __le16 a_flags;
20abc64f 2109 struct runs_tree run2;
be71b5cb
KK
2110
2111 if (!bytes)
2112 return 0;
2113
2114 le_b = NULL;
2115 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2116 if (!attr_b)
2117 return -ENOENT;
2118
2119 if (!attr_b->non_res) {
2120 u32 data_size = le32_to_cpu(attr_b->res.data_size);
2121 u32 from, to;
2122
2123 if (vbo > data_size)
2124 return 0;
2125
2126 from = vbo;
2127 to = min_t(u64, vbo + bytes, data_size);
2128 memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
2129 return 0;
2130 }
2131
2132 if (!is_attr_ext(attr_b))
2133 return -EOPNOTSUPP;
2134
2135 alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2136 total_size = le64_to_cpu(attr_b->nres.total_size);
2137
2138 if (vbo >= alloc_size) {
2139 /* NOTE: Punching beyond the allocated size is allowed; nothing to do. */
2140 return 0;
2141 }
2142
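	/*
	 * For sparse/compressed attributes, c_unit is the log2 of clusters per
	 * compression frame, so the mask covers one whole frame
	 * (one cluster when c_unit == 0).
	 */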
2143 mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2144
2145 bytes += vbo;
2146 if (bytes > alloc_size)
2147 bytes = alloc_size;
2148 bytes -= vbo;
2149
2150 if ((vbo & mask) || (bytes & mask)) {
2151 /* We have to zero the unaligned part(s) of the range. */
2152 if (!frame_size) {
2153 /* Caller insists the range is aligned. */
2154 return -EINVAL;
2155 }
2156 *frame_size = mask + 1;
2157 return E_NTFS_NOTALIGNED;
2158 }
2159
2160 down_write(&ni->file.run_lock);
2161 run_init(&run2);
2162 run_truncate(run, 0);
2163
2164 /*
2165 * Enumerate all attribute segments and punch hole where necessary.
2166 */
2167 alen = alloc_size >> sbi->cluster_bits;
2168 vcn = vbo >> sbi->cluster_bits;
2169 len = bytes >> sbi->cluster_bits;
2170 end = vcn + len;
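	/* @vbo and @bytes are frame-aligned here, so [vcn, end) covers the punched range in whole clusters. */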
2171 hole = 0;
2172
2173 svcn = le64_to_cpu(attr_b->nres.svcn);
2174 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2175 a_flags = attr_b->flags;
2176
2177 if (svcn <= vcn && vcn < evcn1) {
2178 attr = attr_b;
2179 le = le_b;
2180 mi = mi_b;
2181 } else if (!le_b) {
2182 err = -EINVAL;
2183 goto bad_inode;
2184 } else {
2185 le = le_b;
2186 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2187 &mi);
2188 if (!attr) {
2189 err = -EINVAL;
2190 goto bad_inode;
2191 }
2192
2193 svcn = le64_to_cpu(attr->nres.svcn);
2194 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2195 }
2196
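	/* Walk every attribute segment that overlaps [vcn, end). */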
2197 while (svcn < end) {
2198 CLST vcn1, zero, hole2 = hole;
2199
2200 err = attr_load_runs(attr, ni, run, &svcn);
2201 if (err)
2202 goto done;
2203 vcn1 = max(vcn, svcn);
2204 zero = min(end, evcn1) - vcn1;
2205
2206 /*
2207 * Check the range [vcn1, vcn1 + zero).
2208 * Calculate how many clusters there are.
2209 * Don't do any destructive actions.
2210 */
2211 err = run_deallocate_ex(NULL, run, vcn1, zero, &hole2, false);
2212 if (err)
2213 goto done;
2214
2215 /* Check if the required range is already a hole. */
2216 if (hole2 == hole)
2217 goto next_attr;
2218
2219 /* Make a clone of run to be able to undo. */
2220 err = run_clone(run, &run2);
2221 if (err)
2222 goto done;
2223
2224 /* Make the range [vcn1, vcn1 + zero) a sparse hole. */
2225 if (!run_add_entry(run, vcn1, SPARSE_LCN, zero, false)) {
2226 err = -ENOMEM;
2227 goto done;
2228 }
2229
2230 /* Update run in attribute segment. */
2231 err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
2232 if (err)
2233 goto done;
2234 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
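			/* mi_pack_runs stores only as many runs as fit in the record; evcn tells how far it got. */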
2235 if (next_svcn < evcn1) {
2236 /* Insert new attribute segment. */
2237 err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2238 next_svcn,
2239 evcn1 - next_svcn, a_flags,
2240 &attr, &mi, &le);
2241 if (err)
2242 goto undo_punch;
2243
2244 /* Layout of records may have changed. */
2245 attr_b = NULL;
2246 }
2247
2248 /* The real deallocation; it should not fail. */
2249 run_deallocate_ex(sbi, &run2, vcn1, zero, &hole, true);
2250
2251next_attr:
2252 /* Free all allocated memory. */
2253 run_truncate(run, 0);
2254
2255 if (evcn1 >= alen)
2256 break;
2257
2258 /* Get next attribute segment. */
2259 attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2260 if (!attr) {
2261 err = -EINVAL;
2262 goto bad_inode;
2263 }
2264
2265 svcn = le64_to_cpu(attr->nres.svcn);
2266 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2267 }
2268
2269done:
2270 if (!hole)
2271 goto out;
2272
2273 if (!attr_b) {
2274 attr_b = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
2275 &mi_b);
2276 if (!attr_b) {
2277 err = -EINVAL;
2278 goto bad_inode;
2279 }
2280 }
2281
2282 total_size -= (u64)hole << sbi->cluster_bits;
2283 attr_b->nres.total_size = cpu_to_le64(total_size);
2284 mi_b->dirty = true;
2285
2286 /* Update inode size. */
2287 inode_set_bytes(&ni->vfs_inode, total_size);
2288 ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2289 mark_inode_dirty(&ni->vfs_inode);
2290
2291out:
2292 run_close(&run2);
2293 up_write(&ni->file.run_lock);
2294 return err;
2295
2296bad_inode:
2297 _ntfs_bad_inode(&ni->vfs_inode);
2298 goto out;
2299
2300undo_punch:
2301 /*
2302 * Restore packed runs.
2303 * 'mi_pack_runs' should not fail because we restore the original runs.
2304 */
2305 if (mi_pack_runs(mi, attr, &run2, evcn1 - svcn))
2306 goto bad_inode;
2307
2308 goto done;
2309}
2310
2311/*
2312 * attr_insert_range - Insert a range (hole) into the file.
2313 * Not for normal files.
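 * Inserts @bytes zeroed bytes at @vbo, shifting the existing tail up;
 * @vbo and @bytes must be cluster (or compression frame) aligned.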
2314 */
2315int attr_insert_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
2316{
2317 int err = 0;
2318 struct runs_tree *run = &ni->file.run;
2319 struct ntfs_sb_info *sbi = ni->mi.sbi;
2320 struct ATTRIB *attr = NULL, *attr_b;
2321 struct ATTR_LIST_ENTRY *le, *le_b;
2322 struct mft_inode *mi, *mi_b;
2323 CLST vcn, svcn, evcn1, len, next_svcn;
2324 u64 data_size, alloc_size;
2325 u32 mask;
2326 __le16 a_flags;
2327
2328 if (!bytes)
2329 return 0;
2330
2331 le_b = NULL;
2332 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
2333 if (!attr_b)
2334 return -ENOENT;
2335
2336 if (!is_attr_ext(attr_b)) {
2337 /* It was checked above. See fallocate. */
2338 return -EOPNOTSUPP;
2339 }
2340
2341 if (!attr_b->non_res) {
2342 data_size = le32_to_cpu(attr_b->res.data_size);
2343 alloc_size = data_size;
2344 mask = sbi->cluster_mask; /* cluster_size - 1 */
2345 } else {
2346 data_size = le64_to_cpu(attr_b->nres.data_size);
2347 alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2348 mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
2349 }
2350
2351 if (vbo > data_size) {
2352 /* Inserting a range beyond the file size is not allowed. */
2353 return -EINVAL;
2354 }
2355
2356 if ((vbo & mask) || (bytes & mask)) {
2357 /* Only frame-aligned ranges may be inserted. */
2358 return -EINVAL;
2359 }
2360
2361 /*
2362 * valid_size <= data_size <= alloc_size
2363 * Check that the new alloc_size does not exceed the maximum possible size.
2364 */
2365 if (bytes > sbi->maxbytes_sparse - alloc_size)
2366 return -EFBIG;
2367
2368 vcn = vbo >> sbi->cluster_bits;
2369 len = bytes >> sbi->cluster_bits;
2370
2371 down_write(&ni->file.run_lock);
2372
2373 if (!attr_b->non_res) {
2374 err = attr_set_size(ni, ATTR_DATA, NULL, 0, run,
2375 data_size + bytes, NULL, false, NULL);
2376
2377 le_b = NULL;
2378 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2379 &mi_b);
2380 if (!attr_b) {
2381 err = -EINVAL;
2382 goto bad_inode;
2383 }
2384
2385 if (err)
2386 goto out;
2387
2388 if (!attr_b->non_res) {
2389 /* Still resident. */
2390 char *data = Add2Ptr(attr_b,
2391 le16_to_cpu(attr_b->res.data_off));
2392
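			/* Open the gap at @vbo: shift the tail up by @bytes and zero the inserted range. */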
2393 memmove(data + vbo + bytes, data + vbo, data_size - vbo);
2394 memset(data + vbo, 0, bytes);
2395 goto done;
2396 }
2397
2398 /* The resident attribute has become non-resident. */
2399 data_size = le64_to_cpu(attr_b->nres.data_size);
2400 alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
2401 }
2402
2403 /*
2404 * Enumerate all attribute segments and shift start vcn.
2405 */
2406 a_flags = attr_b->flags;
2407 svcn = le64_to_cpu(attr_b->nres.svcn);
2408 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2409
2410 if (svcn <= vcn && vcn < evcn1) {
2411 attr = attr_b;
2412 le = le_b;
2413 mi = mi_b;
2414 } else if (!le_b) {
2415 err = -EINVAL;
2416 goto bad_inode;
2417 } else {
2418 le = le_b;
2419 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2420 &mi);
2421 if (!attr) {
2422 err = -EINVAL;
2423 goto bad_inode;
2424 }
2425
2426 svcn = le64_to_cpu(attr->nres.svcn);
2427 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2428 }
2429
2430 run_truncate(run, 0); /* clear cached values. */
2431 err = attr_load_runs(attr, ni, run, NULL);
2432 if (err)
2433 goto out;
2434
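	/* Shift the mapping of all runs at and above @vcn up by @len, leaving the new range sparse. */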
2435 if (!run_insert_range(run, vcn, len)) {
2436 err = -ENOMEM;
2437 goto out;
2438 }
2439
2440 /* Try to pack in current record as much as possible. */
2441 err = mi_pack_runs(mi, attr, run, evcn1 + len - svcn);
2442 if (err)
2443 goto out;
2444
2445 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
2446
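	/* Shift the VCN ranges of all following $DATA segments up by @len. */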
2447 while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2448 attr->type == ATTR_DATA && !attr->name_len) {
2449 le64_add_cpu(&attr->nres.svcn, len);
2450 le64_add_cpu(&attr->nres.evcn, len);
2451 if (le) {
2452 le->vcn = attr->nres.svcn;
2453 ni->attr_list.dirty = true;
2454 }
2455 mi->dirty = true;
2456 }
2457
2458 if (next_svcn < evcn1 + len) {
2459 err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
2460 next_svcn, evcn1 + len - next_svcn,
2461 a_flags, NULL, NULL, NULL);
2462
2463 le_b = NULL;
2464 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
2465 &mi_b);
2466 if (!attr_b) {
2467 err = -EINVAL;
2468 goto bad_inode;
2469 }
2470
2471 if (err) {
2472 /* ni_insert_nonresident failed. Try to undo. */
2473 goto undo_insert_range;
2474 }
2475 }
2476
2477 /*
2478 * Update primary attribute segment.
2479 */
2480 if (vbo <= ni->i_valid)
2481 ni->i_valid += bytes;
2482
2483 attr_b->nres.data_size = cpu_to_le64(data_size + bytes);
2484 attr_b->nres.alloc_size = cpu_to_le64(alloc_size + bytes);
2485
2486 /* ni->i_valid may temporarily not equal the valid size. */
2487 if (ni->i_valid > data_size + bytes)
2488 attr_b->nres.valid_size = attr_b->nres.data_size;
2489 else
2490 attr_b->nres.valid_size = cpu_to_le64(ni->i_valid);
2491 mi_b->dirty = true;
2492
2493done:
2494 ni->vfs_inode.i_size += bytes;
2495 ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2496 mark_inode_dirty(&ni->vfs_inode);
2497
2498out:
2499 run_truncate(run, 0); /* clear cached values. */
2500
2501 up_write(&ni->file.run_lock);
2502
2503 return err;
2504
2505bad_inode:
2506 _ntfs_bad_inode(&ni->vfs_inode);
2507 goto out;
2508
2509undo_insert_range:
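	/* Undo: reload the runs, collapse the inserted range, re-pack, and shift following segments back. */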
2510 svcn = le64_to_cpu(attr_b->nres.svcn);
2511 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2512
2513 if (svcn <= vcn && vcn < evcn1) {
2514 attr = attr_b;
2515 le = le_b;
2516 mi = mi_b;
2517 } else if (!le_b) {
2518 goto bad_inode;
2519 } else {
2520 le = le_b;
2521 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2522 &mi);
2523 if (!attr) {
2524 goto bad_inode;
2525 }
2526
2527 svcn = le64_to_cpu(attr->nres.svcn);
2528 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2529 }
2530
2531 if (attr_load_runs(attr, ni, run, NULL))
2532 goto bad_inode;
2533
2534 if (!run_collapse_range(run, vcn, len))
2535 goto bad_inode;
2536
2537 if (mi_pack_runs(mi, attr, run, evcn1 + len - svcn))
2538 goto bad_inode;
2539
2540 while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi)) &&
2541 attr->type == ATTR_DATA && !attr->name_len) {
2542 le64_sub_cpu(&attr->nres.svcn, len);
2543 le64_sub_cpu(&attr->nres.evcn, len);
2544 if (le) {
2545 le->vcn = attr->nres.svcn;
2546 ni->attr_list.dirty = true;
2547 }
2548 mi->dirty = true;
2549 }
2550
2551 goto out;
2552}