fs/ntfs3: Do not use driver own alloc wrappers
fs/ntfs3/attrib.c
1// SPDX-License-Identifier: GPL-2.0
2/*
3 *
4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5 *
6 * TODO: merge attr_set_size/attr_data_get_block/attr_allocate_frame?
7 */
8
9#include <linux/blkdev.h>
10#include <linux/buffer_head.h>
11#include <linux/fs.h>
12#include <linux/hash.h>
13#include <linux/nls.h>
14#include <linux/ratelimit.h>
15#include <linux/slab.h>
16
17#include "debug.h"
18#include "ntfs.h"
19#include "ntfs_fs.h"
20
21/*
 22 * You can define NTFS_MIN_LOG2_OF_CLUMP/NTFS_MAX_LOG2_OF_CLUMP externally to
 23 * tune the preallocation algorithm.
24 */
25#ifndef NTFS_MIN_LOG2_OF_CLUMP
26#define NTFS_MIN_LOG2_OF_CLUMP 16
27#endif
28
29#ifndef NTFS_MAX_LOG2_OF_CLUMP
30#define NTFS_MAX_LOG2_OF_CLUMP 26
31#endif
32
33// 16M
34#define NTFS_CLUMP_MIN (1 << (NTFS_MIN_LOG2_OF_CLUMP + 8))
35// 16G
36#define NTFS_CLUMP_MAX (1ull << (NTFS_MAX_LOG2_OF_CLUMP + 8))
37
38/*
39 * get_pre_allocated
 40 * Returns 'size' rounded up to the preallocation clump boundary.
41 */
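/*
 * Illustrative example (using the default constants above): a size of
 * 8 MiB + 1 byte is below NTFS_CLUMP_MIN, so it is rounded up to the next
 * 64 KiB boundary (8 MiB + 64 KiB); sizes of NTFS_CLUMP_MAX and above are
 * rounded up to a 64 MiB boundary (1 << NTFS_MAX_LOG2_OF_CLUMP).
 */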
42static inline u64 get_pre_allocated(u64 size)
43{
44 u32 clump;
45 u8 align_shift;
46 u64 ret;
47
48 if (size <= NTFS_CLUMP_MIN) {
49 clump = 1 << NTFS_MIN_LOG2_OF_CLUMP;
50 align_shift = NTFS_MIN_LOG2_OF_CLUMP;
51 } else if (size >= NTFS_CLUMP_MAX) {
52 clump = 1 << NTFS_MAX_LOG2_OF_CLUMP;
53 align_shift = NTFS_MAX_LOG2_OF_CLUMP;
54 } else {
55 align_shift = NTFS_MIN_LOG2_OF_CLUMP - 1 +
56 __ffs(size >> (8 + NTFS_MIN_LOG2_OF_CLUMP));
57 clump = 1u << align_shift;
58 }
59
60 ret = (((size + clump - 1) >> align_shift)) << align_shift;
61
62 return ret;
63}
64
65/*
66 * attr_must_be_resident
67 *
68 * returns true if attribute must be resident
69 */
70static inline bool attr_must_be_resident(struct ntfs_sb_info *sbi,
71 enum ATTR_TYPE type)
72{
73 const struct ATTR_DEF_ENTRY *de;
74
75 switch (type) {
76 case ATTR_STD:
77 case ATTR_NAME:
78 case ATTR_ID:
79 case ATTR_LABEL:
80 case ATTR_VOL_INFO:
81 case ATTR_ROOT:
82 case ATTR_EA_INFO:
83 return true;
84 default:
85 de = ntfs_query_def(sbi, type);
86 if (de && (de->flags & NTFS_ATTR_MUST_BE_RESIDENT))
87 return true;
88 return false;
89 }
90}
91
92/*
93 * attr_load_runs
94 *
95 * load all runs stored in 'attr'
96 */
97int attr_load_runs(struct ATTRIB *attr, struct ntfs_inode *ni,
98 struct runs_tree *run, const CLST *vcn)
99{
100 int err;
101 CLST svcn = le64_to_cpu(attr->nres.svcn);
102 CLST evcn = le64_to_cpu(attr->nres.evcn);
103 u32 asize;
104 u16 run_off;
105
106 if (svcn >= evcn + 1 || run_is_mapped_full(run, svcn, evcn))
107 return 0;
108
109 if (vcn && (evcn < *vcn || *vcn < svcn))
110 return -EINVAL;
111
112 asize = le32_to_cpu(attr->size);
113 run_off = le16_to_cpu(attr->nres.run_off);
114 err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn,
115 vcn ? *vcn : svcn, Add2Ptr(attr, run_off),
116 asize - run_off);
117 if (err < 0)
118 return err;
119
120 return 0;
121}
122
123/*
 124 * run_deallocate_ex
125 *
126 * Deallocate clusters
127 */
128static int run_deallocate_ex(struct ntfs_sb_info *sbi, struct runs_tree *run,
129 CLST vcn, CLST len, CLST *done, bool trim)
130{
131 int err = 0;
132 CLST vcn_next, vcn0 = vcn, lcn, clen, dn = 0;
133 size_t idx;
134
135 if (!len)
136 goto out;
137
138 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
139failed:
140 run_truncate(run, vcn0);
141 err = -EINVAL;
142 goto out;
143 }
144
145 for (;;) {
146 if (clen > len)
147 clen = len;
148
149 if (!clen) {
150 err = -EINVAL;
151 goto out;
152 }
153
154 if (lcn != SPARSE_LCN) {
155 mark_as_free_ex(sbi, lcn, clen, trim);
156 dn += clen;
157 }
158
159 len -= clen;
160 if (!len)
161 break;
162
163 vcn_next = vcn + clen;
164 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
165 vcn != vcn_next) {
166 // save memory - don't load entire run
167 goto failed;
168 }
169 }
170
171out:
172 if (done)
173 *done += dn;
174
175 return err;
176}
177
178/*
179 * attr_allocate_clusters
180 *
181 * find free space, mark it as used and store in 'run'
182 */
183int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
184 CLST vcn, CLST lcn, CLST len, CLST *pre_alloc,
185 enum ALLOCATE_OPT opt, CLST *alen, const size_t fr,
186 CLST *new_lcn)
187{
188 int err;
189 CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
190 struct wnd_bitmap *wnd = &sbi->used.bitmap;
191 size_t cnt = run->count;
192
193 for (;;) {
194 err = ntfs_look_for_free_space(sbi, lcn, len + pre, &lcn, &flen,
195 opt);
196
197 if (err == -ENOSPC && pre) {
198 pre = 0;
199 if (*pre_alloc)
200 *pre_alloc = 0;
201 continue;
202 }
203
204 if (err)
205 goto out;
206
207 if (new_lcn && vcn == vcn0)
208 *new_lcn = lcn;
209
210 /* Add new fragment into run storage */
211 if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
212 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
213 wnd_set_free(wnd, lcn, flen);
214 up_write(&wnd->rw_lock);
215 err = -ENOMEM;
216 goto out;
217 }
218
219 vcn += flen;
220
221 if (flen >= len || opt == ALLOCATE_MFT ||
222 (fr && run->count - cnt >= fr)) {
223 *alen = vcn - vcn0;
224 return 0;
225 }
226
227 len -= flen;
228 }
229
230out:
231 /* undo */
232 run_deallocate_ex(sbi, run, vcn0, vcn - vcn0, NULL, false);
233 run_truncate(run, vcn0);
234
235 return err;
236}
237
238/*
 239 * If 'page' is not NULL, it already contains the resident data
 240 * and is locked (called from ni_write_frame).
241 */
242int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr,
243 struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
244 u64 new_size, struct runs_tree *run,
245 struct ATTRIB **ins_attr, struct page *page)
246{
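/*
 * Rough outline: duplicate the resident attribute, allocate clusters for
 * its payload, write the data out (via the page cache for the unnamed
 * $DATA stream, ntfs_sb_write_run() otherwise), then replace the resident
 * attribute with a non-resident one; on failure the original attribute is
 * restored in the record.
 */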
247 struct ntfs_sb_info *sbi;
248 struct ATTRIB *attr_s;
249 struct MFT_REC *rec;
250 u32 used, asize, rsize, aoff, align;
251 bool is_data;
252 CLST len, alen;
253 char *next;
254 int err;
255
256 if (attr->non_res) {
257 *ins_attr = attr;
258 return 0;
259 }
260
261 sbi = mi->sbi;
262 rec = mi->mrec;
263 attr_s = NULL;
264 used = le32_to_cpu(rec->used);
265 asize = le32_to_cpu(attr->size);
266 next = Add2Ptr(attr, asize);
267 aoff = PtrOffset(rec, attr);
268 rsize = le32_to_cpu(attr->res.data_size);
269 is_data = attr->type == ATTR_DATA && !attr->name_len;
270
271 align = sbi->cluster_size;
272 if (is_attr_compressed(attr))
273 align <<= COMPRESSION_UNIT;
274 len = (rsize + align - 1) >> sbi->cluster_bits;
275
276 run_init(run);
277
278 /* make a copy of original attribute */
 279 attr_s = kmemdup(attr, asize, GFP_NOFS);
280 if (!attr_s) {
281 err = -ENOMEM;
282 goto out;
283 }
284
285 if (!len) {
286 /* empty resident -> empty nonresident */
287 alen = 0;
288 } else {
289 const char *data = resident_data(attr);
290
291 err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL,
292 ALLOCATE_DEF, &alen, 0, NULL);
293 if (err)
294 goto out1;
295
296 if (!rsize) {
297 /* empty resident -> non empty nonresident */
298 } else if (!is_data) {
299 err = ntfs_sb_write_run(sbi, run, 0, data, rsize);
300 if (err)
301 goto out2;
302 } else if (!page) {
303 char *kaddr;
304
305 page = grab_cache_page(ni->vfs_inode.i_mapping, 0);
306 if (!page) {
307 err = -ENOMEM;
308 goto out2;
309 }
310 kaddr = kmap_atomic(page);
311 memcpy(kaddr, data, rsize);
312 memset(kaddr + rsize, 0, PAGE_SIZE - rsize);
313 kunmap_atomic(kaddr);
314 flush_dcache_page(page);
315 SetPageUptodate(page);
316 set_page_dirty(page);
317 unlock_page(page);
318 put_page(page);
319 }
320 }
321
322 /* remove original attribute */
323 used -= asize;
324 memmove(attr, Add2Ptr(attr, asize), used - aoff);
325 rec->used = cpu_to_le32(used);
326 mi->dirty = true;
327 if (le)
328 al_remove_le(ni, le);
329
330 err = ni_insert_nonresident(ni, attr_s->type, attr_name(attr_s),
331 attr_s->name_len, run, 0, alen,
332 attr_s->flags, &attr, NULL);
333 if (err)
334 goto out3;
335
 336 kfree(attr_s);
337 attr->nres.data_size = cpu_to_le64(rsize);
338 attr->nres.valid_size = attr->nres.data_size;
339
340 *ins_attr = attr;
341
342 if (is_data)
343 ni->ni_flags &= ~NI_FLAG_RESIDENT;
344
 345 /* Resident attribute becomes non-resident */
346 return 0;
347
348out3:
349 attr = Add2Ptr(rec, aoff);
350 memmove(next, attr, used - aoff);
351 memcpy(attr, attr_s, asize);
352 rec->used = cpu_to_le32(used + asize);
353 mi->dirty = true;
354out2:
 355 /* undo: do not trim newly allocated clusters */
356 run_deallocate(sbi, run, false);
357 run_close(run);
358out1:
 359 kfree(attr_s);
360 /*reinsert le*/
361out:
362 return err;
363}
364
365/*
366 * attr_set_size_res
367 *
368 * helper for attr_set_size
369 */
370static int attr_set_size_res(struct ntfs_inode *ni, struct ATTRIB *attr,
371 struct ATTR_LIST_ENTRY *le, struct mft_inode *mi,
372 u64 new_size, struct runs_tree *run,
373 struct ATTRIB **ins_attr)
374{
375 struct ntfs_sb_info *sbi = mi->sbi;
376 struct MFT_REC *rec = mi->mrec;
377 u32 used = le32_to_cpu(rec->used);
378 u32 asize = le32_to_cpu(attr->size);
379 u32 aoff = PtrOffset(rec, attr);
380 u32 rsize = le32_to_cpu(attr->res.data_size);
381 u32 tail = used - aoff - asize;
382 char *next = Add2Ptr(attr, asize);
 383 s64 dsize = ALIGN(new_size, 8) - ALIGN(rsize, 8);
384
385 if (dsize < 0) {
386 memmove(next + dsize, next, tail);
387 } else if (dsize > 0) {
388 if (used + dsize > sbi->max_bytes_per_attr)
389 return attr_make_nonresident(ni, attr, le, mi, new_size,
390 run, ins_attr, NULL);
391
392 memmove(next + dsize, next, tail);
393 memset(next, 0, dsize);
394 }
395
396 if (new_size > rsize)
397 memset(Add2Ptr(resident_data(attr), rsize), 0,
398 new_size - rsize);
399
400 rec->used = cpu_to_le32(used + dsize);
401 attr->size = cpu_to_le32(asize + dsize);
402 attr->res.data_size = cpu_to_le32(new_size);
403 mi->dirty = true;
404 *ins_attr = attr;
405
406 return 0;
407}
408
409/*
410 * attr_set_size
411 *
412 * change the size of attribute
413 * Extend:
414 * - sparse/compressed: no allocated clusters
415 * - normal: append allocated and preallocated new clusters
416 * Shrink:
417 * - no deallocate if keep_prealloc is set
418 */
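/*
 * Note: after operations that can change the layout of MFT records
 * (resident to non-resident conversion, creation or expansion of the
 * attribute list), the function restarts from the 'again' label and
 * re-resolves the base attribute.
 */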
419int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
420 const __le16 *name, u8 name_len, struct runs_tree *run,
421 u64 new_size, const u64 *new_valid, bool keep_prealloc,
422 struct ATTRIB **ret)
423{
424 int err = 0;
425 struct ntfs_sb_info *sbi = ni->mi.sbi;
426 u8 cluster_bits = sbi->cluster_bits;
427 bool is_mft =
428 ni->mi.rno == MFT_REC_MFT && type == ATTR_DATA && !name_len;
429 u64 old_valid, old_size, old_alloc, new_alloc, new_alloc_tmp;
430 struct ATTRIB *attr = NULL, *attr_b;
431 struct ATTR_LIST_ENTRY *le, *le_b;
432 struct mft_inode *mi, *mi_b;
433 CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
434 CLST next_svcn, pre_alloc = -1, done = 0;
435 bool is_ext;
436 u32 align;
437 struct MFT_REC *rec;
438
439again:
440 le_b = NULL;
441 attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
442 &mi_b);
443 if (!attr_b) {
444 err = -ENOENT;
445 goto out;
446 }
447
448 if (!attr_b->non_res) {
449 err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
450 &attr_b);
451 if (err || !attr_b->non_res)
452 goto out;
453
454 /* layout of records may be changed, so do a full search */
455 goto again;
456 }
457
458 is_ext = is_attr_ext(attr_b);
459
460again_1:
461 align = sbi->cluster_size;
462
463 if (is_ext) {
464 align <<= attr_b->nres.c_unit;
465 if (is_attr_sparsed(attr_b))
466 keep_prealloc = false;
467 }
468
469 old_valid = le64_to_cpu(attr_b->nres.valid_size);
470 old_size = le64_to_cpu(attr_b->nres.data_size);
471 old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
472 old_alen = old_alloc >> cluster_bits;
473
474 new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
475 new_alen = new_alloc >> cluster_bits;
476
477 if (keep_prealloc && is_ext)
478 keep_prealloc = false;
479
480 if (keep_prealloc && new_size < old_size) {
481 attr_b->nres.data_size = cpu_to_le64(new_size);
482 mi_b->dirty = true;
483 goto ok;
484 }
485
486 vcn = old_alen - 1;
487
488 svcn = le64_to_cpu(attr_b->nres.svcn);
489 evcn = le64_to_cpu(attr_b->nres.evcn);
490
491 if (svcn <= vcn && vcn <= evcn) {
492 attr = attr_b;
493 le = le_b;
494 mi = mi_b;
495 } else if (!le_b) {
496 err = -EINVAL;
497 goto out;
498 } else {
499 le = le_b;
500 attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
501 &mi);
502 if (!attr) {
503 err = -EINVAL;
504 goto out;
505 }
506
507next_le_1:
508 svcn = le64_to_cpu(attr->nres.svcn);
509 evcn = le64_to_cpu(attr->nres.evcn);
510 }
511
512next_le:
513 rec = mi->mrec;
514
515 err = attr_load_runs(attr, ni, run, NULL);
516 if (err)
517 goto out;
518
519 if (new_size > old_size) {
520 CLST to_allocate;
521 size_t free;
522
523 if (new_alloc <= old_alloc) {
524 attr_b->nres.data_size = cpu_to_le64(new_size);
525 mi_b->dirty = true;
526 goto ok;
527 }
528
529 to_allocate = new_alen - old_alen;
530add_alloc_in_same_attr_seg:
531 lcn = 0;
532 if (is_mft) {
533 /* mft allocates clusters from mftzone */
534 pre_alloc = 0;
535 } else if (is_ext) {
536 /* no preallocate for sparse/compress */
537 pre_alloc = 0;
538 } else if (pre_alloc == -1) {
539 pre_alloc = 0;
540 if (type == ATTR_DATA && !name_len &&
541 sbi->options.prealloc) {
542 CLST new_alen2 = bytes_to_cluster(
543 sbi, get_pre_allocated(new_size));
544 pre_alloc = new_alen2 - new_alen;
545 }
546
547 /* Get the last lcn to allocate from */
548 if (old_alen &&
549 !run_lookup_entry(run, vcn, &lcn, NULL, NULL)) {
550 lcn = SPARSE_LCN;
551 }
552
553 if (lcn == SPARSE_LCN)
554 lcn = 0;
555 else if (lcn)
556 lcn += 1;
557
558 free = wnd_zeroes(&sbi->used.bitmap);
559 if (to_allocate > free) {
560 err = -ENOSPC;
561 goto out;
562 }
563
564 if (pre_alloc && to_allocate + pre_alloc > free)
565 pre_alloc = 0;
566 }
567
568 vcn = old_alen;
569
570 if (is_ext) {
571 if (!run_add_entry(run, vcn, SPARSE_LCN, to_allocate,
572 false)) {
573 err = -ENOMEM;
574 goto out;
575 }
576 alen = to_allocate;
577 } else {
578 /* ~3 bytes per fragment */
579 err = attr_allocate_clusters(
580 sbi, run, vcn, lcn, to_allocate, &pre_alloc,
581 is_mft ? ALLOCATE_MFT : 0, &alen,
582 is_mft ? 0
583 : (sbi->record_size -
584 le32_to_cpu(rec->used) + 8) /
585 3 +
586 1,
587 NULL);
588 if (err)
589 goto out;
590 }
591
592 done += alen;
593 vcn += alen;
594 if (to_allocate > alen)
595 to_allocate -= alen;
596 else
597 to_allocate = 0;
598
599pack_runs:
600 err = mi_pack_runs(mi, attr, run, vcn - svcn);
601 if (err)
602 goto out;
603
604 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
605 new_alloc_tmp = (u64)next_svcn << cluster_bits;
606 attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
607 mi_b->dirty = true;
608
609 if (next_svcn >= vcn && !to_allocate) {
610 /* Normal way. update attribute and exit */
611 attr_b->nres.data_size = cpu_to_le64(new_size);
612 goto ok;
613 }
614
 615 /* at least two MFT records to avoid a recursive loop */
616 if (is_mft && next_svcn == vcn &&
617 ((u64)done << sbi->cluster_bits) >= 2 * sbi->record_size) {
618 new_size = new_alloc_tmp;
619 attr_b->nres.data_size = attr_b->nres.alloc_size;
620 goto ok;
621 }
622
623 if (le32_to_cpu(rec->used) < sbi->record_size) {
624 old_alen = next_svcn;
625 evcn = old_alen - 1;
626 goto add_alloc_in_same_attr_seg;
627 }
628
629 attr_b->nres.data_size = attr_b->nres.alloc_size;
630 if (new_alloc_tmp < old_valid)
631 attr_b->nres.valid_size = attr_b->nres.data_size;
632
633 if (type == ATTR_LIST) {
634 err = ni_expand_list(ni);
635 if (err)
636 goto out;
637 if (next_svcn < vcn)
638 goto pack_runs;
639
640 /* layout of records is changed */
641 goto again;
642 }
643
644 if (!ni->attr_list.size) {
645 err = ni_create_attr_list(ni);
646 if (err)
647 goto out;
648 /* layout of records is changed */
649 }
650
651 if (next_svcn >= vcn) {
652 /* this is mft data, repeat */
653 goto again;
654 }
655
656 /* insert new attribute segment */
657 err = ni_insert_nonresident(ni, type, name, name_len, run,
658 next_svcn, vcn - next_svcn,
659 attr_b->flags, &attr, &mi);
660 if (err)
661 goto out;
662
663 if (!is_mft)
664 run_truncate_head(run, evcn + 1);
665
666 svcn = le64_to_cpu(attr->nres.svcn);
667 evcn = le64_to_cpu(attr->nres.evcn);
668
669 le_b = NULL;
 670 /* layout of records may be changed */
 671 /* find base attribute to update */
672 attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
673 NULL, &mi_b);
674 if (!attr_b) {
675 err = -ENOENT;
676 goto out;
677 }
678
679 attr_b->nres.alloc_size = cpu_to_le64((u64)vcn << cluster_bits);
680 attr_b->nres.data_size = attr_b->nres.alloc_size;
681 attr_b->nres.valid_size = attr_b->nres.alloc_size;
682 mi_b->dirty = true;
683 goto again_1;
684 }
685
686 if (new_size != old_size ||
687 (new_alloc != old_alloc && !keep_prealloc)) {
688 vcn = max(svcn, new_alen);
689 new_alloc_tmp = (u64)vcn << cluster_bits;
690
691 alen = 0;
692 err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &alen,
693 true);
694 if (err)
695 goto out;
696
697 run_truncate(run, vcn);
698
699 if (vcn > svcn) {
700 err = mi_pack_runs(mi, attr, run, vcn - svcn);
701 if (err)
702 goto out;
703 } else if (le && le->vcn) {
704 u16 le_sz = le16_to_cpu(le->size);
705
706 /*
707 * NOTE: list entries for one attribute are always
 708 * the same size. We deal with the last entry (vcn==0)
 709 * and it is not the first one in the entries array
 710 * (the list entry for the std attribute is always first),
 711 * so it is safe to step back.
712 */
713 mi_remove_attr(mi, attr);
714
715 if (!al_remove_le(ni, le)) {
716 err = -EINVAL;
717 goto out;
718 }
719
720 le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
721 } else {
722 attr->nres.evcn = cpu_to_le64((u64)vcn - 1);
723 mi->dirty = true;
724 }
725
726 attr_b->nres.alloc_size = cpu_to_le64(new_alloc_tmp);
727
728 if (vcn == new_alen) {
729 attr_b->nres.data_size = cpu_to_le64(new_size);
730 if (new_size < old_valid)
731 attr_b->nres.valid_size =
732 attr_b->nres.data_size;
733 } else {
734 if (new_alloc_tmp <=
735 le64_to_cpu(attr_b->nres.data_size))
736 attr_b->nres.data_size =
737 attr_b->nres.alloc_size;
738 if (new_alloc_tmp <
739 le64_to_cpu(attr_b->nres.valid_size))
740 attr_b->nres.valid_size =
741 attr_b->nres.alloc_size;
742 }
743
744 if (is_ext)
745 le64_sub_cpu(&attr_b->nres.total_size,
746 ((u64)alen << cluster_bits));
747
748 mi_b->dirty = true;
749
750 if (new_alloc_tmp <= new_alloc)
751 goto ok;
752
753 old_size = new_alloc_tmp;
754 vcn = svcn - 1;
755
756 if (le == le_b) {
757 attr = attr_b;
758 mi = mi_b;
759 evcn = svcn - 1;
760 svcn = 0;
761 goto next_le;
762 }
763
764 if (le->type != type || le->name_len != name_len ||
765 memcmp(le_name(le), name, name_len * sizeof(short))) {
766 err = -EINVAL;
767 goto out;
768 }
769
770 err = ni_load_mi(ni, le, &mi);
771 if (err)
772 goto out;
773
774 attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
775 if (!attr) {
776 err = -EINVAL;
777 goto out;
778 }
779 goto next_le_1;
780 }
781
782ok:
783 if (new_valid) {
784 __le64 valid = cpu_to_le64(min(*new_valid, new_size));
785
786 if (attr_b->nres.valid_size != valid) {
787 attr_b->nres.valid_size = valid;
788 mi_b->dirty = true;
789 }
790 }
791
792out:
793 if (!err && attr_b && ret)
794 *ret = attr_b;
795
796 /* update inode_set_bytes*/
797 if (!err && ((type == ATTR_DATA && !name_len) ||
798 (type == ATTR_ALLOC && name == I30_NAME))) {
799 bool dirty = false;
800
801 if (ni->vfs_inode.i_size != new_size) {
802 ni->vfs_inode.i_size = new_size;
803 dirty = true;
804 }
805
806 if (attr_b && attr_b->non_res) {
807 new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
808 if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
809 inode_set_bytes(&ni->vfs_inode, new_alloc);
810 dirty = true;
811 }
812 }
813
814 if (dirty) {
815 ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
816 mark_inode_dirty(&ni->vfs_inode);
817 }
818 }
819
820 return err;
821}
822
823int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
824 CLST *len, bool *new)
825{
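/*
 * Summary: translate 'vcn' of the unnamed $DATA attribute into an lcn.
 * If the range is sparse and 'new' is non-NULL, clusters are allocated
 * (up to 'clen', rounded up to the compression unit) and *new is set.
 * For a resident attribute *lcn is set to RESIDENT_LCN.
 */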
826 int err = 0;
827 struct runs_tree *run = &ni->file.run;
828 struct ntfs_sb_info *sbi;
829 u8 cluster_bits;
830 struct ATTRIB *attr = NULL, *attr_b;
831 struct ATTR_LIST_ENTRY *le, *le_b;
832 struct mft_inode *mi, *mi_b;
833 CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end;
834 u64 total_size;
835 u32 clst_per_frame;
836 bool ok;
837
838 if (new)
839 *new = false;
840
841 down_read(&ni->file.run_lock);
842 ok = run_lookup_entry(run, vcn, lcn, len, NULL);
843 up_read(&ni->file.run_lock);
844
845 if (ok && (*lcn != SPARSE_LCN || !new)) {
846 /* normal way */
847 return 0;
848 }
849
850 if (!clen)
851 clen = 1;
852
853 if (ok && clen > *len)
854 clen = *len;
855
856 sbi = ni->mi.sbi;
857 cluster_bits = sbi->cluster_bits;
858
859 ni_lock(ni);
860 down_write(&ni->file.run_lock);
861
862 le_b = NULL;
863 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
864 if (!attr_b) {
865 err = -ENOENT;
866 goto out;
867 }
868
869 if (!attr_b->non_res) {
870 *lcn = RESIDENT_LCN;
871 *len = 1;
872 goto out;
873 }
874
875 asize = le64_to_cpu(attr_b->nres.alloc_size) >> sbi->cluster_bits;
876 if (vcn >= asize) {
877 err = -EINVAL;
878 goto out;
879 }
880
881 clst_per_frame = 1u << attr_b->nres.c_unit;
882 to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1);
883
884 if (vcn + to_alloc > asize)
885 to_alloc = asize - vcn;
886
887 svcn = le64_to_cpu(attr_b->nres.svcn);
888 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
889
890 attr = attr_b;
891 le = le_b;
892 mi = mi_b;
893
894 if (le_b && (vcn < svcn || evcn1 <= vcn)) {
895 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
896 &mi);
897 if (!attr) {
898 err = -EINVAL;
899 goto out;
900 }
901 svcn = le64_to_cpu(attr->nres.svcn);
902 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
903 }
904
905 err = attr_load_runs(attr, ni, run, NULL);
906 if (err)
907 goto out;
908
909 if (!ok) {
910 ok = run_lookup_entry(run, vcn, lcn, len, NULL);
911 if (ok && (*lcn != SPARSE_LCN || !new)) {
912 /* normal way */
913 err = 0;
914 goto ok;
915 }
916
917 if (!ok && !new) {
918 *len = 0;
919 err = 0;
920 goto ok;
921 }
922
923 if (ok && clen > *len) {
924 clen = *len;
925 to_alloc = (clen + clst_per_frame - 1) &
926 ~(clst_per_frame - 1);
927 }
928 }
929
930 if (!is_attr_ext(attr_b)) {
931 err = -EINVAL;
932 goto out;
933 }
934
935 /* Get the last lcn to allocate from */
936 hint = 0;
937
938 if (vcn > evcn1) {
939 if (!run_add_entry(run, evcn1, SPARSE_LCN, vcn - evcn1,
940 false)) {
941 err = -ENOMEM;
942 goto out;
943 }
944 } else if (vcn && !run_lookup_entry(run, vcn - 1, &hint, NULL, NULL)) {
945 hint = -1;
946 }
947
948 err = attr_allocate_clusters(
949 sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len,
950 (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1,
951 lcn);
952 if (err)
953 goto out;
954 *new = true;
955
956 end = vcn + *len;
957
958 total_size = le64_to_cpu(attr_b->nres.total_size) +
959 ((u64)*len << cluster_bits);
960
961repack:
962 err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
963 if (err)
964 goto out;
965
966 attr_b->nres.total_size = cpu_to_le64(total_size);
967 inode_set_bytes(&ni->vfs_inode, total_size);
968 ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
969
970 mi_b->dirty = true;
971 mark_inode_dirty(&ni->vfs_inode);
972
973 /* stored [vcn : next_svcn) from [vcn : end) */
974 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
975
976 if (end <= evcn1) {
977 if (next_svcn == evcn1) {
978 /* Normal way. update attribute and exit */
979 goto ok;
980 }
981 /* add new segment [next_svcn : evcn1 - next_svcn )*/
982 if (!ni->attr_list.size) {
983 err = ni_create_attr_list(ni);
984 if (err)
985 goto out;
986 /* layout of records is changed */
987 le_b = NULL;
988 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
989 0, NULL, &mi_b);
990 if (!attr_b) {
991 err = -ENOENT;
992 goto out;
993 }
994
995 attr = attr_b;
996 le = le_b;
997 mi = mi_b;
998 goto repack;
999 }
1000 }
1001
1002 svcn = evcn1;
1003
1004 /* Estimate next attribute */
1005 attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1006
1007 if (attr) {
1008 CLST alloc = bytes_to_cluster(
1009 sbi, le64_to_cpu(attr_b->nres.alloc_size));
1010 CLST evcn = le64_to_cpu(attr->nres.evcn);
1011
1012 if (end < next_svcn)
1013 end = next_svcn;
1014 while (end > evcn) {
1015 /* remove segment [svcn : evcn)*/
1016 mi_remove_attr(mi, attr);
1017
1018 if (!al_remove_le(ni, le)) {
1019 err = -EINVAL;
1020 goto out;
1021 }
1022
1023 if (evcn + 1 >= alloc) {
1024 /* last attribute segment */
1025 evcn1 = evcn + 1;
1026 goto ins_ext;
1027 }
1028
1029 if (ni_load_mi(ni, le, &mi)) {
1030 attr = NULL;
1031 goto out;
1032 }
1033
1034 attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
1035 &le->id);
1036 if (!attr) {
1037 err = -EINVAL;
1038 goto out;
1039 }
1040 svcn = le64_to_cpu(attr->nres.svcn);
1041 evcn = le64_to_cpu(attr->nres.evcn);
1042 }
1043
1044 if (end < svcn)
1045 end = svcn;
1046
1047 err = attr_load_runs(attr, ni, run, &end);
1048 if (err)
1049 goto out;
1050
1051 evcn1 = evcn + 1;
1052 attr->nres.svcn = cpu_to_le64(next_svcn);
1053 err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1054 if (err)
1055 goto out;
1056
1057 le->vcn = cpu_to_le64(next_svcn);
1058 ni->attr_list.dirty = true;
1059 mi->dirty = true;
1060
1061 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1062 }
1063ins_ext:
1064 if (evcn1 > next_svcn) {
1065 err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1066 next_svcn, evcn1 - next_svcn,
1067 attr_b->flags, &attr, &mi);
1068 if (err)
1069 goto out;
1070 }
1071ok:
1072 run_truncate_around(run, vcn);
1073out:
1074 up_write(&ni->file.run_lock);
1075 ni_unlock(ni);
1076
1077 return err;
1078}
1079
1080int attr_data_read_resident(struct ntfs_inode *ni, struct page *page)
1081{
1082 u64 vbo;
1083 struct ATTRIB *attr;
1084 u32 data_size;
1085
1086 attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL);
1087 if (!attr)
1088 return -EINVAL;
1089
1090 if (attr->non_res)
1091 return E_NTFS_NONRESIDENT;
1092
1093 vbo = page->index << PAGE_SHIFT;
1094 data_size = le32_to_cpu(attr->res.data_size);
1095 if (vbo < data_size) {
1096 const char *data = resident_data(attr);
1097 char *kaddr = kmap_atomic(page);
1098 u32 use = data_size - vbo;
1099
1100 if (use > PAGE_SIZE)
1101 use = PAGE_SIZE;
1102
1103 memcpy(kaddr, data + vbo, use);
1104 memset(kaddr + use, 0, PAGE_SIZE - use);
1105 kunmap_atomic(kaddr);
1106 flush_dcache_page(page);
1107 SetPageUptodate(page);
1108 } else if (!PageUptodate(page)) {
1109 zero_user_segment(page, 0, PAGE_SIZE);
1110 SetPageUptodate(page);
1111 }
1112
1113 return 0;
1114}
1115
1116int attr_data_write_resident(struct ntfs_inode *ni, struct page *page)
1117{
1118 u64 vbo;
1119 struct mft_inode *mi;
1120 struct ATTRIB *attr;
1121 u32 data_size;
1122
1123 attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi);
1124 if (!attr)
1125 return -EINVAL;
1126
1127 if (attr->non_res) {
1128 /*return special error code to check this case*/
1129 return E_NTFS_NONRESIDENT;
1130 }
1131
1132 vbo = page->index << PAGE_SHIFT;
1133 data_size = le32_to_cpu(attr->res.data_size);
1134 if (vbo < data_size) {
1135 char *data = resident_data(attr);
1136 char *kaddr = kmap_atomic(page);
1137 u32 use = data_size - vbo;
1138
1139 if (use > PAGE_SIZE)
1140 use = PAGE_SIZE;
1141 memcpy(data + vbo, kaddr, use);
1142 kunmap_atomic(kaddr);
1143 mi->dirty = true;
1144 }
1145 ni->i_valid = data_size;
1146
1147 return 0;
1148}
1149
1150/*
1151 * attr_load_runs_vcn
1152 *
1153 * load runs with vcn
1154 */
1155int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type,
1156 const __le16 *name, u8 name_len, struct runs_tree *run,
1157 CLST vcn)
1158{
1159 struct ATTRIB *attr;
1160 int err;
1161 CLST svcn, evcn;
1162 u16 ro;
1163
1164 attr = ni_find_attr(ni, NULL, NULL, type, name, name_len, &vcn, NULL);
1165 if (!attr)
1166 return -ENOENT;
1167
1168 svcn = le64_to_cpu(attr->nres.svcn);
1169 evcn = le64_to_cpu(attr->nres.evcn);
1170
1171 if (evcn < vcn || vcn < svcn)
1172 return -EINVAL;
1173
1174 ro = le16_to_cpu(attr->nres.run_off);
1175 err = run_unpack_ex(run, ni->mi.sbi, ni->mi.rno, svcn, evcn, svcn,
1176 Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);
1177 if (err < 0)
1178 return err;
1179 return 0;
1180}
1181
1182/*
 1183 * load runs for the given range [from, to)
1184 */
1185int attr_load_runs_range(struct ntfs_inode *ni, enum ATTR_TYPE type,
1186 const __le16 *name, u8 name_len, struct runs_tree *run,
1187 u64 from, u64 to)
1188{
1189 struct ntfs_sb_info *sbi = ni->mi.sbi;
1190 u8 cluster_bits = sbi->cluster_bits;
1191 CLST vcn = from >> cluster_bits;
1192 CLST vcn_last = (to - 1) >> cluster_bits;
1193 CLST lcn, clen;
1194 int err;
1195
1196 for (vcn = from >> cluster_bits; vcn <= vcn_last; vcn += clen) {
1197 if (!run_lookup_entry(run, vcn, &lcn, &clen, NULL)) {
1198 err = attr_load_runs_vcn(ni, type, name, name_len, run,
1199 vcn);
1200 if (err)
1201 return err;
 1202 clen = 0; /* next run_lookup_entry(vcn) must succeed */
1203 }
1204 }
1205
1206 return 0;
1207}
1208
1209#ifdef CONFIG_NTFS3_LZX_XPRESS
1210/*
1211 * attr_wof_frame_info
1212 *
1213 * read header of xpress/lzx file to get info about frame
1214 */
1215int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
1216 struct runs_tree *run, u64 frame, u64 frames,
1217 u8 frame_bits, u32 *ondisk_size, u64 *vbo_data)
1218{
1219 struct ntfs_sb_info *sbi = ni->mi.sbi;
1220 u64 vbo[2], off[2], wof_size;
1221 u32 voff;
1222 u8 bytes_per_off;
1223 char *addr;
1224 struct page *page;
1225 int i, err;
1226 __le32 *off32;
1227 __le64 *off64;
1228
1229 if (ni->vfs_inode.i_size < 0x100000000ull) {
1230 /* file starts with array of 32 bit offsets */
1231 bytes_per_off = sizeof(__le32);
1232 vbo[1] = frame << 2;
1233 *vbo_data = frames << 2;
1234 } else {
1235 /* file starts with array of 64 bit offsets */
1236 bytes_per_off = sizeof(__le64);
1237 vbo[1] = frame << 3;
1238 *vbo_data = frames << 3;
1239 }
1240
1241 /*
1242 * read 4/8 bytes at [vbo - 4(8)] == offset where compressed frame starts
1243 * read 4/8 bytes at [vbo] == offset where compressed frame ends
1244 */
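/*
 * Example with 32-bit entries: for frame 2, vbo[1] == 8, so off[0] is read
 * at byte 4 and off[1] at byte 8 of the offset table; the frame payload
 * then occupies [frames * 4 + off[0], frames * 4 + off[1]) within the
 * WOF data stream.
 */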
1245 if (!attr->non_res) {
1246 if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) {
1247 ntfs_inode_err(&ni->vfs_inode, "is corrupted");
1248 return -EINVAL;
1249 }
1250 addr = resident_data(attr);
1251
1252 if (bytes_per_off == sizeof(__le32)) {
1253 off32 = Add2Ptr(addr, vbo[1]);
1254 off[0] = vbo[1] ? le32_to_cpu(off32[-1]) : 0;
1255 off[1] = le32_to_cpu(off32[0]);
1256 } else {
1257 off64 = Add2Ptr(addr, vbo[1]);
1258 off[0] = vbo[1] ? le64_to_cpu(off64[-1]) : 0;
1259 off[1] = le64_to_cpu(off64[0]);
1260 }
1261
1262 *vbo_data += off[0];
1263 *ondisk_size = off[1] - off[0];
1264 return 0;
1265 }
1266
1267 wof_size = le64_to_cpu(attr->nres.data_size);
1268 down_write(&ni->file.run_lock);
1269 page = ni->file.offs_page;
1270 if (!page) {
1271 page = alloc_page(GFP_KERNEL);
1272 if (!page) {
1273 err = -ENOMEM;
1274 goto out;
1275 }
1276 page->index = -1;
1277 ni->file.offs_page = page;
1278 }
1279 lock_page(page);
1280 addr = page_address(page);
1281
1282 if (vbo[1]) {
1283 voff = vbo[1] & (PAGE_SIZE - 1);
1284 vbo[0] = vbo[1] - bytes_per_off;
1285 i = 0;
1286 } else {
1287 voff = 0;
1288 vbo[0] = 0;
1289 off[0] = 0;
1290 i = 1;
1291 }
1292
1293 do {
1294 pgoff_t index = vbo[i] >> PAGE_SHIFT;
1295
1296 if (index != page->index) {
1297 u64 from = vbo[i] & ~(u64)(PAGE_SIZE - 1);
1298 u64 to = min(from + PAGE_SIZE, wof_size);
1299
1300 err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME,
1301 ARRAY_SIZE(WOF_NAME), run,
1302 from, to);
1303 if (err)
1304 goto out1;
1305
1306 err = ntfs_bio_pages(sbi, run, &page, 1, from,
1307 to - from, REQ_OP_READ);
1308 if (err) {
1309 page->index = -1;
1310 goto out1;
1311 }
1312 page->index = index;
1313 }
1314
1315 if (i) {
1316 if (bytes_per_off == sizeof(__le32)) {
1317 off32 = Add2Ptr(addr, voff);
1318 off[1] = le32_to_cpu(*off32);
1319 } else {
1320 off64 = Add2Ptr(addr, voff);
1321 off[1] = le64_to_cpu(*off64);
1322 }
1323 } else if (!voff) {
1324 if (bytes_per_off == sizeof(__le32)) {
1325 off32 = Add2Ptr(addr, PAGE_SIZE - sizeof(u32));
1326 off[0] = le32_to_cpu(*off32);
1327 } else {
1328 off64 = Add2Ptr(addr, PAGE_SIZE - sizeof(u64));
1329 off[0] = le64_to_cpu(*off64);
1330 }
1331 } else {
1332 /* two values in one page*/
1333 if (bytes_per_off == sizeof(__le32)) {
1334 off32 = Add2Ptr(addr, voff);
1335 off[0] = le32_to_cpu(off32[-1]);
1336 off[1] = le32_to_cpu(off32[0]);
1337 } else {
1338 off64 = Add2Ptr(addr, voff);
1339 off[0] = le64_to_cpu(off64[-1]);
1340 off[1] = le64_to_cpu(off64[0]);
1341 }
1342 break;
1343 }
1344 } while (++i < 2);
1345
1346 *vbo_data += off[0];
1347 *ondisk_size = off[1] - off[0];
1348
1349out1:
1350 unlock_page(page);
1351out:
1352 up_write(&ni->file.run_lock);
1353 return err;
1354}
1355#endif
1356
1357/*
1358 * attr_is_frame_compressed
1359 *
1360 * This function is used to detect compressed frame
1361 */
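/*
 * In short: a frame spans (1 << attr->nres.c_unit) clusters; it is treated
 * as compressed when fewer than that many clusters are actually mapped and
 * the rest of the frame is sparse. *clst_data receives the number of
 * mapped (non-sparse) clusters.
 */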
1362int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
1363 CLST frame, CLST *clst_data)
1364{
1365 int err;
1366 u32 clst_frame;
1367 CLST clen, lcn, vcn, alen, slen, vcn_next;
1368 size_t idx;
1369 struct runs_tree *run;
1370
1371 *clst_data = 0;
1372
1373 if (!is_attr_compressed(attr))
1374 return 0;
1375
1376 if (!attr->non_res)
1377 return 0;
1378
1379 clst_frame = 1u << attr->nres.c_unit;
1380 vcn = frame * clst_frame;
1381 run = &ni->file.run;
1382
1383 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1384 err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
1385 attr->name_len, run, vcn);
1386 if (err)
1387 return err;
1388
1389 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1390 return -EINVAL;
1391 }
1392
1393 if (lcn == SPARSE_LCN) {
1394 /* sparsed frame */
1395 return 0;
1396 }
1397
1398 if (clen >= clst_frame) {
1399 /*
 1400 * The frame is not compressed because
 1401 * it does not contain any sparse clusters.
1402 */
1403 *clst_data = clst_frame;
1404 return 0;
1405 }
1406
1407 alen = bytes_to_cluster(ni->mi.sbi, le64_to_cpu(attr->nres.alloc_size));
1408 slen = 0;
1409 *clst_data = clen;
1410
1411 /*
1412 * The frame is compressed if *clst_data + slen >= clst_frame
1413 * Check next fragments
1414 */
1415 while ((vcn += clen) < alen) {
1416 vcn_next = vcn;
1417
1418 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1419 vcn_next != vcn) {
1420 err = attr_load_runs_vcn(ni, attr->type,
1421 attr_name(attr),
1422 attr->name_len, run, vcn_next);
1423 if (err)
1424 return err;
1425 vcn = vcn_next;
1426
1427 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
1428 return -EINVAL;
1429 }
1430
1431 if (lcn == SPARSE_LCN) {
1432 slen += clen;
1433 } else {
1434 if (slen) {
1435 /*
 1436 * data clusters + sparse clusters are
 1437 * not enough to cover the frame
1438 */
1439 return -EINVAL;
1440 }
1441 *clst_data += clen;
1442 }
1443
1444 if (*clst_data + slen >= clst_frame) {
1445 if (!slen) {
1446 /*
 1446 * There are no sparse clusters in this frame,
 1447 * so it is not compressed.
1449 */
1450 *clst_data = clst_frame;
1451 } else {
1452 /*frame is compressed*/
1453 }
1454 break;
1455 }
1456 }
1457
1458 return 0;
1459}
1460
1461/*
1462 * attr_allocate_frame
1463 *
1464 * allocate/free clusters for 'frame'
1465 * assumed: down_write(&ni->file.run_lock);
1466 */
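/*
 * In other words: resize compression frame 'frame' so that exactly
 * bytes_to_cluster(sbi, compr_size) clusters are backed by storage; a
 * shrink frees the tail clusters and maps them as SPARSE_LCN, a grow
 * allocates the missing clusters right after the existing ones.
 */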
1467int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
1468 u64 new_valid)
1469{
1470 int err = 0;
1471 struct runs_tree *run = &ni->file.run;
1472 struct ntfs_sb_info *sbi = ni->mi.sbi;
1473 struct ATTRIB *attr = NULL, *attr_b;
1474 struct ATTR_LIST_ENTRY *le, *le_b;
1475 struct mft_inode *mi, *mi_b;
1476 CLST svcn, evcn1, next_svcn, lcn, len;
1477 CLST vcn, end, clst_data;
1478 u64 total_size, valid_size, data_size;
1479
1480 le_b = NULL;
1481 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1482 if (!attr_b)
1483 return -ENOENT;
1484
1485 if (!is_attr_ext(attr_b))
1486 return -EINVAL;
1487
1488 vcn = frame << NTFS_LZNT_CUNIT;
1489 total_size = le64_to_cpu(attr_b->nres.total_size);
1490
1491 svcn = le64_to_cpu(attr_b->nres.svcn);
1492 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1493 data_size = le64_to_cpu(attr_b->nres.data_size);
1494
1495 if (svcn <= vcn && vcn < evcn1) {
1496 attr = attr_b;
1497 le = le_b;
1498 mi = mi_b;
1499 } else if (!le_b) {
1500 err = -EINVAL;
1501 goto out;
1502 } else {
1503 le = le_b;
1504 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1505 &mi);
1506 if (!attr) {
1507 err = -EINVAL;
1508 goto out;
1509 }
1510 svcn = le64_to_cpu(attr->nres.svcn);
1511 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1512 }
1513
1514 err = attr_load_runs(attr, ni, run, NULL);
1515 if (err)
1516 goto out;
1517
1518 err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
1519 if (err)
1520 goto out;
1521
1522 total_size -= (u64)clst_data << sbi->cluster_bits;
1523
1524 len = bytes_to_cluster(sbi, compr_size);
1525
1526 if (len == clst_data)
1527 goto out;
1528
1529 if (len < clst_data) {
1530 err = run_deallocate_ex(sbi, run, vcn + len, clst_data - len,
1531 NULL, true);
1532 if (err)
1533 goto out;
1534
1535 if (!run_add_entry(run, vcn + len, SPARSE_LCN, clst_data - len,
1536 false)) {
1537 err = -ENOMEM;
1538 goto out;
1539 }
1540 end = vcn + clst_data;
1541 /* run contains updated range [vcn + len : end) */
1542 } else {
1543 CLST alen, hint = 0;
1544 /* Get the last lcn to allocate from */
1545 if (vcn + clst_data &&
1546 !run_lookup_entry(run, vcn + clst_data - 1, &hint, NULL,
1547 NULL)) {
1548 hint = -1;
1549 }
1550
1551 err = attr_allocate_clusters(sbi, run, vcn + clst_data,
1552 hint + 1, len - clst_data, NULL, 0,
1553 &alen, 0, &lcn);
1554 if (err)
1555 goto out;
1556
1557 end = vcn + len;
1558 /* run contains updated range [vcn + clst_data : end) */
1559 }
1560
1561 total_size += (u64)len << sbi->cluster_bits;
1562
1563repack:
1564 err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn);
1565 if (err)
1566 goto out;
1567
1568 attr_b->nres.total_size = cpu_to_le64(total_size);
1569 inode_set_bytes(&ni->vfs_inode, total_size);
1570
1571 mi_b->dirty = true;
1572 mark_inode_dirty(&ni->vfs_inode);
1573
1574 /* stored [vcn : next_svcn) from [vcn : end) */
1575 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1576
1577 if (end <= evcn1) {
1578 if (next_svcn == evcn1) {
1579 /* Normal way. update attribute and exit */
1580 goto ok;
1581 }
1582 /* add new segment [next_svcn : evcn1 - next_svcn )*/
1583 if (!ni->attr_list.size) {
1584 err = ni_create_attr_list(ni);
1585 if (err)
1586 goto out;
1587 /* layout of records is changed */
1588 le_b = NULL;
1589 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
1590 0, NULL, &mi_b);
1591 if (!attr_b) {
1592 err = -ENOENT;
1593 goto out;
1594 }
1595
1596 attr = attr_b;
1597 le = le_b;
1598 mi = mi_b;
1599 goto repack;
1600 }
1601 }
1602
1603 svcn = evcn1;
1604
1605 /* Estimate next attribute */
1606 attr = ni_find_attr(ni, attr, &le, ATTR_DATA, NULL, 0, &svcn, &mi);
1607
1608 if (attr) {
1609 CLST alloc = bytes_to_cluster(
1610 sbi, le64_to_cpu(attr_b->nres.alloc_size));
1611 CLST evcn = le64_to_cpu(attr->nres.evcn);
1612
1613 if (end < next_svcn)
1614 end = next_svcn;
1615 while (end > evcn) {
1616 /* remove segment [svcn : evcn)*/
1617 mi_remove_attr(mi, attr);
1618
1619 if (!al_remove_le(ni, le)) {
1620 err = -EINVAL;
1621 goto out;
1622 }
1623
1624 if (evcn + 1 >= alloc) {
1625 /* last attribute segment */
1626 evcn1 = evcn + 1;
1627 goto ins_ext;
1628 }
1629
1630 if (ni_load_mi(ni, le, &mi)) {
1631 attr = NULL;
1632 goto out;
1633 }
1634
1635 attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0,
1636 &le->id);
1637 if (!attr) {
1638 err = -EINVAL;
1639 goto out;
1640 }
1641 svcn = le64_to_cpu(attr->nres.svcn);
1642 evcn = le64_to_cpu(attr->nres.evcn);
1643 }
1644
1645 if (end < svcn)
1646 end = svcn;
1647
1648 err = attr_load_runs(attr, ni, run, &end);
1649 if (err)
1650 goto out;
1651
1652 evcn1 = evcn + 1;
1653 attr->nres.svcn = cpu_to_le64(next_svcn);
1654 err = mi_pack_runs(mi, attr, run, evcn1 - next_svcn);
1655 if (err)
1656 goto out;
1657
1658 le->vcn = cpu_to_le64(next_svcn);
1659 ni->attr_list.dirty = true;
1660 mi->dirty = true;
1661
1662 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1663 }
1664ins_ext:
1665 if (evcn1 > next_svcn) {
1666 err = ni_insert_nonresident(ni, ATTR_DATA, NULL, 0, run,
1667 next_svcn, evcn1 - next_svcn,
1668 attr_b->flags, &attr, &mi);
1669 if (err)
1670 goto out;
1671 }
1672ok:
1673 run_truncate_around(run, vcn);
1674out:
1675 if (new_valid > data_size)
1676 new_valid = data_size;
1677
1678 valid_size = le64_to_cpu(attr_b->nres.valid_size);
1679 if (new_valid != valid_size) {
1680 attr_b->nres.valid_size = cpu_to_le64(valid_size);
1681 mi_b->dirty = true;
1682 }
1683
1684 return err;
1685}
1686
1687/* Collapse range in file */
1688int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes)
1689{
1690 int err = 0;
1691 struct runs_tree *run = &ni->file.run;
1692 struct ntfs_sb_info *sbi = ni->mi.sbi;
1693 struct ATTRIB *attr = NULL, *attr_b;
1694 struct ATTR_LIST_ENTRY *le, *le_b;
1695 struct mft_inode *mi, *mi_b;
1696 CLST svcn, evcn1, len, dealloc, alen;
1697 CLST vcn, end;
1698 u64 valid_size, data_size, alloc_size, total_size;
1699 u32 mask;
1700 __le16 a_flags;
1701
1702 if (!bytes)
1703 return 0;
1704
1705 le_b = NULL;
1706 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1707 if (!attr_b)
1708 return -ENOENT;
1709
1710 if (!attr_b->non_res) {
1711 /* Attribute is resident. Nothing to do? */
1712 return 0;
1713 }
1714
1715 data_size = le64_to_cpu(attr_b->nres.data_size);
1716 alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
1717 a_flags = attr_b->flags;
1718
1719 if (is_attr_ext(attr_b)) {
1720 total_size = le64_to_cpu(attr_b->nres.total_size);
1721 mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
1722 } else {
1723 total_size = alloc_size;
1724 mask = sbi->cluster_mask;
1725 }
1726
1727 if ((vbo & mask) || (bytes & mask)) {
 1728 /* only cluster-aligned ranges may be collapsed */
1729 return -EINVAL;
1730 }
1731
1732 if (vbo > data_size)
1733 return -EINVAL;
1734
1735 down_write(&ni->file.run_lock);
1736
1737 if (vbo + bytes >= data_size) {
1738 u64 new_valid = min(ni->i_valid, vbo);
1739
1740 /* Simple truncate file at 'vbo' */
1741 truncate_setsize(&ni->vfs_inode, vbo);
1742 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, vbo,
1743 &new_valid, true, NULL);
1744
1745 if (!err && new_valid < ni->i_valid)
1746 ni->i_valid = new_valid;
1747
1748 goto out;
1749 }
1750
1751 /*
1752 * Enumerate all attribute segments and collapse
1753 */
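/*
 * For each attribute segment there are three cases:
 *  - the segment lies entirely after the range: just shift its svcn/evcn;
 *  - the segment partially overlaps the range: free the overlapped
 *    clusters, collapse them in the run and repack;
 *  - the segment lies entirely inside the range: deallocate its clusters
 *    while unpacking (RUN_DEALLOCATE) and remove the segment.
 */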
1754 alen = alloc_size >> sbi->cluster_bits;
1755 vcn = vbo >> sbi->cluster_bits;
1756 len = bytes >> sbi->cluster_bits;
1757 end = vcn + len;
1758 dealloc = 0;
1759
1760 svcn = le64_to_cpu(attr_b->nres.svcn);
1761 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
1762
1763 if (svcn <= vcn && vcn < evcn1) {
1764 attr = attr_b;
1765 le = le_b;
1766 mi = mi_b;
1767 } else if (!le_b) {
1768 err = -EINVAL;
1769 goto out;
1770 } else {
1771 le = le_b;
1772 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
1773 &mi);
1774 if (!attr) {
1775 err = -EINVAL;
1776 goto out;
1777 }
1778
1779 svcn = le64_to_cpu(attr->nres.svcn);
1780 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1781 }
1782
1783 for (;;) {
1784 if (svcn >= end) {
1785 /* shift vcn */
1786 attr->nres.svcn = cpu_to_le64(svcn - len);
1787 attr->nres.evcn = cpu_to_le64(evcn1 - 1 - len);
1788 if (le) {
1789 le->vcn = attr->nres.svcn;
1790 ni->attr_list.dirty = true;
1791 }
1792 mi->dirty = true;
1793 } else if (svcn < vcn || end < evcn1) {
1794 CLST vcn1, eat, next_svcn;
1795
1796 /* collapse a part of this attribute segment */
1797 err = attr_load_runs(attr, ni, run, &svcn);
1798 if (err)
1799 goto out;
1800 vcn1 = max(vcn, svcn);
1801 eat = min(end, evcn1) - vcn1;
1802
1803 err = run_deallocate_ex(sbi, run, vcn1, eat, &dealloc,
1804 true);
1805 if (err)
1806 goto out;
1807
1808 if (!run_collapse_range(run, vcn1, eat)) {
1809 err = -ENOMEM;
1810 goto out;
1811 }
1812
1813 if (svcn >= vcn) {
1814 /* shift vcn */
1815 attr->nres.svcn = cpu_to_le64(vcn);
1816 if (le) {
1817 le->vcn = attr->nres.svcn;
1818 ni->attr_list.dirty = true;
1819 }
1820 }
1821
1822 err = mi_pack_runs(mi, attr, run, evcn1 - svcn - eat);
1823 if (err)
1824 goto out;
1825
1826 next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
1827 if (next_svcn + eat < evcn1) {
1828 err = ni_insert_nonresident(
1829 ni, ATTR_DATA, NULL, 0, run, next_svcn,
1830 evcn1 - eat - next_svcn, a_flags, &attr,
1831 &mi);
1832 if (err)
1833 goto out;
1834
 1835 /* layout of records may be changed */
1836 attr_b = NULL;
1837 le = al_find_ex(ni, NULL, ATTR_DATA, NULL, 0,
1838 &next_svcn);
1839 if (!le) {
1840 err = -EINVAL;
1841 goto out;
1842 }
1843 }
1844
1845 /* free all allocated memory */
1846 run_truncate(run, 0);
1847 } else {
1848 u16 le_sz;
1849 u16 roff = le16_to_cpu(attr->nres.run_off);
1850
1851 /*run==1 means unpack and deallocate*/
1852 run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn,
1853 evcn1 - 1, svcn, Add2Ptr(attr, roff),
1854 le32_to_cpu(attr->size) - roff);
1855
1856 /* delete this attribute segment */
1857 mi_remove_attr(mi, attr);
1858 if (!le)
1859 break;
1860
1861 le_sz = le16_to_cpu(le->size);
1862 if (!al_remove_le(ni, le)) {
1863 err = -EINVAL;
1864 goto out;
1865 }
1866
1867 if (evcn1 >= alen)
1868 break;
1869
1870 if (!svcn) {
1871 /* Load next record that contains this attribute */
1872 if (ni_load_mi(ni, le, &mi)) {
1873 err = -EINVAL;
1874 goto out;
1875 }
1876
1877 /* Look for required attribute */
1878 attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL,
1879 0, &le->id);
1880 if (!attr) {
1881 err = -EINVAL;
1882 goto out;
1883 }
1884 goto next_attr;
1885 }
1886 le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
1887 }
1888
1889 if (evcn1 >= alen)
1890 break;
1891
1892 attr = ni_enum_attr_ex(ni, attr, &le, &mi);
1893 if (!attr) {
1894 err = -EINVAL;
1895 goto out;
1896 }
1897
1898next_attr:
1899 svcn = le64_to_cpu(attr->nres.svcn);
1900 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
1901 }
1902
1903 if (!attr_b) {
1904 le_b = NULL;
1905 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL,
1906 &mi_b);
1907 if (!attr_b) {
1908 err = -ENOENT;
1909 goto out;
1910 }
1911 }
1912
1913 data_size -= bytes;
1914 valid_size = ni->i_valid;
1915 if (vbo + bytes <= valid_size)
1916 valid_size -= bytes;
1917 else if (vbo < valid_size)
1918 valid_size = vbo;
1919
1920 attr_b->nres.alloc_size = cpu_to_le64(alloc_size - bytes);
1921 attr_b->nres.data_size = cpu_to_le64(data_size);
1922 attr_b->nres.valid_size = cpu_to_le64(min(valid_size, data_size));
1923 total_size -= (u64)dealloc << sbi->cluster_bits;
1924 if (is_attr_ext(attr_b))
1925 attr_b->nres.total_size = cpu_to_le64(total_size);
1926 mi_b->dirty = true;
1927
1928 /*update inode size*/
1929 ni->i_valid = valid_size;
1930 ni->vfs_inode.i_size = data_size;
1931 inode_set_bytes(&ni->vfs_inode, total_size);
1932 ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
1933 mark_inode_dirty(&ni->vfs_inode);
1934
1935out:
1936 up_write(&ni->file.run_lock);
1937 if (err)
1938 make_bad_inode(&ni->vfs_inode);
1939
1940 return err;
1941}
1942
1943/* not for normal files */
1944int attr_punch_hole(struct ntfs_inode *ni, u64 vbo, u64 bytes, u32 *frame_size)
1945{
1946 int err = 0;
1947 struct runs_tree *run = &ni->file.run;
1948 struct ntfs_sb_info *sbi = ni->mi.sbi;
1949 struct ATTRIB *attr = NULL, *attr_b;
1950 struct ATTR_LIST_ENTRY *le, *le_b;
1951 struct mft_inode *mi, *mi_b;
1952 CLST svcn, evcn1, vcn, len, end, alen, dealloc;
1953 u64 total_size, alloc_size;
1954 u32 mask;
1955
1956 if (!bytes)
1957 return 0;
1958
1959 le_b = NULL;
1960 attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL, 0, NULL, &mi_b);
1961 if (!attr_b)
1962 return -ENOENT;
1963
1964 if (!attr_b->non_res) {
 1965 u32 data_size = le32_to_cpu(attr_b->res.data_size); /* use attr_b: 'attr' is still NULL here */
1966 u32 from, to;
1967
1968 if (vbo > data_size)
1969 return 0;
1970
1971 from = vbo;
1972 to = (vbo + bytes) < data_size ? (vbo + bytes) : data_size;
1973 memset(Add2Ptr(resident_data(attr_b), from), 0, to - from);
1974 return 0;
1975 }
1976
1977 if (!is_attr_ext(attr_b))
1978 return -EOPNOTSUPP;
1979
1980 alloc_size = le64_to_cpu(attr_b->nres.alloc_size);
1981 total_size = le64_to_cpu(attr_b->nres.total_size);
1982
1983 if (vbo >= alloc_size) {
1984 // NOTE: it is allowed
1985 return 0;
1986 }
1987
1988 mask = (sbi->cluster_size << attr_b->nres.c_unit) - 1;
1989
1990 bytes += vbo;
1991 if (bytes > alloc_size)
1992 bytes = alloc_size;
1993 bytes -= vbo;
1994
1995 if ((vbo & mask) || (bytes & mask)) {
 1996 /* We have to zero one or more ranges */
1997 if (frame_size == NULL) {
1998 /* Caller insists range is aligned */
1999 return -EINVAL;
2000 }
2001 *frame_size = mask + 1;
2002 return E_NTFS_NOTALIGNED;
2003 }
2004
2005 down_write(&ni->file.run_lock);
2006 /*
2007 * Enumerate all attribute segments and punch hole where necessary
2008 */
2009 alen = alloc_size >> sbi->cluster_bits;
2010 vcn = vbo >> sbi->cluster_bits;
2011 len = bytes >> sbi->cluster_bits;
2012 end = vcn + len;
2013 dealloc = 0;
2014
2015 svcn = le64_to_cpu(attr_b->nres.svcn);
2016 evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1;
2017
2018 if (svcn <= vcn && vcn < evcn1) {
2019 attr = attr_b;
2020 le = le_b;
2021 mi = mi_b;
2022 } else if (!le_b) {
2023 err = -EINVAL;
2024 goto out;
2025 } else {
2026 le = le_b;
2027 attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, &vcn,
2028 &mi);
2029 if (!attr) {
2030 err = -EINVAL;
2031 goto out;
2032 }
2033
2034 svcn = le64_to_cpu(attr->nres.svcn);
2035 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2036 }
2037
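/*
 * For each affected segment: free the clusters backing [vcn1, vcn1 + zero),
 * map that range as SPARSE_LCN (unless it was already sparse) and repack
 * the segment's runs.
 */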
2038 while (svcn < end) {
2039 CLST vcn1, zero, dealloc2;
2040
2041 err = attr_load_runs(attr, ni, run, &svcn);
2042 if (err)
2043 goto out;
2044 vcn1 = max(vcn, svcn);
2045 zero = min(end, evcn1) - vcn1;
2046
2047 dealloc2 = dealloc;
2048 err = run_deallocate_ex(sbi, run, vcn1, zero, &dealloc, true);
2049 if (err)
2050 goto out;
2051
2052 if (dealloc2 == dealloc) {
2053 /* looks like the required range is already sparsed */
2054 } else {
2055 if (!run_add_entry(run, vcn1, SPARSE_LCN, zero,
2056 false)) {
2057 err = -ENOMEM;
2058 goto out;
2059 }
2060
2061 err = mi_pack_runs(mi, attr, run, evcn1 - svcn);
2062 if (err)
2063 goto out;
2064 }
2065 /* free all allocated memory */
2066 run_truncate(run, 0);
2067
2068 if (evcn1 >= alen)
2069 break;
2070
2071 attr = ni_enum_attr_ex(ni, attr, &le, &mi);
2072 if (!attr) {
2073 err = -EINVAL;
2074 goto out;
2075 }
2076
2077 svcn = le64_to_cpu(attr->nres.svcn);
2078 evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
2079 }
2080
2081 total_size -= (u64)dealloc << sbi->cluster_bits;
2082 attr_b->nres.total_size = cpu_to_le64(total_size);
2083 mi_b->dirty = true;
2084
2085 /*update inode size*/
2086 inode_set_bytes(&ni->vfs_inode, total_size);
2087 ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
2088 mark_inode_dirty(&ni->vfs_inode);
2089
2090out:
2091 up_write(&ni->file.run_lock);
2092 if (err)
2093 make_bad_inode(&ni->vfs_inode);
2094
2095 return err;
2096}