1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
8 #include <linux/blkdev.h>
9 #include <linux/buffer_head.h>
11 #include <linux/kernel.h>
/*
 * Well-known NTFS metafile names as counted UTF-16 strings (cpu_str:
 * length, padding, characters). Used when loading system files by name.
 * NOTE(review): this extraction is missing lines (closing braces of the
 * initializers are not visible) — verify against the full source.
 */
18 const struct cpu_str NAME_MFT = {
19 4, 0, { '$', 'M', 'F', 'T' },
21 const struct cpu_str NAME_MIRROR = {
22 8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
24 const struct cpu_str NAME_LOGFILE = {
25 8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
27 const struct cpu_str NAME_VOLUME = {
28 7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
30 const struct cpu_str NAME_ATTRDEF = {
31 8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
33 const struct cpu_str NAME_ROOT = {
36 const struct cpu_str NAME_BITMAP = {
37 7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
39 const struct cpu_str NAME_BOOT = {
40 5, 0, { '$', 'B', 'o', 'o', 't' },
42 const struct cpu_str NAME_BADCLUS = {
43 8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
45 const struct cpu_str NAME_QUOTA = {
46 6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
48 const struct cpu_str NAME_SECURE = {
49 7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
51 const struct cpu_str NAME_UPCASE = {
52 7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
54 const struct cpu_str NAME_EXTEND = {
55 7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
57 const struct cpu_str NAME_OBJID = {
58 6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
60 const struct cpu_str NAME_REPARSE = {
61 8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
63 const struct cpu_str NAME_USNJRNL = {
64 8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
/*
 * Named attribute / index names as raw little-endian UTF-16 arrays
 * (no length prefix); lengths are carried in the array size.
 */
66 const __le16 BAD_NAME[4] = {
67 cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
69 const __le16 I30_NAME[4] = {
70 cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
72 const __le16 SII_NAME[4] = {
73 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
75 const __le16 SDH_NAME[4] = {
76 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
78 const __le16 SDS_NAME[4] = {
79 cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
81 const __le16 SO_NAME[2] = {
82 cpu_to_le16('$'), cpu_to_le16('O'),
84 const __le16 SQ_NAME[2] = {
85 cpu_to_le16('$'), cpu_to_le16('Q'),
87 const __le16 SR_NAME[2] = {
88 cpu_to_le16('$'), cpu_to_le16('R'),
/* "WofCompressedData" — only needed when LZX/Xpress support is built in. */
91 #ifdef CONFIG_NTFS3_LZX_XPRESS
92 const __le16 WOF_NAME[17] = {
93 cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
94 cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
95 cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
96 cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
/*
 * Reserved Windows device names (CON, NUL, AUX, PRN, COMx, LPTx prefixes).
 * static: used only inside this file.
 */
101 static const __le16 CON_NAME[3] = {
102 cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('N'),
105 static const __le16 NUL_NAME[3] = {
106 cpu_to_le16('N'), cpu_to_le16('U'), cpu_to_le16('L'),
109 static const __le16 AUX_NAME[3] = {
110 cpu_to_le16('A'), cpu_to_le16('U'), cpu_to_le16('X'),
113 static const __le16 PRN_NAME[3] = {
114 cpu_to_le16('P'), cpu_to_le16('R'), cpu_to_le16('N'),
117 static const __le16 COM_NAME[3] = {
118 cpu_to_le16('C'), cpu_to_le16('O'), cpu_to_le16('M'),
121 static const __le16 LPT_NAME[3] = {
122 cpu_to_le16('L'), cpu_to_le16('P'), cpu_to_le16('T'),
128 * ntfs_fix_pre_write - Insert fixups into @rhdr before writing to disk.
130 bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
/* Fixup offset and count come from the record header itself (on-disk data). */
134 u16 fo = le16_to_cpu(rhdr->fix_off);
135 u16 fn = le16_to_cpu(rhdr->fix_num);
/*
 * Sanity-check the fixup array: even offset, array fits in the first
 * sector, and (fn - 1) sectors of payload fit in @bytes. fn includes
 * the sample word, hence the post-decrement.
 */
137 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
138 fn * SECTOR_SIZE > bytes) {
142 /* Get fixup pointer. */
143 fixup = Add2Ptr(rhdr, fo);
/* Wrap the update-sequence number before it reaches the 0x7FFF limit. */
145 if (*fixup >= 0x7FFF)
/* Point at the last word of the first sector; fixups replace these words. */
152 ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
/* Advance one sector per iteration (NOTE: loop body not fully visible here). */
157 ptr += SECTOR_SIZE / sizeof(short);
163 * ntfs_fix_post_read - Remove fixups after reading from disk.
165 * Return: < 0 if error, 0 if ok, 1 if need to update fixups.
167 int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
174 fo = le16_to_cpu(rhdr->fix_off);
/*
 * In "simple" mode derive the fixup count from the buffer size instead
 * of trusting the on-disk fix_num field.
 */
175 fn = simple ? ((bytes >> SECTOR_SHIFT) + 1)
176 : le16_to_cpu(rhdr->fix_num);
/* Same validation as ntfs_fix_pre_write: reject a malformed fixup array. */
179 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
180 fn * SECTOR_SIZE > bytes) {
181 return -EINVAL; /* Native chkntfs returns ok! */
184 /* Get fixup pointer. */
185 fixup = Add2Ptr(rhdr, fo);
187 ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
191 /* Test current word. */
192 if (*ptr != sample) {
193 /* Fixup does not match! Is it serious error? */
/* Step to the last word of the next sector. */
199 ptr += SECTOR_SIZE / sizeof(short);
206 * ntfs_extend_init - Load $Extend file.
208 int ntfs_extend_init(struct ntfs_sb_info *sbi)
211 struct super_block *sb = sbi->sb;
212 struct inode *inode, *inode2;
/* $Extend exists only on NTFS 3.x volumes; skip on older versions. */
215 if (sbi->volume.major_ver < 3) {
216 ntfs_notice(sb, "Skip $Extend 'cause NTFS version");
/* Build an MFT reference for the $Extend record and load it by name. */
220 ref.low = cpu_to_le32(MFT_REC_EXTEND);
222 ref.seq = cpu_to_le16(MFT_REC_EXTEND);
223 inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
225 err = PTR_ERR(inode);
226 ntfs_err(sb, "Failed to load $Extend.");
231 /* If ntfs_iget5() reads from disk it never returns bad inode. */
232 if (!S_ISDIR(inode->i_mode)) {
/*
 * The entries below are all optional: a lookup failure leaves the
 * corresponding sbi field unset rather than failing the mount.
 */
237 /* Try to find $ObjId */
238 inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
239 if (inode2 && !IS_ERR(inode2)) {
240 if (is_bad_inode(inode2)) {
243 sbi->objid.ni = ntfs_i(inode2);
244 sbi->objid_no = inode2->i_ino;
248 /* Try to find $Quota */
249 inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
250 if (inode2 && !IS_ERR(inode2)) {
251 sbi->quota_no = inode2->i_ino;
255 /* Try to find $Reparse */
256 inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
257 if (inode2 && !IS_ERR(inode2)) {
258 sbi->reparse.ni = ntfs_i(inode2);
259 sbi->reparse_no = inode2->i_ino;
262 /* Try to find $UsnJrnl */
263 inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
264 if (inode2 && !IS_ERR(inode2)) {
265 sbi->usn_jrnl_no = inode2->i_ino;
/* Load $LogFile (@ni) and replay the journal to bring metadata up to date. */
275 int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
278 struct super_block *sb = sbi->sb;
279 bool initialized = false;
/* Refuse absurdly large logfiles (>= 4 GiB). */
284 if (ni->vfs_inode.i_size >= 0x100000000ull) {
285 ntfs_err(sb, "\x24LogFile is too big");
/* Flag that replay is in progress so other paths can special-case it. */
290 sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;
/* Load $MFT itself (record 0, sequence 1). */
292 ref.low = cpu_to_le32(MFT_REC_MFT);
294 ref.seq = cpu_to_le16(1);
296 inode = ntfs_iget5(sb, &ref, NULL);
302 /* Try to use MFT copy. */
303 u64 t64 = sbi->mft.lbo;
/* Temporarily point at $MFTMirr and retry (t64 presumably restored later). */
305 sbi->mft.lbo = sbi->mft.lbo2;
306 inode = ntfs_iget5(sb, &ref, NULL);
314 ntfs_err(sb, "Failed to load $MFT.");
318 sbi->mft.ni = ntfs_i(inode);
320 /* LogFile should not contains attribute list. */
321 err = ni_load_all_mi(sbi->mft.ni);
323 err = log_replay(ni, &initialized);
/* Replay writes via the block device; flush and drop cached blocks. */
328 sync_blockdev(sb->s_bdev);
329 invalidate_bdev(sb->s_bdev);
331 if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
/* Read-only mount or never-initialized log: nothing more to do. */
336 if (sb_rdonly(sb) || !initialized)
339 /* Fill LogFile by '-1' if it is initialized. */
340 err = ntfs_bio_fill_1(sbi, &ni->file.run);
343 sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;
349 * ntfs_look_for_free_space - Look for a free space in bitmap.
351 int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
352 CLST *new_lcn, CLST *new_len,
353 enum ALLOCATE_OPT opt)
357 struct super_block *sb = sbi->sb;
358 size_t alcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
359 struct wnd_bitmap *wnd = &sbi->used.bitmap;
/* Serialize against all other cluster-bitmap users. */
361 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
/* MFT allocations are carved from the reserved MFT zone. */
362 if (opt & ALLOCATE_MFT) {
363 zlen = wnd_zone_len(wnd);
/* Zone exhausted: try to rebuild it, then re-read its length. */
366 err = ntfs_refresh_zone(sbi);
370 zlen = wnd_zone_len(wnd);
374 ntfs_err(sbi->sb, "no free space to extend mft");
379 lcn = wnd_zone_bit(wnd);
380 alen = min_t(CLST, len, zlen);
/* Shrink the zone by what we took, then mark the clusters used. */
382 wnd_zone_set(wnd, lcn + alen, zlen - alen);
384 err = wnd_set_used(wnd, lcn, alen);
392 * 'Cause cluster 0 is always used this value means that we should use
393 * cached value of 'next_free_lcn' to improve performance.
396 lcn = sbi->used.next_free_lcn;
398 if (lcn >= wnd->nbits)
401 alen = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &alcn);
405 /* Try to use clusters from MftZone. */
406 zlen = wnd_zone_len(wnd);
407 zeroes = wnd_zeroes(wnd);
409 /* Check too big request */
410 if (len > zeroes + zlen || zlen <= NTFS_MIN_MFT_ZONE) {
415 /* How many clusters to cat from zone. */
416 zlcn = wnd_zone_bit(wnd);
418 ztrim = clamp_val(len, zlen2, zlen);
/* Never shrink the zone below the configured minimum. */
419 new_zlen = max_t(size_t, zlen - ztrim, NTFS_MIN_MFT_ZONE);
421 wnd_zone_set(wnd, zlcn, new_zlen);
423 /* Allocate continues clusters. */
424 alen = wnd_find(wnd, len, 0,
425 BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &alcn);
/* Drop any cached metadata buffers that alias the new allocation. */
436 ntfs_unmap_meta(sb, alcn, alen);
438 /* Set hint for next requests. */
439 if (!(opt & ALLOCATE_MFT))
440 sbi->used.next_free_lcn = alcn + alen;
442 up_write(&wnd->rw_lock);
447 * ntfs_check_for_free_space
449 * Check if it is possible to allocate 'clen' clusters and 'mlen' Mft records
451 bool ntfs_check_for_free_space(struct ntfs_sb_info *sbi, CLST clen, CLST mlen)
453 size_t free, zlen, avail;
454 struct wnd_bitmap *wnd;
/* Snapshot cluster-bitmap state under the read lock. */
456 wnd = &sbi->used.bitmap;
457 down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
458 free = wnd_zeroes(wnd);
/* Account for at most NTFS_MIN_MFT_ZONE clusters kept back for the zone. */
459 zlen = min_t(size_t, NTFS_MIN_MFT_ZONE, wnd_zone_len(wnd));
460 up_read(&wnd->rw_lock);
462 if (free < zlen + clen)
/* Clusters left over after the requested allocation + reserved zone. */
465 avail = free - (zlen + clen);
/* Now check the MFT record bitmap. */
467 wnd = &sbi->mft.bitmap;
468 down_read_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
469 free = wnd_zeroes(wnd);
470 zlen = wnd_zone_len(wnd);
471 up_read(&wnd->rw_lock);
473 if (free >= zlen + mlen)
/* Not enough free records: can the MFT be extended into 'avail' clusters? */
476 return avail >= bytes_to_cluster(sbi, mlen << sbi->record_bits);
480 * ntfs_extend_mft - Allocate additional MFT records.
482 * sbi->mft.bitmap is locked for write.
485 * ntfs_look_free_mft ->
488 * ni_insert_nonresident ->
491 * ntfs_look_free_mft ->
494 * To avoid recursive always allocate space for two new MFT records
495 * see attrib.c: "at least two MFT to avoid recursive loop".
497 static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
500 struct ntfs_inode *ni = sbi->mft.ni;
501 size_t new_mft_total;
502 u64 new_mft_bytes, new_bitmap_bytes;
504 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
/* Grow in steps, rounded up to a multiple of 128 records. */
506 new_mft_total = ALIGN(wnd->nbits + NTFS_MFT_INCREASE_STEP, 128);
507 new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
509 /* Step 1: Resize $MFT::DATA. */
510 down_write(&ni->file.run_lock);
511 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
512 new_mft_bytes, NULL, false, &attr);
515 up_write(&ni->file.run_lock);
519 attr->nres.valid_size = attr->nres.data_size;
/* Recompute the record count from what was actually allocated. */
520 new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
523 /* Step 2: Resize $MFT::BITMAP. */
524 new_bitmap_bytes = bitmap_size(new_mft_total);
526 err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
527 new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
529 /* Refresh MFT Zone if necessary. */
530 down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);
532 ntfs_refresh_zone(sbi);
534 up_write(&sbi->used.bitmap.rw_lock);
535 up_write(&ni->file.run_lock);
/* Extend the in-memory bitmap to cover the new records. */
540 err = wnd_extend(wnd, new_mft_total);
/* Format the newly added (unused) records on disk. */
545 ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);
547 err = _ni_write_inode(&ni->vfs_inode, 0);
553 * ntfs_look_free_mft - Look for a free MFT record.
555 int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
556 struct ntfs_inode *ni, struct mft_inode **mi)
559 size_t zbit, zlen, from, to, fr;
562 struct super_block *sb = sbi->sb;
563 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
/* The reserved-record bitmap must be able to hold records [11, 16). */
566 static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
567 MFT_REC_FREE - MFT_REC_RESERVED);
570 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
572 zlen = wnd_zone_len(wnd);
574 /* Always reserve space for MFT. */
/* Fast path: take the first record of the existing MFT zone. */
577 zbit = wnd_zone_bit(wnd);
579 wnd_zone_set(wnd, zbit + 1, zlen - 1);
584 /* No MFT zone. Find the nearest to '0' free MFT. */
585 if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
587 mft_total = wnd->nbits;
/* Nothing free: grow the MFT and retry. */
589 err = ntfs_extend_mft(sbi);
595 if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
601 * Look for free record reserved area [11-16) ==
602 * [MFT_REC_RESERVED, MFT_REC_FREE ) MFT bitmap always
605 if (!sbi->mft.reserved_bitmap) {
606 /* Once per session create internal bitmap for 5 bits. */
607 sbi->mft.reserved_bitmap = 0xFF;
/* Probe each reserved record and clear its bit if it is truly unused. */
610 for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
612 struct ntfs_inode *ni;
613 struct MFT_REC *mrec;
615 ref.low = cpu_to_le32(ir);
616 ref.seq = cpu_to_le16(ir);
618 i = ntfs_iget5(sb, &ref, NULL);
623 "Invalid reserved record %x",
627 if (is_bad_inode(i)) {
/* Only base records with no links and no name attribute are reusable. */
636 if (!is_rec_base(mrec))
639 if (mrec->hard_links)
645 if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
646 NULL, 0, NULL, NULL))
649 __clear_bit_le(ir - MFT_REC_RESERVED,
650 &sbi->mft.reserved_bitmap);
654 /* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED */
655 zbit = find_next_zero_bit_le(&sbi->mft.reserved_bitmap,
656 MFT_REC_FREE, MFT_REC_RESERVED);
657 if (zbit >= MFT_REC_FREE) {
658 sbi->mft.next_reserved = MFT_REC_FREE;
663 sbi->mft.next_reserved = zbit;
/* Pick a zone length; clamp to the bitmap size. */
666 zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
667 if (zbit + zlen > wnd->nbits)
668 zlen = wnd->nbits - zbit;
/* Shrink the candidate zone until it is entirely free. */
670 while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
673 /* [zbit, zbit + zlen) will be used for MFT itself. */
674 from = sbi->mft.used;
679 ntfs_clear_mft_tail(sbi, from, to);
690 wnd_zone_set(wnd, zbit, zlen);
694 /* The request to get record for general purpose. */
695 if (sbi->mft.next_free < MFT_REC_USER)
696 sbi->mft.next_free = MFT_REC_USER;
699 if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
700 } else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
701 sbi->mft.next_free = sbi->mft.bitmap.nbits;
704 sbi->mft.next_free = *rno + 1;
708 err = ntfs_extend_mft(sbi);
/* Attach the record to @ni as a sub-record when requested. */
714 if (ni && !ni_add_subrecord(ni, *rno, mi)) {
719 /* We have found a record that are not reserved for next MFT. */
720 if (*rno >= MFT_REC_FREE)
721 wnd_set_used(wnd, *rno, 1);
722 else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
723 __set_bit_le(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
727 up_write(&wnd->rw_lock);
733 * ntfs_mark_rec_free - Mark record as free.
734 * is_mft - true if we are changing MFT
736 void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno, bool is_mft)
738 struct wnd_bitmap *wnd = &sbi->mft.bitmap;
741 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
/* Ignore records past the end of the bitmap. */
742 if (rno >= wnd->nbits)
745 if (rno >= MFT_REC_FREE) {
/* Freeing an already-free record indicates corruption: mark fs dirty. */
746 if (!wnd_is_used(wnd, rno, 1))
747 ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
749 wnd_set_free(wnd, rno, 1);
750 } else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
/* Reserved records [11,16) are tracked in a private bitmap. */
751 __clear_bit_le(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
/* Update allocation hints so the freed record is found quickly. */
754 if (rno < wnd_zone_bit(wnd))
755 wnd_zone_set(wnd, rno, 1);
756 else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
757 sbi->mft.next_free = rno;
761 up_write(&wnd->rw_lock);
765 * ntfs_clear_mft_tail - Format empty records [from, to).
767 * sbi->mft.bitmap is locked for write.
769 int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
774 struct runs_tree *run;
775 struct ntfs_inode *ni;
780 rs = sbi->record_size;
/* Hold the run lock for reading while mapping record offsets to disk. */
784 down_read(&ni->file.run_lock);
785 vbo = (u64)from * rs;
/* Write one pre-formatted empty record (sbi->new_rec) per slot. */
786 for (; from < to; from++, vbo += rs) {
787 struct ntfs_buffers nb;
789 err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
793 err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
/* Record how far the MFT has been formatted. */
800 sbi->mft.used = from;
801 up_read(&ni->file.run_lock);
806 * ntfs_refresh_zone - Refresh MFT zone.
808 * sbi->used.bitmap is locked for rw.
809 * sbi->mft.bitmap is locked for write.
810 * sbi->mft.ni->file.run_lock for write.
812 int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
816 struct wnd_bitmap *wnd = &sbi->used.bitmap;
817 struct ntfs_inode *ni = sbi->mft.ni;
819 /* Do not change anything unless we have non empty MFT zone. */
820 if (wnd_zone_len(wnd))
/* VCN just past the end of the MFT data, in clusters. */
823 vcn = bytes_to_cluster(sbi,
824 (u64)sbi->mft.bitmap.nbits << sbi->record_bits);
826 if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
829 /* We should always find Last Lcn for MFT. */
830 if (lcn == SPARSE_LCN)
835 /* Try to allocate clusters after last MFT run. */
836 zlen = wnd_find(wnd, sbi->zone_max, lcn_s, 0, &lcn_s);
837 wnd_zone_set(wnd, lcn_s, zlen);
843 * ntfs_update_mftmirr - Update $MFTMirr data.
845 void ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
848 struct super_block *sb = sbi->sb;
850 sector_t block1, block2;
856 blocksize = sb->s_blocksize;
/* Nothing to do unless the mirror has been flagged as stale. */
858 if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
/* Mirror covers the first recs_mirr MFT records. */
861 bytes = sbi->mft.recs_mirr << sbi->record_bits;
862 block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
863 block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;
/* Copy block-by-block from $MFT (block1) to $MFTMirr (block2). */
865 for (; bytes >= blocksize; bytes -= blocksize) {
866 struct buffer_head *bh1, *bh2;
868 bh1 = sb_bread(sb, block1++);
872 bh2 = sb_getblk(sb, block2++);
878 if (buffer_locked(bh2))
879 __wait_on_buffer(bh2);
882 memcpy(bh2->b_data, bh1->b_data, blocksize);
883 set_buffer_uptodate(bh2);
884 mark_buffer_dirty(bh2);
/* Synchronous flush only when the caller asked for it. */
890 err = wait ? sync_dirty_buffer(bh2) : 0;
/* Mirror is up to date; clear the stale flag. */
897 sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
903 * Marks inode as bad and marks fs as 'dirty'
905 void ntfs_bad_inode(struct inode *inode, const char *hint)
907 struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
/* Log the caller-supplied hint, poison the inode, flag the volume dirty. */
909 ntfs_inode_err(inode, "%s", hint);
910 make_bad_inode(inode);
911 ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
917 * Mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
918 * Umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
919 * NTFS error: ntfs_set_state(NTFS_DIRTY_ERROR)
921 int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
925 struct VOLUME_INFO *info;
926 struct mft_inode *mi;
927 struct ntfs_inode *ni;
930 * Do not change state if fs was real_dirty.
931 * Do not change state if fs already dirty(clear).
932 * Do not change any thing if mounted read only.
934 if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
937 /* Check cached value. */
938 if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
939 (sbi->volume.flags & VOLUME_FLAG_DIRTY))
/* Serialize updates to the $Volume record. */
946 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);
948 attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
954 info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
961 case NTFS_DIRTY_ERROR:
962 ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
/* Sticky: once error-dirty, later calls become no-ops (see check above). */
963 sbi->volume.real_dirty = true;
965 case NTFS_DIRTY_DIRTY:
966 info->flags |= VOLUME_FLAG_DIRTY;
968 case NTFS_DIRTY_CLEAR:
969 info->flags &= ~VOLUME_FLAG_DIRTY;
972 /* Cache current volume flags. */
973 sbi->volume.flags = info->flags;
982 mark_inode_dirty(&ni->vfs_inode);
983 /* verify(!ntfs_update_mftmirr()); */
986 * If we used wait=1, sync_inode_metadata waits for the io for the
987 * inode to finish. It hangs when media is removed.
988 * So wait=0 is sent down to sync_inode_metadata
989 * and filemap_fdatawrite is used for the data blocks.
991 err = sync_inode_metadata(&ni->vfs_inode, 0);
993 err = filemap_fdatawrite(ni->vfs_inode.i_mapping);
999 * security_hash - Calculates a hash of security descriptor.
1001 static inline __le32 security_hash(const void *sd, size_t bytes)
1004 const __le32 *ptr = sd;
/* Rotate-right by 29 (== rotate-left by 3) and add each 32-bit word. */
1008 hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
1009 return cpu_to_le32(hash);
/* Read @bytes at absolute byte offset @lbo into @buffer via buffer heads. */
1012 int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
1014 struct block_device *bdev = sb->s_bdev;
1015 u32 blocksize = sb->s_blocksize;
1016 u64 block = lbo >> sb->s_blocksize_bits;
/* off: offset within the first block; op: bytes usable from this block. */
1017 u32 off = lbo & (blocksize - 1);
1018 u32 op = blocksize - off;
/* After the first iteration every block is read from offset 0. */
1020 for (; bytes; block += 1, off = 0, op = blocksize) {
1021 struct buffer_head *bh = __bread(bdev, block, blocksize);
1029 memcpy(buffer, bh->b_data + off, op);
1034 buffer = Add2Ptr(buffer, op);
/*
 * Write @bytes at absolute byte offset @lbo. @buf == NULL fills with 0xFF.
 * @wait forces a synchronous flush of each buffer.
 */
1040 int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
1041 const void *buf, int wait)
1043 u32 blocksize = sb->s_blocksize;
1044 struct block_device *bdev = sb->s_bdev;
1045 sector_t block = lbo >> sb->s_blocksize_bits;
1046 u32 off = lbo & (blocksize - 1);
1047 u32 op = blocksize - off;
1048 struct buffer_head *bh;
/* On a synchronous superblock, honor SB_SYNCHRONOUS even if !wait. */
1050 if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
1053 for (; bytes; block += 1, off = 0, op = blocksize) {
/* Partial-block write: must read-modify-write the existing block. */
1057 if (op < blocksize) {
1058 bh = __bread(bdev, block, blocksize);
1060 ntfs_err(sb, "failed to read block %llx",
/* Full-block write: no need to read the old contents. */
1065 bh = __getblk(bdev, block, blocksize);
1070 if (buffer_locked(bh))
1071 __wait_on_buffer(bh);
1075 memcpy(bh->b_data + off, buf, op);
1076 buf = Add2Ptr(buf, op);
/* NULL buf: fill the range with 0xFF bytes. */
1078 memset(bh->b_data + off, -1, op);
1081 set_buffer_uptodate(bh);
1082 mark_buffer_dirty(bh);
1086 int err = sync_dirty_buffer(bh);
1091 "failed to sync buffer at block %llx, error %d",
/* Write @bytes from @buf at virtual offset @vbo of a run list @run. */
1105 int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1106 u64 vbo, const void *buf, size_t bytes, int sync)
1108 struct super_block *sb = sbi->sb;
1109 u8 cluster_bits = sbi->cluster_bits;
1110 u32 off = vbo & sbi->cluster_mask;
1111 CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
/* Map the starting VCN to a physical extent. */
1115 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
/* Writes into sparse (unallocated) extents are not supported here. */
1118 if (lcn == SPARSE_LCN)
1121 lbo = ((u64)lcn << cluster_bits) + off;
1122 len = ((u64)clen << cluster_bits) - off;
/* Write at most one extent per iteration via ntfs_sb_write(). */
1125 u32 op = min_t(u64, len, bytes);
1126 int err = ntfs_sb_write(sb, lbo, op, buf, sync);
/* Advance to the next extent; it must be VCN-contiguous. */
1135 vcn_next = vcn + clen;
1136 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1140 if (lcn == SPARSE_LCN)
1144 buf = Add2Ptr(buf, op);
1146 lbo = ((u64)lcn << cluster_bits);
1147 len = ((u64)clen << cluster_bits);
/* Read the buffer head that contains virtual byte offset @vbo of @run. */
1153 struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
1154 const struct runs_tree *run, u64 vbo)
1156 struct super_block *sb = sbi->sb;
1157 u8 cluster_bits = sbi->cluster_bits;
/* Unmapped VCN: no backing cluster. */
1161 if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
1162 return ERR_PTR(-ENOENT);
/* Translate to an absolute byte offset, then to a block number. */
1164 lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);
1166 return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
/*
 * Read @bytes at @vbo of @run. If @buf is set, copy the data out; if @nb
 * is set, retain the buffer heads in @nb for a later ntfs_write_bh().
 */
1169 int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1170 u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
1173 struct super_block *sb = sbi->sb;
1174 u32 blocksize = sb->s_blocksize;
1175 u8 cluster_bits = sbi->cluster_bits;
1176 u32 off = vbo & sbi->cluster_mask;
1178 CLST vcn_next, vcn = vbo >> cluster_bits;
1182 struct buffer_head *bh;
1185 /* First reading of $Volume + $MFTMirr + $LogFile goes here. */
/* Before the run list exists, only records up to $Volume can be read. */
1186 if (vbo > MFT_REC_VOL * sbi->record_size) {
1191 /* Use absolute boot's 'MFTCluster' to read record. */
1192 lbo = vbo + sbi->mft.lbo;
1193 len = sbi->record_size;
1194 } else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1198 if (lcn == SPARSE_LCN) {
1203 lbo = ((u64)lcn << cluster_bits) + off;
1204 len = ((u64)clen << cluster_bits) - off;
/* Re-derive the intra-block offset from the physical position. */
1207 off = lbo & (blocksize - 1);
1214 u32 len32 = len >= bytes ? bytes : len;
1215 sector_t block = lbo >> sb->s_blocksize_bits;
1218 u32 op = blocksize - off;
1223 bh = ntfs_bread(sb, block);
1230 memcpy(buf, bh->b_data + off, op);
1231 buf = Add2Ptr(buf, op);
/* nb->bh is a fixed-size array; overflow is an error. */
1236 } else if (nbh >= ARRAY_SIZE(nb->bh)) {
/* Next extent must continue at vcn + clen. */
1253 vcn_next = vcn + clen;
1254 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1260 if (lcn == SPARSE_LCN) {
1265 lbo = ((u64)lcn << cluster_bits);
1266 len = ((u64)clen << cluster_bits);
/* Error path: release any buffer heads already collected in nb. */
1274 put_bh(nb->bh[--nbh]);
1285 * Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if need to update fixups.
1287 int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1288 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
1289 struct ntfs_buffers *nb)
/* Read the record, then validate/remove its update-sequence fixups. */
1291 int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);
1295 return ntfs_fix_post_read(rhdr, nb->bytes, true);
/*
 * Collect buffer heads covering @bytes at @vbo of @run into @nb, for a
 * subsequent ntfs_write_bh(). Fully-covered blocks are grabbed without
 * reading; partially-covered blocks are read first.
 */
1298 int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
1299 u32 bytes, struct ntfs_buffers *nb)
1302 struct super_block *sb = sbi->sb;
1303 u32 blocksize = sb->s_blocksize;
1304 u8 cluster_bits = sbi->cluster_bits;
1305 CLST vcn_next, vcn = vbo >> cluster_bits;
1314 if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
1319 off = vbo & sbi->cluster_mask;
1320 lbo = ((u64)lcn << cluster_bits) + off;
1321 len = ((u64)clen << cluster_bits) - off;
/* Remember the intra-block offset of the record start. */
1323 nb->off = off = lbo & (blocksize - 1);
1326 u32 len32 = min_t(u64, len, bytes);
1327 sector_t block = lbo >> sb->s_blocksize_bits;
1331 struct buffer_head *bh;
1333 if (nbh >= ARRAY_SIZE(nb->bh)) {
1338 op = blocksize - off;
/* Whole block will be overwritten: skip the disk read. */
1342 if (op == blocksize) {
1343 bh = sb_getblk(sb, block);
1348 if (buffer_locked(bh))
1349 __wait_on_buffer(bh);
1350 set_buffer_uptodate(bh);
/* Partial block: must read current contents first. */
1352 bh = ntfs_bread(sb, block);
1371 vcn_next = vcn + clen;
1372 if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
1378 lbo = ((u64)lcn << cluster_bits);
1379 len = ((u64)clen << cluster_bits);
/* Error path: drop already-acquired buffer heads. */
1384 put_bh(nb->bh[--nbh]);
/*
 * Write record @rhdr through the buffer heads in @nb, applying the
 * update-sequence fixups (bumping the sequence number) on the way out.
 */
1393 int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
1394 struct ntfs_buffers *nb, int sync)
1397 struct super_block *sb = sbi->sb;
1398 u32 block_size = sb->s_blocksize;
1399 u32 bytes = nb->bytes;
1401 u16 fo = le16_to_cpu(rhdr->fix_off);
1402 u16 fn = le16_to_cpu(rhdr->fix_num);
/* Same fixup-array validation as in ntfs_fix_pre_write(). */
1407 if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
1408 fn * SECTOR_SIZE > bytes) {
1412 for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
1413 u32 op = block_size - off;
1415 struct buffer_head *bh = nb->bh[idx];
1416 __le16 *ptr, *end_data;
1421 if (buffer_locked(bh))
1422 __wait_on_buffer(bh);
/* Copy this record slice into the buffer. */
1426 bh_data = bh->b_data + off;
1427 end_data = Add2Ptr(bh_data, op);
1428 memcpy(bh_data, rhdr, op);
/* First buffer holds the fixup array: bump the sequence number. */
1433 fixup = Add2Ptr(bh_data, fo);
1435 t16 = le16_to_cpu(sample);
/* Wrap before 0x7FFF, never use 0. */
1436 if (t16 >= 0x7FFF) {
1437 sample = *fixup = cpu_to_le16(1);
1439 sample = cpu_to_le16(t16 + 1);
/* Keep the in-memory record header consistent with what was written. */
1443 *(__le16 *)Add2Ptr(rhdr, fo) = sample;
/* Stamp the sample into the last word of every sector in this buffer. */
1446 ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));
1451 ptr += SECTOR_SIZE / sizeof(short);
1452 } while (ptr < end_data);
1454 set_buffer_uptodate(bh);
1455 mark_buffer_dirty(bh);
1459 int err2 = sync_dirty_buffer(bh);
1466 rhdr = Add2Ptr(rhdr, op);
1473 * ntfs_bio_pages - Read/write pages from/to disk.
1475 int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1476 struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
1480 struct bio *new, *bio = NULL;
1481 struct super_block *sb = sbi->sb;
1482 struct block_device *bdev = sb->s_bdev;
1484 u8 cluster_bits = sbi->cluster_bits;
1485 CLST lcn, clen, vcn, vcn_next;
1486 u32 add, off, page_idx;
1489 struct blk_plug plug;
/* Batch bio submission under a plug. */
1494 blk_start_plug(&plug);
1496 /* Align vbo and bytes to be 512 bytes aligned. */
1497 lbo = (vbo + bytes + 511) & ~511ull;
1498 vbo = vbo & ~511ull;
1501 vcn = vbo >> cluster_bits;
1502 if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
1506 off = vbo & sbi->cluster_mask;
1511 lbo = ((u64)lcn << cluster_bits) + off;
1512 len = ((u64)clen << cluster_bits) - off;
/* One bio per physical extent; chain bios so one wait covers all. */
1514 new = bio_alloc(bdev, nr_pages - page_idx, op, GFP_NOFS);
1516 bio_chain(bio, new);
1520 bio->bi_iter.bi_sector = lbo >> 9;
1523 off = vbo & (PAGE_SIZE - 1);
1524 add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;
/* bio full: break out and allocate a new one for the remainder. */
1526 if (bio_add_page(bio, page, add, off) < add)
/* Finished the current page: move on to the next. */
1534 if (add + off == PAGE_SIZE) {
1536 if (WARN_ON(page_idx >= nr_pages)) {
1540 page = pages[page_idx];
1549 vcn_next = vcn + clen;
1550 if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
/* Wait for the whole chained bio to complete. */
1560 err = submit_bio_wait(bio);
1563 blk_finish_plug(&plug);
1569 * ntfs_bio_fill_1 - Helper for ntfs_loadlog_and_replay().
1571 * Fill on-disk logfile range by (-1)
1572 * this means empty logfile.
1574 int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
1577 struct super_block *sb = sbi->sb;
1578 struct block_device *bdev = sb->s_bdev;
1579 u8 cluster_bits = sbi->cluster_bits;
1580 struct bio *new, *bio = NULL;
1586 struct blk_plug plug;
/* One shared page of 0xFF bytes, written repeatedly over the run. */
1588 fill = alloc_page(GFP_KERNEL);
1592 kaddr = kmap_atomic(fill);
1593 memset(kaddr, -1, PAGE_SIZE);
1594 kunmap_atomic(kaddr);
1595 flush_dcache_page(fill);
1598 if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
1604 * TODO: Try blkdev_issue_write_same.
1606 blk_start_plug(&plug);
1608 lbo = (u64)lcn << cluster_bits;
1609 len = (u64)clen << cluster_bits;
/* One WRITE bio per extent, chained together. */
1611 new = bio_alloc(bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOFS);
1613 bio_chain(bio, new);
1617 bio->bi_iter.bi_sector = lbo >> 9;
1620 u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;
/* bio full: fall through to allocate another bio for the rest. */
1622 if (bio_add_page(bio, fill, add, 0) < add)
1630 } while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));
1633 err = submit_bio_wait(bio);
1636 blk_finish_plug(&plug);
/*
 * Translate virtual byte offset @vbo of @run into an absolute byte
 * offset (*lbo) and the number of contiguous bytes available (*bytes).
 */
1644 int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
1645 u64 vbo, u64 *lbo, u64 *bytes)
1649 u8 cluster_bits = sbi->cluster_bits;
1651 if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
1654 off = vbo & sbi->cluster_mask;
/* Sparse extents have no physical location: report -1. */
1655 *lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
1656 *bytes = ((u64)len << cluster_bits) - off;
/* Allocate a VFS inode and format a new MFT record @rno behind it. */
1661 struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
1664 struct super_block *sb = sbi->sb;
1665 struct inode *inode = new_inode(sb);
1666 struct ntfs_inode *ni;
1669 return ERR_PTR(-ENOMEM);
/* Format the backing MFT record; directories get RECORD_FLAG_DIR. */
1673 err = mi_format_new(&ni->mi, sbi, rno, dir ? RECORD_FLAG_DIR : 0,
/* Guard against a duplicate inode number already hashed. */
1679 if (insert_inode_locked(inode) < 0) {
1693 * O:BAG:BAD:(A;OICI;FA;;;WD)
1694 * Owner S-1-5-32-544 (Administrators)
1695 * Group S-1-5-32-544 (Administrators)
1696 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
1698 const u8 s_default_security[] __aligned(8) = {
1699 0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
1700 0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
1701 0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
1702 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
1703 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
1704 0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
1705 0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
/* Self-relative SECURITY_DESCRIPTOR blob must be exactly 0x50 bytes. */
1708 static_assert(sizeof(s_default_security) == 0x50);
/* Byte length of a SID: fixed header plus SubAuthorityCount sub-authorities. */
1710 static inline u32 sid_length(const struct SID *sid)
1712 return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
1718 * Thanks Mark Harmstone for idea.
/* Validate an on-disk ACL that claims to fit in @len bytes. */
1720 static bool is_acl_valid(const struct ACL *acl, u32 len)
1722 const struct ACE_HEADER *ace;
1724 u16 ace_count, ace_size;
1726 if (acl->AclRevision != ACL_REVISION &&
1727 acl->AclRevision != ACL_REVISION_DS) {
1729 * This value should be ACL_REVISION, unless the ACL contains an
1730 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
1731 * All ACEs in an ACL must be at the same revision level.
/* Declared ACL size must not exceed the buffer. */
1739 if (le16_to_cpu(acl->AclSize) > len)
/* Walk the ACE list that immediately follows the ACL header. */
1745 len -= sizeof(struct ACL);
1746 ace = (struct ACE_HEADER *)&acl[1];
1747 ace_count = le16_to_cpu(acl->AceCount);
1749 for (i = 0; i < ace_count; i++) {
1750 if (len < sizeof(struct ACE_HEADER))
1753 ace_size = le16_to_cpu(ace->AceSize);
1758 ace = Add2Ptr(ace, ace_size);
/* Validate a self-relative security descriptor of at most @len bytes. */
1764 bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
1766 u32 sd_owner, sd_group, sd_sacl, sd_dacl;
1768 if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
1771 if (sd->Revision != 1)
/* Only the self-relative layout (all offsets relative to sd) is accepted. */
1777 if (!(sd->Control & SE_SELF_RELATIVE))
/* Owner SID: header and full SID must lie inside the buffer. */
1780 sd_owner = le32_to_cpu(sd->Owner);
1782 const struct SID *owner = Add2Ptr(sd, sd_owner);
1784 if (sd_owner + offsetof(struct SID, SubAuthority) > len)
1787 if (owner->Revision != 1)
1790 if (sd_owner + sid_length(owner) > len)
/* Group SID: same checks as owner. */
1794 sd_group = le32_to_cpu(sd->Group);
1796 const struct SID *group = Add2Ptr(sd, sd_group);
1798 if (sd_group + offsetof(struct SID, SubAuthority) > len)
1801 if (group->Revision != 1)
1804 if (sd_group + sid_length(group) > len)
/* Optional SACL: validate bounds and contents. */
1808 sd_sacl = le32_to_cpu(sd->Sacl);
1810 const struct ACL *sacl = Add2Ptr(sd, sd_sacl);
1812 if (sd_sacl + sizeof(struct ACL) > len)
1815 if (!is_acl_valid(sacl, len - sd_sacl))
/* Optional DACL: same validation. */
1819 sd_dacl = le32_to_cpu(sd->Dacl);
1821 const struct ACL *dacl = Add2Ptr(sd, sd_dacl);
1823 if (sd_dacl + sizeof(struct ACL) > len)
1826 if (!is_acl_valid(dacl, len - sd_dacl))
1834 * ntfs_security_init - Load and parse $Secure.
1836 int ntfs_security_init(struct ntfs_sb_info *sbi)
1839 struct super_block *sb = sbi->sb;
1840 struct inode *inode;
1841 struct ntfs_inode *ni;
1843 struct ATTRIB *attr;
1844 struct ATTR_LIST_ENTRY *le;
1848 struct NTFS_DE_SII *sii_e;
1849 struct ntfs_fnd *fnd_sii = NULL;
1850 const struct INDEX_ROOT *root_sii;
1851 const struct INDEX_ROOT *root_sdh;
1852 struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
1853 struct ntfs_index *indx_sii = &sbi->security.index_sii;
/* Load the $Secure system file by its fixed MFT record. */
1855 ref.low = cpu_to_le32(MFT_REC_SECURE);
1857 ref.seq = cpu_to_le16(MFT_REC_SECURE);
1859 inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
1860 if (IS_ERR(inode)) {
1861 err = PTR_ERR(inode);
1862 ntfs_err(sb, "Failed to load $Secure.");
/* $SDH index: security descriptors keyed by hash. */
1871 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
1872 ARRAY_SIZE(SDH_NAME), NULL, NULL);
/* Validate the index root before trusting its contents. */
1878 root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT));
1879 if (root_sdh->type != ATTR_ZERO ||
1880 root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH ||
1881 offsetof(struct INDEX_ROOT, ihdr) + root_sdh->ihdr.used > attr->res.data_size) {
1886 err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
/* $SII index: security descriptors keyed by security id. */
1890 attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
1891 ARRAY_SIZE(SII_NAME), NULL, NULL);
1897 root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT));
1898 if (root_sii->type != ATTR_ZERO ||
1899 root_sii->rule != NTFS_COLLATION_TYPE_UINT ||
1900 offsetof(struct INDEX_ROOT, ihdr) + root_sii->ihdr.used > attr->res.data_size) {
1905 err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
1909 fnd_sii = fnd_get();
1915 sds_size = inode->i_size;
1917 /* Find the last valid Id. */
1918 sbi->security.next_id = SECURITY_ID_FIRST;
1919 /* Always write new security at the end of bucket. */
1920 sbi->security.next_off =
1921 ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);
/* Scan $SII to determine the next free security id. */
1929 err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
1933 sii_e = (struct NTFS_DE_SII *)ne;
/* Skip entries too small to carry a full security header. */
1934 if (le16_to_cpu(ne->view.data_size) < SIZEOF_SECURITY_HDR)
1937 next_id = le32_to_cpu(sii_e->sec_id) + 1;
1938 if (next_id >= sbi->security.next_id)
1939 sbi->security.next_id = next_id;
1942 sbi->security.ni = ni;
1952 * ntfs_get_security_by_id - Read security descriptor by id.
/*
 * Looks 'security_id' up in the $SII index, validates the on-disk
 * SECURITY_HDR, then reads the descriptor body out of the $SDS stream
 * into a freshly kmalloc'ed buffer (*sd, *size on success — ownership
 * presumably passes to the caller; confirm against callers).
 *
 * NOTE(review): the function's tail, unlock/cleanup path and several
 * intermediate lines are elided in this view.
 */
1954 int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
1955 struct SECURITY_DESCRIPTOR_RELATIVE **sd,
1960 struct ntfs_inode *ni = sbi->security.ni;
1961 struct ntfs_index *indx = &sbi->security.index_sii;
1963 struct NTFS_DE_SII *sii_e;
1964 struct ntfs_fnd *fnd_sii;
1965 struct SECURITY_HDR d_security;
1966 const struct INDEX_ROOT *root_sii;
/* Serialize against other $Secure users via the per-inode mutex. */
1971 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
1973 fnd_sii = fnd_get();
1979 root_sii = indx_get_root(indx, ni, NULL, NULL);
1985 /* Try to find this SECURITY descriptor in SII indexes. */
1986 err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
1987 NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
/* Size sanity: must cover the header but not be absurdly large. */
1994 t32 = le32_to_cpu(sii_e->sec_hdr.size);
1995 if (t32 < SIZEOF_SECURITY_HDR) {
2000 if (t32 > SIZEOF_SECURITY_HDR + 0x10000) {
2001 /* Looks like too big security. 0x10000 - is arbitrary big number. */
2006 *size = t32 - SIZEOF_SECURITY_HDR;
2008 p = kmalloc(*size, GFP_NOFS);
/* Re-read the header from $SDS and cross-check it against the index. */
2014 err = ntfs_read_run_nb(sbi, &ni->file.run,
2015 le64_to_cpu(sii_e->sec_hdr.off), &d_security,
2016 sizeof(d_security), NULL);
2020 if (memcmp(&d_security, &sii_e->sec_hdr, SIZEOF_SECURITY_HDR)) {
/* Read the descriptor body that follows the on-disk header. */
2025 err = ntfs_read_run_nb(sbi, &ni->file.run,
2026 le64_to_cpu(sii_e->sec_hdr.off) +
2027 SIZEOF_SECURITY_HDR,
2044 * ntfs_insert_security - Insert security descriptor into $Secure::SDS.
2046 * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
2047 * and it contains a mirror copy of each security descriptor. When writing
2048 * to a security descriptor at location X, another copy will be written at
2049 * location (X+256K).
2050 * When writing a security descriptor that will cross the 256K boundary,
2051 * the pointer will be advanced by 256K to skip
2052 * over the mirror portion.
/*
 * Deduplicates via the $SDH hash index: if an identical descriptor is
 * already stored, its existing id is returned in *security_id; otherwise
 * the descriptor is appended to $SDS (main + mirror copy) and new $SII
 * and $SDH entries are inserted.
 *
 * NOTE(review): error checks, goto targets and the unlock/free tail are
 * elided in this view; comments annotate only the visible statements.
 */
2054 int ntfs_insert_security(struct ntfs_sb_info *sbi,
2055 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
2056 u32 size_sd, __le32 *security_id, bool *inserted)
2059 struct ntfs_inode *ni = sbi->security.ni;
2060 struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
2061 struct ntfs_index *indx_sii = &sbi->security.index_sii;
2062 struct NTFS_DE_SDH *e;
2063 struct NTFS_DE_SDH sdh_e;
2064 struct NTFS_DE_SII sii_e;
2065 struct SECURITY_HDR *d_security;
2066 u32 new_sec_size = size_sd + SIZEOF_SECURITY_HDR;
2067 u32 aligned_sec_size = ALIGN(new_sec_size, 16);
2068 struct SECURITY_KEY hash_key;
2069 struct ntfs_fnd *fnd_sdh = NULL;
2070 const struct INDEX_ROOT *root_sdh;
2071 const struct INDEX_ROOT *root_sii;
2072 u64 mirr_off, new_sds_size;
2075 static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
2076 SecurityDescriptorsBlockSize);
/* Hash key for the $SDH lookup; sec_id is a wildcard at this point. */
2078 hash_key.hash = security_hash(sd, size_sd);
2079 hash_key.sec_id = SECURITY_ID_INVALID;
2083 *security_id = SECURITY_ID_INVALID;
2085 /* Allocate a temporal buffer. */
2086 d_security = kzalloc(aligned_sec_size, GFP_NOFS);
2090 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
2092 fnd_sdh = fnd_get();
2098 root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
2104 root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
2111 * Check if such security already exists.
2112 * Use "SDH" and hash -> to get the offset in "SDS".
2114 err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
2115 &d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
/* Hash hit: read the stored descriptor and compare bytes to confirm. */
2121 if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
2122 err = ntfs_read_run_nb(sbi, &ni->file.run,
2123 le64_to_cpu(e->sec_hdr.off),
2124 d_security, new_sec_size, NULL);
2128 if (le32_to_cpu(d_security->size) == new_sec_size &&
2129 d_security->key.hash == hash_key.hash &&
2130 !memcmp(d_security + 1, sd, size_sd)) {
2131 *security_id = d_security->key.sec_id;
2132 /* Such security already exists. */
/* Hash collisions are possible: advance to the next same-hash entry. */
2138 err = indx_find_sort(indx_sdh, ni, root_sdh,
2139 (struct NTFS_DE **)&e, fnd_sdh);
2143 if (!e || e->key.hash != hash_key.hash)
2147 /* Zero unused space. */
2148 next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
2149 left = SecurityDescriptorsBlockSize - next;
2151 /* Zero gap until SecurityDescriptorsBlockSize. */
2152 if (left < new_sec_size) {
2153 /* Zero "left" bytes from sbi->security.next_off. */
/* Descriptor would cross the 256K boundary: skip over the mirror half. */
2154 sbi->security.next_off += SecurityDescriptorsBlockSize + left;
2157 /* Zero tail of previous security. */
2158 //used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
2162 * 0x40438 == ni->vfs_inode.i_size
2163 * 0x00440 == sbi->security.next_off
2164 * need to zero [0x438-0x440)
2165 * if (next > used) {
2166 * u32 tozero = next - used;
2167 * zero "tozero" bytes from sbi->security.next_off - tozero
2170 /* Format new security descriptor. */
2171 d_security->key.hash = hash_key.hash;
2172 d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
2173 d_security->off = cpu_to_le64(sbi->security.next_off);
2174 d_security->size = cpu_to_le32(new_sec_size);
2175 memcpy(d_security + 1, sd, size_sd);
2177 /* Write main SDS bucket. */
2178 err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
2179 d_security, aligned_sec_size, 0);
/* The mirror copy lives exactly one 256K block after the main copy. */
2184 mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
2185 new_sds_size = mirr_off + aligned_sec_size;
2187 if (new_sds_size > ni->vfs_inode.i_size) {
2188 err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
2189 ARRAY_SIZE(SDS_NAME), &ni->file.run,
2190 new_sds_size, &new_sds_size, false, NULL);
2195 /* Write copy SDS bucket. */
2196 err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
2197 aligned_sec_size, 0);
2201 /* Fill SII entry. */
2202 sii_e.de.view.data_off =
2203 cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
2204 sii_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
2205 sii_e.de.view.res = 0;
2206 sii_e.de.size = cpu_to_le16(SIZEOF_SII_DIRENTRY);
2207 sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
2210 sii_e.sec_id = d_security->key.sec_id;
2211 memcpy(&sii_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
2213 err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL, 0);
2217 /* Fill SDH entry. */
2218 sdh_e.de.view.data_off =
2219 cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
2220 sdh_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
2221 sdh_e.de.view.res = 0;
2222 sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
2223 sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
2226 sdh_e.key.hash = d_security->key.hash;
2227 sdh_e.key.sec_id = d_security->key.sec_id;
2228 memcpy(&sdh_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
/* On-disk SDH entries carry a trailing "II" magic marker. */
2229 sdh_e.magic[0] = cpu_to_le16('I');
2230 sdh_e.magic[1] = cpu_to_le16('I');
2233 err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
2238 *security_id = d_security->key.sec_id;
2242 /* Update Id and offset for next descriptor. */
2243 sbi->security.next_id += 1;
2244 sbi->security.next_off += aligned_sec_size;
2248 mark_inode_dirty(&ni->vfs_inode);
2256 * ntfs_reparse_init - Load and parse $Extend/$Reparse.
/*
 * Finds the $R index root on the (already-loaded) reparse inode,
 * validates its type/collation, and initializes sbi->reparse.index_r.
 * NOTE(review): error-return lines are elided in this view.
 */
2258 int ntfs_reparse_init(struct ntfs_sb_info *sbi)
2261 struct ntfs_inode *ni = sbi->reparse.ni;
2262 struct ntfs_index *indx = &sbi->reparse.index_r;
2263 struct ATTRIB *attr;
2264 struct ATTR_LIST_ENTRY *le;
2265 const struct INDEX_ROOT *root_r;
2271 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
2272 ARRAY_SIZE(SR_NAME), NULL, NULL);
2278 root_r = resident_data(attr);
/* $R must be a view index (ATTR_ZERO) collated as a uint sequence. */
2279 if (root_r->type != ATTR_ZERO ||
2280 root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
2285 err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
2294 * ntfs_objid_init - Load and parse $Extend/$ObjId.
/*
 * Finds the $O index root on the (already-loaded) objid inode,
 * validates its type/collation, and initializes sbi->objid.index_o.
 * Mirrors ntfs_reparse_init. NOTE(review): error-return lines are
 * elided in this view.
 */
2296 int ntfs_objid_init(struct ntfs_sb_info *sbi)
2299 struct ntfs_inode *ni = sbi->objid.ni;
2300 struct ntfs_index *indx = &sbi->objid.index_o;
2301 struct ATTRIB *attr;
2302 struct ATTR_LIST_ENTRY *le;
2303 const struct INDEX_ROOT *root;
2309 attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
2310 ARRAY_SIZE(SO_NAME), NULL, NULL);
2316 root = resident_data(attr);
/* $O must be a view index (ATTR_ZERO) collated as a uint sequence. */
2317 if (root->type != ATTR_ZERO ||
2318 root->rule != NTFS_COLLATION_TYPE_UINTS) {
2323 err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
/*
 * ntfs_objid_remove - Delete one GUID entry from the $ObjId ($O) index.
 *
 * Takes the objid inode lock, removes the entry keyed by *guid and
 * marks the inode dirty so the index change reaches disk.
 * NOTE(review): unlock/return lines are elided in this view.
 */
2331 int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
2334 struct ntfs_inode *ni = sbi->objid.ni;
2335 struct ntfs_index *indx = &sbi->objid.index_o;
2340 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
2342 err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
2344 mark_inode_dirty(&ni->vfs_inode);
/*
 * ntfs_insert_reparse - Add a (reparse tag, MFT reference) pair to $Reparse.
 *
 * Builds an on-stack $R directory entry keyed by {rtag, ref}, inserts it
 * under the reparse inode lock and marks the inode dirty.
 * NOTE(review): unlock/return lines are elided in this view.
 */
2350 int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2351 const struct MFT_REF *ref)
2354 struct ntfs_inode *ni = sbi->reparse.ni;
2355 struct ntfs_index *indx = &sbi->reparse.index_r;
2356 struct NTFS_DE_R re;
/* Zero first: padding and the 'zero' tail must be 0 on disk. */
2361 memset(&re, 0, sizeof(re));
2363 re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
2364 re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
2365 re.de.key_size = cpu_to_le16(sizeof(re.key));
2367 re.key.ReparseTag = rtag;
2368 memcpy(&re.key.ref, ref, sizeof(*ref));
2370 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
2372 err = indx_insert_entry(indx, ni, &re.de, NULL, NULL, 0);
2374 mark_inode_dirty(&ni->vfs_inode);
/*
 * ntfs_remove_reparse - Remove a (reparse tag, MFT reference) pair
 * from the $Reparse ($R) index.
 *
 * First tries an exact-key delete; a fallback path (branching elided in
 * this view) searches by MFT reference while ignoring the tag, verifies
 * the found entry refers to the same file, and deletes the real key.
 * NOTE(review): error checks and the unlock tail are elided here.
 */
2380 int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
2381 const struct MFT_REF *ref)
2384 struct ntfs_inode *ni = sbi->reparse.ni;
2385 struct ntfs_index *indx = &sbi->reparse.index_r;
2386 struct ntfs_fnd *fnd = NULL;
2387 struct REPARSE_KEY rkey;
2388 struct NTFS_DE_R *re;
2389 struct INDEX_ROOT *root_r;
2394 rkey.ReparseTag = rtag;
2397 mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
/* Fast path: delete by the exact {tag, ref} key. */
2400 err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2410 root_r = indx_get_root(indx, ni, NULL, NULL);
2416 /* 1 - forces to ignore rkey.ReparseTag when comparing keys. */
2417 err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
2418 (struct NTFS_DE **)&re, fnd);
2422 if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
2423 /* Impossible. Looks like volume corrupt? */
/* Capture the on-disk key (with its real tag) before deleting it. */
2427 memcpy(&rkey, &re->key, sizeof(rkey));
2432 err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
2440 mark_inode_dirty(&ni->vfs_inode);
/*
 * ntfs_unmap_and_discard - Drop cached buffers for a cluster run and
 * issue a discard (TRIM) for it. Helper for freeing clusters below.
 */
2446 static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
2449 ntfs_unmap_meta(sbi->sb, lcn, len);
2450 ntfs_discard(sbi, lcn, len);
/*
 * mark_as_free_ex - Mark clusters [lcn, lcn+len) as free in the volume
 * bitmap, optionally discarding them, and grow the MFT zone when the
 * freed run is adjacent to it.
 *
 * If part of the run is already free the volume is flagged dirty and a
 * (partially elided) per-cluster path frees only the used pieces.
 * NOTE(review): several branch/goto lines are missing in this view.
 */
2453 void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
2455 CLST end, i, zone_len, zlen;
2456 struct wnd_bitmap *wnd = &sbi->used.bitmap;
2458 down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
2459 if (!wnd_is_used(wnd, lcn, len)) {
/* Freeing clusters that are not fully in use: volume inconsistency. */
2460 ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
2464 for (i = lcn; i < end; i++) {
2465 if (wnd_is_used(wnd, i, 1)) {
2476 ntfs_unmap_and_discard(sbi, lcn, len);
2478 wnd_set_free(wnd, lcn, len);
2487 ntfs_unmap_and_discard(sbi, lcn, len);
2488 wnd_set_free(wnd, lcn, len);
2490 /* append to MFT zone, if possible. */
2491 zone_len = wnd_zone_len(wnd);
2492 zlen = min(zone_len + len, sbi->zone_max);
2494 if (zlen == zone_len) {
2495 /* MFT zone already has maximum size. */
2496 } else if (!zone_len) {
2497 /* Create MFT zone only if 'zlen' is large enough. */
2498 if (zlen == sbi->zone_max)
2499 wnd_zone_set(wnd, lcn, zlen);
2501 CLST zone_lcn = wnd_zone_bit(wnd);
2503 if (lcn + len == zone_lcn) {
2504 /* Append into head MFT zone. */
2505 wnd_zone_set(wnd, lcn, zlen);
2506 } else if (zone_lcn + zone_len == lcn) {
2507 /* Append into tail MFT zone. */
2508 wnd_zone_set(wnd, zone_lcn, zlen);
2513 up_write(&wnd->rw_lock);
2517 * run_deallocate - Deallocate clusters.
/*
 * Walks every entry of 'run' and frees its clusters via mark_as_free_ex;
 * sparse (hole) entries carry no clusters and are skipped.
 * NOTE(review): declarations and the return are elided in this view.
 */
2519 int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
2524 while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
2525 if (lcn == SPARSE_LCN)
2528 mark_as_free_ex(sbi, lcn, len, trim);
/*
 * name_has_forbidden_chars - Check a UTF-16LE name for characters that
 * Windows forbids, and for a trailing space or dot.
 *
 * NOTE(review): the per-character forbidden set (the body after the
 * "disallowed by Windows" comment) and the return statements are elided
 * in this view.
 */
2534 static inline bool name_has_forbidden_chars(const struct le_str *fname)
2538 /* check for forbidden chars */
2539 for (i = 0; i < fname->len; ++i) {
2540 ch = le16_to_cpu(fname->name[i]);
2547 /* disallowed by Windows */
2565 /* file names cannot end with space or . */
2566 if (fname->len > 0) {
2567 ch = le16_to_cpu(fname->name[fname->len - 1]);
2568 if (ch == ' ' || ch == '.')
/*
 * is_reserved_name - Check for Windows reserved device names.
 *
 * A name equal to (or whose stem before the first '.' equals) CON, NUL,
 * AUX, PRN, or COMx/LPTx with x in 1..9 is reserved, case-insensitively
 * via the volume's upcase table.
 * NOTE(review): return statements are elided in this view.
 */
2575 static inline bool is_reserved_name(struct ntfs_sb_info *sbi,
2576 const struct le_str *fname)
2579 const __le16 *name = fname->name;
2580 int len = fname->len;
2581 u16 *upcase = sbi->upcase;
2583 /* check for 3 chars reserved names (device names) */
2584 /* name by itself or with any extension is forbidden */
2585 if (len == 3 || (len > 3 && le16_to_cpu(name[3]) == '.'))
2586 if (!ntfs_cmp_names(name, 3, CON_NAME, 3, upcase, false) ||
2587 !ntfs_cmp_names(name, 3, NUL_NAME, 3, upcase, false) ||
2588 !ntfs_cmp_names(name, 3, AUX_NAME, 3, upcase, false) ||
2589 !ntfs_cmp_names(name, 3, PRN_NAME, 3, upcase, false))
2592 /* check for 4 chars reserved names (port name followed by 1..9) */
2593 /* name by itself or with any extension is forbidden */
2594 if (len == 4 || (len > 4 && le16_to_cpu(name[4]) == '.')) {
2595 port_digit = le16_to_cpu(name[3]);
2596 if (port_digit >= '1' && port_digit <= '9')
/* Only the 3-char prefix (COM/LPT) is compared; digit checked above. */
2597 if (!ntfs_cmp_names(name, 3, COM_NAME, 3, upcase, false) ||
2598 !ntfs_cmp_names(name, 3, LPT_NAME, 3, upcase, false))
2606 * valid_windows_name - Check if a file name is valid in Windows.
/*
 * A name is valid when it contains no forbidden characters and is not a
 * reserved device name. Combines the two helpers above.
 */
2608 bool valid_windows_name(struct ntfs_sb_info *sbi, const struct le_str *fname)
2610 return !name_has_forbidden_chars(fname) &&
2611 !is_reserved_name(sbi, fname);