// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 */

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/nls.h>
const struct cpu_str NAME_MFT = {
	4, 0, { '$', 'M', 'F', 'T' },
};
const struct cpu_str NAME_MIRROR = {
	8, 0, { '$', 'M', 'F', 'T', 'M', 'i', 'r', 'r' },
};
const struct cpu_str NAME_LOGFILE = {
	8, 0, { '$', 'L', 'o', 'g', 'F', 'i', 'l', 'e' },
};
const struct cpu_str NAME_VOLUME = {
	7, 0, { '$', 'V', 'o', 'l', 'u', 'm', 'e' },
};
const struct cpu_str NAME_ATTRDEF = {
	8, 0, { '$', 'A', 't', 't', 'r', 'D', 'e', 'f' },
};
const struct cpu_str NAME_ROOT = {
const struct cpu_str NAME_BITMAP = {
	7, 0, { '$', 'B', 'i', 't', 'm', 'a', 'p' },
};
const struct cpu_str NAME_BOOT = {
	5, 0, { '$', 'B', 'o', 'o', 't' },
};
const struct cpu_str NAME_BADCLUS = {
	8, 0, { '$', 'B', 'a', 'd', 'C', 'l', 'u', 's' },
};
const struct cpu_str NAME_QUOTA = {
	6, 0, { '$', 'Q', 'u', 'o', 't', 'a' },
};
const struct cpu_str NAME_SECURE = {
	7, 0, { '$', 'S', 'e', 'c', 'u', 'r', 'e' },
};
const struct cpu_str NAME_UPCASE = {
	7, 0, { '$', 'U', 'p', 'C', 'a', 's', 'e' },
};
const struct cpu_str NAME_EXTEND = {
	7, 0, { '$', 'E', 'x', 't', 'e', 'n', 'd' },
};
const struct cpu_str NAME_OBJID = {
	6, 0, { '$', 'O', 'b', 'j', 'I', 'd' },
};
const struct cpu_str NAME_REPARSE = {
	8, 0, { '$', 'R', 'e', 'p', 'a', 'r', 's', 'e' },
};
const struct cpu_str NAME_USNJRNL = {
	8, 0, { '$', 'U', 's', 'n', 'J', 'r', 'n', 'l' },
};
const __le16 BAD_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('B'), cpu_to_le16('a'), cpu_to_le16('d'),
};
const __le16 I30_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('I'), cpu_to_le16('3'), cpu_to_le16('0'),
};
const __le16 SII_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('I'), cpu_to_le16('I'),
};
const __le16 SDH_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('H'),
};
const __le16 SDS_NAME[4] = {
	cpu_to_le16('$'), cpu_to_le16('S'), cpu_to_le16('D'), cpu_to_le16('S'),
};
const __le16 SO_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('O'),
};
const __le16 SQ_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('Q'),
};
const __le16 SR_NAME[2] = {
	cpu_to_le16('$'), cpu_to_le16('R'),
};
#ifdef CONFIG_NTFS3_LZX_XPRESS
const __le16 WOF_NAME[17] = {
	cpu_to_le16('W'), cpu_to_le16('o'), cpu_to_le16('f'), cpu_to_le16('C'),
	cpu_to_le16('o'), cpu_to_le16('m'), cpu_to_le16('p'), cpu_to_le16('r'),
	cpu_to_le16('e'), cpu_to_le16('s'), cpu_to_le16('s'), cpu_to_le16('e'),
	cpu_to_le16('d'), cpu_to_le16('D'), cpu_to_le16('a'), cpu_to_le16('t'),
	cpu_to_le16('a'),
};
#endif
 * Inserts fixups into 'rhdr' before writing to disk.
bool ntfs_fix_pre_write(struct NTFS_RECORD_HEADER *rhdr, size_t bytes)
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);
	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
	/* Get the fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);
	if (*fixup >= 0x7FFF)
	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
		ptr += SECTOR_SIZE / sizeof(short);
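/*
 * Editor's sketch of the scheme above (geometry assumed): for a 1024-byte
 * MFT record protected in 512-byte sectors, fix_num is 3 - one sample word
 * plus one saved word per sector. The loop saves the last u16 of each
 * 512-byte sector into the fixup array and overwrites it with the sample
 * value, so a torn write can be detected on the next read.
 */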
 * Removes fixups after reading from disk.
 * Return: < 0 if error, 0 if ok, 1 if fixups need to be updated.
int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
	fo = le16_to_cpu(rhdr->fix_off);
	fn = simple ? ((bytes >> SECTOR_SHIFT) + 1)
		    : le16_to_cpu(rhdr->fix_num);
	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
		return -EINVAL; /* Native chkntfs returns ok here. */
	/* Get the fixup pointer. */
	fixup = Add2Ptr(rhdr, fo);
	ptr = Add2Ptr(rhdr, SECTOR_SIZE - sizeof(short));
		/* Test the current word. */
		if (*ptr != sample) {
			/* Fixup does not match! Is it a serious error? */
		ptr += SECTOR_SIZE / sizeof(short);
int ntfs_extend_init(struct ntfs_sb_info *sbi)
	struct super_block *sb = sbi->sb;
	struct inode *inode, *inode2;
	if (sbi->volume.major_ver < 3) {
		ntfs_notice(sb, "Skip $Extend because of NTFS version");
	ref.low = cpu_to_le32(MFT_REC_EXTEND);
	ref.seq = cpu_to_le16(MFT_REC_EXTEND);
	inode = ntfs_iget5(sb, &ref, &NAME_EXTEND);
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Extend.");
	/* If ntfs_iget5 reads from disk, it never returns a bad inode. */
	if (!S_ISDIR(inode->i_mode)) {
	/* Try to find $ObjId. */
	inode2 = dir_search_u(inode, &NAME_OBJID, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		if (is_bad_inode(inode2)) {
			sbi->objid.ni = ntfs_i(inode2);
			sbi->objid_no = inode2->i_ino;
	/* Try to find $Quota. */
	inode2 = dir_search_u(inode, &NAME_QUOTA, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->quota_no = inode2->i_ino;
	/* Try to find $Reparse. */
	inode2 = dir_search_u(inode, &NAME_REPARSE, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->reparse.ni = ntfs_i(inode2);
		sbi->reparse_no = inode2->i_ino;
	/* Try to find $UsnJrnl. */
	inode2 = dir_search_u(inode, &NAME_USNJRNL, NULL);
	if (inode2 && !IS_ERR(inode2)) {
		sbi->usn_jrnl_no = inode2->i_ino;
int ntfs_loadlog_and_replay(struct ntfs_inode *ni, struct ntfs_sb_info *sbi)
	struct super_block *sb = sbi->sb;
	bool initialized = false;
	if (ni->vfs_inode.i_size >= 0x100000000ull) {
		ntfs_err(sb, "\x24LogFile is too big");
	sbi->flags |= NTFS_FLAGS_LOG_REPLAYING;
	ref.low = cpu_to_le32(MFT_REC_MFT);
	ref.seq = cpu_to_le16(1);
	inode = ntfs_iget5(sb, &ref, NULL);
		/* Try to use the MFT copy. */
		u64 t64 = sbi->mft.lbo;
		sbi->mft.lbo = sbi->mft.lbo2;
		inode = ntfs_iget5(sb, &ref, NULL);
		ntfs_err(sb, "Failed to load $MFT.");
	sbi->mft.ni = ntfs_i(inode);
	/* LogFile should not contain an attribute list. */
	err = ni_load_all_mi(sbi->mft.ni);
		err = log_replay(ni, &initialized);
	sync_blockdev(sb->s_bdev);
	invalidate_bdev(sb->s_bdev);
	if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
	if (sb_rdonly(sb) || !initialized)
	/* Fill LogFile with -1 if it is initialized. */
	err = ntfs_bio_fill_1(sbi, &ni->file.run);
	sbi->flags &= ~NTFS_FLAGS_LOG_REPLAYING;
 * Returns the current ATTR_DEF_ENTRY for the given attribute type.
const struct ATTR_DEF_ENTRY *ntfs_query_def(struct ntfs_sb_info *sbi,
	int type_in = le32_to_cpu(type);
	size_t max_idx = sbi->def_entries - 1;
	while (min_idx <= max_idx) {
		size_t i = min_idx + ((max_idx - min_idx) >> 1);
		const struct ATTR_DEF_ENTRY *entry = sbi->def_table + i;
		int diff = le32_to_cpu(entry->type) - type_in;
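/*
 * Editor's sketch of the lookup contract (not driver code): sbi->def_table
 * is sorted by attribute type, so the loop above is a plain binary search.
 * A caller might use it as:
 *
 *	const struct ATTR_DEF_ENTRY *e = ntfs_query_def(sbi, ATTR_DATA);
 *	if (!e)
 *		return -EINVAL; (type is not described in $AttrDef)
 *
 * A NULL return means the attribute type has no $AttrDef entry.
 */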
 * ntfs_look_for_free_space
 * Looks for free space in the bitmap.
int ntfs_look_for_free_space(struct ntfs_sb_info *sbi, CLST lcn, CLST len,
			     CLST *new_lcn, CLST *new_len,
			     enum ALLOCATE_OPT opt)
	struct super_block *sb = sbi->sb;
	size_t a_lcn, zlen, zeroes, zlcn, zlen2, ztrim, new_zlen;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (opt & ALLOCATE_MFT) {
		zlen = wnd_zone_len(wnd);
			err = ntfs_refresh_zone(sbi);
			zlen = wnd_zone_len(wnd);
					 "no free space to extend mft");
		lcn = wnd_zone_bit(wnd);
		alen = zlen > len ? len : zlen;
		wnd_zone_set(wnd, lcn + alen, zlen - alen);
		err = wnd_set_used(wnd, lcn, alen);
	 * Because cluster 0 is always used, this value means that we should
	 * use the cached value of 'next_free_lcn' to improve performance.
	lcn = sbi->used.next_free_lcn;
	if (lcn >= wnd->nbits)
	*new_len = wnd_find(wnd, len, lcn, BITMAP_FIND_MARK_AS_USED, &a_lcn);
	/* Try to use clusters from the MFT zone. */
	zlen = wnd_zone_len(wnd);
	zeroes = wnd_zeroes(wnd);
	/* Check for a too-big request. */
	if (len > zeroes + zlen)
	if (zlen <= NTFS_MIN_MFT_ZONE)
	/* How many clusters to cut from the zone. */
	zlcn = wnd_zone_bit(wnd);
	ztrim = len > zlen ? zlen : (len > zlen2 ? len : zlen2);
	new_zlen = zlen - ztrim;
	if (new_zlen < NTFS_MIN_MFT_ZONE) {
		new_zlen = NTFS_MIN_MFT_ZONE;
	wnd_zone_set(wnd, zlcn, new_zlen);
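	/*
	 * Editor's worked example (values assumed): with a zone of
	 * zlen = 1000 clusters and a request of len = 200, the full source
	 * sets zlen2 = zlen / 2 = 500, so ztrim = 500 and the zone shrinks
	 * to new_zlen = 500 before being clamped to at least
	 * NTFS_MIN_MFT_ZONE. A request bigger than the whole zone
	 * (len > zlen) consumes the zone entirely.
	 */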
	/* Allocate contiguous clusters. */
		wnd_find(wnd, len, 0,
			 BITMAP_FIND_MARK_AS_USED | BITMAP_FIND_FULL, &a_lcn);
	up_write(&wnd->rw_lock);
	ntfs_unmap_meta(sb, *new_lcn, *new_len);
	if (opt & ALLOCATE_MFT)
	/* Set hint for next requests. */
	sbi->used.next_free_lcn = *new_lcn + *new_len;
	up_write(&wnd->rw_lock);
 * Allocates additional MFT records.
 * sbi->mft.bitmap is locked for write.
 *
 * ntfs_look_free_mft ->
 * ni_insert_nonresident ->
 * ntfs_look_free_mft ->
 * To avoid recursion, always allocate space for two new MFT records
 * (see attrib.c: "at least two mft to avoid recursive loop").
static int ntfs_extend_mft(struct ntfs_sb_info *sbi)
	struct ntfs_inode *ni = sbi->mft.ni;
	size_t new_mft_total;
	u64 new_mft_bytes, new_bitmap_bytes;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
	new_mft_total = (wnd->nbits + MFT_INCREASE_CHUNK + 127) & (CLST)~127;
	new_mft_bytes = (u64)new_mft_total << sbi->record_bits;
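	/*
	 * Editor's worked example (values assumed): with wnd->nbits = 1000
	 * and MFT_INCREASE_CHUNK = 1024, new_mft_total = (2024 + 127) & ~127
	 * = 2048 records, i.e. the total is rounded to a multiple of 128.
	 * With 1 KiB records (record_bits = 10) that gives
	 * new_mft_bytes = 2048 << 10 = 2 MiB.
	 */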
	/* Step 1: Resize $MFT::DATA. */
	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
			    new_mft_bytes, NULL, false, &attr);
		up_write(&ni->file.run_lock);
	attr->nres.valid_size = attr->nres.data_size;
	new_mft_total = le64_to_cpu(attr->nres.alloc_size) >> sbi->record_bits;
	/* Step 2: Resize $MFT::BITMAP. */
	new_bitmap_bytes = bitmap_size(new_mft_total);
	err = attr_set_size(ni, ATTR_BITMAP, NULL, 0, &sbi->mft.bitmap.run,
			    new_bitmap_bytes, &new_bitmap_bytes, true, NULL);
	/* Refresh the MFT zone if necessary. */
	down_write_nested(&sbi->used.bitmap.rw_lock, BITMAP_MUTEX_CLUSTERS);
	ntfs_refresh_zone(sbi);
	up_write(&sbi->used.bitmap.rw_lock);
	up_write(&ni->file.run_lock);
	err = wnd_extend(wnd, new_mft_total);
	ntfs_clear_mft_tail(sbi, sbi->mft.used, new_mft_total);
	err = _ni_write_inode(&ni->vfs_inode, 0);
 * Looks for a free MFT record.
int ntfs_look_free_mft(struct ntfs_sb_info *sbi, CLST *rno, bool mft,
		       struct ntfs_inode *ni, struct mft_inode **mi)
	size_t zbit, zlen, from, to, fr;
	struct super_block *sb = sbi->sb;
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
	static_assert(sizeof(sbi->mft.reserved_bitmap) * 8 >=
		      MFT_REC_FREE - MFT_REC_RESERVED);
	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	zlen = wnd_zone_len(wnd);
	/* Always reserve space for MFT. */
			zbit = wnd_zone_bit(wnd);
			wnd_zone_set(wnd, zbit + 1, zlen - 1);
	/* No MFT zone. Find the free MFT record nearest to '0'. */
	if (!wnd_find(wnd, 1, MFT_REC_FREE, 0, &zbit)) {
		mft_total = wnd->nbits;
		err = ntfs_extend_mft(sbi);
		if (!mft || MFT_REC_FREE == sbi->mft.next_reserved)
		/*
		 * Look for a free record in the reserved area [11-16) ==
		 * [MFT_REC_RESERVED, MFT_REC_FREE); the MFT bitmap always
		 * marks it as used.
		 */
		if (!sbi->mft.reserved_bitmap) {
			/* Once per session, create the internal bitmap for these 5 records. */
			sbi->mft.reserved_bitmap = 0xFF;
			for (ir = MFT_REC_RESERVED; ir < MFT_REC_FREE; ir++) {
				struct ntfs_inode *ni;
				struct MFT_REC *mrec;
				ref.low = cpu_to_le32(ir);
				ref.seq = cpu_to_le16(ir);
				i = ntfs_iget5(sb, &ref, NULL);
						"Invalid reserved record %x",
				if (is_bad_inode(i)) {
				if (!is_rec_base(mrec))
				if (mrec->hard_links)
				if (ni_find_attr(ni, NULL, NULL, ATTR_NAME,
						 NULL, 0, NULL, NULL))
				__clear_bit(ir - MFT_REC_RESERVED,
					    &sbi->mft.reserved_bitmap);
		/* Scan 5 bits for zero. Bit 0 == MFT_REC_RESERVED. */
		zbit = find_next_zero_bit(&sbi->mft.reserved_bitmap,
					  MFT_REC_FREE, MFT_REC_RESERVED);
		if (zbit >= MFT_REC_FREE) {
			sbi->mft.next_reserved = MFT_REC_FREE;
		sbi->mft.next_reserved = zbit;
		zlen = zbit == MFT_REC_FREE ? (MFT_REC_USER - MFT_REC_FREE) : 4;
		if (zbit + zlen > wnd->nbits)
			zlen = wnd->nbits - zbit;
		while (zlen > 1 && !wnd_is_free(wnd, zbit, zlen))
		/* [zbit, zbit + zlen) will be used for the MFT itself. */
		from = sbi->mft.used;
			ntfs_clear_mft_tail(sbi, from, to);
		wnd_zone_set(wnd, zbit, zlen);
	/* The request is for a general-purpose record. */
	if (sbi->mft.next_free < MFT_REC_USER)
		sbi->mft.next_free = MFT_REC_USER;
	if (sbi->mft.next_free >= sbi->mft.bitmap.nbits) {
	} else if (!wnd_find(wnd, 1, MFT_REC_USER, 0, &fr)) {
		sbi->mft.next_free = sbi->mft.bitmap.nbits;
		sbi->mft.next_free = *rno + 1;
	err = ntfs_extend_mft(sbi);
	if (ni && !ni_add_subrecord(ni, *rno, mi)) {
	/* We have found a record that is not reserved for the next MFT. */
	if (*rno >= MFT_REC_FREE)
		wnd_set_used(wnd, *rno, 1);
	else if (*rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited)
		__set_bit(*rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
	up_write(&wnd->rw_lock);
 * Marks a record as free.
void ntfs_mark_rec_free(struct ntfs_sb_info *sbi, CLST rno)
	struct wnd_bitmap *wnd = &sbi->mft.bitmap;
	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_MFT);
	if (rno >= wnd->nbits)
	if (rno >= MFT_REC_FREE) {
		if (!wnd_is_used(wnd, rno, 1))
			ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		wnd_set_free(wnd, rno, 1);
	} else if (rno >= MFT_REC_RESERVED && sbi->mft.reserved_bitmap_inited) {
		__clear_bit(rno - MFT_REC_RESERVED, &sbi->mft.reserved_bitmap);
	if (rno < wnd_zone_bit(wnd))
		wnd_zone_set(wnd, rno, 1);
	else if (rno < sbi->mft.next_free && rno >= MFT_REC_USER)
		sbi->mft.next_free = rno;
	up_write(&wnd->rw_lock);
 * ntfs_clear_mft_tail
 * Formats empty records [from, to).
 * sbi->mft.bitmap is locked for write.
int ntfs_clear_mft_tail(struct ntfs_sb_info *sbi, size_t from, size_t to)
	struct runs_tree *run;
	struct ntfs_inode *ni;
	rs = sbi->record_size;
	down_read(&ni->file.run_lock);
	vbo = (u64)from * rs;
	for (; from < to; from++, vbo += rs) {
		struct ntfs_buffers nb;
		err = ntfs_get_bh(sbi, run, vbo, rs, &nb);
		err = ntfs_write_bh(sbi, &sbi->new_rec->rhdr, &nb, 0);
	sbi->mft.used = from;
	up_read(&ni->file.run_lock);
 * sbi->used.bitmap is locked for rw.
 * sbi->mft.bitmap is locked for write.
 * sbi->mft.ni->file.run_lock is locked for write.
int ntfs_refresh_zone(struct ntfs_sb_info *sbi)
	CLST zone_limit, zone_max, lcn, vcn, len;
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	struct ntfs_inode *ni = sbi->mft.ni;
	/* Do not change anything unless we have a non-empty MFT zone. */
	if (wnd_zone_len(wnd))
	/*
	 * Compute the MFT zone in two steps.
	 * It would be nice to allocate 1/8 of the total clusters
	 * for the MFT, but not more than 512 MB.
	 */
	zone_limit = (512 * 1024 * 1024) >> sbi->cluster_bits;
	zone_max = wnd->nbits >> 3;
	if (zone_max > zone_limit)
		zone_max = zone_limit;
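	/*
	 * Editor's worked example (geometry assumed): with 4 KiB clusters
	 * (cluster_bits = 12), zone_limit = 512 MiB >> 12 = 131072 clusters.
	 * An 8M-cluster (32 GiB) volume gives wnd->nbits >> 3 = 1M clusters,
	 * so the zone is capped at the 512 MiB limit; a 512K-cluster (2 GiB)
	 * volume gets 512K >> 3 = 64K clusters (256 MiB) instead.
	 */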
	vcn = bytes_to_cluster(sbi,
			       (u64)sbi->mft.bitmap.nbits << sbi->record_bits);
	if (!run_lookup_entry(&ni->file.run, vcn - 1, &lcn, &len, NULL))
	/* We should always find the last LCN for the MFT. */
	if (lcn == SPARSE_LCN)
	/* Try to allocate clusters after the last MFT run. */
	zlen = wnd_find(wnd, zone_max, lcn_s, 0, &lcn_s);
		ntfs_notice(sbi->sb, "MftZone: unavailable");
	/* Truncate a too-large zone. */
	wnd_zone_set(wnd, lcn_s, zlen);
 * ntfs_update_mftmirr
 * Updates $MFTMirr data.
int ntfs_update_mftmirr(struct ntfs_sb_info *sbi, int wait)
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	sector_t block1, block2;
	if (!(sbi->flags & NTFS_FLAGS_MFTMIRR))
	bytes = sbi->mft.recs_mirr << sbi->record_bits;
	block1 = sbi->mft.lbo >> sb->s_blocksize_bits;
	block2 = sbi->mft.lbo2 >> sb->s_blocksize_bits;
	for (; bytes >= blocksize; bytes -= blocksize) {
		struct buffer_head *bh1, *bh2;
		bh1 = sb_bread(sb, block1++);
		bh2 = sb_getblk(sb, block2++);
		if (buffer_locked(bh2))
			__wait_on_buffer(bh2);
		memcpy(bh2->b_data, bh1->b_data, blocksize);
		set_buffer_uptodate(bh2);
		mark_buffer_dirty(bh2);
			err = sync_dirty_buffer(bh2);
	sbi->flags &= ~NTFS_FLAGS_MFTMIRR;
 * mount: ntfs_set_state(NTFS_DIRTY_DIRTY)
 * umount: ntfs_set_state(NTFS_DIRTY_CLEAR)
 * ntfs error: ntfs_set_state(NTFS_DIRTY_ERROR)
int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty)
	struct VOLUME_INFO *info;
	struct mft_inode *mi;
	struct ntfs_inode *ni;
	/*
	 * Do not change state if the fs was real_dirty.
	 * Do not change state if the fs is already dirty(clear).
	 * Do not change anything if mounted read-only.
	 */
	if (sbi->volume.real_dirty || sb_rdonly(sbi->sb))
	/* Check the cached value. */
	if ((dirty == NTFS_DIRTY_CLEAR ? 0 : VOLUME_FLAG_DIRTY) ==
	    (sbi->volume.flags & VOLUME_FLAG_DIRTY))
	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_DIRTY);
	attr = ni_find_attr(ni, NULL, NULL, ATTR_VOL_INFO, NULL, 0, NULL, &mi);
	info = resident_data_ex(attr, SIZEOF_ATTRIBUTE_VOLUME_INFO);
	case NTFS_DIRTY_ERROR:
		ntfs_notice(sbi->sb, "Mark volume as dirty due to NTFS errors");
		sbi->volume.real_dirty = true;
	case NTFS_DIRTY_DIRTY:
		info->flags |= VOLUME_FLAG_DIRTY;
	case NTFS_DIRTY_CLEAR:
		info->flags &= ~VOLUME_FLAG_DIRTY;
	/* Cache the current volume flags. */
	sbi->volume.flags = info->flags;
	mark_inode_dirty(&ni->vfs_inode);
	/* verify(!ntfs_update_mftmirr()); */
	/*
	 * If we used wait=1, sync_inode_metadata would wait for the inode's
	 * I/O to finish, and that hangs when the media is removed. So wait=0
	 * is sent down to sync_inode_metadata and filemap_fdatawrite is used
	 * for the data blocks.
	 */
	err = sync_inode_metadata(&ni->vfs_inode, 0);
		err = filemap_fdatawrite(ni->vfs_inode.i_mapping);
 * Calculates a hash of the security descriptor.
static inline __le32 security_hash(const void *sd, size_t bytes)
	const __le32 *ptr = sd;
		hash = ((hash >> 0x1D) | (hash << 3)) + le32_to_cpu(*ptr++);
	return cpu_to_le32(hash);
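/*
 * Editor's note: the update above is a rotate-left-by-3 followed by an add
 * over the descriptor taken as little-endian 32-bit words. A tiny worked
 * example with two words {1, 2}, starting from hash = 0:
 *	hash = rol32(0, 3) + 1 = 1;
 *	hash = rol32(1, 3) + 2 = 10;
 * This is the hash later stored in the $Secure SDH index key.
 */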
int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
	struct block_device *bdev = sb->s_bdev;
	u32 blocksize = sb->s_blocksize;
	u64 block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;
	for (; bytes; block += 1, off = 0, op = blocksize) {
		struct buffer_head *bh = __bread(bdev, block, blocksize);
		memcpy(buffer, bh->b_data + off, op);
		buffer = Add2Ptr(buffer, op);
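		/*
		 * Editor's worked example (values assumed): with a 4096-byte
		 * block size, lbo = 0x1234 splits into block = 1 and
		 * off = 0x234, so the first copy moves op = 4096 - 0x234
		 * bytes; every later iteration starts at off = 0 and copies
		 * a whole block (clamped to the remaining 'bytes').
		 */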
int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
		  const void *buf, int wait)
	u32 blocksize = sb->s_blocksize;
	struct block_device *bdev = sb->s_bdev;
	sector_t block = lbo >> sb->s_blocksize_bits;
	u32 off = lbo & (blocksize - 1);
	u32 op = blocksize - off;
	struct buffer_head *bh;
	if (!wait && (sb->s_flags & SB_SYNCHRONOUS))
	for (; bytes; block += 1, off = 0, op = blocksize) {
		if (op < blocksize) {
			bh = __bread(bdev, block, blocksize);
				ntfs_err(sb, "failed to read block %llx",
			bh = __getblk(bdev, block, blocksize);
		if (buffer_locked(bh))
			__wait_on_buffer(bh);
			memcpy(bh->b_data + off, buf, op);
			buf = Add2Ptr(buf, op);
			memset(bh->b_data + off, -1, op);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
			int err = sync_dirty_buffer(bh);
				 "failed to sync buffer at block %llx, error %d",
int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		      u64 vbo, const void *buf, size_t bytes)
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	CLST lcn, clen, vcn = vbo >> cluster_bits, vcn_next;
	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx))
	if (lcn == SPARSE_LCN)
	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;
		u32 op = len < bytes ? len : bytes;
		int err = ntfs_sb_write(sb, lbo, op, buf, 0);
		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		if (lcn == SPARSE_LCN)
		buf = Add2Ptr(buf, op);
		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
struct buffer_head *ntfs_bread_run(struct ntfs_sb_info *sbi,
				   const struct runs_tree *run, u64 vbo)
	struct super_block *sb = sbi->sb;
	u8 cluster_bits = sbi->cluster_bits;
	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, NULL, NULL))
		return ERR_PTR(-ENOENT);
	lbo = ((u64)lcn << cluster_bits) + (vbo & sbi->cluster_mask);
	return ntfs_bread(sb, lbo >> sb->s_blocksize_bits);
int ntfs_read_run_nb(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		     u64 vbo, void *buf, u32 bytes, struct ntfs_buffers *nb)
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	u32 off = vbo & sbi->cluster_mask;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	struct buffer_head *bh;
		/* The first reading of $Volume + $MFTMirr + LogFile goes here. */
		if (vbo > MFT_REC_VOL * sbi->record_size) {
		/* Use boot's absolute 'MFTCluster' to read the record. */
		lbo = vbo + sbi->mft.lbo;
		len = sbi->record_size;
	} else if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
	if (lcn == SPARSE_LCN) {
	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;
	off = lbo & (blocksize - 1);
		u32 len32 = len >= bytes ? bytes : len;
		sector_t block = lbo >> sb->s_blocksize_bits;
			u32 op = blocksize - off;
			bh = ntfs_bread(sb, block);
				memcpy(buf, bh->b_data + off, op);
				buf = Add2Ptr(buf, op);
			} else if (nbh >= ARRAY_SIZE(nb->bh)) {
		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		if (lcn == SPARSE_LCN) {
		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
		put_bh(nb->bh[--nbh]);
/* Return: < 0 if error, 0 if ok, -E_NTFS_FIXUP if fixups need to be updated. */
int ntfs_read_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		 struct NTFS_RECORD_HEADER *rhdr, u32 bytes,
		 struct ntfs_buffers *nb)
	int err = ntfs_read_run_nb(sbi, run, vbo, rhdr, bytes, nb);
	return ntfs_fix_post_read(rhdr, nb->bytes, true);
int ntfs_get_bh(struct ntfs_sb_info *sbi, const struct runs_tree *run, u64 vbo,
		u32 bytes, struct ntfs_buffers *nb)
	struct super_block *sb = sbi->sb;
	u32 blocksize = sb->s_blocksize;
	u8 cluster_bits = sbi->cluster_bits;
	CLST vcn_next, vcn = vbo >> cluster_bits;
	if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
	off = vbo & sbi->cluster_mask;
	lbo = ((u64)lcn << cluster_bits) + off;
	len = ((u64)clen << cluster_bits) - off;
	nb->off = off = lbo & (blocksize - 1);
		u32 len32 = len < bytes ? len : bytes;
		sector_t block = lbo >> sb->s_blocksize_bits;
			struct buffer_head *bh;
			if (nbh >= ARRAY_SIZE(nb->bh)) {
			op = blocksize - off;
			if (op == blocksize) {
				bh = sb_getblk(sb, block);
				if (buffer_locked(bh))
					__wait_on_buffer(bh);
				set_buffer_uptodate(bh);
				bh = ntfs_bread(sb, block);
		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++idx, &vcn, &lcn, &clen) ||
		lbo = ((u64)lcn << cluster_bits);
		len = ((u64)clen << cluster_bits);
		put_bh(nb->bh[--nbh]);
int ntfs_write_bh(struct ntfs_sb_info *sbi, struct NTFS_RECORD_HEADER *rhdr,
		  struct ntfs_buffers *nb, int sync)
	struct super_block *sb = sbi->sb;
	u32 block_size = sb->s_blocksize;
	u32 bytes = nb->bytes;
	u16 fo = le16_to_cpu(rhdr->fix_off);
	u16 fn = le16_to_cpu(rhdr->fix_num);
	if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
	    fn * SECTOR_SIZE > bytes) {
	for (idx = 0; bytes && idx < nb->nbufs; idx += 1, off = 0) {
		u32 op = block_size - off;
		struct buffer_head *bh = nb->bh[idx];
		__le16 *ptr, *end_data;
		if (buffer_locked(bh))
			__wait_on_buffer(bh);
		lock_buffer(nb->bh[idx]);
		bh_data = bh->b_data + off;
		end_data = Add2Ptr(bh_data, op);
		memcpy(bh_data, rhdr, op);
			fixup = Add2Ptr(bh_data, fo);
			t16 = le16_to_cpu(sample);
			if (t16 >= 0x7FFF) {
				sample = *fixup = cpu_to_le16(1);
				sample = cpu_to_le16(t16 + 1);
			*(__le16 *)Add2Ptr(rhdr, fo) = sample;
		ptr = Add2Ptr(bh_data, SECTOR_SIZE - sizeof(short));
			ptr += SECTOR_SIZE / sizeof(short);
		} while (ptr < end_data);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
			int err2 = sync_dirty_buffer(bh);
		rhdr = Add2Ptr(rhdr, op);
static inline struct bio *ntfs_alloc_bio(u32 nr_vecs)
	struct bio *bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(GFP_NOFS | __GFP_HIGH, nr_vecs);
/* Read/write pages from/to disk. */
int ntfs_bio_pages(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		   struct page **pages, u32 nr_pages, u64 vbo, u32 bytes,
	struct bio *new, *bio = NULL;
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	u8 cluster_bits = sbi->cluster_bits;
	CLST lcn, clen, vcn, vcn_next;
	u32 add, off, page_idx;
	struct blk_plug plug;
	blk_start_plug(&plug);
	/* Align vbo and bytes to a 512-byte boundary. */
	lbo = (vbo + bytes + 511) & ~511ull;
	vbo = vbo & ~511ull;
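	/*
	 * Editor's worked example (values assumed): vbo = 0x3FF, bytes = 2
	 * rounds down to vbo = 0x200 and up to an end offset of 0x600, i.e.
	 * the range grows to 0x400 bytes so that every transfer starts and
	 * ends on a 512-byte (sector) boundary.
	 */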
	vcn = vbo >> cluster_bits;
	if (!run_lookup_entry(run, vcn, &lcn, &clen, &run_idx)) {
	off = vbo & sbi->cluster_mask;
		lbo = ((u64)lcn << cluster_bits) + off;
		len = ((u64)clen << cluster_bits) - off;
		new = ntfs_alloc_bio(nr_pages - page_idx);
			bio_chain(bio, new);
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = lbo >> 9;
			off = vbo & (PAGE_SIZE - 1);
			add = off + len > PAGE_SIZE ? (PAGE_SIZE - off) : len;
			if (bio_add_page(bio, page, add, off) < add)
			if (add + off == PAGE_SIZE) {
				if (WARN_ON(page_idx >= nr_pages)) {
				page = pages[page_idx];
		vcn_next = vcn + clen;
		if (!run_get_entry(run, ++run_idx, &vcn, &lcn, &clen) ||
		err = submit_bio_wait(bio);
	blk_finish_plug(&plug);
 * Helper for ntfs_loadlog_and_replay().
 * Fills the on-disk $LogFile range with -1, which marks the log file as empty.
int ntfs_bio_fill_1(struct ntfs_sb_info *sbi, const struct runs_tree *run)
	struct super_block *sb = sbi->sb;
	struct block_device *bdev = sb->s_bdev;
	u8 cluster_bits = sbi->cluster_bits;
	struct bio *new, *bio = NULL;
	struct blk_plug plug;
	fill = alloc_page(GFP_KERNEL);
	kaddr = kmap_atomic(fill);
	memset(kaddr, -1, PAGE_SIZE);
	kunmap_atomic(kaddr);
	flush_dcache_page(fill);
	if (!run_lookup_entry(run, 0, &lcn, &clen, &run_idx)) {
	 * TODO: Try blkdev_issue_write_same.
	blk_start_plug(&plug);
		lbo = (u64)lcn << cluster_bits;
		len = (u64)clen << cluster_bits;
		new = ntfs_alloc_bio(BIO_MAX_VECS);
			bio_chain(bio, new);
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE;
		bio->bi_iter.bi_sector = lbo >> 9;
			u32 add = len > PAGE_SIZE ? PAGE_SIZE : len;
			if (bio_add_page(bio, fill, add, 0) < add)
	} while (run_get_entry(run, ++run_idx, NULL, &lcn, &clen));
		err = submit_bio_wait(bio);
	blk_finish_plug(&plug);
int ntfs_vbo_to_lbo(struct ntfs_sb_info *sbi, const struct runs_tree *run,
		    u64 vbo, u64 *lbo, u64 *bytes)
	u8 cluster_bits = sbi->cluster_bits;
	if (!run_lookup_entry(run, vbo >> cluster_bits, &lcn, &len, NULL))
	off = vbo & sbi->cluster_mask;
	*lbo = lcn == SPARSE_LCN ? -1 : (((u64)lcn << cluster_bits) + off);
	*bytes = ((u64)len << cluster_bits) - off;
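	/*
	 * Editor's worked example (values assumed): with 4 KiB clusters
	 * (cluster_bits = 12), vbo = 0x5123 is vcn = 5, off = 0x123. If the
	 * run maps vcn 5 to lcn = 100, then *lbo = (100 << 12) + 0x123 and
	 * *bytes is how much of the mapping stays contiguous from there.
	 */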
struct ntfs_inode *ntfs_new_inode(struct ntfs_sb_info *sbi, CLST rno, bool dir)
	struct super_block *sb = sbi->sb;
	struct inode *inode = new_inode(sb);
	struct ntfs_inode *ni;
		return ERR_PTR(-ENOMEM);
	err = mi_format_new(&ni->mi, sbi, rno, dir ? RECORD_FLAG_DIR : 0,
	if (insert_inode_locked(inode) < 0) {
 * O:BAG:BAD:(A;OICI;FA;;;WD)
 * Owner: S-1-5-32-544 (Administrators)
 * Group: S-1-5-32-544 (Administrators)
 * ACE: allow S-1-1-0 (Everyone) with FILE_ALL_ACCESS
const u8 s_default_security[] __aligned(8) = {
	0x01, 0x00, 0x04, 0x80, 0x30, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x02, 0x00, 0x1C, 0x00,
	0x01, 0x00, 0x00, 0x00, 0x00, 0x03, 0x14, 0x00, 0xFF, 0x01, 0x1F, 0x00,
	0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
	0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x20, 0x00, 0x00, 0x00,
	0x20, 0x02, 0x00, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
	0x20, 0x00, 0x00, 0x00, 0x20, 0x02, 0x00, 0x00,
};

static_assert(sizeof(s_default_security) == 0x50);
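/*
 * Editor's annotation of the blob above, following the standard
 * SECURITY_DESCRIPTOR_RELATIVE layout: Revision = 1, Control = 0x8004
 * (SE_SELF_RELATIVE | SE_DACL_PRESENT), Owner at offset 0x30, Group at
 * 0x40, no SACL, DACL at 0x14. The DACL (revision 2, size 0x1C, one ACE)
 * holds a single ACCESS_ALLOWED ACE with OBJECT_INHERIT | CONTAINER_INHERIT,
 * mask 0x001F01FF (FILE_ALL_ACCESS) for SID S-1-1-0; the SIDs at 0x30 and
 * 0x40 both decode to S-1-5-32-544 (0x220 == 544).
 */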
static inline u32 sid_length(const struct SID *sid)
	return struct_size(sid, SubAuthority, sid->SubAuthorityCount);
 * Thanks to Mark Harmstone for the idea.
static bool is_acl_valid(const struct ACL *acl, u32 len)
	const struct ACE_HEADER *ace;
	u16 ace_count, ace_size;
	if (acl->AclRevision != ACL_REVISION &&
	    acl->AclRevision != ACL_REVISION_DS) {
		/*
		 * This value should be ACL_REVISION, unless the ACL contains an
		 * object-specific ACE, in which case this value must be ACL_REVISION_DS.
		 * All ACEs in an ACL must be at the same revision level.
		 */
	if (le16_to_cpu(acl->AclSize) > len)
	len -= sizeof(struct ACL);
	ace = (struct ACE_HEADER *)&acl[1];
	ace_count = le16_to_cpu(acl->AceCount);
	for (i = 0; i < ace_count; i++) {
		if (len < sizeof(struct ACE_HEADER))
		ace_size = le16_to_cpu(ace->AceSize);
		ace = Add2Ptr(ace, ace_size);
bool is_sd_valid(const struct SECURITY_DESCRIPTOR_RELATIVE *sd, u32 len)
	u32 sd_owner, sd_group, sd_sacl, sd_dacl;
	if (len < sizeof(struct SECURITY_DESCRIPTOR_RELATIVE))
	if (sd->Revision != 1)
	if (!(sd->Control & SE_SELF_RELATIVE))
	sd_owner = le32_to_cpu(sd->Owner);
		const struct SID *owner = Add2Ptr(sd, sd_owner);
		if (sd_owner + offsetof(struct SID, SubAuthority) > len)
		if (owner->Revision != 1)
		if (sd_owner + sid_length(owner) > len)
	sd_group = le32_to_cpu(sd->Group);
		const struct SID *group = Add2Ptr(sd, sd_group);
		if (sd_group + offsetof(struct SID, SubAuthority) > len)
		if (group->Revision != 1)
		if (sd_group + sid_length(group) > len)
	sd_sacl = le32_to_cpu(sd->Sacl);
		const struct ACL *sacl = Add2Ptr(sd, sd_sacl);
		if (sd_sacl + sizeof(struct ACL) > len)
		if (!is_acl_valid(sacl, len - sd_sacl))
	sd_dacl = le32_to_cpu(sd->Dacl);
		const struct ACL *dacl = Add2Ptr(sd, sd_dacl);
		if (sd_dacl + sizeof(struct ACL) > len)
		if (!is_acl_valid(dacl, len - sd_dacl))
 * ntfs_security_init
 * Loads and parses $Secure.
int ntfs_security_init(struct ntfs_sb_info *sbi)
	struct super_block *sb = sbi->sb;
	struct inode *inode;
	struct ntfs_inode *ni;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii = NULL;
	const struct INDEX_ROOT *root_sii;
	const struct INDEX_ROOT *root_sdh;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;
	ref.low = cpu_to_le32(MFT_REC_SECURE);
	ref.seq = cpu_to_le16(MFT_REC_SECURE);
	inode = ntfs_iget5(sb, &ref, &NAME_SECURE);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ntfs_err(sb, "Failed to load $Secure.");
	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SDH_NAME,
			    ARRAY_SIZE(SDH_NAME), NULL, NULL);
	root_sdh = resident_data(attr);
	if (root_sdh->type != ATTR_ZERO ||
	    root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH) {
	err = indx_init(indx_sdh, sbi, attr, INDEX_MUTEX_SDH);
	attr = ni_find_attr(ni, attr, &le, ATTR_ROOT, SII_NAME,
			    ARRAY_SIZE(SII_NAME), NULL, NULL);
	root_sii = resident_data(attr);
	if (root_sii->type != ATTR_ZERO ||
	    root_sii->rule != NTFS_COLLATION_TYPE_UINT) {
	err = indx_init(indx_sii, sbi, attr, INDEX_MUTEX_SII);
	fnd_sii = fnd_get();
	sds_size = inode->i_size;
	/* Find the last valid Id. */
	sbi->security.next_id = SECURITY_ID_FIRST;
	/* Always write a new security descriptor at the end of the bucket. */
	sbi->security.next_off =
		ALIGN(sds_size - SecurityDescriptorsBlockSize, 16);
		err = indx_find_raw(indx_sii, ni, root_sii, &ne, &off, fnd_sii);
		sii_e = (struct NTFS_DE_SII *)ne;
		if (le16_to_cpu(ne->view.data_size) < SIZEOF_SECURITY_HDR)
		next_id = le32_to_cpu(sii_e->sec_id) + 1;
		if (next_id >= sbi->security.next_id)
			sbi->security.next_id = next_id;
	sbi->security.ni = ni;
 * ntfs_get_security_by_id
 * Reads a security descriptor by its id.
int ntfs_get_security_by_id(struct ntfs_sb_info *sbi, __le32 security_id,
			    struct SECURITY_DESCRIPTOR_RELATIVE **sd,
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx = &sbi->security.index_sii;
	struct NTFS_DE_SII *sii_e;
	struct ntfs_fnd *fnd_sii;
	struct SECURITY_HDR d_security;
	const struct INDEX_ROOT *root_sii;
	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
	fnd_sii = fnd_get();
	root_sii = indx_get_root(indx, ni, NULL, NULL);
	/* Try to find this SECURITY descriptor in the SII indexes. */
	err = indx_find(indx, ni, root_sii, &security_id, sizeof(security_id),
			NULL, &diff, (struct NTFS_DE **)&sii_e, fnd_sii);
	t32 = le32_to_cpu(sii_e->sec_hdr.size);
	if (t32 < SIZEOF_SECURITY_HDR) {
	if (t32 > SIZEOF_SECURITY_HDR + 0x10000) {
		/*
		 * Looks like the security descriptor is too big;
		 * 0x10000 is an arbitrary upper bound.
		 */
	*size = t32 - SIZEOF_SECURITY_HDR;
	p = kmalloc(*size, GFP_NOFS);
	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off), &d_security,
			       sizeof(d_security), NULL);
	if (memcmp(&d_security, &sii_e->sec_hdr, SIZEOF_SECURITY_HDR)) {
	err = ntfs_read_run_nb(sbi, &ni->file.run,
			       le64_to_cpu(sii_e->sec_hdr.off) +
				       SIZEOF_SECURITY_HDR,
 * ntfs_insert_security
 * Inserts a security descriptor into $Secure::SDS.
 *
 * SECURITY Descriptor Stream data is organized into chunks of 256K bytes
 * and it contains a mirror copy of each security descriptor. When writing
 * to a security descriptor at location X, another copy will be written at
 * location (X+256K).
 * When writing a security descriptor that will cross the 256K boundary,
 * the pointer will be advanced by 256K to skip over the mirror portion.
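/*
 * Editor's worked example of the scheme above (offsets assumed): the stream
 * alternates 256K "main" and 256K "mirror" blocks, so a descriptor written
 * at X = 0x10000 is mirrored at 0x50000. If only 0x100 bytes are left in the
 * current main block (next_off = 0x3FF00) and the descriptor does not fit,
 * next_off is advanced by 256K + 0x100 to 0x80000, the start of the next
 * main block.
 */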
int ntfs_insert_security(struct ntfs_sb_info *sbi,
			 const struct SECURITY_DESCRIPTOR_RELATIVE *sd,
			 u32 size_sd, __le32 *security_id, bool *inserted)
	struct ntfs_inode *ni = sbi->security.ni;
	struct ntfs_index *indx_sdh = &sbi->security.index_sdh;
	struct ntfs_index *indx_sii = &sbi->security.index_sii;
	struct NTFS_DE_SDH *e;
	struct NTFS_DE_SDH sdh_e;
	struct NTFS_DE_SII sii_e;
	struct SECURITY_HDR *d_security;
	u32 new_sec_size = size_sd + SIZEOF_SECURITY_HDR;
	u32 aligned_sec_size = ALIGN(new_sec_size, 16);
	struct SECURITY_KEY hash_key;
	struct ntfs_fnd *fnd_sdh = NULL;
	const struct INDEX_ROOT *root_sdh;
	const struct INDEX_ROOT *root_sii;
	u64 mirr_off, new_sds_size;
	static_assert((1 << Log2OfSecurityDescriptorsBlockSize) ==
		      SecurityDescriptorsBlockSize);
	hash_key.hash = security_hash(sd, size_sd);
	hash_key.sec_id = SECURITY_ID_INVALID;
	*security_id = SECURITY_ID_INVALID;
	/* Allocate a temporary buffer. */
	d_security = kzalloc(aligned_sec_size, GFP_NOFS);
	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_SECURITY);
	fnd_sdh = fnd_get();
	root_sdh = indx_get_root(indx_sdh, ni, NULL, NULL);
	root_sii = indx_get_root(indx_sii, ni, NULL, NULL);
	/*
	 * Check if such a security descriptor already exists:
	 * use "SDH" and the hash to get the offset in "SDS".
	 */
	err = indx_find(indx_sdh, ni, root_sdh, &hash_key, sizeof(hash_key),
			&d_security->key.sec_id, &diff, (struct NTFS_DE **)&e,
		if (le32_to_cpu(e->sec_hdr.size) == new_sec_size) {
			err = ntfs_read_run_nb(sbi, &ni->file.run,
					       le64_to_cpu(e->sec_hdr.off),
					       d_security, new_sec_size, NULL);
			if (le32_to_cpu(d_security->size) == new_sec_size &&
			    d_security->key.hash == hash_key.hash &&
			    !memcmp(d_security + 1, sd, size_sd)) {
				*security_id = d_security->key.sec_id;
				/* Such a security descriptor already exists. */
		err = indx_find_sort(indx_sdh, ni, root_sdh,
				     (struct NTFS_DE **)&e, fnd_sdh);
		if (!e || e->key.hash != hash_key.hash)
	/* Zero unused space. */
	next = sbi->security.next_off & (SecurityDescriptorsBlockSize - 1);
	left = SecurityDescriptorsBlockSize - next;
	/* Zero the gap until SecurityDescriptorsBlockSize. */
	if (left < new_sec_size) {
		/* Zero "left" bytes from sbi->security.next_off. */
		sbi->security.next_off += SecurityDescriptorsBlockSize + left;
	/* Zero the tail of the previous security descriptor. */
	//used = ni->vfs_inode.i_size & (SecurityDescriptorsBlockSize - 1);
	/*
	 * 0x40438 == ni->vfs_inode.i_size
	 * 0x00440 == sbi->security.next_off
	 * need to zero [0x438-0x440)
	 * if (next > used) {
	 *	u32 tozero = next - used;
	 *	zero "tozero" bytes from sbi->security.next_off - tozero
	/* Format the new security descriptor. */
	d_security->key.hash = hash_key.hash;
	d_security->key.sec_id = cpu_to_le32(sbi->security.next_id);
	d_security->off = cpu_to_le64(sbi->security.next_off);
	d_security->size = cpu_to_le32(new_sec_size);
	memcpy(d_security + 1, sd, size_sd);
	/* Write the main SDS bucket. */
	err = ntfs_sb_write_run(sbi, &ni->file.run, sbi->security.next_off,
				d_security, aligned_sec_size);
	mirr_off = sbi->security.next_off + SecurityDescriptorsBlockSize;
	new_sds_size = mirr_off + aligned_sec_size;
	if (new_sds_size > ni->vfs_inode.i_size) {
		err = attr_set_size(ni, ATTR_DATA, SDS_NAME,
				    ARRAY_SIZE(SDS_NAME), &ni->file.run,
				    new_sds_size, &new_sds_size, false, NULL);
	/* Write the mirror copy SDS bucket. */
	err = ntfs_sb_write_run(sbi, &ni->file.run, mirr_off, d_security,
	/* Fill the SII entry. */
	sii_e.de.view.data_off =
		cpu_to_le16(offsetof(struct NTFS_DE_SII, sec_hdr));
	sii_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
	sii_e.de.view.res = 0;
	sii_e.de.size = cpu_to_le16(SIZEOF_SII_DIRENTRY);
	sii_e.de.key_size = cpu_to_le16(sizeof(d_security->key.sec_id));
	sii_e.sec_id = d_security->key.sec_id;
	memcpy(&sii_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
	err = indx_insert_entry(indx_sii, ni, &sii_e.de, NULL, NULL);
	/* Fill the SDH entry. */
	sdh_e.de.view.data_off =
		cpu_to_le16(offsetof(struct NTFS_DE_SDH, sec_hdr));
	sdh_e.de.view.data_size = cpu_to_le16(SIZEOF_SECURITY_HDR);
	sdh_e.de.view.res = 0;
	sdh_e.de.size = cpu_to_le16(SIZEOF_SDH_DIRENTRY);
	sdh_e.de.key_size = cpu_to_le16(sizeof(sdh_e.key));
	sdh_e.key.hash = d_security->key.hash;
	sdh_e.key.sec_id = d_security->key.sec_id;
	memcpy(&sdh_e.sec_hdr, d_security, SIZEOF_SECURITY_HDR);
	sdh_e.magic[0] = cpu_to_le16('I');
	sdh_e.magic[1] = cpu_to_le16('I');
	err = indx_insert_entry(indx_sdh, ni, &sdh_e.de, (void *)(size_t)1,
	*security_id = d_security->key.sec_id;
	/* Update the Id and offset for the next descriptor. */
	sbi->security.next_id += 1;
	sbi->security.next_off += aligned_sec_size;
	mark_inode_dirty(&ni->vfs_inode);
 * Loads and parses $Extend/$Reparse.
int ntfs_reparse_init(struct ntfs_sb_info *sbi)
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	const struct INDEX_ROOT *root_r;
	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SR_NAME,
			    ARRAY_SIZE(SR_NAME), NULL, NULL);
	root_r = resident_data(attr);
	if (root_r->type != ATTR_ZERO ||
	    root_r->rule != NTFS_COLLATION_TYPE_UINTS) {
	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SR);
 * Loads and parses $Extend/$ObjId.
int ntfs_objid_init(struct ntfs_sb_info *sbi)
	struct ntfs_inode *ni = sbi->objid.ni;
	struct ntfs_index *indx = &sbi->objid.index_o;
	struct ATTRIB *attr;
	struct ATTR_LIST_ENTRY *le;
	const struct INDEX_ROOT *root;
	attr = ni_find_attr(ni, NULL, &le, ATTR_ROOT, SO_NAME,
			    ARRAY_SIZE(SO_NAME), NULL, NULL);
	root = resident_data(attr);
	if (root->type != ATTR_ZERO ||
	    root->rule != NTFS_COLLATION_TYPE_UINTS) {
	err = indx_init(indx, sbi, attr, INDEX_MUTEX_SO);
int ntfs_objid_remove(struct ntfs_sb_info *sbi, struct GUID *guid)
	struct ntfs_inode *ni = sbi->objid.ni;
	struct ntfs_index *indx = &sbi->objid.index_o;
	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_OBJID);
	err = indx_delete_entry(indx, ni, guid, sizeof(*guid), NULL);
	mark_inode_dirty(&ni->vfs_inode);
int ntfs_insert_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
			const struct MFT_REF *ref)
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct NTFS_DE_R re;
	memset(&re, 0, sizeof(re));
	re.de.view.data_off = cpu_to_le16(offsetof(struct NTFS_DE_R, zero));
	re.de.size = cpu_to_le16(sizeof(struct NTFS_DE_R));
	re.de.key_size = cpu_to_le16(sizeof(re.key));
	re.key.ReparseTag = rtag;
	memcpy(&re.key.ref, ref, sizeof(*ref));
	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
	err = indx_insert_entry(indx, ni, &re.de, NULL, NULL);
	mark_inode_dirty(&ni->vfs_inode);
int ntfs_remove_reparse(struct ntfs_sb_info *sbi, __le32 rtag,
			const struct MFT_REF *ref)
	struct ntfs_inode *ni = sbi->reparse.ni;
	struct ntfs_index *indx = &sbi->reparse.index_r;
	struct ntfs_fnd *fnd = NULL;
	struct REPARSE_KEY rkey;
	struct NTFS_DE_R *re;
	struct INDEX_ROOT *root_r;
	rkey.ReparseTag = rtag;
	mutex_lock_nested(&ni->ni_lock, NTFS_INODE_MUTEX_REPARSE);
		err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
	root_r = indx_get_root(indx, ni, NULL, NULL);
	/* The (void *)1 argument forces rkey.ReparseTag to be ignored when comparing keys. */
	err = indx_find(indx, ni, root_r, &rkey, sizeof(rkey), (void *)1, &diff,
			(struct NTFS_DE **)&re, fnd);
	if (memcmp(&re->key.ref, ref, sizeof(*ref))) {
		/* Impossible. Looks like volume corruption? */
	memcpy(&rkey, &re->key, sizeof(rkey));
	err = indx_delete_entry(indx, ni, &rkey, sizeof(rkey), NULL);
	mark_inode_dirty(&ni->vfs_inode);
static inline void ntfs_unmap_and_discard(struct ntfs_sb_info *sbi, CLST lcn,
	ntfs_unmap_meta(sbi->sb, lcn, len);
	ntfs_discard(sbi, lcn, len);
void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
	struct wnd_bitmap *wnd = &sbi->used.bitmap;
	down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
	if (!wnd_is_used(wnd, lcn, len)) {
		ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
		for (i = lcn; i < end; i++) {
			if (wnd_is_used(wnd, i, 1)) {
				ntfs_unmap_and_discard(sbi, lcn, len);
			wnd_set_free(wnd, lcn, len);
		ntfs_unmap_and_discard(sbi, lcn, len);
	wnd_set_free(wnd, lcn, len);
	up_write(&wnd->rw_lock);
 * Deallocates clusters.
int run_deallocate(struct ntfs_sb_info *sbi, struct runs_tree *run, bool trim)
	while (run_get_entry(run, idx++, NULL, &lcn, &len)) {
		if (lcn == SPARSE_LCN)
		mark_as_free_ex(sbi, lcn, len, trim);