1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
/*
 * compare_attr - Comparator used to keep attributes ordered inside a record.
 *
 * Orders first by 32-bit attribute type code, then (for equal types) by name
 * via ntfs_cmp_names(). Returns <0 / 0 / >0 like memcmp.
 * NOTE(review): trailing parameters (upcase table) and the early-return on
 * nonzero 'diff' are elided from this view — confirm against full source.
 */
14 static inline int compare_attr(const struct ATTRIB *left, enum ATTR_TYPE type,
15 const __le16 *name, u8 name_len,
18 /* First, compare the type codes. */
19 int diff = le32_to_cpu(left->type) - le32_to_cpu(type);
24 /* They have the same type code, so we have to compare the names. */
25 return ntfs_cmp_names(attr_name(left), left->name_len, name, name_len,
32 * Return: Unused attribute id that is less than mrec->next_attr_id.
/*
 * Fast path: while next_attr_id has not reached 0x7FFF, hand it out and
 * bump it. Slow path (partially elided here): enumerate existing attributes
 * to find the maximum id in use, then reset next_attr_id past it.
 */
34 static __le16 mi_new_attt_id(struct mft_inode *mi)
36 u16 free_id, max_id, t16;
37 struct MFT_REC *rec = mi->mrec;
41 id = rec->next_attr_id;
42 free_id = le16_to_cpu(id);
/* 0x7FFF is the cap on attribute ids within one record. */
43 if (free_id < 0x7FFF) {
44 rec->next_attr_id = cpu_to_le16(free_id + 1);
48 /* One record can store up to 1024/24 ~= 42 attributes. */
55 attr = mi_enum_attr(mi, attr);
/* End of scan: next ids continue after the largest id seen. */
57 rec->next_attr_id = cpu_to_le16(max_id + 1);
59 return cpu_to_le16(free_id);
/* Track ids seen during the scan to compute free_id/max_id. */
62 t16 = le16_to_cpu(attr->id);
66 } else if (max_id < t16)
/*
 * mi_get - Allocate, initialize and read the MFT record for @rno.
 *
 * On success stores the new mft_inode in *@mi. Returns 0 or -errno
 * (error/cleanup paths are elided in this view — presumably the allocation
 * is freed on failure; confirm against full source).
 */
71 int mi_get(struct ntfs_sb_info *sbi, CLST rno, struct mft_inode **mi)
74 struct mft_inode *m = kzalloc(sizeof(struct mft_inode), GFP_NOFS);
79 err = mi_init(m, sbi, rno);
85 err = mi_read(m, false);
/* mi_put - Release a mft_inode (body elided in this view). */
95 void mi_put(struct mft_inode *mi)
/*
 * mi_init - Prepare @mi for record @rno: allocate the raw record buffer.
 * The buffer is record_size bytes; allocation-failure handling is elided
 * from this view.
 */
101 int mi_init(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno)
105 mi->mrec = kmalloc(sbi->record_size, GFP_NOFS);
113 * mi_read - Read MFT data.
/*
 * Reads the on-disk MFT record for mi->rno into mi->mrec, applying the
 * NTFS update-sequence fixup (E_NTFS_FIXUP handling). If the target VCN is
 * not yet mapped, the runlist is loaded via attr_load_runs_vcn() and the
 * read is retried. The $MFT run_lock is taken only for non-$MFT reads on a
 * mounted volume.
 */
115 int mi_read(struct mft_inode *mi, bool is_mft)
118 struct MFT_REC *rec = mi->mrec;
119 struct ntfs_sb_info *sbi = mi->sbi;
120 u32 bpr = sbi->record_size;
/* Byte offset of this record inside the $MFT data stream. */
121 u64 vbo = (u64)mi->rno << sbi->record_bits;
122 struct ntfs_inode *mft_ni = sbi->mft.ni;
123 struct runs_tree *run = mft_ni ? &mft_ni->file.run : NULL;
124 struct rw_semaphore *rw_lock = NULL;
126 if (is_mounted(sbi)) {
/* Reading $MFT itself must not take its own run_lock. */
127 if (!is_mft && mft_ni) {
128 rw_lock = &mft_ni->file.run_lock;
133 err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
/* -E_NTFS_FIXUP: sequence-number fixup mismatch was detected/repaired. */
139 if (err == -E_NTFS_FIXUP) {
/* Runlist miss: map the VCN containing this record, then retry. */
151 err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, run,
152 vbo >> sbi->cluster_bits);
162 err = ntfs_read_bh(sbi, run, vbo, &rec->rhdr, bpr, &mi->nb);
166 if (err == -E_NTFS_FIXUP) {
174 /* Check field 'total' only here. */
175 if (le32_to_cpu(rec->total) != bpr) {
/* Corrupt record: log and mark the volume dirty. */
183 if (err == -E_NTFS_CORRUPT) {
184 ntfs_err(sbi->sb, "mft corrupted");
185 ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
/*
 * mi_enum_attr - Enumerate attributes in an MFT record, validating each one.
 *
 * Pass @attr == NULL to get the first attribute; pass the previous return
 * value to get the next. Returns NULL at ATTR_END or on any validation
 * failure (failure paths are elided in this view — presumably they mark the
 * record bad; confirm against full source). Every field read from disk is
 * bounds-checked before the attribute is handed to callers.
 */
192 struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
194 const struct MFT_REC *rec = mi->mrec;
195 u32 used = le32_to_cpu(rec->used);
196 u32 t32, off, asize, prev_type;
198 u64 data_size, alloc_size, tot_size;
201 u32 total = le32_to_cpu(rec->total);
/* First call: start at the record's attribute table. */
203 off = le16_to_cpu(rec->attr_off);
/* attr_off must lie inside the used area, past the fixup, 4-aligned. */
208 if (off >= used || off < MFTRECORD_FIXUP_OFFSET_1 ||
209 !IS_ALIGNED(off, 4)) {
213 /* Skip non-resident records. */
214 if (!is_rec_inuse(rec))
218 attr = Add2Ptr(rec, off);
220 /* Check if input attr inside record. */
221 off = PtrOffset(rec, attr);
225 asize = le32_to_cpu(attr->size);
226 if (asize < SIZEOF_RESIDENT) {
227 /* Impossible 'cause we should not return such attribute. */
231 /* Overflow check. */
232 if (off + asize < off)
/* Remember the current type to enforce ordering, then advance. */
235 prev_type = le32_to_cpu(attr->type);
236 attr = Add2Ptr(attr, asize);
240 asize = le32_to_cpu(attr->size);
242 /* Can we use the first field (attr->type). */
243 if (off + 8 > used) {
244 static_assert(ALIGN(sizeof(enum ATTR_TYPE), 8) == 8);
248 if (attr->type == ATTR_END) {
249 /* End of enumeration. */
253 /* 0x100 is last known attribute for now. */
254 t32 = le32_to_cpu(attr->type);
/* Valid type codes are nonzero multiples of 0x10, <= 0x100. */
255 if (!t32 || (t32 & 0xf) || (t32 > 0x100))
258 /* attributes in record must be ordered by type */
262 /* Check overflow and boundary. */
263 if (off + asize < off || off + asize > used)
266 /* Check size of attribute. */
267 if (!attr->non_res) {
268 /* Check resident fields. */
269 if (asize < SIZEOF_RESIDENT)
272 t16 = le16_to_cpu(attr->res.data_off);
/* Resident payload must fit inside the attribute. */
276 if (t16 + le32_to_cpu(attr->res.data_size) > asize)
/* Name (UTF-16) must end before the resident data starts. */
279 t32 = sizeof(short) * attr->name_len;
280 if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
286 /* Check nonresident fields. */
287 if (attr->non_res != 1)
290 t16 = le16_to_cpu(attr->nres.run_off);
/* Name must end before the mapping pairs (runlist) start. */
294 t32 = sizeof(short) * attr->name_len;
295 if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
298 /* Check start/end vcn. */
299 if (le64_to_cpu(attr->nres.svcn) > le64_to_cpu(attr->nres.evcn) + 1)
302 data_size = le64_to_cpu(attr->nres.data_size);
303 if (le64_to_cpu(attr->nres.valid_size) > data_size)
306 alloc_size = le64_to_cpu(attr->nres.alloc_size);
307 if (data_size > alloc_size)
/* alloc_size must be cluster-aligned. */
310 t32 = mi->sbi->cluster_mask;
311 if (alloc_size & t32)
314 if (!attr->nres.svcn && is_attr_ext(attr)) {
315 /* First segment of sparse/compressed attribute */
316 if (asize + 8 < SIZEOF_NONRESIDENT_EX)
319 tot_size = le64_to_cpu(attr->nres.total_size);
323 if (tot_size > alloc_size)
326 if (asize + 8 < SIZEOF_NONRESIDENT)
/* Non-extended attributes must have a zero compression unit. */
329 if (attr->nres.c_unit)
337 * mi_find_attr - Find the attribute by type and name and id.
/*
 * Walks the record with mi_enum_attr(), returning the first attribute that
 * matches @type, the (optional) name, and the optional @id. Returns NULL
 * when no attribute matches (loop structure partially elided here).
 */
339 struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr,
340 enum ATTR_TYPE type, const __le16 *name,
341 u8 name_len, const __le16 *id)
343 u32 type_in = le32_to_cpu(type);
347 attr = mi_enum_attr(mi, attr);
351 atype = le32_to_cpu(attr->type);
/* Name length, then name bytes, then (if requested) the attribute id. */
358 if (attr->name_len != name_len)
361 if (name_len && memcmp(attr_name(attr), name, name_len * sizeof(short)))
364 if (id && *id != attr->id)
/*
 * mi_write - Write the (dirty) MFT record back to disk.
 *
 * @wait: passed through to ntfs_write_bh (synchronous when nonzero).
 * Records that are mirrored in $MFTMirr additionally flag the superblock so
 * the mirror is refreshed later.
 */
370 int mi_write(struct mft_inode *mi, int wait)
374 struct ntfs_sb_info *sbi;
382 err = ntfs_write_bh(sbi, &rec->rhdr, &mi->nb, wait);
/* First recs_mirr records are duplicated in $MFTMirr. */
386 if (mi->rno < sbi->mft.recs_mirr)
387 sbi->flags |= NTFS_FLAGS_MFTMIRR;
/*
 * mi_format_new - Format record @rno as a fresh, in-use MFT record.
 *
 * If the record previously existed on disk (readable, FILE signature), its
 * sequence number is bumped so stale references can be detected; otherwise
 * the template record is used as-is. The new record body is copied from
 * sbi->new_rec and the buffer heads for it are prepared via ntfs_get_bh.
 */
394 int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno,
395 __le16 flags, bool is_mft)
400 u64 vbo = (u64)rno << sbi->record_bits;
402 err = mi_init(mi, sbi, rno);
/* Decide the starting sequence number (branch bodies elided here). */
408 if (rno == MFT_REC_MFT) {
410 } else if (rno < MFT_REC_FREE) {
412 } else if (rno >= sbi->mft.used) {
414 } else if (mi_read(mi, is_mft)) {
416 } else if (rec->rhdr.sign == NTFS_FILE_SIGNATURE) {
417 /* Record is reused. Update its sequence number. */
418 seq = le16_to_cpu(rec->seq) + 1;
/* Start from the preformatted template record. */
423 memcpy(rec, sbi->new_rec, sbi->record_size);
425 rec->seq = cpu_to_le16(seq);
426 rec->flags = RECORD_FLAG_IN_USE | flags;
/* Newer-layout records store their own number (compile-time layout check). */
427 if (MFTRECORD_FIXUP_OFFSET == MFTRECORD_FIXUP_OFFSET_3)
428 rec->mft_record = cpu_to_le32(rno);
433 struct ntfs_inode *ni = sbi->mft.ni;
436 if (is_mounted(sbi) && !is_mft) {
437 down_read(&ni->file.run_lock);
441 err = ntfs_get_bh(sbi, &ni->file.run, vbo, sbi->record_size,
444 up_read(&ni->file.run_lock);
451 * mi_insert_attr - Reserve space for new attribute.
453 * Return: Not full constructed attribute or NULL if not possible to create.
/*
 * Finds the type/name-ordered insertion point (via compare_attr), shifts the
 * tail of the record up by @asize, zeroes the hole, and fills in the common
 * header fields (size, name_len, name_off, id, name bytes). The caller
 * completes the attribute. NULL is returned when the record has no room.
 */
455 struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type,
456 const __le16 *name, u8 name_len, u32 asize,
462 struct MFT_REC *rec = mi->mrec;
463 struct ntfs_sb_info *sbi = mi->sbi;
464 u32 used = le32_to_cpu(rec->used);
465 const u16 *upcase = sbi->upcase;
467 /* Can we insert mi attribute? */
468 if (used + asize > sbi->record_size)
472 * Scan through the list of attributes to find the point
473 * at which we should insert it.
476 while ((attr = mi_enum_attr(mi, attr))) {
477 int diff = compare_attr(attr, type, name, name_len, upcase);
/* Same key already present and not indexed — refuse the duplicate. */
482 if (!diff && !is_attr_indexed(attr))
/* No later attribute found: insert just before the ATTR_END marker. */
490 attr = Add2Ptr(rec, used - 8);
492 /* Insert before 'attr'. */
493 tail = used - PtrOffset(rec, attr);
496 id = mi_new_attt_id(mi);
/* Open a zeroed gap of asize bytes at the insertion point. */
498 memmove(Add2Ptr(attr, asize), attr, tail);
499 memset(attr, 0, asize);
502 attr->size = cpu_to_le32(asize);
503 attr->name_len = name_len;
504 attr->name_off = cpu_to_le16(name_off);
507 memmove(Add2Ptr(attr, name_off), name, name_len * sizeof(short));
508 rec->used = cpu_to_le32(used + asize);
516 * mi_remove_attr - Remove the attribute from record.
518 * NOTE: The source attr will point to next attribute.
/*
 * Bounds-checks @attr against the record's used size, decrements the link
 * count for indexed attributes (e.g. file names), then closes the gap with
 * memmove and shrinks rec->used. Returns false when @attr is out of bounds.
 * NOTE(review): the 'used -= asize' adjustment is elided from this view.
 */
520 bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
523 struct MFT_REC *rec = mi->mrec;
524 u32 aoff = PtrOffset(rec, attr);
525 u32 used = le32_to_cpu(rec->used);
526 u32 asize = le32_to_cpu(attr->size);
528 if (aoff + asize > used)
/* Indexed attributes contribute to the base record's hard-link count. */
531 if (ni && is_attr_indexed(attr)) {
532 le16_add_cpu(&ni->mi.mrec->hard_links, -1);
/* Slide everything after @attr down over it. */
537 memmove(attr, Add2Ptr(attr, asize), used - aoff);
538 rec->used = cpu_to_le32(used);
544 /* bytes = "new attribute size" - "old attribute size" */
/*
 * mi_resize_attr - Grow (@bytes > 0) or shrink (@bytes < 0) an attribute
 * in place, keeping 8-byte alignment of attribute sizes.
 *
 * The tail of the record after the attribute is shifted accordingly; on
 * growth the new bytes are zeroed. Returns false when the request is
 * invalid or the record lacks space. Presumably only used for resident
 * attributes (res.data_size is updated) — confirm against full source.
 */
545 bool mi_resize_attr(struct mft_inode *mi, struct ATTRIB *attr, int bytes)
547 struct MFT_REC *rec = mi->mrec;
548 u32 aoff = PtrOffset(rec, attr);
549 u32 total, used = le32_to_cpu(rec->total);
550 u32 nsize, asize = le32_to_cpu(attr->size);
551 u32 rsize = le32_to_cpu(attr->res.data_size);
/* Bytes of record content that follow this attribute. */
552 int tail = (int)(used - aoff - asize);
556 if (tail < 0 || aoff >= used)
562 total = le32_to_cpu(rec->total);
563 next = Add2Ptr(attr, asize);
/* Growth path: round the delta up to 8 and check record capacity. */
566 dsize = ALIGN(bytes, 8);
567 if (used + dsize > total)
569 nsize = asize + dsize;
571 memmove(next + dsize, next, tail);
572 memset(next, 0, dsize);
/* Shrink path: round the (positive) delta up to 8 and pull the tail in. */
576 dsize = ALIGN(-bytes, 8);
579 nsize = asize - dsize;
580 memmove(next - dsize, next, tail);
585 rec->used = cpu_to_le32(used);
586 attr->size = cpu_to_le32(nsize);
588 attr->res.data_size = cpu_to_le32(rsize);
595 * Pack runs in MFT record.
596 * If failed record is not changed.
598 int mi_pack_runs(struct mft_inode *mi, struct ATTRIB *attr,
599 struct runs_tree *run, CLST len)
602 struct ntfs_sb_info *sbi = mi->sbi;
605 struct MFT_REC *rec = mi->mrec;
606 CLST svcn = le64_to_cpu(attr->nres.svcn);
607 u32 used = le32_to_cpu(rec->used);
608 u32 aoff = PtrOffset(rec, attr);
609 u32 asize = le32_to_cpu(attr->size);
610 char *next = Add2Ptr(attr, asize);
611 u16 run_off = le16_to_cpu(attr->nres.run_off);
612 u32 run_size = asize - run_off;
613 u32 tail = used - aoff - asize;
614 u32 dsize = sbi->record_size - used;
616 /* Make a maximum gap in current record. */
617 memmove(next + dsize, next, tail);
619 /* Pack as much as possible. */
620 err = run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size + dsize,
623 memmove(next, next + dsize, tail);
627 new_run_size = ALIGN(err, 8);
629 memmove(next + new_run_size - run_size, next + dsize, tail);
631 attr->size = cpu_to_le32(asize + new_run_size - run_size);
632 attr->nres.evcn = cpu_to_le64(svcn + plen - 1);
633 rec->used = cpu_to_le32(used + new_run_size - run_size);