// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/random.h>
#include <linux/slab.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

/*
 * LOG FILE structs
 */

// clang-format off

#define MaxLogFileSize     0x100000000ull
#define DefaultLogPageSize 4096
#define MinLogRecordPages  0x30

struct RESTART_HDR {
	struct NTFS_RECORD_HEADER rhdr; // 'RSTR'
	__le32 sys_page_size; // 0x10: Page size of the system which initialized the log.
	__le32 page_size;     // 0x14: Log page size used for this log file.
	__le16 ra_off;        // 0x18:
	__le16 minor_ver;     // 0x1A:
	__le16 major_ver;     // 0x1C:
	__le16 fixups[];
};

#define LFS_NO_CLIENT 0xffff
#define LFS_NO_CLIENT_LE cpu_to_le16(0xffff)

struct CLIENT_REC {
	__le64 oldest_lsn;
	__le64 restart_lsn; // 0x08:
	__le16 prev_client; // 0x10:
	__le16 next_client; // 0x12:
	__le16 seq_num;     // 0x14:
	u8 align[6];        // 0x16:
	__le32 name_bytes;  // 0x1C: In bytes.
	__le16 name[32];    // 0x20: Name of client.
};

static_assert(sizeof(struct CLIENT_REC) == 0x60);

/* Two copies of these will exist at the beginning of the log file */
struct RESTART_AREA {
	__le64 current_lsn;    // 0x00: Current logical end of log file.
	__le16 log_clients;    // 0x08: Maximum number of clients.
	__le16 client_idx[2];  // 0x0A: Free/use index into the client record arrays.
	__le16 flags;          // 0x0E: See RESTART_SINGLE_PAGE_IO.
	__le32 seq_num_bits;   // 0x10: The number of bits in sequence number.
	__le16 ra_len;         // 0x14:
	__le16 client_off;     // 0x16:
	__le64 l_size;         // 0x18: Usable log file size.
	__le32 last_lsn_data_len; // 0x20:
	__le16 rec_hdr_len;    // 0x24: Log record header length.
	__le16 data_off;       // 0x26: Log page data offset.
	__le32 open_log_count; // 0x28:
	__le32 align[5];       // 0x2C:
	struct CLIENT_REC clients[]; // 0x40:
};

struct LOG_REC_HDR {
	__le16 redo_op;      // 0x00:  NTFS_LOG_OPERATION
	__le16 undo_op;      // 0x02:  NTFS_LOG_OPERATION
	__le16 redo_off;     // 0x04:  Offset to Redo record.
	__le16 redo_len;     // 0x06:  Redo length.
	__le16 undo_off;     // 0x08:  Offset to Undo record.
	__le16 undo_len;     // 0x0A:  Undo length.
	__le16 target_attr;  // 0x0C:
	__le16 lcns_follow;  // 0x0E:
	__le16 record_off;   // 0x10:
	__le16 attr_off;     // 0x12:
	__le16 cluster_off;  // 0x14:
	__le16 reserved;     // 0x16:
	__le64 target_vcn;   // 0x18:
	__le64 page_lcns[];  // 0x20:
};

static_assert(sizeof(struct LOG_REC_HDR) == 0x20);

#define RESTART_ENTRY_ALLOCATED    0xFFFFFFFF
#define RESTART_ENTRY_ALLOCATED_LE cpu_to_le32(0xFFFFFFFF)

struct RESTART_TABLE {
	__le16 size;       // 0x00: In bytes
	__le16 used;       // 0x02: Entries
	__le16 total;      // 0x04: Entries
	__le16 res[3];     // 0x06:
	__le32 free_goal;  // 0x0C:
	__le32 first_free; // 0x10:
	__le32 last_free;  // 0x14:
};

static_assert(sizeof(struct RESTART_TABLE) == 0x18);

struct ATTR_NAME_ENTRY {
	__le16 off; // Offset in the Open attribute Table.
	__le16 name_bytes;
	__le16 name[];
};

struct OPEN_ATTR_ENRTY {
	__le32 next;            // 0x00: RESTART_ENTRY_ALLOCATED if allocated
	__le32 bytes_per_index; // 0x04:
	enum ATTR_TYPE type;    // 0x08:
	u8 is_dirty_pages;      // 0x0C:
	u8 is_attr_name;        // 0x0D: Faked field to manage 'ptr'
	u8 name_len;            // 0x0E: Faked field to manage 'ptr'
	u8 res;
	struct MFT_REF ref;     // 0x10: File Reference of file containing attribute
	__le64 open_record_lsn; // 0x18:
	void *ptr;              // 0x20:
};

/* 32 bit version of 'struct OPEN_ATTR_ENRTY' */
struct OPEN_ATTR_ENRTY_32 {
	__le32 next;            // 0x00: RESTART_ENTRY_ALLOCATED if allocated
	__le32 ptr;             // 0x04:
	struct MFT_REF ref;     // 0x08:
	__le64 open_record_lsn; // 0x10:
	u8 is_dirty_pages;      // 0x18:
	u8 is_attr_name;        // 0x19:
	u8 res1[2];
	enum ATTR_TYPE type;    // 0x1C:
	u8 name_len;            // 0x20: In wchar
	u8 res2[3];
	__le32 AttributeName;   // 0x24:
	__le32 bytes_per_index; // 0x28:
};

#define SIZEOF_OPENATTRIBUTEENTRY0 0x2c
// static_assert( 0x2C == sizeof(struct OPEN_ATTR_ENRTY_32) );
static_assert(sizeof(struct OPEN_ATTR_ENRTY) < SIZEOF_OPENATTRIBUTEENTRY0);

/*
 * One entry exists in the Dirty Pages Table for each page which is dirty at
 * the time the Restart Area is written.
 */
struct DIR_PAGE_ENTRY {
	__le32 next;         // 0x00: RESTART_ENTRY_ALLOCATED if allocated
	__le32 target_attr;  // 0x04: Index into the Open attribute Table
	__le32 transfer_len; // 0x08:
	__le32 lcns_follow;  // 0x0C:
	__le64 vcn;          // 0x10: Vcn of dirty page
	__le64 oldest_lsn;   // 0x18:
	__le64 page_lcns[];  // 0x20:
};

static_assert(sizeof(struct DIR_PAGE_ENTRY) == 0x20);

/* 32 bit version of 'struct DIR_PAGE_ENTRY' */
struct DIR_PAGE_ENTRY_32 {
	__le32 next;            // 0x00: RESTART_ENTRY_ALLOCATED if allocated
	__le32 target_attr;     // 0x04: Index into the Open attribute Table
	__le32 transfer_len;    // 0x08:
	__le32 lcns_follow;     // 0x0C:
	__le32 reserved;        // 0x10:
	__le32 vcn_low;         // 0x14: Vcn of dirty page
	__le32 vcn_hi;          // 0x18: Vcn of dirty page
	__le32 oldest_lsn_low;  // 0x1C:
	__le32 oldest_lsn_hi;   // 0x20:
	__le32 page_lcns_low;   // 0x24:
	__le32 page_lcns_hi;    // 0x28:
};

static_assert(offsetof(struct DIR_PAGE_ENTRY_32, vcn_low) == 0x14);
static_assert(sizeof(struct DIR_PAGE_ENTRY_32) == 0x2c);

enum transact_state {
	TransactionUninitialized = 0,
	TransactionActive,
	TransactionPrepared,
	TransactionCommitted
};

struct TRANSACTION_ENTRY {
	__le32 next;          // 0x00: RESTART_ENTRY_ALLOCATED if allocated
	u8 transact_state;    // 0x04:
	u8 reserved[3];       // 0x05:
	__le64 first_lsn;     // 0x08:
	__le64 prev_lsn;      // 0x10:
	__le64 undo_next_lsn; // 0x18:
	__le32 undo_records;  // 0x20: Number of undo log records pending abort
	__le32 undo_len;      // 0x24: Total undo size
};

static_assert(sizeof(struct TRANSACTION_ENTRY) == 0x28);

struct NTFS_RESTART {
	__le32 major_ver;             // 0x00:
	__le32 minor_ver;             // 0x04:
	__le64 check_point_start;     // 0x08:
	__le64 open_attr_table_lsn;   // 0x10:
	__le64 attr_names_lsn;        // 0x18:
	__le64 dirty_pages_table_lsn; // 0x20:
	__le64 transact_table_lsn;    // 0x28:
	__le32 open_attr_len;         // 0x30: In bytes
	__le32 attr_names_len;        // 0x34: In bytes
	__le32 dirty_pages_len;       // 0x38: In bytes
	__le32 transact_table_len;    // 0x3C: In bytes
};

static_assert(sizeof(struct NTFS_RESTART) == 0x40);

struct NEW_ATTRIBUTE_SIZES {
	__le64 alloc_size;
	__le64 valid_size;
	__le64 data_size;
	__le64 total_size;
};

struct BITMAP_RANGE {
	__le32 bitmap_off;
	__le32 bits;
};

struct LCN_RANGE {
	__le64 lcn;
	__le64 len;
};

/* The following type defines the different log record types. */
#define LfsClientRecord  cpu_to_le32(1)
#define LfsClientRestart cpu_to_le32(2)

/* This is used to uniquely identify a client for a particular log file. */
struct CLIENT_ID {
	__le16 seq_num;
	__le16 client_idx;
};

/* This is the header that begins every Log Record in the log file. */
struct LFS_RECORD_HDR {
	__le64 this_lsn;                // 0x00:
	__le64 client_prev_lsn;         // 0x08:
	__le64 client_undo_next_lsn;    // 0x10:
	__le32 client_data_len;         // 0x18:
	struct CLIENT_ID client;        // 0x1C: Owner of this log record.
	__le32 record_type;             // 0x20: LfsClientRecord or LfsClientRestart.
	__le32 transact_id;             // 0x24:
	__le16 flags;                   // 0x28: LOG_RECORD_MULTI_PAGE
	u8 align[6];                    // 0x2A:
};

#define LOG_RECORD_MULTI_PAGE cpu_to_le16(1)

static_assert(sizeof(struct LFS_RECORD_HDR) == 0x30);

struct LFS_RECORD {
	__le16 next_record_off; // 0x00: Offset of the free space in the page.
	u8 align[6];            // 0x02:
	__le64 last_end_lsn;    // 0x08: lsn for the last log record which ends on the page.
};

static_assert(sizeof(struct LFS_RECORD) == 0x10);

struct RECORD_PAGE_HDR {
	struct NTFS_RECORD_HEADER rhdr; // 'RCRD'
	__le32 rflags;                  // 0x10: See LOG_PAGE_LOG_RECORD_END
	__le16 page_count;              // 0x14:
	__le16 page_pos;                // 0x16:
	struct LFS_RECORD record_hdr;   // 0x18:
	__le16 fixups[10];              // 0x28:
	__le32 file_off;                // 0x3c: Used when major version >= 2
};

// clang-format on

// Page contains the end of a log record.
#define LOG_PAGE_LOG_RECORD_END cpu_to_le32(0x00000001)

static inline bool is_log_record_end(const struct RECORD_PAGE_HDR *hdr)
{
	return hdr->rflags & LOG_PAGE_LOG_RECORD_END;
}

static_assert(offsetof(struct RECORD_PAGE_HDR, file_off) == 0x3c);

/*
 * END of NTFS LOG structures
 */

/* Define some tuning parameters to keep the restart tables a reasonable size. */
#define INITIAL_NUMBER_TRANSACTIONS 5

enum NTFS_LOG_OPERATION {

	Noop = 0x00,
	CompensationLogRecord = 0x01,
	InitializeFileRecordSegment = 0x02,
	DeallocateFileRecordSegment = 0x03,
	WriteEndOfFileRecordSegment = 0x04,
	CreateAttribute = 0x05,
	DeleteAttribute = 0x06,
	UpdateResidentValue = 0x07,
	UpdateNonresidentValue = 0x08,
	UpdateMappingPairs = 0x09,
	DeleteDirtyClusters = 0x0A,
	SetNewAttributeSizes = 0x0B,
	AddIndexEntryRoot = 0x0C,
	DeleteIndexEntryRoot = 0x0D,
	AddIndexEntryAllocation = 0x0E,
	DeleteIndexEntryAllocation = 0x0F,
	WriteEndOfIndexBuffer = 0x10,
	SetIndexEntryVcnRoot = 0x11,
	SetIndexEntryVcnAllocation = 0x12,
	UpdateFileNameRoot = 0x13,
	UpdateFileNameAllocation = 0x14,
	SetBitsInNonresidentBitMap = 0x15,
	ClearBitsInNonresidentBitMap = 0x16,
	HotFix = 0x17,
	EndTopLevelAction = 0x18,
	PrepareTransaction = 0x19,
	CommitTransaction = 0x1A,
	ForgetTransaction = 0x1B,
	OpenNonresidentAttribute = 0x1C,
	OpenAttributeTableDump = 0x1D,
	AttributeNamesDump = 0x1E,
	DirtyPageTableDump = 0x1F,
	TransactionTableDump = 0x20,
	UpdateRecordDataRoot = 0x21,
	UpdateRecordDataAllocation = 0x22,

	UpdateRelativeDataInIndex =
		0x23, // NtOfsRestartUpdateRelativeDataInIndex
	UpdateRelativeDataInIndex2 = 0x24,
	ZeroEndOfFileRecord = 0x25,
};

/*
 * Bitmap of the log record types which require a target attribute.
 * A set bit indicates that the corresponding restart operation
 * requires a target attribute.
 */
static const u8 AttributeRequired[] = {
	0xFC, 0xFB, 0xFF, 0x10, 0x06,
};

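/*
 * is_target_required - Test the AttributeRequired bitmap for @op.
 *
 * The bytes above form a bitmap indexed by operation code: byte (op >> 3),
 * bit (op & 7). For example, InitializeFileRecordSegment (0x02) selects
 * bit 2 of 0xFC, which is set, so that operation needs a target attribute,
 * while Noop (0x00, bit 0 of 0xFC) does not.
 */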
static inline bool is_target_required(u16 op)
{
	bool ret = op <= UpdateRecordDataAllocation &&
		   (AttributeRequired[op >> 3] >> (op & 7) & 1);
	return ret;
}

static inline bool can_skip_action(enum NTFS_LOG_OPERATION op)
{
	switch (op) {
	case Noop:
	case DeleteDirtyClusters:
	case HotFix:
	case EndTopLevelAction:
	case PrepareTransaction:
	case CommitTransaction:
	case ForgetTransaction:
	case CompensationLogRecord:
	case OpenNonresidentAttribute:
	case OpenAttributeTableDump:
	case AttributeNamesDump:
	case DirtyPageTableDump:
	case TransactionTableDump:
		return true;
	default:
		return false;
	}
}

enum { lcb_ctx_undo_next, lcb_ctx_prev, lcb_ctx_next };

/* Bytes per restart table. */
static inline u32 bytes_per_rt(const struct RESTART_TABLE *rt)
{
	return le16_to_cpu(rt->used) * le16_to_cpu(rt->size) +
	       sizeof(struct RESTART_TABLE);
}

/* Log record length. */
static inline u32 lrh_length(const struct LOG_REC_HDR *lr)
{
	u16 t16 = le16_to_cpu(lr->lcns_follow);

	return struct_size(lr, page_lcns, max_t(u16, 1, t16));
}

struct lcb {
	struct LFS_RECORD_HDR *lrh; // Log record header of the current lsn.
	struct LOG_REC_HDR *log_rec;
	u32 ctx_mode; // lcb_ctx_undo_next/lcb_ctx_prev/lcb_ctx_next
	struct CLIENT_ID client;
	bool alloc; // If true then we should deallocate 'log_rec'.
};

static void lcb_put(struct lcb *lcb)
{
	if (lcb->alloc)
		kfree(lcb->log_rec);
	kfree(lcb->lrh);
	kfree(lcb);
}

/* Find the oldest lsn from active clients. */
static inline void oldest_client_lsn(const struct CLIENT_REC *ca,
				     __le16 next_client, u64 *oldest_lsn)
{
	while (next_client != LFS_NO_CLIENT_LE) {
		const struct CLIENT_REC *cr = ca + le16_to_cpu(next_client);
		u64 lsn = le64_to_cpu(cr->oldest_lsn);

		/* Ignore this block if its oldest lsn is 0. */
		if (lsn && lsn < *oldest_lsn)
			*oldest_lsn = lsn;

		next_client = cr->next_client;
	}
}

static inline bool is_rst_page_hdr_valid(u32 file_off,
					 const struct RESTART_HDR *rhdr)
{
	u32 sys_page = le32_to_cpu(rhdr->sys_page_size);
	u32 page_size = le32_to_cpu(rhdr->page_size);
	u32 end_usa;
	u16 ro;

	if (sys_page < SECTOR_SIZE || page_size < SECTOR_SIZE ||
	    sys_page & (sys_page - 1) || page_size & (page_size - 1)) {
		return false;
	}

	/* Check that if the file offset isn't 0, it is the system page size. */
	if (file_off && file_off != sys_page)
		return false;

	/* Check support version 1.1+. */
	if (le16_to_cpu(rhdr->major_ver) <= 1 && !rhdr->minor_ver)
		return false;

	if (le16_to_cpu(rhdr->major_ver) > 2)
		return false;

	ro = le16_to_cpu(rhdr->ra_off);
	if (!IS_ALIGNED(ro, 8) || ro > sys_page)
		return false;

	end_usa = ((sys_page >> SECTOR_SHIFT) + 1) * sizeof(short);
	end_usa += le16_to_cpu(rhdr->rhdr.fix_off);

	if (ro < end_usa)
		return false;

	return true;
}

static inline bool is_rst_area_valid(const struct RESTART_HDR *rhdr)
{
	const struct RESTART_AREA *ra;
	u16 cl, fl, ul;
	u32 off, l_size, file_dat_bits, file_size_round;
	u16 ro = le16_to_cpu(rhdr->ra_off);
	u32 sys_page = le32_to_cpu(rhdr->sys_page_size);

	if (ro + offsetof(struct RESTART_AREA, l_size) >
	    SECTOR_SIZE - sizeof(short))
		return false;

	ra = Add2Ptr(rhdr, ro);
	cl = le16_to_cpu(ra->log_clients);

	if (cl > 1)
		return false;

	off = le16_to_cpu(ra->client_off);

	if (!IS_ALIGNED(off, 8) || ro + off > SECTOR_SIZE - sizeof(short))
		return false;

	off += cl * sizeof(struct CLIENT_REC);

	if (off > sys_page)
		return false;

	/*
	 * Check the restart length field and whether the entire
	 * restart area is contained within that length.
	 */
	if (le16_to_cpu(rhdr->ra_off) + le16_to_cpu(ra->ra_len) > sys_page ||
	    off > le16_to_cpu(ra->ra_len)) {
		return false;
	}

	/*
	 * As a final check make sure that the use list and the free list
	 * are either empty or point to a valid client.
	 */
	fl = le16_to_cpu(ra->client_idx[0]);
	ul = le16_to_cpu(ra->client_idx[1]);
	if ((fl != LFS_NO_CLIENT && fl >= cl) ||
	    (ul != LFS_NO_CLIENT && ul >= cl))
		return false;

	/* Make sure the sequence number bits match the log file size. */
	l_size = le64_to_cpu(ra->l_size);

	file_dat_bits = sizeof(u64) * 8 - le32_to_cpu(ra->seq_num_bits);
	file_size_round = 1u << (file_dat_bits + 3);
	if (file_size_round != l_size &&
	    (file_size_round < l_size || (file_size_round / 2) > l_size)) {
		return false;
	}

	/* The log page data offset and record header length must be quad-aligned. */
	if (!IS_ALIGNED(le16_to_cpu(ra->data_off), 8) ||
	    !IS_ALIGNED(le16_to_cpu(ra->rec_hdr_len), 8))
		return false;

	return true;
}

static inline bool is_client_area_valid(const struct RESTART_HDR *rhdr,
					bool usa_error)
{
	u16 ro = le16_to_cpu(rhdr->ra_off);
	const struct RESTART_AREA *ra = Add2Ptr(rhdr, ro);
	u16 ra_len = le16_to_cpu(ra->ra_len);
	const struct CLIENT_REC *ca;
	u32 i;

	if (usa_error && ra_len + ro > SECTOR_SIZE - sizeof(short))
		return false;

	/* Find the start of the client array. */
	ca = Add2Ptr(ra, le16_to_cpu(ra->client_off));

	/*
	 * Start with the free list.
	 * Check that all the clients are valid and that there isn't a cycle.
	 * Do the in-use list on the second pass.
	 */
	for (i = 0; i < 2; i++) {
		u16 client_idx = le16_to_cpu(ra->client_idx[i]);
		bool first_client = true;
		u16 clients = le16_to_cpu(ra->log_clients);

		while (client_idx != LFS_NO_CLIENT) {
			const struct CLIENT_REC *cr;

			if (!clients ||
			    client_idx >= le16_to_cpu(ra->log_clients))
				return false;

			clients -= 1;
			cr = ca + client_idx;

			client_idx = le16_to_cpu(cr->next_client);

			if (first_client) {
				first_client = false;
				if (cr->prev_client != LFS_NO_CLIENT_LE)
					return false;
			}
		}
	}

	return true;
}

/*
 * remove_client
 *
 * Remove a client record from a client record list in a restart area.
 */
static inline void remove_client(struct CLIENT_REC *ca,
				 const struct CLIENT_REC *cr, __le16 *head)
{
	if (cr->prev_client == LFS_NO_CLIENT_LE)
		*head = cr->next_client;
	else
		ca[le16_to_cpu(cr->prev_client)].next_client = cr->next_client;

	if (cr->next_client != LFS_NO_CLIENT_LE)
		ca[le16_to_cpu(cr->next_client)].prev_client = cr->prev_client;
}

/*
 * add_client - Add a client record to the start of a list.
 */
static inline void add_client(struct CLIENT_REC *ca, u16 index, __le16 *head)
{
	struct CLIENT_REC *cr = ca + index;

	cr->prev_client = LFS_NO_CLIENT_LE;
	cr->next_client = *head;

	if (*head != LFS_NO_CLIENT_LE)
		ca[le16_to_cpu(*head)].prev_client = cpu_to_le16(index);

	*head = cpu_to_le16(index);
}

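/*
 * enum_rstbl - Enumerate the allocated entries of a restart table.
 *
 * Pass @c == NULL to get the first allocated entry; pass the previous
 * return value to get the next one. Returns NULL past the end of the table.
 */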
static inline void *enum_rstbl(struct RESTART_TABLE *t, void *c)
{
	__le32 *e;
	u32 bprt;
	u16 rsize = t ? le16_to_cpu(t->size) : 0;

	if (!c) {
		if (!t || !t->total)
			return NULL;
		e = Add2Ptr(t, sizeof(struct RESTART_TABLE));
	} else {
		e = Add2Ptr(c, rsize);
	}

	/* Loop until we hit the first one allocated, or the end of the list. */
	for (bprt = bytes_per_rt(t); PtrOffset(t, e) < bprt;
	     e = Add2Ptr(e, rsize)) {
		if (*e == RESTART_ENTRY_ALLOCATED_LE)
			return e;
	}
	return NULL;
}

/*
 * find_dp - Search for a @vcn in Dirty Page Table.
 */
static inline struct DIR_PAGE_ENTRY *find_dp(struct RESTART_TABLE *dptbl,
					     u32 target_attr, u64 vcn)
{
	__le32 ta = cpu_to_le32(target_attr);
	struct DIR_PAGE_ENTRY *dp = NULL;

	while ((dp = enum_rstbl(dptbl, dp))) {
		u64 dp_vcn = le64_to_cpu(dp->vcn);

		if (dp->target_attr == ta && vcn >= dp_vcn &&
		    vcn < dp_vcn + le32_to_cpu(dp->lcns_follow)) {
			return dp;
		}
	}
	return NULL;
}

static inline u32 norm_file_page(u32 page_size, u32 *l_size, bool use_default)
{
	if (use_default)
		page_size = DefaultLogPageSize;

	/* Round the file size down to a system page boundary. */
	*l_size &= ~(page_size - 1);

	/* File should contain at least 2 restart pages and MinLogRecordPages pages. */
	if (*l_size < (MinLogRecordPages + 2) * page_size)
		return 0;

	return page_size;
}
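
/*
 * check_log_rec - Sanity-check a client log record header.
 *
 * @bytes is the number of bytes available for the record, @tr the offset of
 * its transaction entry and @bytes_per_attr_entry the size of an open
 * attribute table entry.
 */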
static bool check_log_rec(const struct LOG_REC_HDR *lr, u32 bytes, u32 tr,
			  u32 bytes_per_attr_entry)
{
	u16 t16;

	if (bytes < sizeof(struct LOG_REC_HDR))
		return false;
	if (!tr)
		return false;

	if ((tr - sizeof(struct RESTART_TABLE)) %
	    sizeof(struct TRANSACTION_ENTRY))
		return false;

	if (le16_to_cpu(lr->redo_off) & 7)
		return false;

	if (le16_to_cpu(lr->undo_off) & 7)
		return false;

	if (lr->target_attr)
		goto check_lcns;

	if (is_target_required(le16_to_cpu(lr->redo_op)))
		return false;

	if (is_target_required(le16_to_cpu(lr->undo_op)))
		return false;

check_lcns:
	if (!lr->lcns_follow)
		goto check_length;

	t16 = le16_to_cpu(lr->target_attr);
	if ((t16 - sizeof(struct RESTART_TABLE)) % bytes_per_attr_entry)
		return false;

check_length:
	if (bytes < lrh_length(lr))
		return false;

	return true;
}

711
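/*
 * check_rstbl - Validate a restart table header, its entries and free list.
 */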
static bool check_rstbl(const struct RESTART_TABLE *rt, size_t bytes)
{
	u32 ts;
	u32 i, off;
	u16 rsize = le16_to_cpu(rt->size);
	u16 ne = le16_to_cpu(rt->used);
	u32 ff = le32_to_cpu(rt->first_free);
	u32 lf = le32_to_cpu(rt->last_free);

	ts = rsize * ne + sizeof(struct RESTART_TABLE);

	if (!rsize || rsize > bytes ||
	    rsize + sizeof(struct RESTART_TABLE) > bytes || bytes < ts ||
	    le16_to_cpu(rt->total) > ne || ff > ts || lf > ts ||
	    (ff && ff < sizeof(struct RESTART_TABLE)) ||
	    (lf && lf < sizeof(struct RESTART_TABLE))) {
		return false;
	}

	/*
	 * Verify each entry is either allocated or points
	 * to a valid offset within the table.
	 */
	for (i = 0; i < ne; i++) {
		off = le32_to_cpu(*(__le32 *)Add2Ptr(
			rt, i * rsize + sizeof(struct RESTART_TABLE)));

		if (off != RESTART_ENTRY_ALLOCATED && off &&
		    (off < sizeof(struct RESTART_TABLE) ||
		     ((off - sizeof(struct RESTART_TABLE)) % rsize))) {
			return false;
		}
	}

	/*
	 * Walk through the list headed by the first entry to make
	 * sure none of the entries are currently being used.
	 */
	for (off = ff; off;) {
		if (off == RESTART_ENTRY_ALLOCATED)
			return false;

		off = le32_to_cpu(*(__le32 *)Add2Ptr(rt, off));
	}

	return true;
}

/*
 * free_rsttbl_idx - Free a previously allocated index in a Restart Table.
 */
static inline void free_rsttbl_idx(struct RESTART_TABLE *rt, u32 off)
{
	__le32 *e;
	u32 lf = le32_to_cpu(rt->last_free);
	__le32 off_le = cpu_to_le32(off);

	e = Add2Ptr(rt, off);

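	/*
	 * Entries below free_goal are pushed onto the front of the free list
	 * so they are reused first; entries at or beyond it are appended to
	 * the tail so they are reused last.
	 */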
	if (off < le32_to_cpu(rt->free_goal)) {
		*e = rt->first_free;
		rt->first_free = off_le;
		if (!lf)
			rt->last_free = off_le;
	} else {
		if (lf)
			*(__le32 *)Add2Ptr(rt, lf) = off_le;
		else
			rt->first_free = off_le;

		rt->last_free = off_le;
		*e = 0;
	}

	le16_sub_cpu(&rt->total, 1);
}

static inline struct RESTART_TABLE *init_rsttbl(u16 esize, u16 used)
{
	__le32 *e, *last_free;
	u32 off;
	u32 bytes = esize * used + sizeof(struct RESTART_TABLE);
	u32 lf = sizeof(struct RESTART_TABLE) + (used - 1) * esize;
	struct RESTART_TABLE *t = kzalloc(bytes, GFP_NOFS);

	if (!t)
		return NULL;

	t->size = cpu_to_le16(esize);
	t->used = cpu_to_le16(used);
	t->free_goal = cpu_to_le32(~0u);
	t->first_free = cpu_to_le32(sizeof(struct RESTART_TABLE));
	t->last_free = cpu_to_le32(lf);

	e = (__le32 *)(t + 1);
	last_free = Add2Ptr(t, lf);

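	/*
	 * Chain every free entry to the next one by offset; the last entry
	 * keeps the zero from kzalloc, which terminates the free list.
	 */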
	for (off = sizeof(struct RESTART_TABLE) + esize; e < last_free;
	     e = Add2Ptr(e, esize), off += esize) {
		*e = cpu_to_le32(off);
	}
	return t;
}

static inline struct RESTART_TABLE *extend_rsttbl(struct RESTART_TABLE *tbl,
						  u32 add, u32 free_goal)
{
	u16 esize = le16_to_cpu(tbl->size);
	__le32 osize = cpu_to_le32(bytes_per_rt(tbl));
	u32 used = le16_to_cpu(tbl->used);
	struct RESTART_TABLE *rt;

	rt = init_rsttbl(esize, used + add);
	if (!rt)
		return NULL;

	memcpy(rt + 1, tbl + 1, esize * used);

	rt->free_goal = free_goal == ~0u
				? cpu_to_le32(~0u)
				: cpu_to_le32(sizeof(struct RESTART_TABLE) +
					      free_goal * esize);

	if (tbl->first_free) {
		rt->first_free = tbl->first_free;
		*(__le32 *)Add2Ptr(rt, le32_to_cpu(tbl->last_free)) = osize;
	} else {
		rt->first_free = osize;
	}

	rt->total = tbl->total;

	kfree(tbl);
	return rt;
}

/*
 * alloc_rsttbl_idx
 *
 * Allocate an index from within a previously initialized Restart Table.
 */
static inline void *alloc_rsttbl_idx(struct RESTART_TABLE **tbl)
{
	u32 off;
	__le32 *e;
	struct RESTART_TABLE *t = *tbl;

	if (!t->first_free) {
		*tbl = t = extend_rsttbl(t, 16, ~0u);
		if (!t)
			return NULL;
	}

	off = le32_to_cpu(t->first_free);

	/* Dequeue this entry and zero it. */
	e = Add2Ptr(t, off);

	t->first_free = *e;

	memset(e, 0, le16_to_cpu(t->size));

	*e = RESTART_ENTRY_ALLOCATED_LE;

	/* If the list is now empty, fix last_free as well. */
	if (!t->first_free)
		t->last_free = 0;

	le16_add_cpu(&t->total, 1);

	return Add2Ptr(t, off);
}

/*
 * alloc_rsttbl_from_idx
 *
 * Allocate a specific index from within a previously initialized Restart Table.
 */
static inline void *alloc_rsttbl_from_idx(struct RESTART_TABLE **tbl, u32 vbo)
{
	u32 off;
	__le32 *e;
	struct RESTART_TABLE *rt = *tbl;
	u32 bytes = bytes_per_rt(rt);
	u16 esize = le16_to_cpu(rt->size);

	/* If the entry is not in the table, we will have to extend the table. */
	if (vbo >= bytes) {
		/*
		 * Extend the size by computing the number of entries between
		 * the existing size and the desired index and adding 1 to that.
		 */
		u32 bytes2idx = vbo - bytes;

		/*
		 * There should always be an integral number of entries
		 * being added. Now extend the table.
		 */
		*tbl = rt = extend_rsttbl(rt, bytes2idx / esize + 1, bytes);
		if (!rt)
			return NULL;
	}

	/* See if the entry is already allocated, and just return if it is. */
	e = Add2Ptr(rt, vbo);

	if (*e == RESTART_ENTRY_ALLOCATED_LE)
		return e;

	/*
	 * Walk through the table, looking for the entry we're
	 * interested in and its predecessor.
	 */
	off = le32_to_cpu(rt->first_free);
	e = Add2Ptr(rt, off);

	if (off == vbo) {
		/* This is a match. */
		rt->first_free = *e;
		goto skip_looking;
	}

	/*
	 * Need to walk through the list looking for the predecessor
	 * of our entry.
	 */
	for (;;) {
		/* Remember the entry just found. */
		u32 last_off = off;
		__le32 *last_e = e;

		/* We should never run out of entries. */

		/* Look up the next entry in the list. */
		off = le32_to_cpu(*last_e);
		e = Add2Ptr(rt, off);

		/* If this is our match we are done. */
		if (off == vbo) {
			*last_e = *e;

			/*
			 * If this was the last free entry, we update
			 * last_free as well.
			 */
			if (le32_to_cpu(rt->last_free) == off)
				rt->last_free = cpu_to_le32(last_off);
			break;
		}
	}

skip_looking:
	/* If the list is now empty, we fix the last_free as well. */
	if (!rt->first_free)
		rt->last_free = 0;

	/* Zero this entry. */
	memset(e, 0, esize);
	*e = RESTART_ENTRY_ALLOCATED_LE;

	le16_add_cpu(&rt->total, 1);

	return e;
}

#define RESTART_SINGLE_PAGE_IO cpu_to_le16(0x0001)

#define NTFSLOG_WRAPPED 0x00000001
#define NTFSLOG_MULTIPLE_PAGE_IO 0x00000002
#define NTFSLOG_NO_LAST_LSN 0x00000004
#define NTFSLOG_REUSE_TAIL 0x00000010
#define NTFSLOG_NO_OLDEST_LSN 0x00000020

/* Helper struct to work with NTFS $LogFile. */
struct ntfs_log {
	struct ntfs_inode *ni;

	u32 l_size;
	u32 sys_page_size;
	u32 sys_page_mask;
	u32 page_size;
	u32 page_mask; // page_size - 1
	u8 page_bits;
	struct RECORD_PAGE_HDR *one_page_buf;

	struct RESTART_TABLE *open_attr_tbl;
	u32 transaction_id;
	u32 clst_per_page;

	u32 first_page;
	u32 next_page;
	u32 ra_off;
	u32 data_off;
	u32 restart_size;
	u32 data_size;
	u16 record_header_len;
	u64 seq_num;
	u32 seq_num_bits;
	u32 file_data_bits;
	u32 seq_num_mask; /* (8 << file_data_bits) - 1, i.e. l_size - 1 */

	struct RESTART_AREA *ra; /* In-memory image of the next restart area. */
	u32 ra_size; /* The usable size of the restart area. */

	/*
	 * If true, then the in-memory restart area is to be written
	 * to the first position on the disk.
	 */
	bool init_ra;
	bool set_dirty; /* True if we need to set dirty flag. */

	u64 oldest_lsn;

	u32 oldest_lsn_off;
	u64 last_lsn;

	u32 total_avail;
	u32 total_avail_pages;
	u32 total_undo_commit;
	u32 max_current_avail;
	u32 current_avail;
	u32 reserved;

	short major_ver;
	short minor_ver;

	u32 l_flags; /* See NTFSLOG_XXX */
	u32 current_openlog_count; /* On-disk value for open_log_count. */

	struct CLIENT_ID client_id;
	u32 client_undo_commit;
};

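/*
 * An lsn counts 8-byte units of the log file: the low file_data_bits form
 * a quadword offset within the file and the high seq_num_bits hold the
 * sequence (wrap) number. Shifting left by seq_num_bits discards the
 * sequence number; shifting back by (seq_num_bits - 3) turns the remaining
 * quadword index into a byte offset.
 */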
static inline u32 lsn_to_vbo(struct ntfs_log *log, const u64 lsn)
{
	u32 vbo = (lsn << log->seq_num_bits) >> (log->seq_num_bits - 3);

	return vbo;
}

/* Compute the offset in the log file of the next log page. */
static inline u32 next_page_off(struct ntfs_log *log, u32 off)
{
	off = (off & ~log->sys_page_mask) + log->page_size;
	return off >= log->l_size ? log->first_page : off;
}

static inline u32 lsn_to_page_off(struct ntfs_log *log, u64 lsn)
{
	return (((u32)lsn) << 3) & log->page_mask;
}

static inline u64 vbo_to_lsn(struct ntfs_log *log, u32 off, u64 Seq)
{
	return (off >> 3) + (Seq << log->file_data_bits);
}

static inline bool is_lsn_in_file(struct ntfs_log *log, u64 lsn)
{
	return lsn >= log->oldest_lsn &&
	       lsn <= le64_to_cpu(log->ra->current_lsn);
}

static inline u32 hdr_file_off(struct ntfs_log *log,
			       struct RECORD_PAGE_HDR *hdr)
{
	if (log->major_ver < 2)
		return le64_to_cpu(hdr->rhdr.lsn);

	return le32_to_cpu(hdr->file_off);
}

static inline u64 base_lsn(struct ntfs_log *log,
			   const struct RECORD_PAGE_HDR *hdr, u64 lsn)
{
	u64 h_lsn = le64_to_cpu(hdr->rhdr.lsn);
	u64 ret = (((h_lsn >> log->file_data_bits) +
		    (lsn < (lsn_to_vbo(log, h_lsn) & ~log->page_mask) ? 1 : 0))
		   << log->file_data_bits) +
		  ((((is_log_record_end(hdr) &&
		      h_lsn <= le64_to_cpu(hdr->record_hdr.last_end_lsn))
			     ? le16_to_cpu(hdr->record_hdr.next_record_off)
			     : log->page_size) +
		    lsn) >>
		   3);

	return ret;
}

static inline bool verify_client_lsn(struct ntfs_log *log,
				     const struct CLIENT_REC *client, u64 lsn)
{
	return lsn >= le64_to_cpu(client->oldest_lsn) &&
	       lsn <= le64_to_cpu(log->ra->current_lsn) && lsn;
}

struct restart_info {
	u64 last_lsn;
	struct RESTART_HDR *r_page;
	u32 vbo;
	bool chkdsk_was_run;
	bool valid_page;
	bool initialized;
	bool restart;
};

static int read_log_page(struct ntfs_log *log, u32 vbo,
			 struct RECORD_PAGE_HDR **buffer, bool *usa_error)
{
	int err = 0;
	u32 page_idx = vbo >> log->page_bits;
	u32 page_off = vbo & log->page_mask;
	u32 bytes = log->page_size - page_off;
	void *to_free = NULL;
	u32 page_vbo = page_idx << log->page_bits;
	struct RECORD_PAGE_HDR *page_buf;
	struct ntfs_inode *ni = log->ni;
	bool bBAAD;

	if (vbo >= log->l_size)
		return -EINVAL;

	if (!*buffer) {
		to_free = kmalloc(log->page_size, GFP_NOFS);
		if (!to_free)
			return -ENOMEM;
		*buffer = to_free;
	}

	page_buf = page_off ? log->one_page_buf : *buffer;

	err = ntfs_read_run_nb(ni->mi.sbi, &ni->file.run, page_vbo, page_buf,
			       log->page_size, NULL);
	if (err)
		goto out;

	if (page_buf->rhdr.sign != NTFS_FFFF_SIGNATURE)
		ntfs_fix_post_read(&page_buf->rhdr, PAGE_SIZE, false);

	if (page_buf != *buffer)
		memcpy(*buffer, Add2Ptr(page_buf, page_off), bytes);

	bBAAD = page_buf->rhdr.sign == NTFS_BAAD_SIGNATURE;

	if (usa_error)
		*usa_error = bBAAD;
	/* Check that the update sequence array for this page is valid. */
	/* If we don't allow errors, raise an error status. */
	else if (bBAAD)
		err = -EINVAL;

out:
	if (err && to_free) {
		kfree(to_free);
		*buffer = NULL;
	}

	return err;
}

/*
 * log_read_rst
 *
 * Walk through 512-byte aligned blocks of the file looking for a valid
 * restart page header. Stop the first time a valid page header is found.
 */
static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
			struct restart_info *info)
{
	u32 skip, vbo;
	struct RESTART_HDR *r_page = NULL;

	/* Determine which restart area we are looking for. */
	if (first) {
		vbo = 0;
		skip = 512;
	} else {
		vbo = 512;
		skip = 0;
	}

	/* Probe offset 0 (or 512), then successive power-of-two offsets, until we succeed. */
	for (; vbo < l_size; vbo = 2 * vbo + skip, skip = 0) {
		bool usa_error;
		bool brst, bchk;
		struct RESTART_AREA *ra;

		/* Read a page header at the current offset. */
		if (read_log_page(log, vbo, (struct RECORD_PAGE_HDR **)&r_page,
				  &usa_error)) {
			/* Ignore any errors. */
			continue;
		}

		/* Exit if the signature is a log record page. */
		if (r_page->rhdr.sign == NTFS_RCRD_SIGNATURE) {
			info->initialized = true;
			break;
		}

		brst = r_page->rhdr.sign == NTFS_RSTR_SIGNATURE;
		bchk = r_page->rhdr.sign == NTFS_CHKD_SIGNATURE;

		if (!bchk && !brst) {
			if (r_page->rhdr.sign != NTFS_FFFF_SIGNATURE) {
				/*
				 * Remember if the signature does not
				 * indicate uninitialized file.
				 */
				info->initialized = true;
			}
			continue;
		}

		ra = NULL;
		info->valid_page = false;
		info->initialized = true;
		info->vbo = vbo;

		/* If this is a valid page, check the restart area. */
		if (!is_rst_page_hdr_valid(vbo, r_page))
			goto check_result;
		ra = Add2Ptr(r_page, le16_to_cpu(r_page->ra_off));

		if (!is_rst_area_valid(r_page))
			goto check_result;

		/*
		 * We have a valid restart page header and restart area.
		 * If chkdsk was run or we have no clients then we have
		 * no more checking to do.
		 */
		if (bchk || ra->client_idx[1] == LFS_NO_CLIENT_LE) {
			info->valid_page = true;
			goto check_result;
		}

		if (is_client_area_valid(r_page, usa_error)) {
			info->valid_page = true;
			ra = Add2Ptr(r_page, le16_to_cpu(r_page->ra_off));
		}

check_result:
		/*
		 * If chkdsk was run then update the caller's
		 * values and return.
		 */
		if (r_page->rhdr.sign == NTFS_CHKD_SIGNATURE) {
			info->chkdsk_was_run = true;
			info->last_lsn = le64_to_cpu(r_page->rhdr.lsn);
			info->restart = true;
			info->r_page = r_page;
			return 0;
		}

		/*
		 * If we have a valid page then copy the values
		 * we need from it.
		 */
		if (info->valid_page) {
			info->last_lsn = le64_to_cpu(ra->current_lsn);
			info->restart = true;
			info->r_page = r_page;
			return 0;
		}
	}

	kfree(r_page);

	return 0;
}

/*
 * log_init_pg_hdr - Init @log from restart page header.
 */
static void log_init_pg_hdr(struct ntfs_log *log, u32 sys_page_size,
			    u32 page_size, u16 major_ver, u16 minor_ver)
{
	log->sys_page_size = sys_page_size;
	log->sys_page_mask = sys_page_size - 1;
	log->page_size = page_size;
	log->page_mask = page_size - 1;
	log->page_bits = blksize_bits(page_size);

	log->clst_per_page = log->page_size >> log->ni->mi.sbi->cluster_bits;
	if (!log->clst_per_page)
		log->clst_per_page = 1;

	log->first_page = major_ver >= 2
				  ? 0x22 * page_size
				  : ((sys_page_size << 1) + (page_size << 1));
	log->major_ver = major_ver;
	log->minor_ver = minor_ver;
}

/*
 * log_create - Init @log in cases when we don't have a restart area to use.
 */
static void log_create(struct ntfs_log *log, u32 l_size, const u64 last_lsn,
		       u32 open_log_count, bool wrapped, bool use_multi_page)
{
	log->l_size = l_size;
	/* All file offsets must be quadword aligned. */
	log->file_data_bits = blksize_bits(l_size) - 3;
	log->seq_num_mask = (8 << log->file_data_bits) - 1;
	log->seq_num_bits = sizeof(u64) * 8 - log->file_data_bits;
	log->seq_num = (last_lsn >> log->file_data_bits) + 2;
	log->next_page = log->first_page;
	log->oldest_lsn = log->seq_num << log->file_data_bits;
	log->oldest_lsn_off = 0;
	log->last_lsn = log->oldest_lsn;

	log->l_flags |= NTFSLOG_NO_LAST_LSN | NTFSLOG_NO_OLDEST_LSN;

	/* Set the correct flags for the I/O and indicate if we have wrapped. */
	if (wrapped)
		log->l_flags |= NTFSLOG_WRAPPED;

	if (use_multi_page)
		log->l_flags |= NTFSLOG_MULTIPLE_PAGE_IO;

	/* Compute the log page values. */
	log->data_off = ALIGN(
		offsetof(struct RECORD_PAGE_HDR, fixups) +
			sizeof(short) * ((log->page_size >> SECTOR_SHIFT) + 1),
		8);
	log->data_size = log->page_size - log->data_off;
	log->record_header_len = sizeof(struct LFS_RECORD_HDR);

	/* Remember the different page sizes for reservation. */
	log->reserved = log->data_size - log->record_header_len;

	/* Compute the restart page values. */
	log->ra_off = ALIGN(
		offsetof(struct RESTART_HDR, fixups) +
			sizeof(short) *
				((log->sys_page_size >> SECTOR_SHIFT) + 1),
		8);
	log->restart_size = log->sys_page_size - log->ra_off;
	log->ra_size = struct_size(log->ra, clients, 1);
	log->current_openlog_count = open_log_count;

	/*
	 * The total available log file space is the number of
	 * log file pages times the space available on each page.
	 */
	log->total_avail_pages = log->l_size - log->first_page;
	log->total_avail = log->total_avail_pages >> log->page_bits;

	/*
	 * We assume that we can't use the end of the page less than
	 * the file record size.
	 * Then we won't need to reserve more than the caller asks for.
	 */
	log->max_current_avail = log->total_avail * log->reserved;
	log->total_avail = log->total_avail * log->data_size;
	log->current_avail = log->max_current_avail;
}

/*
 * log_create_ra - Fill a restart area from the values stored in @log.
 */
static struct RESTART_AREA *log_create_ra(struct ntfs_log *log)
{
	struct CLIENT_REC *cr;
	struct RESTART_AREA *ra = kzalloc(log->restart_size, GFP_NOFS);

	if (!ra)
		return NULL;

	ra->current_lsn = cpu_to_le64(log->last_lsn);
	ra->log_clients = cpu_to_le16(1);
	ra->client_idx[1] = LFS_NO_CLIENT_LE;
	if (log->l_flags & NTFSLOG_MULTIPLE_PAGE_IO)
		ra->flags = RESTART_SINGLE_PAGE_IO;
	ra->seq_num_bits = cpu_to_le32(log->seq_num_bits);
	ra->ra_len = cpu_to_le16(log->ra_size);
	ra->client_off = cpu_to_le16(offsetof(struct RESTART_AREA, clients));
	ra->l_size = cpu_to_le64(log->l_size);
	ra->rec_hdr_len = cpu_to_le16(log->record_header_len);
	ra->data_off = cpu_to_le16(log->data_off);
	ra->open_log_count = cpu_to_le32(log->current_openlog_count + 1);

	cr = ra->clients;

	cr->prev_client = LFS_NO_CLIENT_LE;
	cr->next_client = LFS_NO_CLIENT_LE;

	return ra;
}
1403
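/*
 * final_log_off - Compute the file offset of the last byte of the log
 * record that starts at @lsn and carries @data_len bytes of client data.
 */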
static u32 final_log_off(struct ntfs_log *log, u64 lsn, u32 data_len)
{
	u32 base_vbo = lsn << 3;
	u32 final_log_off = (base_vbo & log->seq_num_mask) & ~log->page_mask;
	u32 page_off = base_vbo & log->page_mask;
	u32 tail = log->page_size - page_off;

	page_off -= 1;

	/* Add the length of the header. */
	data_len += log->record_header_len;

	/*
	 * If this lsn is contained in this log page we are done.
	 * Otherwise we need to walk through several log pages.
	 */
	if (data_len > tail) {
		data_len -= tail;
		tail = log->data_size;
		page_off = log->data_off - 1;

		for (;;) {
			final_log_off = next_page_off(log, final_log_off);

			/*
			 * We are done if the remaining bytes
			 * fit on this page.
			 */
			if (data_len <= tail)
				break;
			data_len -= tail;
		}
	}

	/*
	 * We add the remaining bytes to our starting position on this page
	 * and then add that value to the file offset of this log page.
	 */
	return final_log_off + data_len + page_off;
}

static int next_log_lsn(struct ntfs_log *log, const struct LFS_RECORD_HDR *rh,
			u64 *lsn)
{
	int err;
	u64 this_lsn = le64_to_cpu(rh->this_lsn);
	u32 vbo = lsn_to_vbo(log, this_lsn);
	u32 end =
		final_log_off(log, this_lsn, le32_to_cpu(rh->client_data_len));
	u32 hdr_off = end & ~log->sys_page_mask;
	u64 seq = this_lsn >> log->file_data_bits;
	struct RECORD_PAGE_HDR *page = NULL;

	/* Remember if we wrapped. */
	if (end <= vbo)
		seq += 1;

	/* Log page header for this page. */
	err = read_log_page(log, hdr_off, &page, NULL);
	if (err)
		return err;

	/*
	 * If the lsn we were given was not the last lsn on this page,
	 * then the starting offset for the next lsn is on a quad word
	 * boundary following the last file offset for the current lsn.
	 * Otherwise the file offset is the start of the data on the next page.
	 */
	if (this_lsn == le64_to_cpu(page->rhdr.lsn)) {
		/* If we wrapped, we need to increment the sequence number. */
		hdr_off = next_page_off(log, hdr_off);
		if (hdr_off == log->first_page)
			seq += 1;

		vbo = hdr_off + log->data_off;
	} else {
		vbo = ALIGN(end, 8);
	}

	/* Compute the lsn based on the file offset and the sequence count. */
	*lsn = vbo_to_lsn(log, vbo, seq);

	/*
	 * If this lsn is within the legal range for the file, keep it.
	 * Otherwise zero *lsn to indicate that there are no more lsns.
	 */
	if (!is_lsn_in_file(log, *lsn))
		*lsn = 0;

	kfree(page);

	return 0;
}

/*
 * current_log_avail - Calculate the number of bytes available for log records.
 */
static u32 current_log_avail(struct ntfs_log *log)
{
	u32 oldest_off, next_free_off, free_bytes;

	if (log->l_flags & NTFSLOG_NO_LAST_LSN) {
		/* The entire file is available. */
		return log->max_current_avail;
	}

	/*
	 * If there is a last lsn in the restart area then we know that we
	 * will have to compute the free range.
	 * If there is no oldest lsn then start at the first page of the file.
	 */
	oldest_off = (log->l_flags & NTFSLOG_NO_OLDEST_LSN)
			     ? log->first_page
			     : (log->oldest_lsn_off & ~log->sys_page_mask);

	/*
	 * We will use the next log page offset to compute the next free page.
	 * If we are going to reuse this page go to the next page.
	 * If we are at the first page then use the end of the file.
	 */
	next_free_off = (log->l_flags & NTFSLOG_REUSE_TAIL)
				? log->next_page + log->page_size
				: log->next_page == log->first_page
					  ? log->l_size
					  : log->next_page;

	/* If the two offsets are the same then there is no available space. */
	if (oldest_off == next_free_off)
		return 0;
	/*
	 * If the free offset follows the oldest offset then subtract
	 * this range from the total available pages.
	 */
	free_bytes =
		oldest_off < next_free_off
			? log->total_avail_pages - (next_free_off - oldest_off)
			: oldest_off - next_free_off;

	free_bytes >>= log->page_bits;
	return free_bytes * log->reserved;
}

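/*
 * check_subseq_log_page - Check whether the page at @vbo was written at or
 * after sequence number @seq, i.e. by a subsequent pass over the log file.
 */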
static bool check_subseq_log_page(struct ntfs_log *log,
				  const struct RECORD_PAGE_HDR *rp, u32 vbo,
				  u64 seq)
{
	u64 lsn_seq;
	const struct NTFS_RECORD_HEADER *rhdr = &rp->rhdr;
	u64 lsn = le64_to_cpu(rhdr->lsn);

	if (rhdr->sign == NTFS_FFFF_SIGNATURE || !rhdr->sign)
		return false;

1557         /*
1558          * If the last lsn on the page occurs was written after the page
1559          * that caused the original error then we have a fatal error.
1560          */
1561         lsn_seq = lsn >> log->file_data_bits;
1562
1563         /*
1564          * If the sequence number for the lsn on the page is equal to or
1565          * greater than the lsn we expect, then this is a subsequent write.
1566          */
1567         return lsn_seq >= seq ||
1568                (lsn_seq == seq - 1 && log->first_page == vbo &&
1569                 vbo != (lsn_to_vbo(log, lsn) & ~log->page_mask));
1570 }
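
/*
 * Editorial aside: a concrete reading of the predicate above, with
 * assumed numbers. Expecting sequence 7, a page whose lsn decodes to
 * sequence 7 or higher must have been written at or after the wrap we
 * are scanning, i.e. a subsequent write. The seq - 1 escape accepts the
 * first page of the file while it still carries data from the previous
 * wrap, provided the lsn's own page is not the page being probed.
 */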
1571
1572 /*
1573  * last_log_lsn
1574  *
1575  * Walks through the log pages for a file, searching for the
1576  * last log page written to the file.
1577  */
1578 static int last_log_lsn(struct ntfs_log *log)
1579 {
1580         int err;
1581         bool usa_error = false;
1582         bool replace_page = false;
1583         bool reuse_page = log->l_flags & NTFSLOG_REUSE_TAIL;
1584         bool wrapped_file, wrapped;
1585
1586         u32 page_cnt = 1, page_pos = 1;
1587         u32 page_off = 0, page_off1 = 0, saved_off = 0;
1588         u32 final_off, second_off, final_off_prev = 0, second_off_prev = 0;
1589         u32 first_file_off = 0, second_file_off = 0;
1590         u32 part_io_count = 0;
1591         u32 tails = 0;
1592         u32 this_off, curpage_off, nextpage_off, remain_pages;
1593
1594         u64 expected_seq, seq_base = 0, lsn_base = 0;
1595         u64 best_lsn, best_lsn1, best_lsn2;
1596         u64 lsn_cur, lsn1, lsn2;
1597         u64 last_ok_lsn = reuse_page ? log->last_lsn : 0;
1598
1599         u16 cur_pos, best_page_pos;
1600
1601         struct RECORD_PAGE_HDR *page = NULL;
1602         struct RECORD_PAGE_HDR *tst_page = NULL;
1603         struct RECORD_PAGE_HDR *first_tail = NULL;
1604         struct RECORD_PAGE_HDR *second_tail = NULL;
1605         struct RECORD_PAGE_HDR *tail_page = NULL;
1606         struct RECORD_PAGE_HDR *second_tail_prev = NULL;
1607         struct RECORD_PAGE_HDR *first_tail_prev = NULL;
1608         struct RECORD_PAGE_HDR *page_bufs = NULL;
1609         struct RECORD_PAGE_HDR *best_page;
1610
1611         if (log->major_ver >= 2) {
1612                 final_off = 0x02 * log->page_size;
1613                 second_off = 0x12 * log->page_size;
1614
1615                 // 0x10 == 0x12 - 0x2
1616                 page_bufs = kmalloc(log->page_size * 0x10, GFP_NOFS);
1617                 if (!page_bufs)
1618                         return -ENOMEM;
1619         } else {
1620                 second_off = log->first_page - log->page_size;
1621                 final_off = second_off - log->page_size;
1622         }
1623
1624 next_tail:
1625         /* Read second tail page (at pos 3/0x12000). */
1626         if (read_log_page(log, second_off, &second_tail, &usa_error) ||
1627             usa_error || second_tail->rhdr.sign != NTFS_RCRD_SIGNATURE) {
1628                 kfree(second_tail);
1629                 second_tail = NULL;
1630                 second_file_off = 0;
1631                 lsn2 = 0;
1632         } else {
1633                 second_file_off = hdr_file_off(log, second_tail);
1634                 lsn2 = le64_to_cpu(second_tail->record_hdr.last_end_lsn);
1635         }
1636
1637         /* Read first tail page (at pos 2/0x2000). */
1638         if (read_log_page(log, final_off, &first_tail, &usa_error) ||
1639             usa_error || first_tail->rhdr.sign != NTFS_RCRD_SIGNATURE) {
1640                 kfree(first_tail);
1641                 first_tail = NULL;
1642                 first_file_off = 0;
1643                 lsn1 = 0;
1644         } else {
1645                 first_file_off = hdr_file_off(log, first_tail);
1646                 lsn1 = le64_to_cpu(first_tail->record_hdr.last_end_lsn);
1647         }
1648
1649         if (log->major_ver < 2) {
1650                 int best_page;
1651
1652                 first_tail_prev = first_tail;
1653                 final_off_prev = first_file_off;
1654                 second_tail_prev = second_tail;
1655                 second_off_prev = second_file_off;
1656                 tails = 1;
1657
1658                 if (!first_tail && !second_tail)
1659                         goto tail_read;
1660
1661                 if (first_tail && second_tail)
1662                         best_page = lsn1 < lsn2 ? 1 : 0;
1663                 else if (first_tail)
1664                         best_page = 0;
1665                 else
1666                         best_page = 1;
1667
1668                 page_off = best_page ? second_file_off : first_file_off;
1669                 seq_base = (best_page ? lsn2 : lsn1) >> log->file_data_bits;
1670                 goto tail_read;
1671         }
1672
1673         best_lsn1 = first_tail ? base_lsn(log, first_tail, first_file_off) : 0;
1674         best_lsn2 =
1675                 second_tail ? base_lsn(log, second_tail, second_file_off) : 0;
1676
1677         if (first_tail && second_tail) {
1678                 if (best_lsn1 > best_lsn2) {
1679                         best_lsn = best_lsn1;
1680                         best_page = first_tail;
1681                         this_off = first_file_off;
1682                 } else {
1683                         best_lsn = best_lsn2;
1684                         best_page = second_tail;
1685                         this_off = second_file_off;
1686                 }
1687         } else if (first_tail) {
1688                 best_lsn = best_lsn1;
1689                 best_page = first_tail;
1690                 this_off = first_file_off;
1691         } else if (second_tail) {
1692                 best_lsn = best_lsn2;
1693                 best_page = second_tail;
1694                 this_off = second_file_off;
1695         } else {
1696                 goto tail_read;
1697         }
1698
1699         best_page_pos = le16_to_cpu(best_page->page_pos);
1700
1701         if (!tails) {
1702                 if (best_page_pos == page_pos) {
1703                         seq_base = best_lsn >> log->file_data_bits;
1704                         saved_off = page_off = le32_to_cpu(best_page->file_off);
1705                         lsn_base = best_lsn;
1706
1707                         memmove(page_bufs, best_page, log->page_size);
1708
1709                         page_cnt = le16_to_cpu(best_page->page_count);
1710                         if (page_cnt > 1)
1711                                 page_pos += 1;
1712
1713                         tails = 1;
1714                 }
1715         } else if (seq_base == (best_lsn >> log->file_data_bits) &&
1716                    saved_off + log->page_size == this_off &&
1717                    lsn_base < best_lsn &&
1718                    (page_pos != page_cnt || best_page_pos == page_pos ||
1719                     best_page_pos == 1) &&
1720                    (page_pos >= page_cnt || best_page_pos == page_pos)) {
1721                 u16 bppc = le16_to_cpu(best_page->page_count);
1722
1723                 saved_off += log->page_size;
1724                 lsn_base = best_lsn;
1725
1726                 memmove(Add2Ptr(page_bufs, tails * log->page_size), best_page,
1727                         log->page_size);
1728
1729                 tails += 1;
1730
1731                 if (best_page_pos != bppc) {
1732                         page_cnt = bppc;
1733                         page_pos = best_page_pos;
1734
1735                         if (page_cnt > 1)
1736                                 page_pos += 1;
1737                 } else {
1738                         page_pos = page_cnt = 1;
1739                 }
1740         } else {
1741                 kfree(first_tail);
1742                 kfree(second_tail);
1743                 goto tail_read;
1744         }
1745
1746         kfree(first_tail_prev);
1747         first_tail_prev = first_tail;
1748         final_off_prev = first_file_off;
1749         first_tail = NULL;
1750
1751         kfree(second_tail_prev);
1752         second_tail_prev = second_tail;
1753         second_off_prev = second_file_off;
1754         second_tail = NULL;
1755
1756         final_off += log->page_size;
1757         second_off += log->page_size;
1758
1759         if (tails < 0x10)
1760                 goto next_tail;
1761 tail_read:
1762         first_tail = first_tail_prev;
1763         final_off = final_off_prev;
1764
1765         second_tail = second_tail_prev;
1766         second_off = second_off_prev;
1767
1768         page_cnt = page_pos = 1;
1769
1770         curpage_off = seq_base == log->seq_num ? min(log->next_page, page_off)
1771                                                : log->next_page;
1772
1773         wrapped_file =
1774                 curpage_off == log->first_page &&
1775                 !(log->l_flags & (NTFSLOG_NO_LAST_LSN | NTFSLOG_REUSE_TAIL));
1776
1777         expected_seq = wrapped_file ? (log->seq_num + 1) : log->seq_num;
1778
1779         nextpage_off = curpage_off;
1780
1781 next_page:
1782         tail_page = NULL;
1783         /* Read the next log page. */
1784         err = read_log_page(log, curpage_off, &page, &usa_error);
1785
1786         /* Compute the next log page offset in the file. */
1787         nextpage_off = next_page_off(log, curpage_off);
1788         wrapped = nextpage_off == log->first_page;
1789
1790         if (tails > 1) {
1791                 struct RECORD_PAGE_HDR *cur_page =
1792                         Add2Ptr(page_bufs, curpage_off - page_off);
1793
1794                 if (curpage_off == saved_off) {
1795                         tail_page = cur_page;
1796                         goto use_tail_page;
1797                 }
1798
1799                 if (page_off > curpage_off || curpage_off >= saved_off)
1800                         goto use_tail_page;
1801
1802                 if (page_off1)
1803                         goto use_cur_page;
1804
1805                 if (!err && !usa_error &&
1806                     page->rhdr.sign == NTFS_RCRD_SIGNATURE &&
1807                     cur_page->rhdr.lsn == page->rhdr.lsn &&
1808                     cur_page->record_hdr.next_record_off ==
1809                             page->record_hdr.next_record_off &&
1810                     ((page_pos == page_cnt &&
1811                       le16_to_cpu(page->page_pos) == 1) ||
1812                      (page_pos != page_cnt &&
1813                       le16_to_cpu(page->page_pos) == page_pos + 1 &&
1814                       le16_to_cpu(page->page_count) == page_cnt))) {
1815                         cur_page = NULL;
1816                         goto use_tail_page;
1817                 }
1818
1819                 page_off1 = page_off;
1820
1821 use_cur_page:
1822
1823                 lsn_cur = le64_to_cpu(cur_page->rhdr.lsn);
1824
1825                 if (last_ok_lsn !=
1826                             le64_to_cpu(cur_page->record_hdr.last_end_lsn) &&
1827                     ((lsn_cur >> log->file_data_bits) +
1828                      ((curpage_off <
1829                        (lsn_to_vbo(log, lsn_cur) & ~log->page_mask))
1830                               ? 1
1831                               : 0)) != expected_seq) {
1832                         goto check_tail;
1833                 }
1834
1835                 if (!is_log_record_end(cur_page)) {
1836                         tail_page = NULL;
1837                         last_ok_lsn = lsn_cur;
1838                         goto next_page_1;
1839                 }
1840
1841                 log->seq_num = expected_seq;
1842                 log->l_flags &= ~NTFSLOG_NO_LAST_LSN;
1843                 log->last_lsn = le64_to_cpu(cur_page->record_hdr.last_end_lsn);
1844                 log->ra->current_lsn = cur_page->record_hdr.last_end_lsn;
1845
1846                 if (log->record_header_len <=
1847                     log->page_size -
1848                             le16_to_cpu(cur_page->record_hdr.next_record_off)) {
1849                         log->l_flags |= NTFSLOG_REUSE_TAIL;
1850                         log->next_page = curpage_off;
1851                 } else {
1852                         log->l_flags &= ~NTFSLOG_REUSE_TAIL;
1853                         log->next_page = nextpage_off;
1854                 }
1855
1856                 if (wrapped_file)
1857                         log->l_flags |= NTFSLOG_WRAPPED;
1858
1859                 last_ok_lsn = le64_to_cpu(cur_page->record_hdr.last_end_lsn);
1860                 goto next_page_1;
1861         }
1862
1863         /*
1864          * If we are at the expected first page of a transfer, check to see
1865          * if either tail copy is at this offset.
1866          * If this page is the last page of a transfer, check if we wrote
1867          * a subsequent tail copy.
1868          */
1869         if (page_cnt == page_pos || page_cnt == page_pos + 1) {
1870                 /*
1871                  * Check if the offset matches either the first or second
1872                  * tail copy. It is possible it will match both.
1873                  */
1874                 if (curpage_off == final_off)
1875                         tail_page = first_tail;
1876
1877                 /*
1878                  * If we already matched on the first page then
1879                  * check the ending lsn's.
1880                  */
1881                 if (curpage_off == second_off) {
1882                         if (!tail_page ||
1883                             (second_tail &&
1884                              le64_to_cpu(second_tail->record_hdr.last_end_lsn) >
1885                                      le64_to_cpu(first_tail->record_hdr
1886                                                          .last_end_lsn))) {
1887                                 tail_page = second_tail;
1888                         }
1889                 }
1890         }
1891
1892 use_tail_page:
1893         if (tail_page) {
1894                 /* We have a candidate for a tail copy. */
1895                 lsn_cur = le64_to_cpu(tail_page->record_hdr.last_end_lsn);
1896
1897                 if (last_ok_lsn < lsn_cur) {
1898                         /*
1899                          * If the sequence number is not expected,
1900                          * then don't use the tail copy.
1901                          */
1902                         if (expected_seq != (lsn_cur >> log->file_data_bits))
1903                                 tail_page = NULL;
1904                 } else if (last_ok_lsn > lsn_cur) {
1905                         /*
1906                          * If the last lsn is greater than the one on
1907                          * this page then forget this tail.
1908                          */
1909                         tail_page = NULL;
1910                 }
1911         }
1912
1913         /*
1914          * If we have an error on the current page,
1915          * we will break out of this loop.
1916          */
1917         if (err || usa_error)
1918                 goto check_tail;
1919
1920         /*
1921          * Done if the last lsn on this page doesn't match the previous known
1922          * last lsn or the sequence number is not expected.
1923          */
1924         lsn_cur = le64_to_cpu(page->rhdr.lsn);
1925         if (last_ok_lsn != lsn_cur &&
1926             expected_seq != (lsn_cur >> log->file_data_bits)) {
1927                 goto check_tail;
1928         }
1929
1930         /*
1931          * Check that the page position and page count values are correct.
1932          * If this is the first page of a transfer the position must be 1
1933          * and the count will be unknown.
1934          */
1935         if (page_cnt == page_pos) {
1936                 if (page->page_pos != cpu_to_le16(1) &&
1937                     (!reuse_page || page->page_pos != page->page_count)) {
1938                         /*
1939                          * If the current page is the first page we are
1940                          * looking at and we are reusing this page then
1941                          * it can be either the first or last page of a
1942                          * transfer. Otherwise it can only be the first.
1943                          */
1944                         goto check_tail;
1945                 }
1946         } else if (le16_to_cpu(page->page_count) != page_cnt ||
1947                    le16_to_cpu(page->page_pos) != page_pos + 1) {
1948                 /*
1949                  * The page position better be 1 more than the last page
1950                  * position and the page count better match.
1951                  */
1952                 goto check_tail;
1953         }
1954
1955         /*
1956          * We have a valid page in the file and may have a valid page
1957          * in the tail copy area.
1958          * If the tail page was written after the page in the file then
1959          * break out of the loop.
1960          */
1961         if (tail_page &&
1962             le64_to_cpu(tail_page->record_hdr.last_end_lsn) > lsn_cur) {
1963                 /* Remember if we will replace the page. */
1964                 replace_page = true;
1965                 goto check_tail;
1966         }
1967
1968         tail_page = NULL;
1969
1970         if (is_log_record_end(page)) {
1971                 /*
1972                  * Since we have read this page we know the sequence number
1973                  * is the same as our expected value.
1974                  */
1975                 log->seq_num = expected_seq;
1976                 log->last_lsn = le64_to_cpu(page->record_hdr.last_end_lsn);
1977                 log->ra->current_lsn = page->record_hdr.last_end_lsn;
1978                 log->l_flags &= ~NTFSLOG_NO_LAST_LSN;
1979
1980                 /*
1981                  * If there is room on this page for another header then
1982                  * remember we want to reuse the page.
1983                  */
1984                 if (log->record_header_len <=
1985                     log->page_size -
1986                             le16_to_cpu(page->record_hdr.next_record_off)) {
1987                         log->l_flags |= NTFSLOG_REUSE_TAIL;
1988                         log->next_page = curpage_off;
1989                 } else {
1990                         log->l_flags &= ~NTFSLOG_REUSE_TAIL;
1991                         log->next_page = nextpage_off;
1992                 }
1993
1994                 /* Remember if we wrapped the log file. */
1995                 if (wrapped_file)
1996                         log->l_flags |= NTFSLOG_WRAPPED;
1997         }
1998
1999         /*
2000          * Remember the last page count and position.
2001          * Also remember the last known lsn.
2002          */
2003         page_cnt = le16_to_cpu(page->page_count);
2004         page_pos = le16_to_cpu(page->page_pos);
2005         last_ok_lsn = le64_to_cpu(page->rhdr.lsn);
2006
2007 next_page_1:
2008
2009         if (wrapped) {
2010                 expected_seq += 1;
2011                 wrapped_file = 1;
2012         }
2013
2014         curpage_off = nextpage_off;
2015         kfree(page);
2016         page = NULL;
2017         reuse_page = 0;
2018         goto next_page;
2019
2020 check_tail:
2021         if (tail_page) {
2022                 log->seq_num = expected_seq;
2023                 log->last_lsn = le64_to_cpu(tail_page->record_hdr.last_end_lsn);
2024                 log->ra->current_lsn = tail_page->record_hdr.last_end_lsn;
2025                 log->l_flags &= ~NTFSLOG_NO_LAST_LSN;
2026
2027                 if (log->page_size -
2028                             le16_to_cpu(
2029                                     tail_page->record_hdr.next_record_off) >=
2030                     log->record_header_len) {
2031                         log->l_flags |= NTFSLOG_REUSE_TAIL;
2032                         log->next_page = curpage_off;
2033                 } else {
2034                         log->l_flags &= ~NTFSLOG_REUSE_TAIL;
2035                         log->next_page = nextpage_off;
2036                 }
2037
2038                 if (wrapped)
2039                         log->l_flags |= NTFSLOG_WRAPPED;
2040         }
2041
2042         /* Remember that the partial IO will start at the next page. */
2043         second_off = nextpage_off;
2044
2045         /*
2046          * If the next page is the first page of the file then update
2047          * the sequence number for log records which begin on the next page.
2048          */
2049         if (wrapped)
2050                 expected_seq += 1;
2051
2052         /*
2053          * If we have a tail copy or are performing single page I/O we can
2054          * immediately look at the next page.
2055          */
2056         if (replace_page || (log->ra->flags & RESTART_SINGLE_PAGE_IO)) {
2057                 page_cnt = 2;
2058                 page_pos = 1;
2059                 goto check_valid;
2060         }
2061
2062         if (page_pos != page_cnt)
2063                 goto check_valid;
2064         /*
2065          * If the next page causes us to wrap to the beginning of the log
2066          * file then we know which page to check next.
2067          */
2068         if (wrapped) {
2069                 page_cnt = 2;
2070                 page_pos = 1;
2071                 goto check_valid;
2072         }
2073
2074         cur_pos = 2;
2075
2076 next_test_page:
2077         kfree(tst_page);
2078         tst_page = NULL;
2079
2080         /* Walk through the file, reading log pages. */
2081         err = read_log_page(log, nextpage_off, &tst_page, &usa_error);
2082
2083         /*
2084          * If we get a USA error then assume that we correctly found
2085          * the end of the original transfer.
2086          */
2087         if (usa_error)
2088                 goto file_is_valid;
2089
2090         /*
2091          * If we were able to read the page, we examine it to see if it
2092          * is the same or a different IO block.
2093          */
2094         if (err)
2095                 goto next_test_page_1;
2096
2097         if (le16_to_cpu(tst_page->page_pos) == cur_pos &&
2098             check_subseq_log_page(log, tst_page, nextpage_off, expected_seq)) {
2099                 page_cnt = le16_to_cpu(tst_page->page_count) + 1;
2100                 page_pos = le16_to_cpu(tst_page->page_pos);
2101                 goto check_valid;
2102         } else {
2103                 goto file_is_valid;
2104         }
2105
2106 next_test_page_1:
2107
2108         nextpage_off = next_page_off(log, curpage_off);
2109         wrapped = nextpage_off == log->first_page;
2110
2111         if (wrapped) {
2112                 expected_seq += 1;
2113                 page_cnt = 2;
2114                 page_pos = 1;
2115         }
2116
2117         cur_pos += 1;
2118         part_io_count += 1;
2119         if (!wrapped)
2120                 goto next_test_page;
2121
2122 check_valid:
2123         /* Skip over the remaining pages of this transfer. */
2124         remain_pages = page_cnt - page_pos - 1;
2125         part_io_count += remain_pages;
2126
2127         while (remain_pages--) {
2128                 nextpage_off = next_page_off(log, curpage_off);
2129                 wrapped = nextpage_off == log->first_page;
2130
2131                 if (wrapped)
2132                         expected_seq += 1;
2133         }
2134
2135         /* Call our routine to check this log page. */
2136         kfree(tst_page);
2137         tst_page = NULL;
2138
2139         err = read_log_page(log, nextpage_off, &tst_page, &usa_error);
2140         if (!err && !usa_error &&
2141             check_subseq_log_page(log, tst_page, nextpage_off, expected_seq)) {
2142                 err = -EINVAL;
2143                 goto out;
2144         }
2145
2146 file_is_valid:
2147
2148         /* We have a valid file. */
2149         if (page_off1 || tail_page) {
2150                 struct RECORD_PAGE_HDR *tmp_page;
2151
2152                 if (sb_rdonly(log->ni->mi.sbi->sb)) {
2153                         err = -EROFS;
2154                         goto out;
2155                 }
2156
2157                 if (page_off1) {
2158                         tmp_page = Add2Ptr(page_bufs, page_off1 - page_off);
2159                         tails -= (page_off1 - page_off) / log->page_size;
2160                         if (!tail_page)
2161                                 tails -= 1;
2162                 } else {
2163                         tmp_page = tail_page;
2164                         tails = 1;
2165                 }
2166
2167                 while (tails--) {
2168                         u64 off = hdr_file_off(log, tmp_page);
2169
2170                         if (!page) {
2171                                 page = kmalloc(log->page_size, GFP_NOFS);
2172                                 if (!page) {
2173                                         err = -ENOMEM;
2174                                         goto out;
2175                                 }
2176                         }
2175
2176                         /*
2177                          * Build a corrected copy: copy the data from this
2178                          * page into it and flush it to disk.
2179                          */
2180                         memcpy(page, tmp_page, log->page_size);
2181
2182                         /* Fill the last flushed lsn value and flush the page. */
2183                         if (log->major_ver < 2)
2184                                 page->rhdr.lsn = page->record_hdr.last_end_lsn;
2185                         else
2186                                 page->file_off = 0;
2187
2188                         page->page_pos = page->page_count = cpu_to_le16(1);
2189
2190                         ntfs_fix_pre_write(&page->rhdr, log->page_size);
2191
2192                         err = ntfs_sb_write_run(log->ni->mi.sbi,
2193                                                 &log->ni->file.run, off, page,
2194                                                 log->page_size, 0);
2195
2196                         if (err)
2197                                 goto out;
2198
2199                         if (part_io_count && second_off == off) {
2200                                 second_off += log->page_size;
2201                                 part_io_count -= 1;
2202                         }
2203
2204                         tmp_page = Add2Ptr(tmp_page, log->page_size);
2205                 }
2206         }
2207
2208         if (part_io_count) {
2209                 if (sb_rdonly(log->ni->mi.sbi->sb)) {
2210                         err = -EROFS;
2211                         goto out;
2212                 }
2213         }
2214
2215 out:
2216         kfree(second_tail);
2217         kfree(first_tail);
2218         kfree(page);
2219         kfree(tst_page);
2220         kfree(page_bufs);
2221
2222         return err;
2223 }
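
/*
 * Editorial aside: the assumed on-disk layout behind the tail handling
 * in last_log_lsn(), read off the offsets used above for a 4K-page log.
 * In a 2.x log the two tail-copy streams begin at pages 0x02 and 0x12
 * (offsets 0x2000 and 0x12000) and run for 0x10 pages each, which is
 * why page_bufs holds 0x10 pages and the scan stops at tails == 0x10.
 * In a 1.x log there are only two single tail pages, sitting
 * immediately below log->first_page. These counts are taken from the
 * code, not from a format specification.
 */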
2224
2225 /*
2226  * read_log_rec_buf - Copy a log record from the file to a buffer.
2227  *
2228  * The log record may span several log pages and may even wrap the file.
2229  */
2230 static int read_log_rec_buf(struct ntfs_log *log,
2231                             const struct LFS_RECORD_HDR *rh, void *buffer)
2232 {
2233         int err;
2234         struct RECORD_PAGE_HDR *ph = NULL;
2235         u64 lsn = le64_to_cpu(rh->this_lsn);
2236         u32 vbo = lsn_to_vbo(log, lsn) & ~log->page_mask;
2237         u32 off = lsn_to_page_off(log, lsn) + log->record_header_len;
2238         u32 data_len = le32_to_cpu(rh->client_data_len);
2239
2240         /*
2241          * While there are more bytes to transfer,
2242          * we continue to attempt to perform the read.
2243          */
2244         for (;;) {
2245                 bool usa_error;
2246                 u32 tail = log->page_size - off;
2247
2248                 if (tail >= data_len)
2249                         tail = data_len;
2250
2251                 data_len -= tail;
2252
2253                 err = read_log_page(log, vbo, &ph, &usa_error);
2254                 if (err)
2255                         goto out;
2256
2257                 /*
2258                  * The last lsn on this page better be greater than or equal
2259                  * to the lsn we are copying.
2260                  */
2261                 if (lsn > le64_to_cpu(ph->rhdr.lsn)) {
2262                         err = -EINVAL;
2263                         goto out;
2264                 }
2265
2266                 memcpy(buffer, Add2Ptr(ph, off), tail);
2267
2268                 /* If there are no more bytes to transfer, we exit the loop. */
2269                 if (!data_len) {
2270                         if (!is_log_record_end(ph) ||
2271                             lsn > le64_to_cpu(ph->record_hdr.last_end_lsn)) {
2272                                 err = -EINVAL;
2273                                 goto out;
2274                         }
2275                         break;
2276                 }
2277
2278                 if (ph->rhdr.lsn == ph->record_hdr.last_end_lsn ||
2279                     lsn > le64_to_cpu(ph->rhdr.lsn)) {
2280                         err = -EINVAL;
2281                         goto out;
2282                 }
2283
2284                 vbo = next_page_off(log, vbo);
2285                 off = log->data_off;
2286
2287                 /*
2288                  * Adjust our pointer into the user's buffer to transfer
2289                  * the next block to.
2290                  */
2291                 buffer = Add2Ptr(buffer, tail);
2292         }
2293
2294 out:
2295         kfree(ph);
2296         return err;
2297 }
2298
2299 static int read_rst_area(struct ntfs_log *log, struct NTFS_RESTART **rst_,
2300                          u64 *lsn)
2301 {
2302         int err;
2303         struct LFS_RECORD_HDR *rh = NULL;
2304         const struct CLIENT_REC *cr =
2305                 Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off));
2306         u64 lsnr, lsnc = le64_to_cpu(cr->restart_lsn);
2307         u32 len;
2308         struct NTFS_RESTART *rst;
2309
2310         *lsn = 0;
2311         *rst_ = NULL;
2312
2313         /* If the client doesn't have a restart area, go ahead and exit now. */
2314         if (!lsnc)
2315                 return 0;
2316
2317         err = read_log_page(log, lsn_to_vbo(log, lsnc),
2318                             (struct RECORD_PAGE_HDR **)&rh, NULL);
2319         if (err)
2320                 return err;
2321
2322         rst = NULL;
2323         lsnr = le64_to_cpu(rh->this_lsn);
2324
2325         if (lsnc != lsnr) {
2326                 /* If the lsn values don't match, then the disk is corrupt. */
2327                 err = -EINVAL;
2328                 goto out;
2329         }
2330
2331         *lsn = lsnr;
2332         len = le32_to_cpu(rh->client_data_len);
2333
2334         if (!len) {
2335                 err = 0;
2336                 goto out;
2337         }
2338
2339         if (len < sizeof(struct NTFS_RESTART)) {
2340                 err = -EINVAL;
2341                 goto out;
2342         }
2343
2344         rst = kmalloc(len, GFP_NOFS);
2345         if (!rst) {
2346                 err = -ENOMEM;
2347                 goto out;
2348         }
2349
2350         /* Copy the data into the 'rst' buffer. */
2351         err = read_log_rec_buf(log, rh, rst);
2352         if (err)
2353                 goto out;
2354
2355         *rst_ = rst;
2356         rst = NULL;
2357
2358 out:
2359         kfree(rh);
2360         kfree(rst);
2361
2362         return err;
2363 }
2364
2365 static int find_log_rec(struct ntfs_log *log, u64 lsn, struct lcb *lcb)
2366 {
2367         int err;
2368         struct LFS_RECORD_HDR *rh = lcb->lrh;
2369         u32 rec_len, len;
2370
2371         /* Read the record header for this lsn. */
2372         if (!rh) {
2373                 err = read_log_page(log, lsn_to_vbo(log, lsn),
2374                                     (struct RECORD_PAGE_HDR **)&rh, NULL);
2375
2376                 lcb->lrh = rh;
2377                 if (err)
2378                         return err;
2379         }
2380
2381         /*
2382          * If the lsn of the log record doesn't match the desired
2383          * lsn then the disk is corrupt.
2384          */
2385         if (lsn != le64_to_cpu(rh->this_lsn))
2386                 return -EINVAL;
2387
2388         len = le32_to_cpu(rh->client_data_len);
2389
2390         /*
2391          * Check that the length field isn't greater than the total
2392          * available space in the log file.
2393          */
2394         rec_len = len + log->record_header_len;
2395         if (rec_len >= log->total_avail)
2396                 return -EINVAL;
2397
2398         /*
2399          * If the record spans several pages, copy it into a buffer;
2400          * otherwise put a pointer to the log record in the context block.
2401          */
2402         if (rh->flags & LOG_RECORD_MULTI_PAGE) {
2403                 void *lr = kmalloc(len, GFP_NOFS);
2404
2405                 if (!lr)
2406                         return -ENOMEM;
2407
2408                 lcb->log_rec = lr;
2409                 lcb->alloc = true;
2410
2411                 /* Copy the data into the buffer returned. */
2412                 err = read_log_rec_buf(log, rh, lr);
2413                 if (err)
2414                         return err;
2415         } else {
2416                 /* If beyond the end of the current page -> an error. */
2417                 u32 page_off = lsn_to_page_off(log, lsn);
2418
2419                 if (page_off + len + log->record_header_len > log->page_size)
2420                         return -EINVAL;
2421
2422                 lcb->log_rec = Add2Ptr(rh, sizeof(struct LFS_RECORD_HDR));
2423                 lcb->alloc = false;
2424         }
2425
2426         return 0;
2427 }
2428
2429 /*
2430  * read_log_rec_lcb - Init the query operation.
2431  */
2432 static int read_log_rec_lcb(struct ntfs_log *log, u64 lsn, u32 ctx_mode,
2433                             struct lcb **lcb_)
2434 {
2435         int err;
2436         const struct CLIENT_REC *cr;
2437         struct lcb *lcb;
2438
2439         switch (ctx_mode) {
2440         case lcb_ctx_undo_next:
2441         case lcb_ctx_prev:
2442         case lcb_ctx_next:
2443                 break;
2444         default:
2445                 return -EINVAL;
2446         }
2447
2448         /* Check that the given lsn is in the legal range for this client. */
2449         cr = Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off));
2450
2451         if (!verify_client_lsn(log, cr, lsn))
2452                 return -EINVAL;
2453
2454         lcb = kzalloc(sizeof(struct lcb), GFP_NOFS);
2455         if (!lcb)
2456                 return -ENOMEM;
2457         lcb->client = log->client_id;
2458         lcb->ctx_mode = ctx_mode;
2459
2460         /* Find the log record indicated by the given lsn. */
2461         err = find_log_rec(log, lsn, lcb);
2462         if (err)
2463                 goto out;
2464
2465         *lcb_ = lcb;
2466         return 0;
2467
2468 out:
2469         lcb_put(lcb);
2470         *lcb_ = NULL;
2471         return err;
2472 }
2473
2474 /*
2475  * find_client_next_lsn
2476  *
2477  * Attempt to find the next lsn to return to a client based on the context mode.
2478  */
2479 static int find_client_next_lsn(struct ntfs_log *log, struct lcb *lcb, u64 *lsn)
2480 {
2481         int err;
2482         u64 next_lsn;
2483         struct LFS_RECORD_HDR *hdr;
2484
2485         hdr = lcb->lrh;
2486         *lsn = 0;
2487
2488         if (lcb_ctx_next != lcb->ctx_mode)
2489                 goto check_undo_next;
2490
2491         /* Loop as long as another lsn can be found. */
2492         for (;;) {
2493                 u64 current_lsn;
2494
2495                 err = next_log_lsn(log, hdr, &current_lsn);
2496                 if (err)
2497                         goto out;
2498
2499                 if (!current_lsn)
2500                         break;
2501
2502                 if (hdr != lcb->lrh)
2503                         kfree(hdr);
2504
2505                 hdr = NULL;
2506                 err = read_log_page(log, lsn_to_vbo(log, current_lsn),
2507                                     (struct RECORD_PAGE_HDR **)&hdr, NULL);
2508                 if (err)
2509                         goto out;
2510
2511                 if (memcmp(&hdr->client, &lcb->client,
2512                            sizeof(struct CLIENT_ID))) {
2513                         /*err = -EINVAL; */
2514                 } else if (LfsClientRecord == hdr->record_type) {
2515                         kfree(lcb->lrh);
2516                         lcb->lrh = hdr;
2517                         *lsn = current_lsn;
2518                         return 0;
2519                 }
2520         }
2521
2522 out:
2523         if (hdr != lcb->lrh)
2524                 kfree(hdr);
2525         return err;
2526
2527 check_undo_next:
2528         if (lcb_ctx_undo_next == lcb->ctx_mode)
2529                 next_lsn = le64_to_cpu(hdr->client_undo_next_lsn);
2530         else if (lcb_ctx_prev == lcb->ctx_mode)
2531                 next_lsn = le64_to_cpu(hdr->client_prev_lsn);
2532         else
2533                 return 0;
2534
2535         if (!next_lsn)
2536                 return 0;
2537
2538         if (!verify_client_lsn(
2539                     log, Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off)),
2540                     next_lsn))
2541                 return 0;
2542
2543         hdr = NULL;
2544         err = read_log_page(log, lsn_to_vbo(log, next_lsn),
2545                             (struct RECORD_PAGE_HDR **)&hdr, NULL);
2546         if (err)
2547                 return err;
2548         kfree(lcb->lrh);
2549         lcb->lrh = hdr;
2550
2551         *lsn = next_lsn;
2552
2553         return 0;
2554 }
2555
2556 static int read_next_log_rec(struct ntfs_log *log, struct lcb *lcb, u64 *lsn)
2557 {
2558         int err;
2559
2560         err = find_client_next_lsn(log, lcb, lsn);
2561         if (err)
2562                 return err;
2563
2564         if (!*lsn)
2565                 return 0;
2566
2567         if (lcb->alloc)
2568                 kfree(lcb->log_rec);
2569
2570         lcb->log_rec = NULL;
2571         lcb->alloc = false;
2572         kfree(lcb->lrh);
2573         lcb->lrh = NULL;
2574
2575         return find_log_rec(log, *lsn, lcb);
2576 }
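
/*
 * Editorial aside: a hypothetical caller (not in this file) showing how
 * the lcb query API above is driven: open a context at a starting lsn,
 * then pull records until no further lsn is found. Error handling is
 * reduced to the minimum.
 */
static int __maybe_unused sketch_walk_client_records(struct ntfs_log *log,
                                                     u64 start_lsn)
{
        struct lcb *lcb = NULL;
        u64 lsn = start_lsn;
        int err;

        err = read_log_rec_lcb(log, lsn, lcb_ctx_next, &lcb);
        if (err)
                return err;

        while (lsn) {
                /* lcb->log_rec points at the current record's client data. */
                err = read_next_log_rec(log, lcb, &lsn);
                if (err)
                        break;
        }

        lcb_put(lcb);
        return err;
}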
2577
2578 static inline bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes)
2579 {
2580         __le16 mask;
2581         u32 min_de, de_off, used, total;
2582         const struct NTFS_DE *e;
2583
2584         if (hdr_has_subnode(hdr)) {
2585                 min_de = sizeof(struct NTFS_DE) + sizeof(u64);
2586                 mask = NTFS_IE_HAS_SUBNODES;
2587         } else {
2588                 min_de = sizeof(struct NTFS_DE);
2589                 mask = 0;
2590         }
2591
2592         de_off = le32_to_cpu(hdr->de_off);
2593         used = le32_to_cpu(hdr->used);
2594         total = le32_to_cpu(hdr->total);
2595
2596         if (de_off > bytes - min_de || used > bytes || total > bytes ||
2597             de_off + min_de > used || used > total) {
2598                 return false;
2599         }
2600
2601         e = Add2Ptr(hdr, de_off);
2602         for (;;) {
2603                 u16 esize = le16_to_cpu(e->size);
2604                 struct NTFS_DE *next = Add2Ptr(e, esize);
2605
2606                 if (esize < min_de || PtrOffset(hdr, next) > used ||
2607                     (e->flags & NTFS_IE_HAS_SUBNODES) != mask) {
2608                         return false;
2609                 }
2610
2611                 if (de_is_last(e))
2612                         break;
2613
2614                 e = next;
2615         }
2616
2617         return true;
2618 }
2619
2620 static inline bool check_index_buffer(const struct INDEX_BUFFER *ib, u32 bytes)
2621 {
2622         u16 fo;
2623         const struct NTFS_RECORD_HEADER *r = &ib->rhdr;
2624
2625         if (r->sign != NTFS_INDX_SIGNATURE)
2626                 return false;
2627
2628         fo = (SECTOR_SIZE - ((bytes >> SECTOR_SHIFT) + 1) * sizeof(short));
2629
2630         if (le16_to_cpu(r->fix_off) > fo)
2631                 return false;
2632
2633         if ((le16_to_cpu(r->fix_num) - 1) * SECTOR_SIZE != bytes)
2634                 return false;
2635
2636         return check_index_header(&ib->ihdr,
2637                                   bytes - offsetof(struct INDEX_BUFFER, ihdr));
2638 }
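
/*
 * Editorial aside: worked numbers for the fixup bounds above, assuming
 * a 4K index buffer. bytes >> SECTOR_SHIFT gives 8 sectors, so fix_num
 * must be 8 + 1 = 9 shorts (the update sequence number plus one saved
 * short per sector), and fix_off may be at most
 * 512 - 9 * sizeof(short) = 494 so that the whole fixup array still
 * fits inside the first sector.
 */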
2639
2640 static inline bool check_index_root(const struct ATTRIB *attr,
2641                                     struct ntfs_sb_info *sbi)
2642 {
2643         bool ret;
2644         const struct INDEX_ROOT *root = resident_data(attr);
2645         u8 index_bits = le32_to_cpu(root->index_block_size) >= sbi->cluster_size
2646                                 ? sbi->cluster_bits
2647                                 : SECTOR_SHIFT;
2648         u8 block_clst = root->index_block_clst;
2649
2650         if (le32_to_cpu(attr->res.data_size) < sizeof(struct INDEX_ROOT) ||
2651             (root->type != ATTR_NAME && root->type != ATTR_ZERO) ||
2652             (root->type == ATTR_NAME &&
2653              root->rule != NTFS_COLLATION_TYPE_FILENAME) ||
2654             (le32_to_cpu(root->index_block_size) !=
2655              (block_clst << index_bits)) ||
2656             (block_clst != 1 && block_clst != 2 && block_clst != 4 &&
2657              block_clst != 8 && block_clst != 0x10 && block_clst != 0x20 &&
2658              block_clst != 0x40 && block_clst != 0x80)) {
2659                 return false;
2660         }
2661
2662         ret = check_index_header(&root->ihdr,
2663                                  le32_to_cpu(attr->res.data_size) -
2664                                          offsetof(struct INDEX_ROOT, ihdr));
2665         return ret;
2666 }
2667
2668 static inline bool check_attr(const struct MFT_REC *rec,
2669                               const struct ATTRIB *attr,
2670                               struct ntfs_sb_info *sbi)
2671 {
2672         u32 asize = le32_to_cpu(attr->size);
2673         u32 rsize = 0;
2674         u64 dsize, svcn, evcn;
2675         u16 run_off;
2676
2677         /* Check the fixed part of the attribute record header. */
2678         if (asize >= sbi->record_size ||
2679             asize + PtrOffset(rec, attr) >= sbi->record_size ||
2680             (attr->name_len &&
2681              le16_to_cpu(attr->name_off) + attr->name_len * sizeof(short) >
2682                      asize)) {
2683                 return false;
2684         }
2685
2686         /* Check the attribute fields. */
2687         switch (attr->non_res) {
2688         case 0:
2689                 rsize = le32_to_cpu(attr->res.data_size);
2690                 if (rsize >= asize ||
2691                     le16_to_cpu(attr->res.data_off) + rsize > asize) {
2692                         return false;
2693                 }
2694                 break;
2695
2696         case 1:
2697                 dsize = le64_to_cpu(attr->nres.data_size);
2698                 svcn = le64_to_cpu(attr->nres.svcn);
2699                 evcn = le64_to_cpu(attr->nres.evcn);
2700                 run_off = le16_to_cpu(attr->nres.run_off);
2701
2702                 if (svcn > evcn + 1 || run_off >= asize ||
2703                     le64_to_cpu(attr->nres.valid_size) > dsize ||
2704                     dsize > le64_to_cpu(attr->nres.alloc_size)) {
2705                         return false;
2706                 }
2707
2708                 if (run_off > asize)
2709                         return false;
2710
2711                 if (run_unpack(NULL, sbi, 0, svcn, evcn, svcn,
2712                                Add2Ptr(attr, run_off), asize - run_off) < 0) {
2713                         return false;
2714                 }
2715
2716                 return true;
2717
2718         default:
2719                 return false;
2720         }
2721
2722         switch (attr->type) {
2723         case ATTR_NAME:
2724                 if (fname_full_size(Add2Ptr(
2725                             attr, le16_to_cpu(attr->res.data_off))) > asize) {
2726                         return false;
2727                 }
2728                 break;
2729
2730         case ATTR_ROOT:
2731                 return check_index_root(attr, sbi);
2732
2733         case ATTR_STD:
2734                 if (rsize < sizeof(struct ATTR_STD_INFO5) &&
2735                     rsize != sizeof(struct ATTR_STD_INFO)) {
2736                         return false;
2737                 }
2738                 break;
2739
2740         case ATTR_LIST:
2741         case ATTR_ID:
2742         case ATTR_SECURE:
2743         case ATTR_LABEL:
2744         case ATTR_VOL_INFO:
2745         case ATTR_DATA:
2746         case ATTR_ALLOC:
2747         case ATTR_BITMAP:
2748         case ATTR_REPARSE:
2749         case ATTR_EA_INFO:
2750         case ATTR_EA:
2751         case ATTR_PROPERTYSET:
2752         case ATTR_LOGGED_UTILITY_STREAM:
2753                 break;
2754
2755         default:
2756                 return false;
2757         }
2758
2759         return true;
2760 }
2761
2762 static inline bool check_file_record(const struct MFT_REC *rec,
2763                                      const struct MFT_REC *rec2,
2764                                      struct ntfs_sb_info *sbi)
2765 {
2766         const struct ATTRIB *attr;
2767         u16 fo = le16_to_cpu(rec->rhdr.fix_off);
2768         u16 fn = le16_to_cpu(rec->rhdr.fix_num);
2769         u16 ao = le16_to_cpu(rec->attr_off);
2770         u32 rs = sbi->record_size;
2771
2772         /* Check the file record header for consistency. */
2773         if (rec->rhdr.sign != NTFS_FILE_SIGNATURE ||
2774             fo > (SECTOR_SIZE - ((rs >> SECTOR_SHIFT) + 1) * sizeof(short)) ||
2775             (fn - 1) * SECTOR_SIZE != rs || ao < MFTRECORD_FIXUP_OFFSET_1 ||
2776             ao > sbi->record_size - SIZEOF_RESIDENT || !is_rec_inuse(rec) ||
2777             le32_to_cpu(rec->total) != rs) {
2778                 return false;
2779         }
2780
2781         /* Loop to check all of the attributes. */
2782         for (attr = Add2Ptr(rec, ao); attr->type != ATTR_END;
2783              attr = Add2Ptr(attr, le32_to_cpu(attr->size))) {
2784                 if (check_attr(rec, attr, sbi))
2785                         continue;
2786                 return false;
2787         }
2788
2789         return true;
2790 }
2791
2792 static inline bool check_lsn(const struct NTFS_RECORD_HEADER *hdr,
2793                              const u64 *rlsn)
2794 {
2795         u64 lsn;
2796
2797         if (!rlsn)
2798                 return true;
2799
2800         lsn = le64_to_cpu(hdr->lsn);
2801
2802         if (hdr->sign == NTFS_HOLE_SIGNATURE)
2803                 return false;
2804
2805         if (*rlsn > lsn)
2806                 return true;
2807
2808         return false;
2809 }
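
/*
 * Editorial aside: the redo gate above with assumed lsns. If the
 * on-disk header carries lsn 0x100 and the log record being replayed
 * has *rlsn == 0x120, then 0x120 > 0x100 means the update never reached
 * the disk and check_lsn() says to apply it; had the header already
 * recorded 0x120 or more, the structure is up to date and the caller
 * skips the redo. A NULL rlsn (the undo pass) always passes.
 */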
2810
2811 static inline bool check_if_attr(const struct MFT_REC *rec,
2812                                  const struct LOG_REC_HDR *lrh)
2813 {
2814         u16 ro = le16_to_cpu(lrh->record_off);
2815         u16 o = le16_to_cpu(rec->attr_off);
2816         const struct ATTRIB *attr = Add2Ptr(rec, o);
2817
2818         while (o < ro) {
2819                 u32 asize;
2820
2821                 if (attr->type == ATTR_END)
2822                         break;
2823
2824                 asize = le32_to_cpu(attr->size);
2825                 if (!asize)
2826                         break;
2827
2828                 o += asize;
2829                 attr = Add2Ptr(attr, asize);
2830         }
2831
2832         return o == ro;
2833 }
2834
2835 static inline bool check_if_index_root(const struct MFT_REC *rec,
2836                                        const struct LOG_REC_HDR *lrh)
2837 {
2838         u16 ro = le16_to_cpu(lrh->record_off);
2839         u16 o = le16_to_cpu(rec->attr_off);
2840         const struct ATTRIB *attr = Add2Ptr(rec, o);
2841
2842         while (o < ro) {
2843                 u32 asize;
2844
2845                 if (attr->type == ATTR_END)
2846                         break;
2847
2848                 asize = le32_to_cpu(attr->size);
2849                 if (!asize)
2850                         break;
2851
2852                 o += asize;
2853                 attr = Add2Ptr(attr, asize);
2854         }
2855
2856         return o == ro && attr->type == ATTR_ROOT;
2857 }
2858
2859 static inline bool check_if_root_index(const struct ATTRIB *attr,
2860                                        const struct INDEX_HDR *hdr,
2861                                        const struct LOG_REC_HDR *lrh)
2862 {
2863         u16 ao = le16_to_cpu(lrh->attr_off);
2864         u32 de_off = le32_to_cpu(hdr->de_off);
2865         u32 o = PtrOffset(attr, hdr) + de_off;
2866         const struct NTFS_DE *e = Add2Ptr(hdr, de_off);
2867         u32 asize = le32_to_cpu(attr->size);
2868
2869         while (o < ao) {
2870                 u16 esize;
2871
2872                 if (o >= asize)
2873                         break;
2874
2875                 esize = le16_to_cpu(e->size);
2876                 if (!esize)
2877                         break;
2878
2879                 o += esize;
2880                 e = Add2Ptr(e, esize);
2881         }
2882
2883         return o == ao;
2884 }
2885
2886 static inline bool check_if_alloc_index(const struct INDEX_HDR *hdr,
2887                                         u32 attr_off)
2888 {
2889         u32 de_off = le32_to_cpu(hdr->de_off);
2890         u32 o = offsetof(struct INDEX_BUFFER, ihdr) + de_off;
2891         const struct NTFS_DE *e = Add2Ptr(hdr, de_off);
2892         u32 used = le32_to_cpu(hdr->used);
2893
2894         while (o < attr_off) {
2895                 u16 esize;
2896
2897                 if (de_off >= used)
2898                         break;
2899
2900                 esize = le16_to_cpu(e->size);
2901                 if (!esize)
2902                         break;
2903
2904                 o += esize;
2905                 de_off += esize;
2906                 e = Add2Ptr(e, esize);
2907         }
2908
2909         return o == attr_off;
2910 }
2911
2912 static inline void change_attr_size(struct MFT_REC *rec, struct ATTRIB *attr,
2913                                     u32 nsize)
2914 {
2915         u32 asize = le32_to_cpu(attr->size);
2916         int dsize = nsize - asize;
2917         u8 *next = Add2Ptr(attr, asize);
2918         u32 used = le32_to_cpu(rec->used);
2919
2920         memmove(Add2Ptr(attr, nsize), next, used - PtrOffset(rec, next));
2921
2922         rec->used = cpu_to_le32(used + dsize);
2923         attr->size = cpu_to_le32(nsize);
2924 }
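
/*
 * Editorial aside: a worked example for the resize above, with assumed
 * offsets. Growing an attribute from asize == 0x60 to nsize == 0x80
 * gives dsize == 0x20: everything between the old end of the attribute
 * and rec->used is shifted forward by 0x20 bytes, used grows by 0x20,
 * and attr->size becomes 0x80. Shrinking works the same way with a
 * negative dsize. The caller is responsible for checking that the
 * record has room before growing.
 */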
2925
2926 struct OpenAttr {
2927         struct ATTRIB *attr;
2928         struct runs_tree *run1;
2929         struct runs_tree run0;
2930         struct ntfs_inode *ni;
2931         // CLST rno;
2932 };
2933
2934 /*
2935  * cmp_type_and_name
2936  *
2937  * Return: 0 if 'a1' and 'a2' have the same type and name.
2938  */
2939 static inline int cmp_type_and_name(const struct ATTRIB *a1,
2940                                     const struct ATTRIB *a2)
2941 {
2942         return a1->type != a2->type || a1->name_len != a2->name_len ||
2943                (a1->name_len && memcmp(attr_name(a1), attr_name(a2),
2944                                        a1->name_len * sizeof(short)));
2945 }
2946
2947 static struct OpenAttr *find_loaded_attr(struct ntfs_log *log,
2948                                          const struct ATTRIB *attr, CLST rno)
2949 {
2950         struct OPEN_ATTR_ENRTY *oe = NULL;
2951
2952         while ((oe = enum_rstbl(log->open_attr_tbl, oe))) {
2953                 struct OpenAttr *op_attr;
2954
2955                 if (ino_get(&oe->ref) != rno)
2956                         continue;
2957
2958                 op_attr = (struct OpenAttr *)oe->ptr;
2959                 if (!cmp_type_and_name(op_attr->attr, attr))
2960                         return op_attr;
2961         }
2962         return NULL;
2963 }
2964
2965 static struct ATTRIB *attr_create_nonres_log(struct ntfs_sb_info *sbi,
2966                                              enum ATTR_TYPE type, u64 size,
2967                                              const u16 *name, size_t name_len,
2968                                              __le16 flags)
2969 {
2970         struct ATTRIB *attr;
2971         u32 name_size = ALIGN(name_len * sizeof(short), 8);
2972         bool is_ext = flags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED);
2973         u32 asize = name_size +
2974                     (is_ext ? SIZEOF_NONRESIDENT_EX : SIZEOF_NONRESIDENT);
2975
2976         attr = kzalloc(asize, GFP_NOFS);
2977         if (!attr)
2978                 return NULL;
2979
2980         attr->type = type;
2981         attr->size = cpu_to_le32(asize);
2982         attr->flags = flags;
2983         attr->non_res = 1;
2984         attr->name_len = name_len;
2985
2986         attr->nres.evcn = cpu_to_le64((u64)bytes_to_cluster(sbi, size) - 1);
2987         attr->nres.alloc_size = cpu_to_le64(ntfs_up_cluster(sbi, size));
2988         attr->nres.data_size = cpu_to_le64(size);
2989         attr->nres.valid_size = attr->nres.data_size;
2990         if (is_ext) {
2991                 attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
2992                 if (is_attr_compressed(attr))
2993                         attr->nres.c_unit = COMPRESSION_UNIT;
2994
2995                 attr->nres.run_off =
2996                         cpu_to_le16(SIZEOF_NONRESIDENT_EX + name_size);
2997                 memcpy(Add2Ptr(attr, SIZEOF_NONRESIDENT_EX), name,
2998                        name_len * sizeof(short));
2999         } else {
3000                 attr->name_off = SIZEOF_NONRESIDENT_LE;
3001                 attr->nres.run_off =
3002                         cpu_to_le16(SIZEOF_NONRESIDENT + name_size);
3003                 memcpy(Add2Ptr(attr, SIZEOF_NONRESIDENT), name,
3004                        name_len * sizeof(short));
3005         }
3006
3007         return attr;
3008 }
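
/*
 * Editorial aside: a hypothetical use of the helper above (this call
 * site is invented for illustration): building an unnamed, plain
 * non-resident $DATA attribute of 64K with no compressed/sparse flags.
 */
static __maybe_unused struct ATTRIB *
sketch_make_data_attr(struct ntfs_sb_info *sbi)
{
        static const u16 no_name[1];

        return attr_create_nonres_log(sbi, ATTR_DATA, 0x10000, no_name, 0, 0);
}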
3009
3010 /*
3011  * do_action - Common routine for the Redo and Undo Passes.
3012  * @rlsn: If it is NULL then undo.
3013  */
3014 static int do_action(struct ntfs_log *log, struct OPEN_ATTR_ENRTY *oe,
3015                      const struct LOG_REC_HDR *lrh, u32 op, void *data,
3016                      u32 dlen, u32 rec_len, const u64 *rlsn)
3017 {
3018         int err = 0;
3019         struct ntfs_sb_info *sbi = log->ni->mi.sbi;
3020         struct inode *inode = NULL, *inode_parent;
3021         struct mft_inode *mi = NULL, *mi2_child = NULL;
3022         CLST rno = 0, rno_base = 0;
3023         struct INDEX_BUFFER *ib = NULL;
3024         struct MFT_REC *rec = NULL;
3025         struct ATTRIB *attr = NULL, *attr2;
3026         struct INDEX_HDR *hdr;
3027         struct INDEX_ROOT *root;
3028         struct NTFS_DE *e, *e1, *e2;
3029         struct NEW_ATTRIBUTE_SIZES *new_sz;
3030         struct ATTR_FILE_NAME *fname;
3031         struct OpenAttr *oa, *oa2;
3032         u32 nsize, t32, asize, used, esize, off, bits;
3033         u16 id, id2;
3034         u32 record_size = sbi->record_size;
3035         u64 t64;
3036         u16 roff = le16_to_cpu(lrh->record_off);
3037         u16 aoff = le16_to_cpu(lrh->attr_off);
3038         u64 lco = 0;
3039         u64 cbo = (u64)le16_to_cpu(lrh->cluster_off) << SECTOR_SHIFT;
3040         u64 tvo = le64_to_cpu(lrh->target_vcn) << sbi->cluster_bits;
3041         u64 vbo = cbo + tvo;
3042         void *buffer_le = NULL;
3043         u32 bytes = 0;
3044         bool a_dirty = false;
3045         u16 data_off;
3046
3047         oa = oe->ptr;
3048
3049         /* Big switch to prepare. */
3050         switch (op) {
3051         /* ============================================================
3052          * Process MFT records, as described by the current log record.
3053          * ============================================================
3054          */
3055         case InitializeFileRecordSegment:
3056         case DeallocateFileRecordSegment:
3057         case WriteEndOfFileRecordSegment:
3058         case CreateAttribute:
3059         case DeleteAttribute:
3060         case UpdateResidentValue:
3061         case UpdateMappingPairs:
3062         case SetNewAttributeSizes:
3063         case AddIndexEntryRoot:
3064         case DeleteIndexEntryRoot:
3065         case SetIndexEntryVcnRoot:
3066         case UpdateFileNameRoot:
3067         case UpdateRecordDataRoot:
3068         case ZeroEndOfFileRecord:
3069                 rno = vbo >> sbi->record_bits;
3070                 inode = ilookup(sbi->sb, rno);
3071                 if (inode) {
3072                         mi = &ntfs_i(inode)->mi;
3073                 } else if (op == InitializeFileRecordSegment) {
3074                         mi = kzalloc(sizeof(struct mft_inode), GFP_NOFS);
3075                         if (!mi)
3076                                 return -ENOMEM;
3077                         err = mi_format_new(mi, sbi, rno, 0, false);
3078                         if (err)
3079                                 goto out;
3080                 } else {
3081                         /* Read from disk. */
3082                         err = mi_get(sbi, rno, &mi);
3083                         if (err)
3084                                 return err;
3085                 }
3086                 rec = mi->mrec;
3087
3088                 if (op == DeallocateFileRecordSegment)
3089                         goto skip_load_parent;
3090
3091                 if (InitializeFileRecordSegment != op) {
3092                         if (rec->rhdr.sign == NTFS_BAAD_SIGNATURE)
3093                                 goto dirty_vol;
3094                         if (!check_lsn(&rec->rhdr, rlsn))
3095                                 goto out;
3096                         if (!check_file_record(rec, NULL, sbi))
3097                                 goto dirty_vol;
3098                         attr = Add2Ptr(rec, roff);
3099                 }
3100
3101                 if (is_rec_base(rec) || InitializeFileRecordSegment == op) {
3102                         rno_base = rno;
3103                         goto skip_load_parent;
3104                 }
3105
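                /*
                 * This is a child (extension) record: apply the update through
                 * the base inode's cached copy of the record, so the in-memory
                 * and on-disk versions do not diverge.
                 */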
3106                 rno_base = ino_get(&rec->parent_ref);
3107                 inode_parent = ntfs_iget5(sbi->sb, &rec->parent_ref, NULL);
3108                 if (IS_ERR(inode_parent))
3109                         goto skip_load_parent;
3110
3111                 if (is_bad_inode(inode_parent)) {
3112                         iput(inode_parent);
3113                         goto skip_load_parent;
3114                 }
3115
3116                 if (ni_load_mi_ex(ntfs_i(inode_parent), rno, &mi2_child)) {
3117                         iput(inode_parent);
3118                 } else {
3119                         if (mi2_child->mrec != mi->mrec)
3120                                 memcpy(mi2_child->mrec, mi->mrec,
3121                                        sbi->record_size);
3122
3123                         if (inode)
3124                                 iput(inode);
3125                         else if (mi)
3126                                 mi_put(mi);
3127
3128                         inode = inode_parent;
3129                         mi = mi2_child;
3130                         rec = mi2_child->mrec;
3131                         attr = Add2Ptr(rec, roff);
3132                 }
3133
3134 skip_load_parent:
3135                 inode_parent = NULL;
3136                 break;
3137
3138         /*
3139          * Process attributes, as described by the current log record.
3140          */
3141         case UpdateNonresidentValue:
3142         case AddIndexEntryAllocation:
3143         case DeleteIndexEntryAllocation:
3144         case WriteEndOfIndexBuffer:
3145         case SetIndexEntryVcnAllocation:
3146         case UpdateFileNameAllocation:
3147         case SetBitsInNonresidentBitMap:
3148         case ClearBitsInNonresidentBitMap:
3149         case UpdateRecordDataAllocation:
3150                 attr = oa->attr;
3151                 bytes = UpdateNonresidentValue == op ? dlen : 0;
3152                 lco = (u64)le16_to_cpu(lrh->lcns_follow) << sbi->cluster_bits;
3153
3154                 if (attr->type == ATTR_ALLOC) {
3155                         t32 = le32_to_cpu(oe->bytes_per_index);
3156                         if (bytes < t32)
3157                                 bytes = t32;
3158                 }
3159
3160                 if (!bytes)
3161                         bytes = lco - cbo;
3162
3163                 bytes += roff;
3164                 if (attr->type == ATTR_ALLOC)
3165                         bytes = (bytes + 511) & ~511; // Align up to a 512-byte boundary.
3166
3167                 buffer_le = kmalloc(bytes, GFP_NOFS);
3168                 if (!buffer_le)
3169                         return -ENOMEM;
3170
3171                 err = ntfs_read_run_nb(sbi, oa->run1, vbo, buffer_le, bytes,
3172                                        NULL);
3173                 if (err)
3174                         goto out;
3175
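                /*
                 * Index allocation buffers carry update-sequence fixups:
                 * ntfs_fix_post_read() below undoes them after reading, and
                 * ntfs_fix_pre_write() reapplies them before writing back.
                 */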
3176                 if (attr->type == ATTR_ALLOC && *(int *)buffer_le)
3177                         ntfs_fix_post_read(buffer_le, bytes, false);
3178                 break;
3179
3180         default:
3181                 WARN_ON(1);
3182         }
3183
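        /*
         * The prepare switch above produced either an MFT record to edit in
         * place (rec/mi, the resident cases) or a buffer read from the target
         * attribute's run (buffer_le, the non-resident cases); the switch
         * below applies the actual change.
         */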
3184         /* Big switch to do operation. */
3185         switch (op) {
3186         case InitializeFileRecordSegment:
3187                 if (roff + dlen > record_size)
3188                         goto dirty_vol;
3189
3190                 memcpy(Add2Ptr(rec, roff), data, dlen);
3191                 mi->dirty = true;
3192                 break;
3193
3194         case DeallocateFileRecordSegment:
3195                 clear_rec_inuse(rec);
3196                 le16_add_cpu(&rec->seq, 1);
3197                 mi->dirty = true;
3198                 break;
3199
3200         case WriteEndOfFileRecordSegment:
3201                 attr2 = (struct ATTRIB *)data;
3202                 if (!check_if_attr(rec, lrh) || roff + dlen > record_size)
3203                         goto dirty_vol;
3204
3205                 memmove(attr, attr2, dlen);
3206                 rec->used = cpu_to_le32(ALIGN(roff + dlen, 8));
3207
3208                 mi->dirty = true;
3209                 break;
3210
3211         case CreateAttribute:
3212                 attr2 = (struct ATTRIB *)data;
3213                 asize = le32_to_cpu(attr2->size);
3214                 used = le32_to_cpu(rec->used);
3215
3216                 if (!check_if_attr(rec, lrh) || dlen < SIZEOF_RESIDENT ||
3217                     !IS_ALIGNED(asize, 8) ||
3218                     Add2Ptr(attr2, asize) > Add2Ptr(lrh, rec_len) ||
3219                     dlen > record_size - used) {
3220                         goto dirty_vol;
3221                 }
3222
3223                 memmove(Add2Ptr(attr, asize), attr, used - roff);
3224                 memcpy(attr, attr2, asize);
3225
3226                 rec->used = cpu_to_le32(used + asize);
3227                 id = le16_to_cpu(rec->next_attr_id);
3228                 id2 = le16_to_cpu(attr2->id);
3229                 if (id <= id2)
3230                         rec->next_attr_id = cpu_to_le16(id2 + 1);
3231                 if (is_attr_indexed(attr))
3232                         le16_add_cpu(&rec->hard_links, 1);
3233
3234                 oa2 = find_loaded_attr(log, attr, rno_base);
3235                 if (oa2) {
3236                         void *p2 = kmemdup(attr, le32_to_cpu(attr->size),
3237                                            GFP_NOFS);
3238                         if (p2) {
3239                                 // run_close(oa2->run1);
3240                                 kfree(oa2->attr);
3241                                 oa2->attr = p2;
3242                         }
3243                 }
3244
3245                 mi->dirty = true;
3246                 break;
3247
3248         case DeleteAttribute:
3249                 asize = le32_to_cpu(attr->size);
3250                 used = le32_to_cpu(rec->used);
3251
3252                 if (!check_if_attr(rec, lrh))
3253                         goto dirty_vol;
3254
3255                 rec->used = cpu_to_le32(used - asize);
3256                 if (is_attr_indexed(attr))
3257                         le16_add_cpu(&rec->hard_links, -1);
3258
3259                 memmove(attr, Add2Ptr(attr, asize), used - asize - roff);
3260
3261                 mi->dirty = true;
3262                 break;
3263
3264         case UpdateResidentValue:
3265                 nsize = aoff + dlen;
3266
3267                 if (!check_if_attr(rec, lrh))
3268                         goto dirty_vol;
3269
3270                 asize = le32_to_cpu(attr->size);
3271                 used = le32_to_cpu(rec->used);
3272
3273                 if (lrh->redo_len == lrh->undo_len) {
3274                         if (nsize > asize)
3275                                 goto dirty_vol;
3276                         goto move_data;
3277                 }
3278
3279                 if (nsize > asize && nsize - asize > record_size - used)
3280                         goto dirty_vol;
3281
3282                 nsize = ALIGN(nsize, 8);
3283                 data_off = le16_to_cpu(attr->res.data_off);
3284
3285                 if (nsize < asize) {
3286                         memmove(Add2Ptr(attr, aoff), data, dlen);
3287                         data = NULL; // Skip the memmove() below.
3288                 }
3289
3290                 memmove(Add2Ptr(attr, nsize), Add2Ptr(attr, asize),
3291                         used - le16_to_cpu(lrh->record_off) - asize);
3292
3293                 rec->used = cpu_to_le32(used + nsize - asize);
3294                 attr->size = cpu_to_le32(nsize);
3295                 attr->res.data_size = cpu_to_le32(aoff + dlen - data_off);
3296
3297 move_data:
3298                 if (data)
3299                         memmove(Add2Ptr(attr, aoff), data, dlen);
3300
3301                 oa2 = find_loaded_attr(log, attr, rno_base);
3302                 if (oa2) {
3303                         void *p2 = kmemdup(attr, le32_to_cpu(attr->size),
3304                                            GFP_NOFS);
3305                         if (p2) {
3306                                 // run_close(&oa2->run0);
3307                                 oa2->run1 = &oa2->run0;
3308                                 kfree(oa2->attr);
3309                                 oa2->attr = p2;
3310                         }
3311                 }
3312
3313                 mi->dirty = true;
3314                 break;
3315
3316         case UpdateMappingPairs:
3317                 nsize = aoff + dlen;
3318                 asize = le32_to_cpu(attr->size);
3319                 used = le32_to_cpu(rec->used);
3320
3321                 if (!check_if_attr(rec, lrh) || !attr->non_res ||
3322                     aoff < le16_to_cpu(attr->nres.run_off) || aoff > asize ||
3323                     (nsize > asize && nsize - asize > record_size - used)) {
3324                         goto dirty_vol;
3325                 }
3326
3327                 nsize = ALIGN(nsize, 8);
3328
3329                 memmove(Add2Ptr(attr, nsize), Add2Ptr(attr, asize),
3330                         used - le16_to_cpu(lrh->record_off) - asize);
3331                 rec->used = cpu_to_le32(used + nsize - asize);
3332                 attr->size = cpu_to_le32(nsize);
3333                 memmove(Add2Ptr(attr, aoff), data, dlen);
3334
3335                 if (run_get_highest_vcn(le64_to_cpu(attr->nres.svcn),
3336                                         attr_run(attr), &t64)) {
3337                         goto dirty_vol;
3338                 }
3339
3340                 attr->nres.evcn = cpu_to_le64(t64);
3341                 oa2 = find_loaded_attr(log, attr, rno_base);
3342                 if (oa2 && oa2->attr->non_res)
3343                         oa2->attr->nres.evcn = attr->nres.evcn;
3344
3345                 mi->dirty = true;
3346                 break;
3347
3348         case SetNewAttributeSizes:
3349                 new_sz = data;
3350                 if (!check_if_attr(rec, lrh) || !attr->non_res)
3351                         goto dirty_vol;
3352
3353                 attr->nres.alloc_size = new_sz->alloc_size;
3354                 attr->nres.data_size = new_sz->data_size;
3355                 attr->nres.valid_size = new_sz->valid_size;
3356
3357                 if (dlen >= sizeof(struct NEW_ATTRIBUTE_SIZES))
3358                         attr->nres.total_size = new_sz->total_size;
3359
3360                 oa2 = find_loaded_attr(log, attr, rno_base);
3361                 if (oa2) {
3362                         void *p2 = kmemdup(attr, le32_to_cpu(attr->size),
3363                                            GFP_NOFS);
3364                         if (p2) {
3365                                 kfree(oa2->attr);
3366                                 oa2->attr = p2;
3367                         }
3368                 }
3369                 mi->dirty = true;
3370                 break;
3371
3372         case AddIndexEntryRoot:
3373                 e = (struct NTFS_DE *)data;
3374                 esize = le16_to_cpu(e->size);
3375                 root = resident_data(attr);
3376                 hdr = &root->ihdr;
3377                 used = le32_to_cpu(hdr->used);
3378
3379                 if (!check_if_index_root(rec, lrh) ||
3380                     !check_if_root_index(attr, hdr, lrh) ||
3381                     Add2Ptr(data, esize) > Add2Ptr(lrh, rec_len) ||
3382                     esize > le32_to_cpu(rec->total) - le32_to_cpu(rec->used)) {
3383                         goto dirty_vol;
3384                 }
3385
3386                 e1 = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
3387
3388                 change_attr_size(rec, attr, le32_to_cpu(attr->size) + esize);
3389
3390                 memmove(Add2Ptr(e1, esize), e1,
3391                         PtrOffset(e1, Add2Ptr(hdr, used)));
3392                 memmove(e1, e, esize);
3393
3394                 le32_add_cpu(&attr->res.data_size, esize);
3395                 hdr->used = cpu_to_le32(used + esize);
3396                 le32_add_cpu(&hdr->total, esize);
3397
3398                 mi->dirty = true;
3399                 break;
3400
3401         case DeleteIndexEntryRoot:
3402                 root = resident_data(attr);
3403                 hdr = &root->ihdr;
3404                 used = le32_to_cpu(hdr->used);
3405
3406                 if (!check_if_index_root(rec, lrh) ||
3407                     !check_if_root_index(attr, hdr, lrh)) {
3408                         goto dirty_vol;
3409                 }
3410
3411                 e1 = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
3412                 esize = le16_to_cpu(e1->size);
3413                 e2 = Add2Ptr(e1, esize);
3414
3415                 memmove(e1, e2, PtrOffset(e2, Add2Ptr(hdr, used)));
3416
3417                 le32_sub_cpu(&attr->res.data_size, esize);
3418                 hdr->used = cpu_to_le32(used - esize);
3419                 le32_sub_cpu(&hdr->total, esize);
3420
3421                 change_attr_size(rec, attr, le32_to_cpu(attr->size) - esize);
3422
3423                 mi->dirty = true;
3424                 break;
3425
3426         case SetIndexEntryVcnRoot:
3427                 root = resident_data(attr);
3428                 hdr = &root->ihdr;
3429
3430                 if (!check_if_index_root(rec, lrh) ||
3431                     !check_if_root_index(attr, hdr, lrh)) {
3432                         goto dirty_vol;
3433                 }
3434
3435                 e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
3436
3437                 de_set_vbn_le(e, *(__le64 *)data);
3438                 mi->dirty = true;
3439                 break;
3440
3441         case UpdateFileNameRoot:
3442                 root = resident_data(attr);
3443                 hdr = &root->ihdr;
3444
3445                 if (!check_if_index_root(rec, lrh) ||
3446                     !check_if_root_index(attr, hdr, lrh)) {
3447                         goto dirty_vol;
3448                 }
3449
3450                 e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
3451                 fname = (struct ATTR_FILE_NAME *)(e + 1);
3452                 memmove(&fname->dup, data, sizeof(fname->dup));
3453                 mi->dirty = true;
3454                 break;
3455
3456         case UpdateRecordDataRoot:
3457                 root = resident_data(attr);
3458                 hdr = &root->ihdr;
3459
3460                 if (!check_if_index_root(rec, lrh) ||
3461                     !check_if_root_index(attr, hdr, lrh)) {
3462                         goto dirty_vol;
3463                 }
3464
3465                 e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
3466
3467                 memmove(Add2Ptr(e, le16_to_cpu(e->view.data_off)), data, dlen);
3468
3469                 mi->dirty = true;
3470                 break;
3471
3472         case ZeroEndOfFileRecord:
3473                 if (roff + dlen > record_size)
3474                         goto dirty_vol;
3475
3476                 memset(attr, 0, dlen);
3477                 mi->dirty = true;
3478                 break;
3479
3480         case UpdateNonresidentValue:
3481                 if (lco < cbo + roff + dlen)
3482                         goto dirty_vol;
3483
3484                 memcpy(Add2Ptr(buffer_le, roff), data, dlen);
3485
3486                 a_dirty = true;
3487                 if (attr->type == ATTR_ALLOC)
3488                         ntfs_fix_pre_write(buffer_le, bytes);
3489                 break;
3490
3491         case AddIndexEntryAllocation:
3492                 ib = Add2Ptr(buffer_le, roff);
3493                 hdr = &ib->ihdr;
3494                 e = data;
3495                 esize = le16_to_cpu(e->size);
3496                 e1 = Add2Ptr(ib, aoff);
3497
3498                 if (is_baad(&ib->rhdr))
3499                         goto dirty_vol;
3500                 if (!check_lsn(&ib->rhdr, rlsn))
3501                         goto out;
3502
3503                 used = le32_to_cpu(hdr->used);
3504
3505                 if (!check_index_buffer(ib, bytes) ||
3506                     !check_if_alloc_index(hdr, aoff) ||
3507                     Add2Ptr(e, esize) > Add2Ptr(lrh, rec_len) ||
3508                     used + esize > le32_to_cpu(hdr->total)) {
3509                         goto dirty_vol;
3510                 }
3511
3512                 memmove(Add2Ptr(e1, esize), e1,
3513                         PtrOffset(e1, Add2Ptr(hdr, used)));
3514                 memcpy(e1, e, esize);
3515
3516                 hdr->used = cpu_to_le32(used + esize);
3517
3518                 a_dirty = true;
3519
3520                 ntfs_fix_pre_write(&ib->rhdr, bytes);
3521                 break;
3522
3523         case DeleteIndexEntryAllocation:
3524                 ib = Add2Ptr(buffer_le, roff);
3525                 hdr = &ib->ihdr;
3526                 e = Add2Ptr(ib, aoff);
3527                 esize = le16_to_cpu(e->size);
3528
3529                 if (is_baad(&ib->rhdr))
3530                         goto dirty_vol;
3531                 if (!check_lsn(&ib->rhdr, rlsn))
3532                         goto out;
3533
3534                 if (!check_index_buffer(ib, bytes) ||
3535                     !check_if_alloc_index(hdr, aoff)) {
3536                         goto dirty_vol;
3537                 }
3538
3539                 e1 = Add2Ptr(e, esize);
3540                 nsize = esize;
3541                 used = le32_to_cpu(hdr->used);
3542
3543                 memmove(e, e1, PtrOffset(e1, Add2Ptr(hdr, used)));
3544
3545                 hdr->used = cpu_to_le32(used - nsize);
3546
3547                 a_dirty = true;
3548
3549                 ntfs_fix_pre_write(&ib->rhdr, bytes);
3550                 break;
3551
3552         case WriteEndOfIndexBuffer:
3553                 ib = Add2Ptr(buffer_le, roff);
3554                 hdr = &ib->ihdr;
3555                 e = Add2Ptr(ib, aoff);
3556
3557                 if (is_baad(&ib->rhdr))
3558                         goto dirty_vol;
3559                 if (!check_lsn(&ib->rhdr, rlsn))
3560                         goto out;
3561                 if (!check_index_buffer(ib, bytes) ||
3562                     !check_if_alloc_index(hdr, aoff) ||
3563                     aoff + dlen > offsetof(struct INDEX_BUFFER, ihdr) +
3564                                           le32_to_cpu(hdr->total)) {
3565                         goto dirty_vol;
3566                 }
3567
3568                 hdr->used = cpu_to_le32(dlen + PtrOffset(hdr, e));
3569                 memmove(e, data, dlen);
3570
3571                 a_dirty = true;
3572                 ntfs_fix_pre_write(&ib->rhdr, bytes);
3573                 break;
3574
3575         case SetIndexEntryVcnAllocation:
3576                 ib = Add2Ptr(buffer_le, roff);
3577                 hdr = &ib->ihdr;
3578                 e = Add2Ptr(ib, aoff);
3579
3580                 if (is_baad(&ib->rhdr))
3581                         goto dirty_vol;
3582
3583                 if (!check_lsn(&ib->rhdr, rlsn))
3584                         goto out;
3585                 if (!check_index_buffer(ib, bytes) ||
3586                     !check_if_alloc_index(hdr, aoff)) {
3587                         goto dirty_vol;
3588                 }
3589
3590                 de_set_vbn_le(e, *(__le64 *)data);
3591
3592                 a_dirty = true;
3593                 ntfs_fix_pre_write(&ib->rhdr, bytes);
3594                 break;
3595
3596         case UpdateFileNameAllocation:
3597                 ib = Add2Ptr(buffer_le, roff);
3598                 hdr = &ib->ihdr;
3599                 e = Add2Ptr(ib, aoff);
3600
3601                 if (is_baad(&ib->rhdr))
3602                         goto dirty_vol;
3603
3604                 if (!check_lsn(&ib->rhdr, rlsn))
3605                         goto out;
3606                 if (!check_index_buffer(ib, bytes) ||
3607                     !check_if_alloc_index(hdr, aoff)) {
3608                         goto dirty_vol;
3609                 }
3610
3611                 fname = (struct ATTR_FILE_NAME *)(e + 1);
3612                 memmove(&fname->dup, data, sizeof(fname->dup));
3613
3614                 a_dirty = true;
3615                 ntfs_fix_pre_write(&ib->rhdr, bytes);
3616                 break;
3617
3618         case SetBitsInNonresidentBitMap:
3619                 off = le32_to_cpu(((struct BITMAP_RANGE *)data)->bitmap_off);
3620                 bits = le32_to_cpu(((struct BITMAP_RANGE *)data)->bits);
3621
3622                 if (cbo + (off + 7) / 8 > lco ||
3623                     cbo + ((off + bits + 7) / 8) > lco) {
3624                         goto dirty_vol;
3625                 }
3626
3627                 ntfs_bitmap_set_le(Add2Ptr(buffer_le, roff), off, bits);
3628                 a_dirty = true;
3629                 break;
3630
3631         case ClearBitsInNonresidentBitMap:
3632                 off = le32_to_cpu(((struct BITMAP_RANGE *)data)->bitmap_off);
3633                 bits = le32_to_cpu(((struct BITMAP_RANGE *)data)->bits);
3634
3635                 if (cbo + (off + 7) / 8 > lco ||
3636                     cbo + ((off + bits + 7) / 8) > lco) {
3637                         goto dirty_vol;
3638                 }
3639
3640                 ntfs_bitmap_clear_le(Add2Ptr(buffer_le, roff), off, bits);
3641                 a_dirty = true;
3642                 break;
3643
3644         case UpdateRecordDataAllocation:
3645                 ib = Add2Ptr(buffer_le, roff);
3646                 hdr = &ib->ihdr;
3647                 e = Add2Ptr(ib, aoff);
3648
3649                 if (is_baad(&ib->rhdr))
3650                         goto dirty_vol;
3651
3652                 if (!check_lsn(&ib->rhdr, rlsn))
3653                         goto out;
3654                 if (!check_index_buffer(ib, bytes) ||
3655                     !check_if_alloc_index(hdr, aoff)) {
3656                         goto dirty_vol;
3657                 }
3658
3659                 memmove(Add2Ptr(e, le16_to_cpu(e->view.data_off)), data, dlen);
3660
3661                 a_dirty = true;
3662                 ntfs_fix_pre_write(&ib->rhdr, bytes);
3663                 break;
3664
3665         default:
3666                 WARN_ON(1);
3667         }
3668
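        /*
         * Redo pass only (rlsn != NULL): stamp the lsn of the record just
         * applied into the headers we modified, so that check_lsn() treats
         * them as up to date on a later pass.
         */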
3669         if (rlsn) {
3670                 __le64 t64 = cpu_to_le64(*rlsn);
3671
3672                 if (rec)
3673                         rec->rhdr.lsn = t64;
3674                 if (ib)
3675                         ib->rhdr.lsn = t64;
3676         }
3677
3678         if (mi && mi->dirty) {
3679                 err = mi_write(mi, 0);
3680                 if (err)
3681                         goto out;
3682         }
3683
3684         if (a_dirty) {
3685                 attr = oa->attr;
3686                 err = ntfs_sb_write_run(sbi, oa->run1, vbo, buffer_le, bytes, 0);
3687                 if (err)
3688                         goto out;
3689         }
3690
3691 out:
3692
3693         if (inode)
3694                 iput(inode);
3695         else if (mi != mi2_child)
3696                 mi_put(mi);
3697
3698         kfree(buffer_le);
3699
3700         return err;
3701
3702 dirty_vol:
3703         log->set_dirty = true;
3704         goto out;
3705 }
3706
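/*
 * Replay proceeds in passes: the analysis pass below walks the client
 * records forward from the checkpoint lsn and rebuilds the transaction,
 * dirty page and open attribute tables; the redo and undo passes then
 * apply or roll back the logged operations via do_action().
 */
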
3707 /*
3708  * log_replay - Replay the log and empty it.
3709  *
3710  * This function is called during the mount operation.
3711  * It replays the log and then empties it.
3712  * *initialized is set to false if the logfile contains '-1'.
3713  */
3714 int log_replay(struct ntfs_inode *ni, bool *initialized)
3715 {
3716         int err;
3717         struct ntfs_sb_info *sbi = ni->mi.sbi;
3718         struct ntfs_log *log;
3719
3720         struct restart_info rst_info, rst_info2;
3721         u64 rec_lsn, ra_lsn, checkpt_lsn = 0, rlsn = 0;
3722         struct ATTR_NAME_ENTRY *attr_names = NULL;
3723         struct ATTR_NAME_ENTRY *ane;
3724         struct RESTART_TABLE *dptbl = NULL;
3725         struct RESTART_TABLE *trtbl = NULL;
3726         const struct RESTART_TABLE *rt;
3727         struct RESTART_TABLE *oatbl = NULL;
3728         struct inode *inode;
3729         struct OpenAttr *oa;
3730         struct ntfs_inode *ni_oe;
3731         struct ATTRIB *attr = NULL;
3732         u64 size, vcn, undo_next_lsn;
3733         CLST rno, lcn, lcn0, len0, clen;
3734         void *data;
3735         struct NTFS_RESTART *rst = NULL;
3736         struct lcb *lcb = NULL;
3737         struct OPEN_ATTR_ENRTY *oe;
3738         struct TRANSACTION_ENTRY *tr;
3739         struct DIR_PAGE_ENTRY *dp;
3740         u32 i, bytes_per_attr_entry;
3741         u32 l_size = ni->vfs_inode.i_size;
3742         u32 orig_file_size = l_size;
3743         u32 page_size, vbo, tail, off, dlen;
3744         u32 saved_len, rec_len, transact_id;
3745         bool use_second_page;
3746         struct RESTART_AREA *ra2, *ra = NULL;
3747         struct CLIENT_REC *ca, *cr;
3748         __le16 client;
3749         struct RESTART_HDR *rh;
3750         const struct LFS_RECORD_HDR *frh;
3751         const struct LOG_REC_HDR *lrh;
3752         bool is_mapped;
3753         bool is_ro = sb_rdonly(sbi->sb);
3754         u64 t64;
3755         u16 t16;
3756         u32 t32;
3757
3758         /* Get the page size. NOTE: To replay we can use the default page size. */
3759 #if PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <= DefaultLogPageSize * 2
3760         page_size = norm_file_page(PAGE_SIZE, &l_size, true);
3761 #else
3762         page_size = norm_file_page(PAGE_SIZE, &l_size, false);
3763 #endif
3764         if (!page_size)
3765                 return -EINVAL;
3766
3767         log = kzalloc(sizeof(struct ntfs_log), GFP_NOFS);
3768         if (!log)
3769                 return -ENOMEM;
3770
3771         memset(&rst_info, 0, sizeof(struct restart_info));
3772
3773         log->ni = ni;
3774         log->l_size = l_size;
3775         log->one_page_buf = kmalloc(page_size, GFP_NOFS);
3776         if (!log->one_page_buf) {
3777                 err = -ENOMEM;
3778                 goto out;
3779         }
3780
3781         log->page_size = page_size;
3782         log->page_mask = page_size - 1;
3783         log->page_bits = blksize_bits(page_size);
3784
3785         /* Look for a restart area on the disk. */
3786         err = log_read_rst(log, l_size, true, &rst_info);
3787         if (err)
3788                 goto out;
3789
3790         /* Remember 'initialized'. */
3791         *initialized = rst_info.initialized;
3792
3793         if (!rst_info.restart) {
3794                 if (rst_info.initialized) {
3795                         /* No restart area, but the file is initialized. */
3796                         err = -EINVAL;
3797                         goto out;
3798                 }
3799
3800                 log_init_pg_hdr(log, page_size, page_size, 1, 1);
3801                 log_create(log, l_size, 0, get_random_u32(), false, false);
3802
3805                 ra = log_create_ra(log);
3806                 if (!ra) {
3807                         err = -ENOMEM;
3808                         goto out;
3809                 }
3810                 log->ra = ra;
3811                 log->init_ra = true;
3812
3813                 goto process_log;
3814         }
3815
3816         /*
3817          * If the restart offset above wasn't zero then we won't
3818          * look for a second restart.
3819          */
3820         if (rst_info.vbo)
3821                 goto check_restart_area;
3822
3823         memset(&rst_info2, 0, sizeof(struct restart_info));
3824         err = log_read_rst(log, l_size, false, &rst_info2);
3825         if (err)
3826                 goto out;
3827
3828         /* Determine which restart area to use. */
3829         if (!rst_info2.restart || rst_info2.last_lsn <= rst_info.last_lsn)
3830                 goto use_first_page;
3831
3832         use_second_page = true;
3833
3834         if (rst_info.chkdsk_was_run && page_size != rst_info.vbo) {
3835                 struct RECORD_PAGE_HDR *sp = NULL;
3836                 bool usa_error;
3837
3838                 if (!read_log_page(log, page_size, &sp, &usa_error) &&
3839                     sp->rhdr.sign == NTFS_CHKD_SIGNATURE) {
3840                         use_second_page = false;
3841                 }
3842                 kfree(sp);
3843         }
3844
3845         if (use_second_page) {
3846                 kfree(rst_info.r_page);
3847                 memcpy(&rst_info, &rst_info2, sizeof(struct restart_info));
3848                 rst_info2.r_page = NULL;
3849         }
3850
3851 use_first_page:
3852         kfree(rst_info2.r_page);
3853
3854 check_restart_area:
3855         /*
3856          * If the restart area is at offset 0, we want
3857          * to write the second restart area first.
3858          */
3859         log->init_ra = !!rst_info.vbo;
3860
3861         /* If we have a valid page then grab a pointer to the restart area. */
3862         ra2 = rst_info.valid_page
3863                       ? Add2Ptr(rst_info.r_page,
3864                                 le16_to_cpu(rst_info.r_page->ra_off))
3865                       : NULL;
3866
3867         if (rst_info.chkdsk_was_run ||
3868             (ra2 && ra2->client_idx[1] == LFS_NO_CLIENT_LE)) {
3869                 bool wrapped = false;
3870                 bool use_multi_page = false;
3871                 u32 open_log_count;
3872
3873                 /* Do some checks based on whether we have a valid log page. */
3874                 if (!rst_info.valid_page) {
3875                         open_log_count = get_random_u32();
3876                         goto init_log_instance;
3877                 }
3878                 open_log_count = le32_to_cpu(ra2->open_log_count);
3879
3880                 /*
3881                  * If the restart page size isn't changing then we want to
3882                  * check how much work we need to do.
3883                  */
3884                 if (page_size != le32_to_cpu(rst_info.r_page->sys_page_size))
3885                         goto init_log_instance;
3886
3887 init_log_instance:
3888                 log_init_pg_hdr(log, page_size, page_size, 1, 1);
3889
3890                 log_create(log, l_size, rst_info.last_lsn, open_log_count,
3891                            wrapped, use_multi_page);
3892
3893                 ra = log_create_ra(log);
3894                 if (!ra) {
3895                         err = -ENOMEM;
3896                         goto out;
3897                 }
3898                 log->ra = ra;
3899
3900                 /*
3901                  * Put the restart areas and initialize the log file as required.
3902                  */
3903                 goto process_log;
3904         }
3905
3906         if (!ra2) {
3907                 err = -EINVAL;
3908                 goto out;
3909         }
3910
3911         /*
3912          * If the log page or the system page sizes have changed, we can't
3913          * use the log file. We must use the system page size instead of the
3914          * default size if there was not a clean shutdown.
3915          */
3916         t32 = le32_to_cpu(rst_info.r_page->sys_page_size);
3917         if (page_size != t32) {
3918                 l_size = orig_file_size;
3919                 page_size =
3920                         norm_file_page(t32, &l_size, t32 == DefaultLogPageSize);
3921         }
3922
3923         if (page_size != t32 ||
3924             page_size != le32_to_cpu(rst_info.r_page->page_size)) {
3925                 err = -EINVAL;
3926                 goto out;
3927         }
3928
3929         /* If the file size has shrunk then we won't mount it. */
3930         if (l_size < le64_to_cpu(ra2->l_size)) {
3931                 err = -EINVAL;
3932                 goto out;
3933         }
3934
3935         log_init_pg_hdr(log, page_size, page_size,
3936                         le16_to_cpu(rst_info.r_page->major_ver),
3937                         le16_to_cpu(rst_info.r_page->minor_ver));
3938
3939         log->l_size = le64_to_cpu(ra2->l_size);
3940         log->seq_num_bits = le32_to_cpu(ra2->seq_num_bits);
3941         log->file_data_bits = sizeof(u64) * 8 - log->seq_num_bits;
3942         log->seq_num_mask = (8 << log->file_data_bits) - 1;
3943         log->last_lsn = le64_to_cpu(ra2->current_lsn);
3944         log->seq_num = log->last_lsn >> log->file_data_bits;
3945         log->ra_off = le16_to_cpu(rst_info.r_page->ra_off);
3946         log->restart_size = log->sys_page_size - log->ra_off;
3947         log->record_header_len = le16_to_cpu(ra2->rec_hdr_len);
3948         log->ra_size = le16_to_cpu(ra2->ra_len);
3949         log->data_off = le16_to_cpu(ra2->data_off);
3950         log->data_size = log->page_size - log->data_off;
3951         log->reserved = log->data_size - log->record_header_len;
3952
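        /*
         * lsn_to_vbo() maps an lsn to its byte offset in the log file: the
         * top seq_num_bits of an lsn hold the wrap sequence number (cf.
         * log->seq_num above), and the low file_data_bits locate the data in
         * the file, roughly vbo = (lsn & ((1ull << file_data_bits) - 1)) << 3,
         * as lsns count in 8-byte units.
         */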
3953         vbo = lsn_to_vbo(log, log->last_lsn);
3954
3955         if (vbo < log->first_page) {
3956                 /* This is a pseudo lsn. */
3957                 log->l_flags |= NTFSLOG_NO_LAST_LSN;
3958                 log->next_page = log->first_page;
3959                 goto find_oldest;
3960         }
3961
3962         /* Find the end of this log record. */
3963         off = final_log_off(log, log->last_lsn,
3964                             le32_to_cpu(ra2->last_lsn_data_len));
3965
3966         /* If we wrapped the file then increment the sequence number. */
3967         if (off <= vbo) {
3968                 log->seq_num += 1;
3969                 log->l_flags |= NTFSLOG_WRAPPED;
3970         }
3971
3972         /* Now compute the next log page to use. */
3973         vbo &= ~log->sys_page_mask;
3974         tail = log->page_size - (off & log->page_mask) - 1;
3975
3976         /*
3977          * If we can fit another log record on the page,
3978          * move back a page in the log file.
3979          */
3980         if (tail >= log->record_header_len) {
3981                 log->l_flags |= NTFSLOG_REUSE_TAIL;
3982                 log->next_page = vbo;
3983         } else {
3984                 log->next_page = next_page_off(log, vbo);
3985         }
3986
3987 find_oldest:
3988         /*
3989          * Find the oldest client lsn. Use the last
3990          * flushed lsn as a starting point.
3991          */
3992         log->oldest_lsn = log->last_lsn;
3993         oldest_client_lsn(Add2Ptr(ra2, le16_to_cpu(ra2->client_off)),
3994                           ra2->client_idx[1], &log->oldest_lsn);
3995         log->oldest_lsn_off = lsn_to_vbo(log, log->oldest_lsn);
3996
3997         if (log->oldest_lsn_off < log->first_page)
3998                 log->l_flags |= NTFSLOG_NO_OLDEST_LSN;
3999
4000         if (!(ra2->flags & RESTART_SINGLE_PAGE_IO))
4001                 log->l_flags |= NTFSLOG_WRAPPED | NTFSLOG_MULTIPLE_PAGE_IO;
4002
4003         log->current_openlog_count = le32_to_cpu(ra2->open_log_count);
4004         log->total_avail_pages = log->l_size - log->first_page;
4005         log->total_avail = log->total_avail_pages >> log->page_bits;
4006         log->max_current_avail = log->total_avail * log->reserved;
4007         log->total_avail = log->total_avail * log->data_size;
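        /*
         * Each usable log page contributes data_size bytes of client data
         * (page_size minus the data_off header area), of which "reserved"
         * bytes remain once a record header is taken; the two products above
         * are therefore byte counts of total and reservable log space.
         */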
4008
4009         log->current_avail = current_log_avail(log);
4010
4011         ra = kzalloc(log->restart_size, GFP_NOFS);
4012         if (!ra) {
4013                 err = -ENOMEM;
4014                 goto out;
4015         }
4016         log->ra = ra;
4017
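        /*
         * Copy the on-disk restart area. If its client array is not at the
         * canonical offset, rebuild it below as a single client record at
         * offsetof(struct RESTART_AREA, clients) with a re-randomized
         * open_log_count.
         */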
4018         t16 = le16_to_cpu(ra2->client_off);
4019         if (t16 == offsetof(struct RESTART_AREA, clients)) {
4020                 memcpy(ra, ra2, log->ra_size);
4021         } else {
4022                 memcpy(ra, ra2, offsetof(struct RESTART_AREA, clients));
4023                 memcpy(ra->clients, Add2Ptr(ra2, t16),
4024                        le16_to_cpu(ra2->ra_len) - t16);
4025
4026                 log->current_openlog_count = get_random_u32();
4027                 ra->open_log_count = cpu_to_le32(log->current_openlog_count);
4028                 log->ra_size = offsetof(struct RESTART_AREA, clients) +
4029                                sizeof(struct CLIENT_REC);
4030                 ra->client_off =
4031                         cpu_to_le16(offsetof(struct RESTART_AREA, clients));
4032                 ra->ra_len = cpu_to_le16(log->ra_size);
4033         }
4034
4035         le32_add_cpu(&ra->open_log_count, 1);
4036
4037         /* Now we need to walk through looking for the last lsn. */
4038         err = last_log_lsn(log);
4039         if (err)
4040                 goto out;
4041
4042         log->current_avail = current_log_avail(log);
4043
4044         /* Remember which restart area to write first. */
4045         log->init_ra = !!rst_info.vbo;
4046
4047 process_log:
4048         /* Supported versions: 1.0, 1.1, 2.0 (log->major_ver/minor_ver are short values). */
4049         switch ((log->major_ver << 16) + log->minor_ver) {
4050         case 0x10000:
4051         case 0x10001:
4052         case 0x20000:
4053                 break;
4054         default:
4055                 ntfs_warn(sbi->sb, "\x24LogFile version %d.%d is not supported",
4056                           log->major_ver, log->minor_ver);
4057                 err = -EOPNOTSUPP;
4058                 log->set_dirty = true;
4059                 goto out;
4060         }
4061
4062         /* One client "NTFS" per logfile. */
4063         ca = Add2Ptr(ra, le16_to_cpu(ra->client_off));
4064
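        /*
         * Walk the in-use client list (headed by client_idx[1]) looking for
         * the "NTFS" client record; if none is found, take a record from the
         * free list (client_idx[0]) and initialize it as the "NTFS" client.
         */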
4065         for (client = ra->client_idx[1];; client = cr->next_client) {
4066                 if (client == LFS_NO_CLIENT_LE) {
4067                         /* Insert "NTFS" client LogFile. */
4068                         client = ra->client_idx[0];
4069                         if (client == LFS_NO_CLIENT_LE) {
4070                                 err = -EINVAL;
4071                                 goto out;
4072                         }
4073
4074                         t16 = le16_to_cpu(client);
4075                         cr = ca + t16;
4076
4077                         remove_client(ca, cr, &ra->client_idx[0]);
4078
4079                         cr->restart_lsn = 0;
4080                         cr->oldest_lsn = cpu_to_le64(log->oldest_lsn);
4081                         cr->name_bytes = cpu_to_le32(8);
4082                         cr->name[0] = cpu_to_le16('N');
4083                         cr->name[1] = cpu_to_le16('T');
4084                         cr->name[2] = cpu_to_le16('F');
4085                         cr->name[3] = cpu_to_le16('S');
4086
4087                         add_client(ca, t16, &ra->client_idx[1]);
4088                         break;
4089                 }
4090
4091                 cr = ca + le16_to_cpu(client);
4092
4093                 if (cpu_to_le32(8) == cr->name_bytes &&
4094                     cpu_to_le16('N') == cr->name[0] &&
4095                     cpu_to_le16('T') == cr->name[1] &&
4096                     cpu_to_le16('F') == cr->name[2] &&
4097                     cpu_to_le16('S') == cr->name[3])
4098                         break;
4099         }
4100
4101         /* Update the client handle with the client block information. */
4102         log->client_id.seq_num = cr->seq_num;
4103         log->client_id.client_idx = client;
4104
4105         err = read_rst_area(log, &rst, &ra_lsn);
4106         if (err)
4107                 goto out;
4108
4109         if (!rst)
4110                 goto out;
4111
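        /*
         * Open attribute entries differ by restart version: version 0 uses
         * the 0x2C-byte OPEN_ATTR_ENRTY_32 layout, version 1 the 0x28-byte
         * OPEN_ATTR_ENRTY layout (converted below where needed).
         */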
4112         bytes_per_attr_entry = !rst->major_ver ? 0x2C : 0x28;
4113
4114         checkpt_lsn = le64_to_cpu(rst->check_point_start);
4115         if (!checkpt_lsn)
4116                 checkpt_lsn = ra_lsn;
4117
4118         /* Allocate and Read the Transaction Table. */
4119         if (!rst->transact_table_len)
4120                 goto check_dirty_page_table;
4121
4122         t64 = le64_to_cpu(rst->transact_table_lsn);
4123         err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
4124         if (err)
4125                 goto out;
4126
4127         lrh = lcb->log_rec;
4128         frh = lcb->lrh;
4129         rec_len = le32_to_cpu(frh->client_data_len);
4130
4131         if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
4132                            bytes_per_attr_entry)) {
4133                 err = -EINVAL;
4134                 goto out;
4135         }
4136
4137         t16 = le16_to_cpu(lrh->redo_off);
4138
4139         rt = Add2Ptr(lrh, t16);
4140         t32 = rec_len - t16;
4141
4142         /* Now check that this is a valid restart table. */
4143         if (!check_rstbl(rt, t32)) {
4144                 err = -EINVAL;
4145                 goto out;
4146         }
4147
4148         trtbl = kmemdup(rt, t32, GFP_NOFS);
4149         if (!trtbl) {
4150                 err = -ENOMEM;
4151                 goto out;
4152         }
4153
4154         lcb_put(lcb);
4155         lcb = NULL;
4156
4157 check_dirty_page_table:
4158         /* The next record back should be the Dirty Pages Table. */
4159         if (!rst->dirty_pages_len)
4160                 goto check_attribute_names;
4161
4162         t64 = le64_to_cpu(rst->dirty_pages_table_lsn);
4163         err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
4164         if (err)
4165                 goto out;
4166
4167         lrh = lcb->log_rec;
4168         frh = lcb->lrh;
4169         rec_len = le32_to_cpu(frh->client_data_len);
4170
4171         if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
4172                            bytes_per_attr_entry)) {
4173                 err = -EINVAL;
4174                 goto out;
4175         }
4176
4177         t16 = le16_to_cpu(lrh->redo_off);
4178
4179         rt = Add2Ptr(lrh, t16);
4180         t32 = rec_len - t16;
4181
4182         /* Now check that this is a valid restart table. */
4183         if (!check_rstbl(rt, t32)) {
4184                 err = -EINVAL;
4185                 goto out;
4186         }
4187
4188         dptbl = kmemdup(rt, t32, GFP_NOFS);
4189         if (!dptbl) {
4190                 err = -ENOMEM;
4191                 goto out;
4192         }
4193
4194         /* Convert version '0' dirty page table entries into version '1'. */
4195         if (rst->major_ver)
4196                 goto end_conv_1;
4197
4198         dp = NULL;
4199         while ((dp = enum_rstbl(dptbl, dp))) {
4200                 struct DIR_PAGE_ENTRY_32 *dp0 = (struct DIR_PAGE_ENTRY_32 *)dp;
4201                 // NOTE: Danger. Check for out-of-bounds access.
4202                 memmove(&dp->vcn, &dp0->vcn_low,
4203                         2 * sizeof(u64) +
4204                                 le32_to_cpu(dp->lcns_follow) * sizeof(u64));
4205         }
4206
4207 end_conv_1:
4208         lcb_put(lcb);
4209         lcb = NULL;
4210
4211         /*
4212          * Go through the table and remove the duplicates,
4213          * remembering the oldest lsn values.
4214          */
4215         if (sbi->cluster_size <= log->page_size)
4216                 goto trace_dp_table;
4217
4218         dp = NULL;
4219         while ((dp = enum_rstbl(dptbl, dp))) {
4220                 struct DIR_PAGE_ENTRY *next = dp;
4221
4222                 while ((next = enum_rstbl(dptbl, next))) {
4223                         if (next->target_attr == dp->target_attr &&
4224                             next->vcn == dp->vcn) {
4225                                 if (le64_to_cpu(next->oldest_lsn) <
4226                                     le64_to_cpu(dp->oldest_lsn)) {
4227                                         dp->oldest_lsn = next->oldest_lsn;
4228                                 }
4229
4230                                 free_rsttbl_idx(dptbl, PtrOffset(dptbl, next));
4231                         }
4232                 }
4233         }
4234 trace_dp_table:
4235 check_attribute_names:
4236         /* The next record should be the Attribute Names. */
4237         if (!rst->attr_names_len)
4238                 goto check_attr_table;
4239
4240         t64 = le64_to_cpu(rst->attr_names_lsn);
4241         err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
4242         if (err)
4243                 goto out;
4244
4245         lrh = lcb->log_rec;
4246         frh = lcb->lrh;
4247         rec_len = le32_to_cpu(frh->client_data_len);
4248
4249         if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
4250                            bytes_per_attr_entry)) {
4251                 err = -EINVAL;
4252                 goto out;
4253         }
4254
4255         t32 = lrh_length(lrh);
4256         rec_len -= t32;
4257
4258         attr_names = kmemdup(Add2Ptr(lrh, t32), rec_len, GFP_NOFS);
4259
4260         lcb_put(lcb);
4261         lcb = NULL;
4262
4263 check_attr_table:
4264         /* The next record should be the attribute Table. */
4265         if (!rst->open_attr_len)
4266                 goto check_attribute_names2;
4267
4268         t64 = le64_to_cpu(rst->open_attr_table_lsn);
4269         err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
4270         if (err)
4271                 goto out;
4272
4273         lrh = lcb->log_rec;
4274         frh = lcb->lrh;
4275         rec_len = le32_to_cpu(frh->client_data_len);
4276
4277         if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
4278                            bytes_per_attr_entry)) {
4279                 err = -EINVAL;
4280                 goto out;
4281         }
4282
4283         t16 = le16_to_cpu(lrh->redo_off);
4284
4285         rt = Add2Ptr(lrh, t16);
4286         t32 = rec_len - t16;
4287
4288         if (!check_rstbl(rt, t32)) {
4289                 err = -EINVAL;
4290                 goto out;
4291         }
4292
4293         oatbl = kmemdup(rt, t32, GFP_NOFS);
4294         if (!oatbl) {
4295                 err = -ENOMEM;
4296                 goto out;
4297         }
4298
4299         log->open_attr_tbl = oatbl;
4300
4301         /* Clear all of the Attr pointers. */
4302         oe = NULL;
4303         while ((oe = enum_rstbl(oatbl, oe))) {
4304                 if (!rst->major_ver) {
4305                         struct OPEN_ATTR_ENRTY_32 oe0;
4306
4307                         /* Really 'oe' points to OPEN_ATTR_ENRTY_32. */
4308                         memcpy(&oe0, oe, SIZEOF_OPENATTRIBUTEENTRY0);
4309
4310                         oe->bytes_per_index = oe0.bytes_per_index;
4311                         oe->type = oe0.type;
4312                         oe->is_dirty_pages = oe0.is_dirty_pages;
4313                         oe->name_len = 0;
4314                         oe->ref = oe0.ref;
4315                         oe->open_record_lsn = oe0.open_record_lsn;
4316                 }
4317
4318                 oe->is_attr_name = 0;
4319                 oe->ptr = NULL;
4320         }
4321
4322         lcb_put(lcb);
4323         lcb = NULL;
4324
4325 check_attribute_names2:
4326         if (!rst->attr_names_len)
4327                 goto trace_attribute_table;
4328
4329         ane = attr_names;
4330         if (!oatbl)
4331                 goto trace_attribute_table;
4332         while (ane->off) {
4333                 /* TODO: Clear table on exit! */
4334                 oe = Add2Ptr(oatbl, le16_to_cpu(ane->off));
4335                 t16 = le16_to_cpu(ane->name_bytes);
4336                 oe->name_len = t16 / sizeof(short);
4337                 oe->ptr = ane->name;
4338                 oe->is_attr_name = 2;
4339                 ane = Add2Ptr(ane, sizeof(struct ATTR_NAME_ENTRY) + t16);
4340         }
4341
4342 trace_attribute_table:
4343         /*
4344          * If the checkpt_lsn is zero, then this is a freshly
4345          * formatted disk and we have no work to do.
4346          */
4347         if (!checkpt_lsn) {
4348                 err = 0;
4349                 goto out;
4350         }
4351
4352         if (!oatbl) {
4353                 oatbl = init_rsttbl(bytes_per_attr_entry, 8);
4354                 if (!oatbl) {
4355                         err = -ENOMEM;
4356                         goto out;
4357                 }
4358         }
4359
4360         log->open_attr_tbl = oatbl;
4361
4362         /* Start the analysis pass from the Checkpoint lsn. */
4363         rec_lsn = checkpt_lsn;
4364
4365         /* Read the first lsn. */
4366         err = read_log_rec_lcb(log, checkpt_lsn, lcb_ctx_next, &lcb);
4367         if (err)
4368                 goto out;
4369
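        /*
         * Analysis pass: each client record between the checkpoint and the
         * end of the log updates the transaction table, the dirty page table
         * or the open attribute table via the dispatch below.
         */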
4370         /* Loop to read all subsequent records to the end of the log file. */
4371 next_log_record_analyze:
4372         err = read_next_log_rec(log, lcb, &rec_lsn);
4373         if (err)
4374                 goto out;
4375
4376         if (!rec_lsn)
4377                 goto end_log_records_enumerate;
4378
4379         frh = lcb->lrh;
4380         transact_id = le32_to_cpu(frh->transact_id);
4381         rec_len = le32_to_cpu(frh->client_data_len);
4382         lrh = lcb->log_rec;
4383
4384         if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) {
4385                 err = -EINVAL;
4386                 goto out;
4387         }
4388
4389         /*
4390          * The first lsn after the lsn remembered by the checkpoint is
4391          * the first candidate for the rlsn.
4392          */
4393         if (!rlsn)
4394                 rlsn = rec_lsn;
4395
4396         if (LfsClientRecord != frh->record_type)
4397                 goto next_log_record_analyze;
4398
4399         /*
4400          * Now update the Transaction Table for this transaction. If there
4401          * is no entry present or it is unallocated we allocate the entry.
4402          */
4403         if (!trtbl) {
4404                 trtbl = init_rsttbl(sizeof(struct TRANSACTION_ENTRY),
4405                                     INITIAL_NUMBER_TRANSACTIONS);
4406                 if (!trtbl) {
4407                         err = -ENOMEM;
4408                         goto out;
4409                 }
4410         }
4411
4412         tr = Add2Ptr(trtbl, transact_id);
4413
4414         if (transact_id >= bytes_per_rt(trtbl) ||
4415             tr->next != RESTART_ENTRY_ALLOCATED_LE) {
4416                 tr = alloc_rsttbl_from_idx(&trtbl, transact_id);
4417                 if (!tr) {
4418                         err = -ENOMEM;
4419                         goto out;
4420                 }
4421                 tr->transact_state = TransactionActive;
4422                 tr->first_lsn = cpu_to_le64(rec_lsn);
4423         }
4424
4425         tr->prev_lsn = tr->undo_next_lsn = cpu_to_le64(rec_lsn);
4426
4427         /*
4428          * If this is a compensation log record, then change
4429          * the undo_next_lsn to be the undo_next_lsn of this record.
4430          */
4431         if (lrh->undo_op == cpu_to_le16(CompensationLogRecord))
4432                 tr->undo_next_lsn = frh->client_undo_next_lsn;
4433
4434         /* Dispatch to handle log record depending on type. */
4435         switch (le16_to_cpu(lrh->redo_op)) {
4436         case InitializeFileRecordSegment:
4437         case DeallocateFileRecordSegment:
4438         case WriteEndOfFileRecordSegment:
4439         case CreateAttribute:
4440         case DeleteAttribute:
4441         case UpdateResidentValue:
4442         case UpdateNonresidentValue:
4443         case UpdateMappingPairs:
4444         case SetNewAttributeSizes:
4445         case AddIndexEntryRoot:
4446         case DeleteIndexEntryRoot:
4447         case AddIndexEntryAllocation:
4448         case DeleteIndexEntryAllocation:
4449         case WriteEndOfIndexBuffer:
4450         case SetIndexEntryVcnRoot:
4451         case SetIndexEntryVcnAllocation:
4452         case UpdateFileNameRoot:
4453         case UpdateFileNameAllocation:
4454         case SetBitsInNonresidentBitMap:
4455         case ClearBitsInNonresidentBitMap:
4456         case UpdateRecordDataRoot:
4457         case UpdateRecordDataAllocation:
4458         case ZeroEndOfFileRecord:
4459                 t16 = le16_to_cpu(lrh->target_attr);
4460                 t64 = le64_to_cpu(lrh->target_vcn);
4461                 dp = find_dp(dptbl, t16, t64);
4462
4463                 if (dp)
4464                         goto copy_lcns;
4465
4466                 /*
4467                  * Calculate the number of clusters per page of the system
4468                  * that wrote the checkpoint, possibly creating the table.
4469                  */
4470                 if (dptbl) {
4471                         t32 = (le16_to_cpu(dptbl->size) -
4472                                sizeof(struct DIR_PAGE_ENTRY)) /
4473                               sizeof(u64);
4474                 } else {
4475                         t32 = log->clst_per_page;
4476                         kfree(dptbl);
4477                         dptbl = init_rsttbl(struct_size(dp, page_lcns, t32),
4478                                             32);
4479                         if (!dptbl) {
4480                                 err = -ENOMEM;
4481                                 goto out;
4482                         }
4483                 }
4484
4485                 dp = alloc_rsttbl_idx(&dptbl);
4486                 if (!dp) {
4487                         err = -ENOMEM;
4488                         goto out;
4489                 }
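                /*
                 * Fill in the new dirty page entry; vcn is rounded down to a
                 * t32-cluster boundary so that page_lcns[] can be indexed by
                 * (target_vcn - dp->vcn) at copy_lcns below.
                 */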
4490                 dp->target_attr = cpu_to_le32(t16);
4491                 dp->transfer_len = cpu_to_le32(t32 << sbi->cluster_bits);
4492                 dp->lcns_follow = cpu_to_le32(t32);
4493                 dp->vcn = cpu_to_le64(t64 & ~((u64)t32 - 1));
4494                 dp->oldest_lsn = cpu_to_le64(rec_lsn);
4495
4496 copy_lcns:
4497                 /*
4498                  * Copy the Lcns from the log record into the Dirty Page Entry.
4499                  * TODO: For different page size support, must somehow make the
4500                  * whole routine a loop in case the Lcns do not fit below.
4501                  */
4502                 t16 = le16_to_cpu(lrh->lcns_follow);
4503                 for (i = 0; i < t16; i++) {
4504                         size_t j = (size_t)(le64_to_cpu(lrh->target_vcn) -
4505                                             le64_to_cpu(dp->vcn));
4506                         dp->page_lcns[j + i] = lrh->page_lcns[i];
4507                 }
4508
4509                 goto next_log_record_analyze;
4510
4511         case DeleteDirtyClusters: {
4512                 u32 range_count =
4513                         le16_to_cpu(lrh->redo_len) / sizeof(struct LCN_RANGE);
4514                 const struct LCN_RANGE *r =
4515                         Add2Ptr(lrh, le16_to_cpu(lrh->redo_off));
4516
4517                 /* Loop through all of the Lcn ranges in this log record. */
4518                 for (i = 0; i < range_count; i++, r++) {
4519                         u64 lcn0 = le64_to_cpu(r->lcn);
4520                         u64 lcn_e = lcn0 + le64_to_cpu(r->len) - 1;
4521
4522                         dp = NULL;
4523                         while ((dp = enum_rstbl(dptbl, dp))) {
4524                                 u32 j;
4525
4526                                 t32 = le32_to_cpu(dp->lcns_follow);
4527                                 for (j = 0; j < t32; j++) {
4528                                         t64 = le64_to_cpu(dp->page_lcns[j]);
4529                                         if (t64 >= lcn0 && t64 <= lcn_e)
4530                                                 dp->page_lcns[j] = 0;
4531                                 }
4532                         }
4533                 }
4534                 goto next_log_record_analyze;
4536         }
4537
4538         case OpenNonresidentAttribute:
4539                 t16 = le16_to_cpu(lrh->target_attr);
4540                 if (t16 >= bytes_per_rt(oatbl)) {
4541                         /*
4542                          * Compute how big the table needs to be.
4543                          * Add 10 extra entries for some cushion.
4544                          */
4545                         u32 new_e = t16 / le16_to_cpu(oatbl->size);
4546
4547                         new_e += 10 - le16_to_cpu(oatbl->used);
4548
4549                         oatbl = extend_rsttbl(oatbl, new_e, ~0u);
4550                         log->open_attr_tbl = oatbl;
4551                         if (!oatbl) {
4552                                 err = -ENOMEM;
4553                                 goto out;
4554                         }
4555                 }
4556
4557                 /* Point to the entry being opened. */
4558                 oe = alloc_rsttbl_from_idx(&oatbl, t16);
4559                 log->open_attr_tbl = oatbl;
4560                 if (!oe) {
4561                         err = -ENOMEM;
4562                         goto out;
4563                 }
4564
4565                 /* Initialize this entry from the log record. */
4566                 t16 = le16_to_cpu(lrh->redo_off);
4567                 if (!rst->major_ver) {
4568                         /* Convert version '0' into version '1'. */
4569                         struct OPEN_ATTR_ENRTY_32 *oe0 = Add2Ptr(lrh, t16);
4570
4571                         oe->bytes_per_index = oe0->bytes_per_index;
4572                         oe->type = oe0->type;
4573                         oe->is_dirty_pages = oe0->is_dirty_pages;
4574                         oe->name_len = 0; //oe0.name_len;
4575                         oe->ref = oe0->ref;
4576                         oe->open_record_lsn = oe0->open_record_lsn;
4577                 } else {
4578                         memcpy(oe, Add2Ptr(lrh, t16), bytes_per_attr_entry);
4579                 }
4580
4581                 t16 = le16_to_cpu(lrh->undo_len);
4582                 if (t16) {
4583                         oe->ptr = kmalloc(t16, GFP_NOFS);
4584                         if (!oe->ptr) {
4585                                 err = -ENOMEM;
4586                                 goto out;
4587                         }
4588                         oe->name_len = t16 / sizeof(short);
4589                         memcpy(oe->ptr,
4590                                Add2Ptr(lrh, le16_to_cpu(lrh->undo_off)), t16);
4591                         oe->is_attr_name = 1;
4592                 } else {
4593                         oe->ptr = NULL;
4594                         oe->is_attr_name = 0;
4595                 }
4596
4597                 goto next_log_record_analyze;
4598
4599         case HotFix:
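                     /*
                      * A HotFix replaces the Lcn backing a single Vcn; patch
                      * the Dirty Page Entry only if that page is still being
                      * tracked.
                      */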
4600                 t16 = le16_to_cpu(lrh->target_attr);
4601                 t64 = le64_to_cpu(lrh->target_vcn);
4602                 dp = find_dp(dptbl, t16, t64);
4603                 if (dp) {
4604                         size_t j = le64_to_cpu(lrh->target_vcn) -
4605                                    le64_to_cpu(dp->vcn);
4606                         if (dp->page_lcns[j])
4607                                 dp->page_lcns[j] = lrh->page_lcns[0];
4608                 }
4609                 goto next_log_record_analyze;
4610
4611         case EndTopLevelAction:
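                     /*
                      * Re-chain the transaction's Lsns so a later Undo resumes
                      * below the completed top-level action.
                      */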
4612                 tr = Add2Ptr(trtbl, transact_id);
4613                 tr->prev_lsn = cpu_to_le64(rec_lsn);
4614                 tr->undo_next_lsn = frh->client_undo_next_lsn;
4615                 goto next_log_record_analyze;
4616
4617         case PrepareTransaction:
4618                 tr = Add2Ptr(trtbl, transact_id);
4619                 tr->transact_state = TransactionPrepared;
4620                 goto next_log_record_analyze;
4621
4622         case CommitTransaction:
4623                 tr = Add2Ptr(trtbl, transact_id);
4624                 tr->transact_state = TransactionCommitted;
4625                 goto next_log_record_analyze;
4626
4627         case ForgetTransaction:
4628                 free_rsttbl_idx(trtbl, transact_id);
4629                 goto next_log_record_analyze;
4630
4631         case Noop:
4632         case OpenAttributeTableDump:
4633         case AttributeNamesDump:
4634         case DirtyPageTableDump:
4635         case TransactionTableDump:
4636                 /* The following cases require no action in the Analysis Pass. */
4637                 goto next_log_record_analyze;
4638
4639         default:
4640                 /*
4641                  * All codes will be explicitly handled.
4642                  * If we see a code we do not expect, then we are in trouble.
4643                  */
4644                 goto next_log_record_analyze;
4645         }
4646
4647 end_log_records_enumerate:
4648         lcb_put(lcb);
4649         lcb = NULL;
4650
4651         /*
4652          * Scan the Dirty Page Table and Transaction Table for
4653          * the lowest lsn, and return it as the Redo lsn.
4654          */
4655         dp = NULL;
4656         while ((dp = enum_rstbl(dptbl, dp))) {
4657                 t64 = le64_to_cpu(dp->oldest_lsn);
4658                 if (t64 && t64 < rlsn)
4659                         rlsn = t64;
4660         }
4661
4662         tr = NULL;
4663         while ((tr = enum_rstbl(trtbl, tr))) {
4664                 t64 = le64_to_cpu(tr->first_lsn);
4665                 if (t64 && t64 < rlsn)
4666                         rlsn = t64;
4667         }
4668
4669         /*
4670          * Only proceed if the Dirty Page Table or the
4671          * Transaction Table is not empty.
4672          */
4673         if ((!dptbl || !dptbl->total) && (!trtbl || !trtbl->total))
4674                 goto end_reply;
4675
4676         sbi->flags |= NTFS_FLAGS_NEED_REPLAY;
4677         if (is_ro)
4678                 goto out;
4679
4680         /* Reopen all of the attributes with dirty pages. */
4681         oe = NULL;
4682 next_open_attribute:
4683
4684         oe = enum_rstbl(oatbl, oe);
4685         if (!oe) {
4686                 err = 0;
4687                 dp = NULL;
4688                 goto next_dirty_page;
4689         }
4690
4691         oa = kzalloc(sizeof(struct OpenAttr), GFP_NOFS);
4692         if (!oa) {
4693                 err = -ENOMEM;
4694                 goto out;
4695         }
4696
4697         inode = ntfs_iget5(sbi->sb, &oe->ref, NULL);
4698         if (IS_ERR(inode))
4699                 goto fake_attr;
4700
4701         if (is_bad_inode(inode)) {
4702                 iput(inode);
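                     /*
                      * Could not use the real inode/attribute: build a
                      * stand-in non-resident attribute so recovery can still
                      * track this entry.
                      */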
4703 fake_attr:
4704                 if (oa->ni) {
4705                         iput(&oa->ni->vfs_inode);
4706                         oa->ni = NULL;
4707                 }
4708
4709                 attr = attr_create_nonres_log(sbi, oe->type, 0, oe->ptr,
4710                                               oe->name_len, 0);
4711                 if (!attr) {
4712                         kfree(oa);
4713                         err = -ENOMEM;
4714                         goto out;
4715                 }
4716                 oa->attr = attr;
4717                 oa->run1 = &oa->run0;
4718                 goto final_oe;
4719         }
4720
4721         ni_oe = ntfs_i(inode);
4722         oa->ni = ni_oe;
4723
4724         attr = ni_find_attr(ni_oe, NULL, NULL, oe->type, oe->ptr, oe->name_len,
4725                             NULL, NULL);
4726
4727         if (!attr)
4728                 goto fake_attr;
4729
4730         t32 = le32_to_cpu(attr->size);
4731         oa->attr = kmemdup(attr, t32, GFP_NOFS);
4732         if (!oa->attr)
4733                 goto fake_attr;
4734
4735         if (!S_ISDIR(inode->i_mode)) {
4736                 if (attr->type == ATTR_DATA && !attr->name_len) {
4737                         oa->run1 = &ni_oe->file.run;
4738                         goto final_oe;
4739                 }
4740         } else {
4741                 if (attr->type == ATTR_ALLOC &&
4742                     attr->name_len == ARRAY_SIZE(I30_NAME) &&
4743                     !memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME))) {
4744                         oa->run1 = &ni_oe->dir.alloc_run;
4745                         goto final_oe;
4746                 }
4747         }
4748
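             /*
              * Unpack the attribute's run list so the dirty page Lcns can be
              * merged into it below.
              */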
4749         if (attr->non_res) {
4750                 u16 roff = le16_to_cpu(attr->nres.run_off);
4751                 CLST svcn = le64_to_cpu(attr->nres.svcn);
4752
4753                 if (roff > t32) {
4754                         kfree(oa->attr);
4755                         oa->attr = NULL;
4756                         goto fake_attr;
4757                 }
4758
4759                 err = run_unpack(&oa->run0, sbi, inode->i_ino, svcn,
4760                                  le64_to_cpu(attr->nres.evcn), svcn,
4761                                  Add2Ptr(attr, roff), t32 - roff);
4762                 if (err < 0) {
4763                         kfree(oa->attr);
4764                         oa->attr = NULL;
4765                         goto fake_attr;
4766                 }
4767                 err = 0;
4768         }
4769         oa->run1 = &oa->run0;
4770         attr = oa->attr;
4771
4772 final_oe:
4773         if (oe->is_attr_name == 1)
4774                 kfree(oe->ptr);
4775         oe->is_attr_name = 0;
4776         oe->ptr = oa;
4777         oe->name_len = attr->name_len;
4778
4779         goto next_open_attribute;
4780
4781         /*
4782          * Now loop through the dirty page table to extract all of the
4783          * Vcn/Lcn mappings that we have, and insert each into the appropriate run.
4784          */
4785 next_dirty_page:
4786         dp = enum_rstbl(dptbl, dp);
4787         if (!dp)
4788                 goto do_redo_1;
4789
4790         oe = Add2Ptr(oatbl, le32_to_cpu(dp->target_attr));
4791
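         /* Skip slots that are not allocated in the Open Attribute Table. */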
4792         if (oe->next != RESTART_ENTRY_ALLOCATED_LE)
4793                 goto next_dirty_page;
4794
4795         oa = oe->ptr;
4796         if (!oa)
4797                 goto next_dirty_page;
4798
4799         i = -1;
4800 next_dirty_page_vcn:
4801         i += 1;
4802         if (i >= le32_to_cpu(dp->lcns_follow))
4803                 goto next_dirty_page;
4804
4805         vcn = le64_to_cpu(dp->vcn) + i;
4806         size = (vcn + 1) << sbi->cluster_bits;
4807
4808         if (!dp->page_lcns[i])
4809                 goto next_dirty_page_vcn;
4810
4811         rno = ino_get(&oe->ref);
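         /*
          * Do not remap the $MFT data clusters that back the reserved
          * records (up to $Volume).
          */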
4812         if (rno <= MFT_REC_MIRR &&
4813             size < (MFT_REC_VOL + 1) * sbi->record_size &&
4814             oe->type == ATTR_DATA) {
4815                 goto next_dirty_page_vcn;
4816         }
4817
4818         lcn = le64_to_cpu(dp->page_lcns[i]);
4819
4820         if ((!run_lookup_entry(oa->run1, vcn, &lcn0, &len0, NULL) ||
4821              lcn0 != lcn) &&
4822             !run_add_entry(oa->run1, vcn, lcn, 1, false)) {
4823                 err = -ENOMEM;
4824                 goto out;
4825         }
4826         attr = oa->attr;
4827         if (size > le64_to_cpu(attr->nres.alloc_size)) {
4828                 attr->nres.valid_size = attr->nres.data_size =
4829                         attr->nres.alloc_size = cpu_to_le64(size);
4830         }
4831         goto next_dirty_page_vcn;
4832
4833 do_redo_1:
4834         /*
4835          * Perform the Redo Pass, to restore all of the dirty pages to the same
4836          * contents that they had immediately before the crash. If the dirty
4837          * page table is empty, then we can skip the entire Redo Pass.
4838          */
4839         if (!dptbl || !dptbl->total)
4840                 goto do_undo_action;
4841
4842         rec_lsn = rlsn;
4843
4844         /*
4845          * Read the record at the Redo lsn, before falling
4846          * into common code to handle each record.
4847          */
4848         err = read_log_rec_lcb(log, rlsn, lcb_ctx_next, &lcb);
4849         if (err)
4850                 goto out;
4851
4852         /*
4853          * Now loop to read all of our log records forwards, until
4854          * we hit the end of the file, cleaning up at the end.
4855          */
4856 do_action_next:
4857         frh = lcb->lrh;
4858
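         /* Only client data records carry Redo information; skip the rest. */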
4859         if (LfsClientRecord != frh->record_type)
4860                 goto read_next_log_do_action;
4861
4862         transact_id = le32_to_cpu(frh->transact_id);
4863         rec_len = le32_to_cpu(frh->client_data_len);
4864         lrh = lcb->log_rec;
4865
4866         if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) {
4867                 err = -EINVAL;
4868                 goto out;
4869         }
4870
4871         /* Ignore log records that do not update pages. */
4872         if (lrh->lcns_follow)
4873                 goto find_dirty_page;
4874
4875         goto read_next_log_do_action;
4876
4877 find_dirty_page:
4878         t16 = le16_to_cpu(lrh->target_attr);
4879         t64 = le64_to_cpu(lrh->target_vcn);
4880         dp = find_dp(dptbl, t16, t64);
4881
4882         if (!dp)
4883                 goto read_next_log_do_action;
4884
4885         if (rec_lsn < le64_to_cpu(dp->oldest_lsn))
4886                 goto read_next_log_do_action;
4887
4888         t16 = le16_to_cpu(lrh->target_attr);
4889         if (t16 >= bytes_per_rt(oatbl)) {
4890                 err = -EINVAL;
4891                 goto out;
4892         }
4893
4894         oe = Add2Ptr(oatbl, t16);
4895
4896         if (oe->next != RESTART_ENTRY_ALLOCATED_LE) {
4897                 err = -EINVAL;
4898                 goto out;
4899         }
4900
4901         oa = oe->ptr;
4902
4903         if (!oa) {
4904                 err = -EINVAL;
4905                 goto out;
4906         }
4907         attr = oa->attr;
4908
4909         vcn = le64_to_cpu(lrh->target_vcn);
4910
4911         if (!run_lookup_entry(oa->run1, vcn, &lcn, NULL, NULL) ||
4912             lcn == SPARSE_LCN) {
4913                 goto read_next_log_do_action;
4914         }
4915
4916         /* Point to the Redo data and get its length. */
4917         data = Add2Ptr(lrh, le16_to_cpu(lrh->redo_off));
4918         dlen = le16_to_cpu(lrh->redo_len);
4919
4920         /* Shorten length by any Lcns which were deleted. */
4921         saved_len = dlen;
4922
4923         for (i = le16_to_cpu(lrh->lcns_follow); i; i--) {
4924                 size_t j;
4925                 u32 alen, voff;
4926
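                     /*
                      * voff is the byte offset of the update relative to the
                      * first Vcn described by this log record.
                      */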
4927                 voff = le16_to_cpu(lrh->record_off) +
4928                        le16_to_cpu(lrh->attr_off);
4929                 voff += le16_to_cpu(lrh->cluster_off) << SECTOR_SHIFT;
4930
4931                 /* If the Vcn in question is allocated, we can just get out. */
4932                 j = le64_to_cpu(lrh->target_vcn) - le64_to_cpu(dp->vcn);
4933                 if (dp->page_lcns[j + i - 1])
4934                         break;
4935
4936                 if (!saved_len)
4937                         saved_len = 1;
4938
4939                 /*
4940                  * Calculate the allocated space left relative to the
4941                  * log record Vcn, after removing this unallocated Vcn.
4942                  */
4943                 alen = (i - 1) << sbi->cluster_bits;
4944
4945                 /*
4946                  * If the update described by this log record goes beyond
4947                  * the allocated space, then we will have to reduce the length.
4948                  */
4949                 if (voff >= alen)
4950                         dlen = 0;
4951                 else if (voff + dlen > alen)
4952                         dlen = alen - voff;
4953         }
4954
4955         /*
4956          * If the resulting dlen from above is now zero,
4957          * we can skip this log record.
4958          */
4959         if (!dlen && saved_len)
4960                 goto read_next_log_do_action;
4961
4962         t16 = le16_to_cpu(lrh->redo_op);
4963         if (can_skip_action(t16))
4964                 goto read_next_log_do_action;
4965
4966         /* Apply the Redo operation in a common routine. */
4967         err = do_action(log, oe, lrh, t16, data, dlen, rec_len, &rec_lsn);
4968         if (err)
4969                 goto out;
4970
4971         /* Keep reading and looping back until end of file. */
4972 read_next_log_do_action:
4973         err = read_next_log_rec(log, lcb, &rec_lsn);
4974         if (!err && rec_lsn)
4975                 goto do_action_next;
4976
4977         lcb_put(lcb);
4978         lcb = NULL;
4979
4980 do_undo_action:
4981         /* Scan Transaction Table. */
4982         tr = NULL;
4983 transaction_table_next:
4984         tr = enum_rstbl(trtbl, tr);
4985         if (!tr)
4986                 goto undo_action_done;
4987
4988         if (TransactionActive != tr->transact_state || !tr->undo_next_lsn) {
4989                 free_rsttbl_idx(trtbl, PtrOffset(trtbl, tr));
4990                 goto transaction_table_next;
4991         }
4992
4993         log->transaction_id = PtrOffset(trtbl, tr);
4994         undo_next_lsn = le64_to_cpu(tr->undo_next_lsn);
4995
4996         /*
4997          * We only have to do anything if the transaction has
4998          * something in its undo_next_lsn field.
4999          */
5000         if (!undo_next_lsn)
5001                 goto commit_undo;
5002
5003         /* Read the first record to be undone by this transaction. */
5004         err = read_log_rec_lcb(log, undo_next_lsn, lcb_ctx_undo_next, &lcb);
5005         if (err)
5006                 goto out;
5007
5008         /*
5009          * Now loop to read this transaction's log records backwards
5010          * through the undo chain, cleaning up at the end.
5011          */
5012 undo_action_next:
5013
5014         lrh = lcb->log_rec;
5015         frh = lcb->lrh;
5016         transact_id = le32_to_cpu(frh->transact_id);
5017         rec_len = le32_to_cpu(frh->client_data_len);
5018
5019         if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) {
5020                 err = -EINVAL;
5021                 goto out;
5022         }
5023
5024         if (lrh->undo_op == cpu_to_le16(Noop))
5025                 goto read_next_log_undo_action;
5026
5027         oe = Add2Ptr(oatbl, le16_to_cpu(lrh->target_attr));
5028         oa = oe->ptr;
5029
5030         t16 = le16_to_cpu(lrh->lcns_follow);
5031         if (!t16)
5032                 goto add_allocated_vcns;
5033
5034         is_mapped = run_lookup_entry(oa->run1, le64_to_cpu(lrh->target_vcn),
5035                                      &lcn, &clen, NULL);
5036
5037         /*
5038          * If the mapping isn't already in the table, or the mapping
5039          * corresponds to a hole, we need to make sure there is no
5040          * partial page already in memory.
5041          */
5042         if (is_mapped && lcn != SPARSE_LCN && clen >= t16)
5043                 goto add_allocated_vcns;
5044
5045         vcn = le64_to_cpu(lrh->target_vcn);
5046         vcn &= ~(u64)(log->clst_per_page - 1);
5047
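         /*
          * Grow the in-memory attribute sizes so they cover every Vcn this
          * log record touches.
          */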
5048 add_allocated_vcns:
5049         for (i = 0, vcn = le64_to_cpu(lrh->target_vcn),
5050             size = (vcn + 1) << sbi->cluster_bits;
5051              i < t16; i++, vcn += 1, size += sbi->cluster_size) {
5052                 attr = oa->attr;
5053                 if (!attr->non_res) {
5054                         if (size > le32_to_cpu(attr->res.data_size))
5055                                 attr->res.data_size = cpu_to_le32(size);
5056                 } else {
5057                         if (size > le64_to_cpu(attr->nres.data_size))
5058                                 attr->nres.valid_size = attr->nres.data_size =
5059                                         attr->nres.alloc_size =
5060                                                 cpu_to_le64(size);
5061                 }
5062         }
5063
5064         t16 = le16_to_cpu(lrh->undo_op);
5065         if (can_skip_action(t16))
5066                 goto read_next_log_undo_action;
5067
5068         /* Point to the Undo data and get its length. */
5069         data = Add2Ptr(lrh, le16_to_cpu(lrh->undo_off));
5070         dlen = le16_to_cpu(lrh->undo_len);
5071
5072         /* It is time to apply the undo action. */
5073         err = do_action(log, oe, lrh, t16, data, dlen, rec_len, NULL);
5074
5075 read_next_log_undo_action:
5076         /*
5077          * Keep reading and looping back until we have read the
5078          * last record for this transaction.
5079          */
5080         err = read_next_log_rec(log, lcb, &rec_lsn);
5081         if (err)
5082                 goto out;
5083
5084         if (rec_lsn)
5085                 goto undo_action_next;
5086
5087         lcb_put(lcb);
5088         lcb = NULL;
5089
5090 commit_undo:
5091         free_rsttbl_idx(trtbl, log->transaction_id);
5092
5093         log->transaction_id = 0;
5094
5095         goto transaction_table_next;
5096
5097 undo_action_done:
5098
5099         ntfs_update_mftmirr(sbi, 0);
5100
5101         sbi->flags &= ~NTFS_FLAGS_NEED_REPLAY;
5102
5103 end_reply:
5104
5105         err = 0;
5106         if (is_ro)
5107                 goto out;
5108
5109         rh = kzalloc(log->page_size, GFP_NOFS);
5110         if (!rh) {
5111                 err = -ENOMEM;
5112                 goto out;
5113         }
5114
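         /*
          * Build a fresh restart page; two copies are written, at offset 0
          * and at offset log->page_size.
          */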
5115         rh->rhdr.sign = NTFS_RSTR_SIGNATURE;
5116         rh->rhdr.fix_off = cpu_to_le16(offsetof(struct RESTART_HDR, fixups));
5117         t16 = (log->page_size >> SECTOR_SHIFT) + 1;
5118         rh->rhdr.fix_num = cpu_to_le16(t16);
5119         rh->sys_page_size = cpu_to_le32(log->page_size);
5120         rh->page_size = cpu_to_le32(log->page_size);
5121
5122         t16 = ALIGN(offsetof(struct RESTART_HDR, fixups) + sizeof(short) * t16,
5123                     8);
5124         rh->ra_off = cpu_to_le16(t16);
5125         rh->minor_ver = cpu_to_le16(1); // 0x1A:
5126         rh->major_ver = cpu_to_le16(1); // 0x1C:
5127
5128         ra2 = Add2Ptr(rh, t16);
5129         memcpy(ra2, ra, sizeof(struct RESTART_AREA));
5130
5131         ra2->client_idx[0] = 0;
5132         ra2->client_idx[1] = LFS_NO_CLIENT_LE;
5133         ra2->flags = cpu_to_le16(2);
5134
5135         le32_add_cpu(&ra2->open_log_count, 1);
5136
5137         ntfs_fix_pre_write(&rh->rhdr, log->page_size);
5138
5139         err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rh, log->page_size, 0);
5140         if (!err)
5141                 err = ntfs_sb_write_run(sbi, &log->ni->file.run, log->page_size,
5142                                         rh, log->page_size, 0);
5143
5144         kfree(rh);
5145         if (err)
5146                 goto out;
5147
5148 out:
5149         kfree(rst);
5150         if (lcb)
5151                 lcb_put(lcb);
5152
5153         /*
5154          * Scan the Open Attribute Table to close all of
5155          * the open attributes.
5156          */
5157         oe = NULL;
5158         while ((oe = enum_rstbl(oatbl, oe))) {
5159                 rno = ino_get(&oe->ref);
5160
5161                 if (oe->is_attr_name == 1) {
5162                         kfree(oe->ptr);
5163                         oe->ptr = NULL;
5164                         continue;
5165                 }
5166
5167                 if (oe->is_attr_name)
5168                         continue;
5169
5170                 oa = oe->ptr;
5171                 if (!oa)
5172                         continue;
5173
5174                 run_close(&oa->run0);
5175                 kfree(oa->attr);
5176                 if (oa->ni)
5177                         iput(&oa->ni->vfs_inode);
5178                 kfree(oa);
5179         }
5180
5181         kfree(trtbl);
5182         kfree(oatbl);
5183         kfree(dptbl);
5184         kfree(attr_names);
5185         kfree(rst_info.r_page);
5186
5187         kfree(ra);
5188         kfree(log->one_page_buf);
5189
5190         if (err)
5191                 sbi->flags |= NTFS_FLAGS_NEED_REPLAY;
5192
5193         if (err == -EROFS)
5194                 err = 0;
5195         else if (log->set_dirty)
5196                 ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
5197
5198         kfree(log);
5199
5200         return err;
5201 }