1// SPDX-License-Identifier: GPL-2.0
2/*
3 *
4 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
5 *
6 */
7
8#include <linux/blkdev.h>
b46acd6a 9#include <linux/fs.h>
b46acd6a 10#include <linux/random.h>
11#include <linux/slab.h>
12
13#include "debug.h"
14#include "ntfs.h"
15#include "ntfs_fs.h"
16
17/*
18 * LOG FILE structs
19 */
20
21// clang-format off
22
23#define MaxLogFileSize 0x100000000ull
24#define DefaultLogPageSize 4096
25#define MinLogRecordPages 0x30
26
27struct RESTART_HDR {
28 struct NTFS_RECORD_HEADER rhdr; // 'RSTR'
29 __le32 sys_page_size; // 0x10: Page size of the system which initialized the log.
30 __le32 page_size; // 0x14: Log page size used for this log file.
31 __le16 ra_off; // 0x18:
32 __le16 minor_ver; // 0x1A:
33 __le16 major_ver; // 0x1C:
34 __le16 fixups[];
35};
36
37#define LFS_NO_CLIENT 0xffff
38#define LFS_NO_CLIENT_LE cpu_to_le16(0xffff)
39
40struct CLIENT_REC {
41 __le64 oldest_lsn;
42 __le64 restart_lsn; // 0x08:
43 __le16 prev_client; // 0x10:
44 __le16 next_client; // 0x12:
45 __le16 seq_num; // 0x14:
46 u8 align[6]; // 0x16:
47 __le32 name_bytes; // 0x1C: In bytes.
48 __le16 name[32]; // 0x20: Name of client.
49};
50
51static_assert(sizeof(struct CLIENT_REC) == 0x60);
52
53/* Two copies of these will exist at the beginning of the log file */
54struct RESTART_AREA {
55 __le64 current_lsn; // 0x00: Current logical end of log file.
56 __le16 log_clients; // 0x08: Maximum number of clients.
57 __le16 client_idx[2]; // 0x0A: Free/use index into the client record arrays.
58 __le16 flags; // 0x0E: See RESTART_SINGLE_PAGE_IO.
59 __le32 seq_num_bits; // 0x10: The number of bits in sequence number.
60 __le16 ra_len; // 0x14:
61 __le16 client_off; // 0x16:
62 __le64 l_size; // 0x18: Usable log file size.
63 __le32 last_lsn_data_len; // 0x20:
64 __le16 rec_hdr_len; // 0x24: Log record header length.
65 __le16 data_off; // 0x26: Log page data offset.
66 __le32 open_log_count; // 0x28:
67 __le32 align[5]; // 0x2C:
68 struct CLIENT_REC clients[]; // 0x40:
69};
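
/*
 * Illustrative sketch (not part of the driver): the client array does not
 * sit at a fixed offset; it is found through 'client_off', relative to the
 * start of the restart area, and every on-disk field is little-endian:
 *
 *	const struct CLIENT_REC *ca =
 *			Add2Ptr(ra, le16_to_cpu(ra->client_off));
 *	u16 in_use = le16_to_cpu(ra->client_idx[1]);
 *
 *	if (in_use != LFS_NO_CLIENT)
 *		ca += in_use;	// first client on the in-use list
 */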
70
71struct LOG_REC_HDR {
72 __le16 redo_op; // 0x00: NTFS_LOG_OPERATION
73 __le16 undo_op; // 0x02: NTFS_LOG_OPERATION
74 __le16 redo_off; // 0x04: Offset to Redo record.
75 __le16 redo_len; // 0x06: Redo length.
76 __le16 undo_off; // 0x08: Offset to Undo record.
77 __le16 undo_len; // 0x0A: Undo length.
78 __le16 target_attr; // 0x0C:
79 __le16 lcns_follow; // 0x0E:
80 __le16 record_off; // 0x10:
81 __le16 attr_off; // 0x12:
82 __le16 cluster_off; // 0x14:
83 __le16 reserved; // 0x16:
84 __le64 target_vcn; // 0x18:
85 __le64 page_lcns[]; // 0x20:
86};
87
88static_assert(sizeof(struct LOG_REC_HDR) == 0x20);
89
90#define RESTART_ENTRY_ALLOCATED 0xFFFFFFFF
91#define RESTART_ENTRY_ALLOCATED_LE cpu_to_le32(0xFFFFFFFF)
92
93struct RESTART_TABLE {
94 __le16 size; // 0x00: In bytes
95 __le16 used; // 0x02: Entries
96 __le16 total; // 0x04: Entries
97 __le16 res[3]; // 0x06:
98 __le32 free_goal; // 0x0C:
99 __le32 first_free; // 0x10:
100 __le32 last_free; // 0x14:
101
102};
103
104static_assert(sizeof(struct RESTART_TABLE) == 0x18);
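
/*
 * Layout sketch (assuming a well-formed table): fixed-size entries follow
 * the 0x18-byte header, so entry i lives at
 *
 *	Add2Ptr(rt, sizeof(struct RESTART_TABLE) + i * le16_to_cpu(rt->size))
 *
 * A free entry stores the byte offset (from the start of the table) of the
 * next free entry, 0 terminating the list; an allocated entry stores
 * RESTART_ENTRY_ALLOCATED_LE instead.
 */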
105
106struct ATTR_NAME_ENTRY {
e8b8e97f 107 __le16 off; // Offset in the Open attribute Table.
108 __le16 name_bytes;
109 __le16 name[];
110};
111
112struct OPEN_ATTR_ENRTY {
113 __le32 next; // 0x00: RESTART_ENTRY_ALLOCATED if allocated
114 __le32 bytes_per_index; // 0x04:
115 enum ATTR_TYPE type; // 0x08:
116 u8 is_dirty_pages; // 0x0C:
117 u8 is_attr_name; // 0x0D: Faked field to manage 'ptr'
118 u8 name_len; // 0x0E: Faked field to manage 'ptr'
119 u8 res;
e8b8e97f 120 struct MFT_REF ref; // 0x10: File Reference of file containing attribute
121 __le64 open_record_lsn; // 0x18:
122 void *ptr; // 0x20:
123};
124
125/* 32 bit version of 'struct OPEN_ATTR_ENRTY' */
126struct OPEN_ATTR_ENRTY_32 {
127 __le32 next; // 0x00: RESTART_ENTRY_ALLOCATED if allocated
128 __le32 ptr; // 0x04:
129 struct MFT_REF ref; // 0x08:
130 __le64 open_record_lsn; // 0x10:
131 u8 is_dirty_pages; // 0x18:
e8b8e97f 132 u8 is_attr_name; // 0x19:
133 u8 res1[2];
134 enum ATTR_TYPE type; // 0x1C:
e8b8e97f 135 u8 name_len; // 0x20: In wchar
136 u8 res2[3];
137 __le32 AttributeName; // 0x24:
138 __le32 bytes_per_index; // 0x28:
139};
140
141#define SIZEOF_OPENATTRIBUTEENTRY0 0x2c
142// static_assert( 0x2C == sizeof(struct OPEN_ATTR_ENRTY_32) );
143static_assert(sizeof(struct OPEN_ATTR_ENRTY) < SIZEOF_OPENATTRIBUTEENTRY0);
144
145/*
146 * One entry exists in the Dirty Pages Table for each page which is dirty at
147 * the time the Restart Area is written.
148 */
149struct DIR_PAGE_ENTRY {
150 __le32 next; // 0x00: RESTART_ENTRY_ALLOCATED if allocated
151 __le32 target_attr; // 0x04: Index into the Open attribute Table
152 __le32 transfer_len; // 0x08:
153 __le32 lcns_follow; // 0x0C:
e8b8e97f 154 __le64 vcn; // 0x10: Vcn of dirty page
155 __le64 oldest_lsn; // 0x18:
156 __le64 page_lcns[]; // 0x20:
157};
158
159static_assert(sizeof(struct DIR_PAGE_ENTRY) == 0x20);
160
161/* 32 bit version of 'struct DIR_PAGE_ENTRY' */
162struct DIR_PAGE_ENTRY_32 {
163 __le32 next; // 0x00: RESTART_ENTRY_ALLOCATED if allocated
164 __le32 target_attr; // 0x04: Index into the Open attribute Table
165 __le32 transfer_len; // 0x08:
166 __le32 lcns_follow; // 0x0C:
167 __le32 reserved; // 0x10:
168 __le32 vcn_low; // 0x14: Vcn of dirty page
169 __le32 vcn_hi; // 0x18: Vcn of dirty page
170 __le32 oldest_lsn_low; // 0x1C:
171 __le32 oldest_lsn_hi; // 0x20:
172 __le32 page_lcns_low; // 0x24:
173 __le32 page_lcns_hi; // 0x28:
174};
175
176static_assert(offsetof(struct DIR_PAGE_ENTRY_32, vcn_low) == 0x14);
177static_assert(sizeof(struct DIR_PAGE_ENTRY_32) == 0x2c);
178
179enum transact_state {
180 TransactionUninitialized = 0,
181 TransactionActive,
182 TransactionPrepared,
183 TransactionCommitted
184};
185
186struct TRANSACTION_ENTRY {
187 __le32 next; // 0x00: RESTART_ENTRY_ALLOCATED if allocated
188 u8 transact_state; // 0x04:
189 u8 reserved[3]; // 0x05:
190 __le64 first_lsn; // 0x08:
191 __le64 prev_lsn; // 0x10:
192 __le64 undo_next_lsn; // 0x18:
193 __le32 undo_records; // 0x20: Number of undo log records pending abort
194 __le32 undo_len; // 0x24: Total undo size
195};
196
197static_assert(sizeof(struct TRANSACTION_ENTRY) == 0x28);
198
199struct NTFS_RESTART {
200 __le32 major_ver; // 0x00:
201 __le32 minor_ver; // 0x04:
202 __le64 check_point_start; // 0x08:
203 __le64 open_attr_table_lsn; // 0x10:
204 __le64 attr_names_lsn; // 0x18:
205 __le64 dirty_pages_table_lsn; // 0x20:
206 __le64 transact_table_lsn; // 0x28:
207 __le32 open_attr_len; // 0x30: In bytes
208 __le32 attr_names_len; // 0x34: In bytes
209 __le32 dirty_pages_len; // 0x38: In bytes
210 __le32 transact_table_len; // 0x3C: In bytes
211};
212
213static_assert(sizeof(struct NTFS_RESTART) == 0x40);
214
215struct NEW_ATTRIBUTE_SIZES {
216 __le64 alloc_size;
217 __le64 valid_size;
218 __le64 data_size;
219 __le64 total_size;
220};
221
222struct BITMAP_RANGE {
223 __le32 bitmap_off;
224 __le32 bits;
225};
226
227struct LCN_RANGE {
228 __le64 lcn;
229 __le64 len;
230};
231
e8b8e97f 232/* The following type defines the different log record types. */
233#define LfsClientRecord cpu_to_le32(1)
234#define LfsClientRestart cpu_to_le32(2)
235
e8b8e97f 236/* This is used to uniquely identify a client for a particular log file. */
237struct CLIENT_ID {
238 __le16 seq_num;
239 __le16 client_idx;
240};
241
e8b8e97f 242/* This is the header that begins every Log Record in the log file. */
b46acd6a 243struct LFS_RECORD_HDR {
244 __le64 this_lsn; // 0x00:
245 __le64 client_prev_lsn; // 0x08:
246 __le64 client_undo_next_lsn; // 0x10:
247 __le32 client_data_len; // 0x18:
248 struct CLIENT_ID client; // 0x1C: Owner of this log record.
249 __le32 record_type; // 0x20: LfsClientRecord or LfsClientRestart.
250 __le32 transact_id; // 0x24:
251 __le16 flags; // 0x28: LOG_RECORD_MULTI_PAGE
252 u8 align[6]; // 0x2A:
253};
254
255#define LOG_RECORD_MULTI_PAGE cpu_to_le16(1)
256
257static_assert(sizeof(struct LFS_RECORD_HDR) == 0x30);
258
259struct LFS_RECORD {
260 __le16 next_record_off; // 0x00: Offset of the free space in the page,
261 u8 align[6]; // 0x02:
262 __le64 last_end_lsn; // 0x08: lsn for the last log record which ends on the page,
263};
264
265static_assert(sizeof(struct LFS_RECORD) == 0x10);
266
267struct RECORD_PAGE_HDR {
268 struct NTFS_RECORD_HEADER rhdr; // 'RCRD'
269 __le32 rflags; // 0x10: See LOG_PAGE_LOG_RECORD_END
270 __le16 page_count; // 0x14:
271 __le16 page_pos; // 0x16:
272 struct LFS_RECORD record_hdr; // 0x18:
273 __le16 fixups[10]; // 0x28:
274 __le32 file_off; // 0x3c: Used when major version >= 2
275};
276
277// clang-format on
278
e8b8e97f 279// Page contains the end of a log record.
280#define LOG_PAGE_LOG_RECORD_END cpu_to_le32(0x00000001)
281
282static inline bool is_log_record_end(const struct RECORD_PAGE_HDR *hdr)
283{
284 return hdr->rflags & LOG_PAGE_LOG_RECORD_END;
285}
286
287static_assert(offsetof(struct RECORD_PAGE_HDR, file_off) == 0x3c);
288
289/*
290 * END of NTFS LOG structures
291 */
292
e8b8e97f 293/* Define some tuning parameters to keep the restart tables a reasonable size. */
294#define INITIAL_NUMBER_TRANSACTIONS 5
295
296enum NTFS_LOG_OPERATION {
297
298 Noop = 0x00,
299 CompensationLogRecord = 0x01,
300 InitializeFileRecordSegment = 0x02,
301 DeallocateFileRecordSegment = 0x03,
302 WriteEndOfFileRecordSegment = 0x04,
303 CreateAttribute = 0x05,
304 DeleteAttribute = 0x06,
305 UpdateResidentValue = 0x07,
306 UpdateNonresidentValue = 0x08,
307 UpdateMappingPairs = 0x09,
308 DeleteDirtyClusters = 0x0A,
309 SetNewAttributeSizes = 0x0B,
310 AddIndexEntryRoot = 0x0C,
311 DeleteIndexEntryRoot = 0x0D,
312 AddIndexEntryAllocation = 0x0E,
313 DeleteIndexEntryAllocation = 0x0F,
314 WriteEndOfIndexBuffer = 0x10,
315 SetIndexEntryVcnRoot = 0x11,
316 SetIndexEntryVcnAllocation = 0x12,
317 UpdateFileNameRoot = 0x13,
318 UpdateFileNameAllocation = 0x14,
319 SetBitsInNonresidentBitMap = 0x15,
320 ClearBitsInNonresidentBitMap = 0x16,
321 HotFix = 0x17,
322 EndTopLevelAction = 0x18,
323 PrepareTransaction = 0x19,
324 CommitTransaction = 0x1A,
325 ForgetTransaction = 0x1B,
326 OpenNonresidentAttribute = 0x1C,
327 OpenAttributeTableDump = 0x1D,
328 AttributeNamesDump = 0x1E,
329 DirtyPageTableDump = 0x1F,
330 TransactionTableDump = 0x20,
331 UpdateRecordDataRoot = 0x21,
332 UpdateRecordDataAllocation = 0x22,
333
334 UpdateRelativeDataInIndex =
335 0x23, // NtOfsRestartUpdateRelativeDataInIndex
336 UpdateRelativeDataInIndex2 = 0x24,
337 ZeroEndOfFileRecord = 0x25,
338};
339
340/*
341 * Array for log records which require a target attribute.
342 * A set bit indicates that the corresponding log operation
343 * requires a target attribute.
344 */
345static const u8 AttributeRequired[] = {
346 0xFC, 0xFB, 0xFF, 0x10, 0x06,
347};
348
349static inline bool is_target_required(u16 op)
350{
351 bool ret = op <= UpdateRecordDataAllocation &&
352 (AttributeRequired[op >> 3] >> (op & 7) & 1);
353 return ret;
354}
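
/*
 * Worked example of the bitmap above (a sketch, not driver logic): for
 * op == InitializeFileRecordSegment (0x02) the test reads bit 2 of
 * AttributeRequired[0] == 0xFC, which is set, so such a record must name a
 * target attribute; for op == Noop (0x00) bit 0 is clear and no target
 * attribute is required.
 */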
355
356static inline bool can_skip_action(enum NTFS_LOG_OPERATION op)
357{
358 switch (op) {
359 case Noop:
360 case DeleteDirtyClusters:
361 case HotFix:
362 case EndTopLevelAction:
363 case PrepareTransaction:
364 case CommitTransaction:
365 case ForgetTransaction:
366 case CompensationLogRecord:
367 case OpenNonresidentAttribute:
368 case OpenAttributeTableDump:
369 case AttributeNamesDump:
370 case DirtyPageTableDump:
371 case TransactionTableDump:
372 return true;
373 default:
374 return false;
375 }
376}
377
378enum { lcb_ctx_undo_next, lcb_ctx_prev, lcb_ctx_next };
379
e8b8e97f 380/* Bytes per restart table. */
381static inline u32 bytes_per_rt(const struct RESTART_TABLE *rt)
382{
383 return le16_to_cpu(rt->used) * le16_to_cpu(rt->size) +
384 sizeof(struct RESTART_TABLE);
385}
386
e8b8e97f 387/* Log record length. */
388static inline u32 lrh_length(const struct LOG_REC_HDR *lr)
389{
390 u16 t16 = le16_to_cpu(lr->lcns_follow);
391
392 return struct_size(lr, page_lcns, max_t(u16, 1, t16));
393}
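
/*
 * Worked example (sketch): a record covering two clusters has
 * lcns_follow == 2, so lrh_length() returns
 * struct_size(lr, page_lcns, 2) == 0x20 + 2 * 8 == 0x30 bytes; with
 * lcns_follow == 0 one page_lcns slot is still reserved (max_t(u16, 1, 0)),
 * giving 0x28 bytes.
 */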
394
395struct lcb {
e8b8e97f 396 struct LFS_RECORD_HDR *lrh; // Log record header of the current lsn.
397 struct LOG_REC_HDR *log_rec;
398 u32 ctx_mode; // lcb_ctx_undo_next/lcb_ctx_prev/lcb_ctx_next
399 struct CLIENT_ID client;
400 bool alloc; // If true then we should deallocate 'log_rec'.
401};
402
403static void lcb_put(struct lcb *lcb)
404{
405 if (lcb->alloc)
406 kfree(lcb->log_rec);
407 kfree(lcb->lrh);
408 kfree(lcb);
409}
410
e8b8e97f 411/* Find the oldest lsn from active clients. */
412static inline void oldest_client_lsn(const struct CLIENT_REC *ca,
413 __le16 next_client, u64 *oldest_lsn)
414{
415 while (next_client != LFS_NO_CLIENT_LE) {
416 const struct CLIENT_REC *cr = ca + le16_to_cpu(next_client);
417 u64 lsn = le64_to_cpu(cr->oldest_lsn);
418
419 /* Ignore this block if its oldest lsn is 0. */
420 if (lsn && lsn < *oldest_lsn)
421 *oldest_lsn = lsn;
422
423 next_client = cr->next_client;
424 }
425}
426
427static inline bool is_rst_page_hdr_valid(u32 file_off,
428 const struct RESTART_HDR *rhdr)
429{
430 u32 sys_page = le32_to_cpu(rhdr->sys_page_size);
431 u32 page_size = le32_to_cpu(rhdr->page_size);
432 u32 end_usa;
433 u16 ro;
434
435 if (sys_page < SECTOR_SIZE || page_size < SECTOR_SIZE ||
436 sys_page & (sys_page - 1) || page_size & (page_size - 1)) {
437 return false;
438 }
439
e8b8e97f 440 /* Check that if the file offset isn't 0, it is the system page size. */
441 if (file_off && file_off != sys_page)
442 return false;
443
e8b8e97f 444 /* Check support version 1.1+. */
445 if (le16_to_cpu(rhdr->major_ver) <= 1 && !rhdr->minor_ver)
446 return false;
447
448 if (le16_to_cpu(rhdr->major_ver) > 2)
449 return false;
450
451 ro = le16_to_cpu(rhdr->ra_off);
fa3cacf5 452 if (!IS_ALIGNED(ro, 8) || ro > sys_page)
453 return false;
454
455 end_usa = ((sys_page >> SECTOR_SHIFT) + 1) * sizeof(short);
456 end_usa += le16_to_cpu(rhdr->rhdr.fix_off);
457
458 if (ro < end_usa)
459 return false;
460
461 return true;
462}
463
464static inline bool is_rst_area_valid(const struct RESTART_HDR *rhdr)
465{
466 const struct RESTART_AREA *ra;
467 u16 cl, fl, ul;
468 u32 off, l_size, file_dat_bits, file_size_round;
469 u16 ro = le16_to_cpu(rhdr->ra_off);
470 u32 sys_page = le32_to_cpu(rhdr->sys_page_size);
471
472 if (ro + offsetof(struct RESTART_AREA, l_size) >
473 SECTOR_SIZE - sizeof(short))
474 return false;
475
476 ra = Add2Ptr(rhdr, ro);
477 cl = le16_to_cpu(ra->log_clients);
478
479 if (cl > 1)
480 return false;
481
482 off = le16_to_cpu(ra->client_off);
483
fa3cacf5 484 if (!IS_ALIGNED(off, 8) || ro + off > SECTOR_SIZE - sizeof(short))
485 return false;
486
487 off += cl * sizeof(struct CLIENT_REC);
488
489 if (off > sys_page)
490 return false;
491
492 /*
493 * Check the restart length field and whether the entire
494 * restart area is contained within that length.
495 */
496 if (le16_to_cpu(rhdr->ra_off) + le16_to_cpu(ra->ra_len) > sys_page ||
497 off > le16_to_cpu(ra->ra_len)) {
498 return false;
499 }
500
501 /*
502 * As a final check make sure that the use list and the free list
e8b8e97f 503 * are either empty or point to a valid client.
504 */
505 fl = le16_to_cpu(ra->client_idx[0]);
506 ul = le16_to_cpu(ra->client_idx[1]);
507 if ((fl != LFS_NO_CLIENT && fl >= cl) ||
508 (ul != LFS_NO_CLIENT && ul >= cl))
509 return false;
510
e8b8e97f 511 /* Make sure the sequence number bits match the log file size. */
512 l_size = le64_to_cpu(ra->l_size);
513
514 file_dat_bits = sizeof(u64) * 8 - le32_to_cpu(ra->seq_num_bits);
515 file_size_round = 1u << (file_dat_bits + 3);
516 if (file_size_round != l_size &&
517 (file_size_round < l_size || (file_size_round / 2) > l_size)) {
518 return false;
519 }
520
e8b8e97f 521 /* The log page data offset and record header length must be quad-aligned. */
522 if (!IS_ALIGNED(le16_to_cpu(ra->data_off), 8) ||
523 !IS_ALIGNED(le16_to_cpu(ra->rec_hdr_len), 8))
524 return false;
525
526 return true;
527}
528
529static inline bool is_client_area_valid(const struct RESTART_HDR *rhdr,
530 bool usa_error)
531{
532 u16 ro = le16_to_cpu(rhdr->ra_off);
533 const struct RESTART_AREA *ra = Add2Ptr(rhdr, ro);
534 u16 ra_len = le16_to_cpu(ra->ra_len);
535 const struct CLIENT_REC *ca;
536 u32 i;
537
538 if (usa_error && ra_len + ro > SECTOR_SIZE - sizeof(short))
539 return false;
540
e8b8e97f 541 /* Find the start of the client array. */
542 ca = Add2Ptr(ra, le16_to_cpu(ra->client_off));
543
544 /*
545 * Start with the free list.
546 * Check that all the clients are valid and that there isn't a cycle.
547 * Do the in-use list on the second pass.
548 */
549 for (i = 0; i < 2; i++) {
550 u16 client_idx = le16_to_cpu(ra->client_idx[i]);
551 bool first_client = true;
552 u16 clients = le16_to_cpu(ra->log_clients);
553
554 while (client_idx != LFS_NO_CLIENT) {
555 const struct CLIENT_REC *cr;
556
557 if (!clients ||
558 client_idx >= le16_to_cpu(ra->log_clients))
559 return false;
560
561 clients -= 1;
562 cr = ca + client_idx;
563
564 client_idx = le16_to_cpu(cr->next_client);
565
566 if (first_client) {
567 first_client = false;
568 if (cr->prev_client != LFS_NO_CLIENT_LE)
569 return false;
570 }
571 }
572 }
573
574 return true;
575}
576
577/*
578 * remove_client
579 *
580 * Remove a client record from a client record list in a restart area.
581 */
582static inline void remove_client(struct CLIENT_REC *ca,
583 const struct CLIENT_REC *cr, __le16 *head)
584{
585 if (cr->prev_client == LFS_NO_CLIENT_LE)
586 *head = cr->next_client;
587 else
588 ca[le16_to_cpu(cr->prev_client)].next_client = cr->next_client;
589
590 if (cr->next_client != LFS_NO_CLIENT_LE)
591 ca[le16_to_cpu(cr->next_client)].prev_client = cr->prev_client;
592}
593
594/*
e8b8e97f 595 * add_client - Add a client record to the start of a list.
596 */
597static inline void add_client(struct CLIENT_REC *ca, u16 index, __le16 *head)
598{
599 struct CLIENT_REC *cr = ca + index;
600
601 cr->prev_client = LFS_NO_CLIENT_LE;
602 cr->next_client = *head;
603
604 if (*head != LFS_NO_CLIENT_LE)
605 ca[le16_to_cpu(*head)].prev_client = cpu_to_le16(index);
606
607 *head = cpu_to_le16(index);
608}
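
/*
 * Usage sketch (an assumption about how these helpers are combined, not a
 * quote of the replay code): moving a client record from the free list to
 * the in-use list of a writable restart area 'ra' looks like
 *
 *	struct CLIENT_REC *ca = Add2Ptr(ra, le16_to_cpu(ra->client_off));
 *	u16 idx = le16_to_cpu(ra->client_idx[0]);	// head of free list
 *
 *	remove_client(ca, ca + idx, &ra->client_idx[0]);
 *	add_client(ca, idx, &ra->client_idx[1]);	// push onto in-use list
 */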
609
610static inline void *enum_rstbl(struct RESTART_TABLE *t, void *c)
611{
612 __le32 *e;
613 u32 bprt;
614 u16 rsize = t ? le16_to_cpu(t->size) : 0;
615
616 if (!c) {
617 if (!t || !t->total)
618 return NULL;
619 e = Add2Ptr(t, sizeof(struct RESTART_TABLE));
620 } else {
621 e = Add2Ptr(c, rsize);
622 }
623
e8b8e97f 624 /* Loop until we hit the first one allocated, or the end of the list. */
625 for (bprt = bytes_per_rt(t); PtrOffset(t, e) < bprt;
626 e = Add2Ptr(e, rsize)) {
627 if (*e == RESTART_ENTRY_ALLOCATED_LE)
628 return e;
629 }
630 return NULL;
631}
632
633/*
e8b8e97f 634 * find_dp - Search for a @vcn in Dirty Page Table.
635 */
636static inline struct DIR_PAGE_ENTRY *find_dp(struct RESTART_TABLE *dptbl,
637 u32 target_attr, u64 vcn)
638{
639 __le32 ta = cpu_to_le32(target_attr);
640 struct DIR_PAGE_ENTRY *dp = NULL;
641
642 while ((dp = enum_rstbl(dptbl, dp))) {
643 u64 dp_vcn = le64_to_cpu(dp->vcn);
644
645 if (dp->target_attr == ta && vcn >= dp_vcn &&
646 vcn < dp_vcn + le32_to_cpu(dp->lcns_follow)) {
647 return dp;
648 }
649 }
650 return NULL;
651}
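
/*
 * Usage sketch ('dptbl', 'attr_idx' and 'vcn' are hypothetical here): once
 * the dirty page table has been rebuilt, the lcn backing a dirty vcn can be
 * looked up as
 *
 *	const struct DIR_PAGE_ENTRY *dp = find_dp(dptbl, attr_idx, vcn);
 *
 *	if (dp) {
 *		u64 lcn = le64_to_cpu(
 *			dp->page_lcns[vcn - le64_to_cpu(dp->vcn)]);
 *		...
 *	}
 */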
652
653static inline u32 norm_file_page(u32 page_size, u32 *l_size, bool use_default)
654{
655 if (use_default)
656 page_size = DefaultLogPageSize;
657
e8b8e97f 658 /* Round the file size down to a system page boundary. */
659 *l_size &= ~(page_size - 1);
660
e8b8e97f 661 /* File should contain at least 2 restart pages and MinLogRecordPages pages. */
662 if (*l_size < (MinLogRecordPages + 2) * page_size)
663 return 0;
664
665 return page_size;
666}
667
668static bool check_log_rec(const struct LOG_REC_HDR *lr, u32 bytes, u32 tr,
669 u32 bytes_per_attr_entry)
670{
671 u16 t16;
672
673 if (bytes < sizeof(struct LOG_REC_HDR))
674 return false;
675 if (!tr)
676 return false;
677
678 if ((tr - sizeof(struct RESTART_TABLE)) %
679 sizeof(struct TRANSACTION_ENTRY))
680 return false;
681
682 if (le16_to_cpu(lr->redo_off) & 7)
683 return false;
684
685 if (le16_to_cpu(lr->undo_off) & 7)
686 return false;
687
688 if (lr->target_attr)
689 goto check_lcns;
690
691 if (is_target_required(le16_to_cpu(lr->redo_op)))
692 return false;
693
694 if (is_target_required(le16_to_cpu(lr->undo_op)))
695 return false;
696
697check_lcns:
698 if (!lr->lcns_follow)
699 goto check_length;
700
701 t16 = le16_to_cpu(lr->target_attr);
702 if ((t16 - sizeof(struct RESTART_TABLE)) % bytes_per_attr_entry)
703 return false;
704
705check_length:
706 if (bytes < lrh_length(lr))
707 return false;
708
709 return true;
710}
711
712static bool check_rstbl(const struct RESTART_TABLE *rt, size_t bytes)
713{
714 u32 ts;
715 u32 i, off;
716 u16 rsize = le16_to_cpu(rt->size);
717 u16 ne = le16_to_cpu(rt->used);
718 u32 ff = le32_to_cpu(rt->first_free);
719 u32 lf = le32_to_cpu(rt->last_free);
720
721 ts = rsize * ne + sizeof(struct RESTART_TABLE);
722
723 if (!rsize || rsize > bytes ||
724 rsize + sizeof(struct RESTART_TABLE) > bytes || bytes < ts ||
725 le16_to_cpu(rt->total) > ne || ff > ts || lf > ts ||
726 (ff && ff < sizeof(struct RESTART_TABLE)) ||
727 (lf && lf < sizeof(struct RESTART_TABLE))) {
728 return false;
729 }
730
731 /*
732 * Verify each entry is either allocated or points
733 * to a valid offset within the table.
734 */
735 for (i = 0; i < ne; i++) {
736 off = le32_to_cpu(*(__le32 *)Add2Ptr(
737 rt, i * rsize + sizeof(struct RESTART_TABLE)));
738
739 if (off != RESTART_ENTRY_ALLOCATED && off &&
740 (off < sizeof(struct RESTART_TABLE) ||
741 ((off - sizeof(struct RESTART_TABLE)) % rsize))) {
742 return false;
743 }
744 }
745
746 /*
747 * Walk through the list headed by the first entry to make
748 * sure none of the entries are currently being used.
749 */
750 for (off = ff; off;) {
751 if (off == RESTART_ENTRY_ALLOCATED)
752 return false;
753
754 off = le32_to_cpu(*(__le32 *)Add2Ptr(rt, off));
755 }
756
757 return true;
758}
759
760/*
761 * free_rsttbl_idx - Free a previously allocated index in a Restart Table.
762 */
763static inline void free_rsttbl_idx(struct RESTART_TABLE *rt, u32 off)
764{
765 __le32 *e;
766 u32 lf = le32_to_cpu(rt->last_free);
767 __le32 off_le = cpu_to_le32(off);
768
769 e = Add2Ptr(rt, off);
770
771 if (off < le32_to_cpu(rt->free_goal)) {
772 *e = rt->first_free;
773 rt->first_free = off_le;
774 if (!lf)
775 rt->last_free = off_le;
776 } else {
777 if (lf)
778 *(__le32 *)Add2Ptr(rt, lf) = off_le;
779 else
780 rt->first_free = off_le;
781
782 rt->last_free = off_le;
783 *e = 0;
784 }
785
786 le16_sub_cpu(&rt->total, 1);
787}
788
789static inline struct RESTART_TABLE *init_rsttbl(u16 esize, u16 used)
790{
791 __le32 *e, *last_free;
792 u32 off;
793 u32 bytes = esize * used + sizeof(struct RESTART_TABLE);
794 u32 lf = sizeof(struct RESTART_TABLE) + (used - 1) * esize;
795 struct RESTART_TABLE *t = kzalloc(bytes, GFP_NOFS);
796
797 if (!t)
798 return NULL;
799
800 t->size = cpu_to_le16(esize);
801 t->used = cpu_to_le16(used);
802 t->free_goal = cpu_to_le32(~0u);
803 t->first_free = cpu_to_le32(sizeof(struct RESTART_TABLE));
804 t->last_free = cpu_to_le32(lf);
805
806 e = (__le32 *)(t + 1);
807 last_free = Add2Ptr(t, lf);
808
809 for (off = sizeof(struct RESTART_TABLE) + esize; e < last_free;
810 e = Add2Ptr(e, esize), off += esize) {
811 *e = cpu_to_le32(off);
812 }
813 return t;
814}
815
816static inline struct RESTART_TABLE *extend_rsttbl(struct RESTART_TABLE *tbl,
817 u32 add, u32 free_goal)
818{
819 u16 esize = le16_to_cpu(tbl->size);
820 __le32 osize = cpu_to_le32(bytes_per_rt(tbl));
821 u32 used = le16_to_cpu(tbl->used);
822 struct RESTART_TABLE *rt;
823
824 rt = init_rsttbl(esize, used + add);
825 if (!rt)
826 return NULL;
827
828 memcpy(rt + 1, tbl + 1, esize * used);
829
830 rt->free_goal = free_goal == ~0u
831 ? cpu_to_le32(~0u)
832 : cpu_to_le32(sizeof(struct RESTART_TABLE) +
833 free_goal * esize);
834
835 if (tbl->first_free) {
836 rt->first_free = tbl->first_free;
837 *(__le32 *)Add2Ptr(rt, le32_to_cpu(tbl->last_free)) = osize;
838 } else {
839 rt->first_free = osize;
840 }
841
842 rt->total = tbl->total;
843
195c52bd 844 kfree(tbl);
845 return rt;
846}
847
848/*
849 * alloc_rsttbl_idx
850 *
e8b8e97f 851 * Allocate an index from within a previously initialized Restart Table.
852 */
853static inline void *alloc_rsttbl_idx(struct RESTART_TABLE **tbl)
854{
855 u32 off;
856 __le32 *e;
857 struct RESTART_TABLE *t = *tbl;
858
a1b04d38 859 if (!t->first_free) {
b46acd6a 860 *tbl = t = extend_rsttbl(t, 16, ~0u);
861 if (!t)
862 return NULL;
863 }
864
865 off = le32_to_cpu(t->first_free);
866
867 /* Dequeue this entry and zero it. */
868 e = Add2Ptr(t, off);
869
870 t->first_free = *e;
871
872 memset(e, 0, le16_to_cpu(t->size));
873
874 *e = RESTART_ENTRY_ALLOCATED_LE;
875
876 /* If list is going empty, then we fix the last_free as well. */
877 if (!t->first_free)
878 t->last_free = 0;
879
880 le16_add_cpu(&t->total, 1);
881
882 return Add2Ptr(t, off);
883}
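
/*
 * Round-trip sketch (illustration only, error handling elided): create a
 * table, take an entry and give it back. alloc_rsttbl_idx() may have to
 * grow and therefore replace the table, which is why it takes a pointer to
 * the table pointer.
 *
 *	struct RESTART_TABLE *tbl =
 *		init_rsttbl(sizeof(struct TRANSACTION_ENTRY),
 *			    INITIAL_NUMBER_TRANSACTIONS);
 *	struct TRANSACTION_ENTRY *tr = alloc_rsttbl_idx(&tbl);
 *
 *	free_rsttbl_idx(tbl, PtrOffset(tbl, tr));
 *	kfree(tbl);
 */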
884
885/*
886 * alloc_rsttbl_from_idx
887 *
e8b8e97f 888 * Allocate a specific index from within a previously initialized Restart Table.
889 */
890static inline void *alloc_rsttbl_from_idx(struct RESTART_TABLE **tbl, u32 vbo)
891{
892 u32 off;
893 __le32 *e;
894 struct RESTART_TABLE *rt = *tbl;
895 u32 bytes = bytes_per_rt(rt);
896 u16 esize = le16_to_cpu(rt->size);
897
898 /* If the entry is not in the table, we will have to extend the table. */
899 if (vbo >= bytes) {
900 /*
901 * Extend the size by computing the number of entries between
902 * the existing size and the desired index and adding 1 to that.
903 */
904 u32 bytes2idx = vbo - bytes;
905
906 /*
907 * There should always be an integral number of entries
908 * being added. Now extend the table.
909 */
910 *tbl = rt = extend_rsttbl(rt, bytes2idx / esize + 1, bytes);
911 if (!rt)
912 return NULL;
913 }
914
e8b8e97f 915 /* See if the entry is already allocated, and just return if it is. */
916 e = Add2Ptr(rt, vbo);
917
918 if (*e == RESTART_ENTRY_ALLOCATED_LE)
919 return e;
920
921 /*
922 * Walk through the table, looking for the entry we're
923 * interested in and the previous entry.
924 */
925 off = le32_to_cpu(rt->first_free);
926 e = Add2Ptr(rt, off);
927
928 if (off == vbo) {
929 /* this is a match */
930 rt->first_free = *e;
931 goto skip_looking;
932 }
933
934 /*
935 * Need to walk through the list looking for the predecessor
936 * of our entry.
937 */
938 for (;;) {
939 /* Remember the entry just found */
940 u32 last_off = off;
941 __le32 *last_e = e;
942
943 /* We should never run out of entries. */
944
945 /* Look up the next entry in the list. */
946 off = le32_to_cpu(*last_e);
947 e = Add2Ptr(rt, off);
948
e8b8e97f 949 /* If this is our match we are done. */
950 if (off == vbo) {
951 *last_e = *e;
952
953 /*
954 * If this was the last entry, we update that
955 * table as well.
956 */
957 if (le32_to_cpu(rt->last_free) == off)
958 rt->last_free = cpu_to_le32(last_off);
959 break;
960 }
961 }
962
963skip_looking:
e8b8e97f 964 /* If the list is now empty, we fix the last_free as well. */
965 if (!rt->first_free)
966 rt->last_free = 0;
967
e8b8e97f 968 /* Zero this entry. */
969 memset(e, 0, esize);
970 *e = RESTART_ENTRY_ALLOCATED_LE;
971
972 le16_add_cpu(&rt->total, 1);
973
974 return e;
975}
976
977#define RESTART_SINGLE_PAGE_IO cpu_to_le16(0x0001)
978
979#define NTFSLOG_WRAPPED 0x00000001
980#define NTFSLOG_MULTIPLE_PAGE_IO 0x00000002
981#define NTFSLOG_NO_LAST_LSN 0x00000004
982#define NTFSLOG_REUSE_TAIL 0x00000010
983#define NTFSLOG_NO_OLDEST_LSN 0x00000020
984
e8b8e97f 985/* Helper struct to work with NTFS $LogFile. */
986struct ntfs_log {
987 struct ntfs_inode *ni;
988
989 u32 l_size;
990 u32 sys_page_size;
991 u32 sys_page_mask;
992 u32 page_size;
993 u32 page_mask; // page_size - 1
994 u8 page_bits;
995 struct RECORD_PAGE_HDR *one_page_buf;
996
997 struct RESTART_TABLE *open_attr_tbl;
998 u32 transaction_id;
999 u32 clst_per_page;
1000
1001 u32 first_page;
1002 u32 next_page;
1003 u32 ra_off;
1004 u32 data_off;
1005 u32 restart_size;
1006 u32 data_size;
1007 u16 record_header_len;
1008 u64 seq_num;
1009 u32 seq_num_bits;
1010 u32 file_data_bits;
1011 u32 seq_num_mask; /* (1 << file_data_bits) - 1 */
1012
1013 struct RESTART_AREA *ra; /* In-memory image of the next restart area. */
1014 u32 ra_size; /* The usable size of the restart area. */
1015
1016 /*
1017 * If true, then the in-memory restart area is to be written
e8b8e97f 1018 * to the first position on the disk.
1019 */
1020 bool init_ra;
e8b8e97f 1021 bool set_dirty; /* True if we need to set dirty flag. */
1022
1023 u64 oldest_lsn;
1024
1025 u32 oldest_lsn_off;
1026 u64 last_lsn;
1027
1028 u32 total_avail;
1029 u32 total_avail_pages;
1030 u32 total_undo_commit;
1031 u32 max_current_avail;
1032 u32 current_avail;
1033 u32 reserved;
1034
1035 short major_ver;
1036 short minor_ver;
1037
1038 u32 l_flags; /* See NTFSLOG_XXX */
e8b8e97f 1039 u32 current_openlog_count; /* On-disk value for open_log_count. */
1040
1041 struct CLIENT_ID client_id;
1042 u32 client_undo_commit;
1043};
1044
1045static inline u32 lsn_to_vbo(struct ntfs_log *log, const u64 lsn)
1046{
1047 u32 vbo = (lsn << log->seq_num_bits) >> (log->seq_num_bits - 3);
1048
1049 return vbo;
1050}
1051
e8b8e97f 1052/* Compute the offset in the log file of the next log page. */
1053static inline u32 next_page_off(struct ntfs_log *log, u32 off)
1054{
1055 off = (off & ~log->sys_page_mask) + log->page_size;
1056 return off >= log->l_size ? log->first_page : off;
1057}
1058
1059static inline u32 lsn_to_page_off(struct ntfs_log *log, u64 lsn)
1060{
1061 return (((u32)lsn) << 3) & log->page_mask;
1062}
1063
1064static inline u64 vbo_to_lsn(struct ntfs_log *log, u32 off, u64 Seq)
1065{
1066 return (off >> 3) + (Seq << log->file_data_bits);
1067}
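
/*
 * Worked example (sketch): an lsn encodes a quadword offset into the log
 * file in its low 'file_data_bits' bits and a wrap count (sequence number)
 * above them. With a 4 MiB log, file_data_bits == 22 - 3 == 19, so the lsn
 * for byte offset 0x80 in sequence 2 is
 *
 *	vbo_to_lsn(log, 0x80, 2) == (0x80 >> 3) + (2 << 19) == 0x100010
 *
 * and lsn_to_vbo(log, 0x100010) maps back to 0x80.
 */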
1068
1069static inline bool is_lsn_in_file(struct ntfs_log *log, u64 lsn)
1070{
1071 return lsn >= log->oldest_lsn &&
1072 lsn <= le64_to_cpu(log->ra->current_lsn);
1073}
1074
1075static inline u32 hdr_file_off(struct ntfs_log *log,
1076 struct RECORD_PAGE_HDR *hdr)
1077{
1078 if (log->major_ver < 2)
1079 return le64_to_cpu(hdr->rhdr.lsn);
1080
1081 return le32_to_cpu(hdr->file_off);
1082}
1083
1084static inline u64 base_lsn(struct ntfs_log *log,
1085 const struct RECORD_PAGE_HDR *hdr, u64 lsn)
1086{
1087 u64 h_lsn = le64_to_cpu(hdr->rhdr.lsn);
1088 u64 ret = (((h_lsn >> log->file_data_bits) +
1089 (lsn < (lsn_to_vbo(log, h_lsn) & ~log->page_mask) ? 1 : 0))
1090 << log->file_data_bits) +
1091 ((((is_log_record_end(hdr) &&
1092 h_lsn <= le64_to_cpu(hdr->record_hdr.last_end_lsn))
1093 ? le16_to_cpu(hdr->record_hdr.next_record_off)
1094 : log->page_size) +
1095 lsn) >>
1096 3);
1097
1098 return ret;
1099}
1100
1101static inline bool verify_client_lsn(struct ntfs_log *log,
1102 const struct CLIENT_REC *client, u64 lsn)
1103{
1104 return lsn >= le64_to_cpu(client->oldest_lsn) &&
1105 lsn <= le64_to_cpu(log->ra->current_lsn) && lsn;
1106}
1107
1108struct restart_info {
1109 u64 last_lsn;
1110 struct RESTART_HDR *r_page;
1111 u32 vbo;
1112 bool chkdsk_was_run;
1113 bool valid_page;
1114 bool initialized;
1115 bool restart;
1116};
1117
1118static int read_log_page(struct ntfs_log *log, u32 vbo,
1119 struct RECORD_PAGE_HDR **buffer, bool *usa_error)
1120{
1121 int err = 0;
1122 u32 page_idx = vbo >> log->page_bits;
1123 u32 page_off = vbo & log->page_mask;
1124 u32 bytes = log->page_size - page_off;
1125 void *to_free = NULL;
1126 u32 page_vbo = page_idx << log->page_bits;
1127 struct RECORD_PAGE_HDR *page_buf;
1128 struct ntfs_inode *ni = log->ni;
1129 bool bBAAD;
1130
1131 if (vbo >= log->l_size)
1132 return -EINVAL;
1133
1134 if (!*buffer) {
195c52bd 1135 to_free = kmalloc(bytes, GFP_NOFS);
1136 if (!to_free)
1137 return -ENOMEM;
1138 *buffer = to_free;
1139 }
1140
1141 page_buf = page_off ? log->one_page_buf : *buffer;
1142
1143 err = ntfs_read_run_nb(ni->mi.sbi, &ni->file.run, page_vbo, page_buf,
1144 log->page_size, NULL);
1145 if (err)
1146 goto out;
1147
1148 if (page_buf->rhdr.sign != NTFS_FFFF_SIGNATURE)
1149 ntfs_fix_post_read(&page_buf->rhdr, PAGE_SIZE, false);
1150
1151 if (page_buf != *buffer)
1152 memcpy(*buffer, Add2Ptr(page_buf, page_off), bytes);
1153
1154 bBAAD = page_buf->rhdr.sign == NTFS_BAAD_SIGNATURE;
1155
1156 if (usa_error)
1157 *usa_error = bBAAD;
1158 /* Check that the update sequence array for this page is valid */
1159 /* If we don't allow errors, raise an error status */
1160 else if (bBAAD)
1161 err = -EINVAL;
1162
1163out:
1164 if (err && to_free) {
195c52bd 1165 kfree(to_free);
1166 *buffer = NULL;
1167 }
1168
1169 return err;
1170}
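
/*
 * Usage sketch (assumption: the caller owns the returned buffer): passing a
 * NULL buffer pointer makes read_log_page() allocate one; passing a
 * non-NULL 'usa_error' reports a torn multi-sector transfer instead of
 * failing with -EINVAL.
 *
 *	struct RECORD_PAGE_HDR *page = NULL;
 *	bool usa_error;
 *	int err = read_log_page(log, vbo, &page, &usa_error);
 *
 *	if (!err) {
 *		// ... inspect page, tolerating usa_error if appropriate ...
 *		kfree(page);
 *	}
 */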
1171
1172/*
1173 * log_read_rst
1174 *
1175 * It walks through 512 blocks of the file looking for a valid
1176 * restart page header. It will stop the first time we find a
1177 * valid page header.
1178 */
1179static int log_read_rst(struct ntfs_log *log, u32 l_size, bool first,
1180 struct restart_info *info)
1181{
1182 u32 skip, vbo;
195c52bd 1183 struct RESTART_HDR *r_page = kmalloc(DefaultLogPageSize, GFP_NOFS);
1184
1185 if (!r_page)
1186 return -ENOMEM;
1187
e8b8e97f 1188 /* Determine which restart area we are looking for. */
1189 if (first) {
1190 vbo = 0;
1191 skip = 512;
1192 } else {
1193 vbo = 512;
1194 skip = 0;
1195 }
1196
e8b8e97f 1197 /* Loop continuously until we succeed. */
1198 for (; vbo < l_size; vbo = 2 * vbo + skip, skip = 0) {
1199 bool usa_error;
1200 u32 sys_page_size;
1201 bool brst, bchk;
1202 struct RESTART_AREA *ra;
1203
e8b8e97f 1204 /* Read a page header at the current offset. */
1205 if (read_log_page(log, vbo, (struct RECORD_PAGE_HDR **)&r_page,
1206 &usa_error)) {
e8b8e97f 1207 /* Ignore any errors. */
1208 continue;
1209 }
1210
e8b8e97f 1211 /* Exit if the signature is a log record page. */
1212 if (r_page->rhdr.sign == NTFS_RCRD_SIGNATURE) {
1213 info->initialized = true;
1214 break;
1215 }
1216
1217 brst = r_page->rhdr.sign == NTFS_RSTR_SIGNATURE;
1218 bchk = r_page->rhdr.sign == NTFS_CHKD_SIGNATURE;
1219
1220 if (!bchk && !brst) {
1221 if (r_page->rhdr.sign != NTFS_FFFF_SIGNATURE) {
1222 /*
1223 * Remember if the signature does not
e8b8e97f 1224 * indicate uninitialized file.
1225 */
1226 info->initialized = true;
1227 }
1228 continue;
1229 }
1230
1231 ra = NULL;
1232 info->valid_page = false;
1233 info->initialized = true;
1234 info->vbo = vbo;
1235
e8b8e97f 1236 /* Let's check the restart area if this is a valid page. */
1237 if (!is_rst_page_hdr_valid(vbo, r_page))
1238 goto check_result;
1239 ra = Add2Ptr(r_page, le16_to_cpu(r_page->ra_off));
1240
1241 if (!is_rst_area_valid(r_page))
1242 goto check_result;
1243
1244 /*
1245 * We have a valid restart page header and restart area.
1246 * If chkdsk was run or we have no clients then we have
e8b8e97f 1247 * no more checking to do.
1248 */
1249 if (bchk || ra->client_idx[1] == LFS_NO_CLIENT_LE) {
1250 info->valid_page = true;
1251 goto check_result;
1252 }
1253
e8b8e97f 1254 /* Read the entire restart area. */
1255 sys_page_size = le32_to_cpu(r_page->sys_page_size);
1256 if (DefaultLogPageSize != sys_page_size) {
1257 kfree(r_page);
1258 r_page = kzalloc(sys_page_size, GFP_NOFS);
1259 if (!r_page)
1260 return -ENOMEM;
1261
1262 if (read_log_page(log, vbo,
1263 (struct RECORD_PAGE_HDR **)&r_page,
1264 &usa_error)) {
e8b8e97f 1265 /* Ignore any errors. */
195c52bd 1266 kfree(r_page);
1267 r_page = NULL;
1268 continue;
1269 }
1270 }
1271
1272 if (is_client_area_valid(r_page, usa_error)) {
1273 info->valid_page = true;
1274 ra = Add2Ptr(r_page, le16_to_cpu(r_page->ra_off));
1275 }
1276
1277check_result:
1278 /*
1279 * If chkdsk was run then update the caller's
1280 * values and return.
1281 */
1282 if (r_page->rhdr.sign == NTFS_CHKD_SIGNATURE) {
1283 info->chkdsk_was_run = true;
1284 info->last_lsn = le64_to_cpu(r_page->rhdr.lsn);
1285 info->restart = true;
1286 info->r_page = r_page;
1287 return 0;
1288 }
1289
1290 /*
1291 * If we have a valid page then copy the values
1292 * we need from it.
1293 */
1294 if (info->valid_page) {
1295 info->last_lsn = le64_to_cpu(ra->current_lsn);
1296 info->restart = true;
1297 info->r_page = r_page;
1298 return 0;
1299 }
1300 }
1301
195c52bd 1302 kfree(r_page);
1303
1304 return 0;
1305}
1306
1307/*
1308 * log_init_pg_hdr - Init @log from restart page header.
1309 */
1310static void log_init_pg_hdr(struct ntfs_log *log, u32 sys_page_size,
1311 u32 page_size, u16 major_ver, u16 minor_ver)
1312{
1313 log->sys_page_size = sys_page_size;
1314 log->sys_page_mask = sys_page_size - 1;
1315 log->page_size = page_size;
1316 log->page_mask = page_size - 1;
1317 log->page_bits = blksize_bits(page_size);
1318
1319 log->clst_per_page = log->page_size >> log->ni->mi.sbi->cluster_bits;
1320 if (!log->clst_per_page)
1321 log->clst_per_page = 1;
1322
1323 log->first_page = major_ver >= 2
1324 ? 0x22 * page_size
1325 : ((sys_page_size << 1) + (page_size << 1));
1326 log->major_ver = major_ver;
1327 log->minor_ver = minor_ver;
1328}
1329
1330/*
e8b8e97f 1331 * log_create - Init @log in cases when we don't have a restart area to use.
1332 */
1333static void log_create(struct ntfs_log *log, u32 l_size, const u64 last_lsn,
1334 u32 open_log_count, bool wrapped, bool use_multi_page)
1335{
1336 log->l_size = l_size;
e8b8e97f 1337 /* All file offsets must be quadword aligned. */
1338 log->file_data_bits = blksize_bits(l_size) - 3;
1339 log->seq_num_mask = (8 << log->file_data_bits) - 1;
1340 log->seq_num_bits = sizeof(u64) * 8 - log->file_data_bits;
1341 log->seq_num = (last_lsn >> log->file_data_bits) + 2;
1342 log->next_page = log->first_page;
1343 log->oldest_lsn = log->seq_num << log->file_data_bits;
1344 log->oldest_lsn_off = 0;
1345 log->last_lsn = log->oldest_lsn;
1346
1347 log->l_flags |= NTFSLOG_NO_LAST_LSN | NTFSLOG_NO_OLDEST_LSN;
1348
e8b8e97f 1349 /* Set the correct flags for the I/O and indicate if we have wrapped. */
1350 if (wrapped)
1351 log->l_flags |= NTFSLOG_WRAPPED;
1352
1353 if (use_multi_page)
1354 log->l_flags |= NTFSLOG_MULTIPLE_PAGE_IO;
1355
e8b8e97f 1356 /* Compute the log page values. */
fa3cacf5 1357 log->data_off = ALIGN(
b46acd6a 1358 offsetof(struct RECORD_PAGE_HDR, fixups) +
1359 sizeof(short) * ((log->page_size >> SECTOR_SHIFT) + 1),
1360 8);
1361 log->data_size = log->page_size - log->data_off;
1362 log->record_header_len = sizeof(struct LFS_RECORD_HDR);
1363
e8b8e97f 1364 /* Remember the different page sizes for reservation. */
1365 log->reserved = log->data_size - log->record_header_len;
1366
1367 /* Compute the restart page values. */
fa3cacf5 1368 log->ra_off = ALIGN(
b46acd6a 1369 offsetof(struct RESTART_HDR, fixups) +
1370 sizeof(short) *
1371 ((log->sys_page_size >> SECTOR_SHIFT) + 1),
1372 8);
1373 log->restart_size = log->sys_page_size - log->ra_off;
1374 log->ra_size = struct_size(log->ra, clients, 1);
1375 log->current_openlog_count = open_log_count;
1376
1377 /*
1378 * The total available log file space is the number of
e8b8e97f 1379 * log file pages times the space available on each page.
1380 */
1381 log->total_avail_pages = log->l_size - log->first_page;
1382 log->total_avail = log->total_avail_pages >> log->page_bits;
1383
1384 /*
1385 * We assume that we can't use the end of the page less than
1386 * the file record size.
1387 * Then we won't need to reserve more than the caller asks for.
1388 */
1389 log->max_current_avail = log->total_avail * log->reserved;
1390 log->total_avail = log->total_avail * log->data_size;
1391 log->current_avail = log->max_current_avail;
1392}
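
/*
 * Worked example of the layout math above (sketch, assuming a 4K log page
 * and 512-byte sectors): fixups start at offset 0x28 and need
 * (4096 >> SECTOR_SHIFT) + 1 == 9 two-byte entries, so
 * data_off == ALIGN(0x28 + 0x12, 8) == 0x40 and
 * data_size == 4096 - 0x40. Log record payload on such a page therefore
 * always starts at or after offset 0x40.
 */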
1393
1394/*
e8b8e97f 1395 * log_create_ra - Fill a restart area from the values stored in @log.
1396 */
1397static struct RESTART_AREA *log_create_ra(struct ntfs_log *log)
1398{
1399 struct CLIENT_REC *cr;
195c52bd 1400 struct RESTART_AREA *ra = kzalloc(log->restart_size, GFP_NOFS);
1401
1402 if (!ra)
1403 return NULL;
1404
1405 ra->current_lsn = cpu_to_le64(log->last_lsn);
1406 ra->log_clients = cpu_to_le16(1);
1407 ra->client_idx[1] = LFS_NO_CLIENT_LE;
1408 if (log->l_flags & NTFSLOG_MULTIPLE_PAGE_IO)
1409 ra->flags = RESTART_SINGLE_PAGE_IO;
1410 ra->seq_num_bits = cpu_to_le32(log->seq_num_bits);
1411 ra->ra_len = cpu_to_le16(log->ra_size);
1412 ra->client_off = cpu_to_le16(offsetof(struct RESTART_AREA, clients));
1413 ra->l_size = cpu_to_le64(log->l_size);
1414 ra->rec_hdr_len = cpu_to_le16(log->record_header_len);
1415 ra->data_off = cpu_to_le16(log->data_off);
1416 ra->open_log_count = cpu_to_le32(log->current_openlog_count + 1);
1417
1418 cr = ra->clients;
1419
1420 cr->prev_client = LFS_NO_CLIENT_LE;
1421 cr->next_client = LFS_NO_CLIENT_LE;
1422
1423 return ra;
1424}
1425
1426static u32 final_log_off(struct ntfs_log *log, u64 lsn, u32 data_len)
1427{
1428 u32 base_vbo = lsn << 3;
1429 u32 final_log_off = (base_vbo & log->seq_num_mask) & ~log->page_mask;
1430 u32 page_off = base_vbo & log->page_mask;
1431 u32 tail = log->page_size - page_off;
1432
1433 page_off -= 1;
1434
e8b8e97f 1435 /* Add the length of the header. */
1436 data_len += log->record_header_len;
1437
1438 /*
1439 * If this lsn is contained in this log page we are done.
1440 * Otherwise we need to walk through several log pages.
1441 */
1442 if (data_len > tail) {
1443 data_len -= tail;
1444 tail = log->data_size;
1445 page_off = log->data_off - 1;
1446
1447 for (;;) {
1448 final_log_off = next_page_off(log, final_log_off);
1449
1450 /*
1451 * We are done if the remaining bytes
1452 * fit on this page.
1453 */
1454 if (data_len <= tail)
1455 break;
1456 data_len -= tail;
1457 }
1458 }
1459
1460 /*
1461 * We add the remaining bytes to our starting position on this page
e8b8e97f 1462 * and then add that value to the file offset of this log page.
1463 */
1464 return final_log_off + data_len + page_off;
1465}
1466
1467static int next_log_lsn(struct ntfs_log *log, const struct LFS_RECORD_HDR *rh,
1468 u64 *lsn)
1469{
1470 int err;
1471 u64 this_lsn = le64_to_cpu(rh->this_lsn);
1472 u32 vbo = lsn_to_vbo(log, this_lsn);
1473 u32 end =
1474 final_log_off(log, this_lsn, le32_to_cpu(rh->client_data_len));
1475 u32 hdr_off = end & ~log->sys_page_mask;
1476 u64 seq = this_lsn >> log->file_data_bits;
1477 struct RECORD_PAGE_HDR *page = NULL;
1478
e8b8e97f 1479 /* Remember if we wrapped. */
1480 if (end <= vbo)
1481 seq += 1;
1482
e8b8e97f 1483 /* Log page header for this page. */
1484 err = read_log_page(log, hdr_off, &page, NULL);
1485 if (err)
1486 return err;
1487
1488 /*
1489 * If the lsn we were given was not the last lsn on this page,
1490 * then the starting offset for the next lsn is on a quad word
1491 * boundary following the last file offset for the current lsn.
1492 * Otherwise the file offset is the start of the data on the next page.
1493 */
1494 if (this_lsn == le64_to_cpu(page->rhdr.lsn)) {
e8b8e97f 1495 /* If we wrapped, we need to increment the sequence number. */
1496 hdr_off = next_page_off(log, hdr_off);
1497 if (hdr_off == log->first_page)
1498 seq += 1;
1499
1500 vbo = hdr_off + log->data_off;
1501 } else {
fa3cacf5 1502 vbo = ALIGN(end, 8);
1503 }
1504
e8b8e97f 1505 /* Compute the lsn based on the file offset and the sequence count. */
1506 *lsn = vbo_to_lsn(log, vbo, seq);
1507
1508 /*
1509 * If this lsn is within the legal range for the file, we return true.
1510 * Otherwise false indicates that there are no more lsn's.
1511 */
1512 if (!is_lsn_in_file(log, *lsn))
1513 *lsn = 0;
1514
195c52bd 1515 kfree(page);
1516
1517 return 0;
1518}
1519
1520/*
e8b8e97f 1521 * current_log_avail - Calculate the number of bytes available for log records.
1522 */
1523static u32 current_log_avail(struct ntfs_log *log)
1524{
1525 u32 oldest_off, next_free_off, free_bytes;
1526
1527 if (log->l_flags & NTFSLOG_NO_LAST_LSN) {
e8b8e97f 1528 /* The entire file is available. */
1529 return log->max_current_avail;
1530 }
1531
1532 /*
1533 * If there is a last lsn in the restart area then we know that we will
1534 * have to compute the free range.
1535 * If there is no oldest lsn then start at the first page of the file.
1536 */
1537 oldest_off = (log->l_flags & NTFSLOG_NO_OLDEST_LSN)
1538 ? log->first_page
1539 : (log->oldest_lsn_off & ~log->sys_page_mask);
1540
1541 /*
1542 * We will use the next log page offset to compute the next free page.
1543 * If we are going to reuse this page go to the next page.
1544 * If we are at the first page then use the end of the file.
1545 */
1546 next_free_off = (log->l_flags & NTFSLOG_REUSE_TAIL)
1547 ? log->next_page + log->page_size
1548 : log->next_page == log->first_page
1549 ? log->l_size
1550 : log->next_page;
1551
e8b8e97f 1552 /* If the two offsets are the same then there is no available space. */
1553 if (oldest_off == next_free_off)
1554 return 0;
1555 /*
1556 * If the free offset follows the oldest offset then subtract
e8b8e97f 1557 * this range from the total available pages.
1558 */
1559 free_bytes =
1560 oldest_off < next_free_off
1561 ? log->total_avail_pages - (next_free_off - oldest_off)
1562 : oldest_off - next_free_off;
1563
1564 free_bytes >>= log->page_bits;
1565 return free_bytes * log->reserved;
1566}
1567
1568static bool check_subseq_log_page(struct ntfs_log *log,
1569 const struct RECORD_PAGE_HDR *rp, u32 vbo,
1570 u64 seq)
1571{
1572 u64 lsn_seq;
1573 const struct NTFS_RECORD_HEADER *rhdr = &rp->rhdr;
1574 u64 lsn = le64_to_cpu(rhdr->lsn);
1575
1576 if (rhdr->sign == NTFS_FFFF_SIGNATURE || !rhdr->sign)
1577 return false;
1578
1579 /*
1580 * If the last lsn on the page was written after the page
1581 * that caused the original error, then we have a fatal error.
1582 */
1583 lsn_seq = lsn >> log->file_data_bits;
1584
1585 /*
1586 * If the sequence number for the lsn on the page is equal to or greater
1587 * than the lsn we expect, then this is a subsequent write.
1588 */
1589 return lsn_seq >= seq ||
1590 (lsn_seq == seq - 1 && log->first_page == vbo &&
1591 vbo != (lsn_to_vbo(log, lsn) & ~log->page_mask));
1592}
1593
1594/*
1595 * last_log_lsn
1596 *
1597 * Walks through the log pages for a file, searching for the
1598 * last log page written to the file.
1599 */
1600static int last_log_lsn(struct ntfs_log *log)
1601{
1602 int err;
1603 bool usa_error = false;
1604 bool replace_page = false;
1605 bool reuse_page = log->l_flags & NTFSLOG_REUSE_TAIL;
1606 bool wrapped_file, wrapped;
1607
1608 u32 page_cnt = 1, page_pos = 1;
1609 u32 page_off = 0, page_off1 = 0, saved_off = 0;
1610 u32 final_off, second_off, final_off_prev = 0, second_off_prev = 0;
1611 u32 first_file_off = 0, second_file_off = 0;
1612 u32 part_io_count = 0;
1613 u32 tails = 0;
1614 u32 this_off, curpage_off, nextpage_off, remain_pages;
1615
1616 u64 expected_seq, seq_base = 0, lsn_base = 0;
1617 u64 best_lsn, best_lsn1, best_lsn2;
1618 u64 lsn_cur, lsn1, lsn2;
1619 u64 last_ok_lsn = reuse_page ? log->last_lsn : 0;
1620
1621 u16 cur_pos, best_page_pos;
1622
1623 struct RECORD_PAGE_HDR *page = NULL;
1624 struct RECORD_PAGE_HDR *tst_page = NULL;
1625 struct RECORD_PAGE_HDR *first_tail = NULL;
1626 struct RECORD_PAGE_HDR *second_tail = NULL;
1627 struct RECORD_PAGE_HDR *tail_page = NULL;
1628 struct RECORD_PAGE_HDR *second_tail_prev = NULL;
1629 struct RECORD_PAGE_HDR *first_tail_prev = NULL;
1630 struct RECORD_PAGE_HDR *page_bufs = NULL;
1631 struct RECORD_PAGE_HDR *best_page;
1632
1633 if (log->major_ver >= 2) {
1634 final_off = 0x02 * log->page_size;
1635 second_off = 0x12 * log->page_size;
1636
1637 // 0x10 == 0x12 - 0x2
195c52bd 1638 page_bufs = kmalloc(log->page_size * 0x10, GFP_NOFS);
1639 if (!page_bufs)
1640 return -ENOMEM;
1641 } else {
1642 second_off = log->first_page - log->page_size;
1643 final_off = second_off - log->page_size;
1644 }
1645
1646next_tail:
e8b8e97f 1647 /* Read second tail page (at pos 3/0x12000). */
1648 if (read_log_page(log, second_off, &second_tail, &usa_error) ||
1649 usa_error || second_tail->rhdr.sign != NTFS_RCRD_SIGNATURE) {
195c52bd 1650 kfree(second_tail);
1651 second_tail = NULL;
1652 second_file_off = 0;
1653 lsn2 = 0;
1654 } else {
1655 second_file_off = hdr_file_off(log, second_tail);
1656 lsn2 = le64_to_cpu(second_tail->record_hdr.last_end_lsn);
1657 }
1658
e8b8e97f 1659 /* Read first tail page (at pos 2/0x2000). */
1660 if (read_log_page(log, final_off, &first_tail, &usa_error) ||
1661 usa_error || first_tail->rhdr.sign != NTFS_RCRD_SIGNATURE) {
195c52bd 1662 kfree(first_tail);
1663 first_tail = NULL;
1664 first_file_off = 0;
1665 lsn1 = 0;
1666 } else {
1667 first_file_off = hdr_file_off(log, first_tail);
1668 lsn1 = le64_to_cpu(first_tail->record_hdr.last_end_lsn);
1669 }
1670
1671 if (log->major_ver < 2) {
1672 int best_page;
1673
1674 first_tail_prev = first_tail;
1675 final_off_prev = first_file_off;
1676 second_tail_prev = second_tail;
1677 second_off_prev = second_file_off;
1678 tails = 1;
1679
1680 if (!first_tail && !second_tail)
1681 goto tail_read;
1682
1683 if (first_tail && second_tail)
1684 best_page = lsn1 < lsn2 ? 1 : 0;
1685 else if (first_tail)
1686 best_page = 0;
1687 else
1688 best_page = 1;
1689
1690 page_off = best_page ? second_file_off : first_file_off;
1691 seq_base = (best_page ? lsn2 : lsn1) >> log->file_data_bits;
1692 goto tail_read;
1693 }
1694
1695 best_lsn1 = first_tail ? base_lsn(log, first_tail, first_file_off) : 0;
1696 best_lsn2 =
1697 second_tail ? base_lsn(log, second_tail, second_file_off) : 0;
1698
1699 if (first_tail && second_tail) {
1700 if (best_lsn1 > best_lsn2) {
1701 best_lsn = best_lsn1;
1702 best_page = first_tail;
1703 this_off = first_file_off;
1704 } else {
1705 best_lsn = best_lsn2;
1706 best_page = second_tail;
1707 this_off = second_file_off;
1708 }
1709 } else if (first_tail) {
1710 best_lsn = best_lsn1;
1711 best_page = first_tail;
1712 this_off = first_file_off;
1713 } else if (second_tail) {
1714 best_lsn = best_lsn2;
1715 best_page = second_tail;
1716 this_off = second_file_off;
1717 } else {
1718 goto tail_read;
1719 }
1720
1721 best_page_pos = le16_to_cpu(best_page->page_pos);
1722
1723 if (!tails) {
1724 if (best_page_pos == page_pos) {
1725 seq_base = best_lsn >> log->file_data_bits;
1726 saved_off = page_off = le32_to_cpu(best_page->file_off);
1727 lsn_base = best_lsn;
1728
1729 memmove(page_bufs, best_page, log->page_size);
1730
1731 page_cnt = le16_to_cpu(best_page->page_count);
1732 if (page_cnt > 1)
1733 page_pos += 1;
1734
1735 tails = 1;
1736 }
1737 } else if (seq_base == (best_lsn >> log->file_data_bits) &&
1738 saved_off + log->page_size == this_off &&
1739 lsn_base < best_lsn &&
1740 (page_pos != page_cnt || best_page_pos == page_pos ||
1741 best_page_pos == 1) &&
1742 (page_pos >= page_cnt || best_page_pos == page_pos)) {
1743 u16 bppc = le16_to_cpu(best_page->page_count);
1744
1745 saved_off += log->page_size;
1746 lsn_base = best_lsn;
1747
1748 memmove(Add2Ptr(page_bufs, tails * log->page_size), best_page,
1749 log->page_size);
1750
1751 tails += 1;
1752
1753 if (best_page_pos != bppc) {
1754 page_cnt = bppc;
1755 page_pos = best_page_pos;
1756
1757 if (page_cnt > 1)
1758 page_pos += 1;
1759 } else {
1760 page_pos = page_cnt = 1;
1761 }
1762 } else {
1763 kfree(first_tail);
1764 kfree(second_tail);
1765 goto tail_read;
1766 }
1767
195c52bd 1768 kfree(first_tail_prev);
1769 first_tail_prev = first_tail;
1770 final_off_prev = first_file_off;
1771 first_tail = NULL;
1772
195c52bd 1773 kfree(second_tail_prev);
1774 second_tail_prev = second_tail;
1775 second_off_prev = second_file_off;
1776 second_tail = NULL;
1777
1778 final_off += log->page_size;
1779 second_off += log->page_size;
1780
1781 if (tails < 0x10)
1782 goto next_tail;
1783tail_read:
1784 first_tail = first_tail_prev;
1785 final_off = final_off_prev;
1786
1787 second_tail = second_tail_prev;
1788 second_off = second_off_prev;
1789
1790 page_cnt = page_pos = 1;
1791
1792 curpage_off = seq_base == log->seq_num ? min(log->next_page, page_off)
1793 : log->next_page;
1794
1795 wrapped_file =
1796 curpage_off == log->first_page &&
1797 !(log->l_flags & (NTFSLOG_NO_LAST_LSN | NTFSLOG_REUSE_TAIL));
1798
1799 expected_seq = wrapped_file ? (log->seq_num + 1) : log->seq_num;
1800
1801 nextpage_off = curpage_off;
1802
1803next_page:
1804 tail_page = NULL;
e8b8e97f 1805 /* Read the next log page. */
1806 err = read_log_page(log, curpage_off, &page, &usa_error);
1807
1808 /* Compute the next log page offset in the file. */
1809 nextpage_off = next_page_off(log, curpage_off);
1810 wrapped = nextpage_off == log->first_page;
1811
1812 if (tails > 1) {
1813 struct RECORD_PAGE_HDR *cur_page =
1814 Add2Ptr(page_bufs, curpage_off - page_off);
1815
1816 if (curpage_off == saved_off) {
1817 tail_page = cur_page;
1818 goto use_tail_page;
1819 }
1820
1821 if (page_off > curpage_off || curpage_off >= saved_off)
1822 goto use_tail_page;
1823
1824 if (page_off1)
1825 goto use_cur_page;
1826
1827 if (!err && !usa_error &&
1828 page->rhdr.sign == NTFS_RCRD_SIGNATURE &&
1829 cur_page->rhdr.lsn == page->rhdr.lsn &&
1830 cur_page->record_hdr.next_record_off ==
1831 page->record_hdr.next_record_off &&
1832 ((page_pos == page_cnt &&
1833 le16_to_cpu(page->page_pos) == 1) ||
1834 (page_pos != page_cnt &&
1835 le16_to_cpu(page->page_pos) == page_pos + 1 &&
1836 le16_to_cpu(page->page_count) == page_cnt))) {
1837 cur_page = NULL;
1838 goto use_tail_page;
1839 }
1840
1841 page_off1 = page_off;
1842
1843use_cur_page:
1844
1845 lsn_cur = le64_to_cpu(cur_page->rhdr.lsn);
1846
1847 if (last_ok_lsn !=
1848 le64_to_cpu(cur_page->record_hdr.last_end_lsn) &&
1849 ((lsn_cur >> log->file_data_bits) +
1850 ((curpage_off <
1851 (lsn_to_vbo(log, lsn_cur) & ~log->page_mask))
1852 ? 1
1853 : 0)) != expected_seq) {
1854 goto check_tail;
1855 }
1856
1857 if (!is_log_record_end(cur_page)) {
1858 tail_page = NULL;
1859 last_ok_lsn = lsn_cur;
1860 goto next_page_1;
1861 }
1862
1863 log->seq_num = expected_seq;
1864 log->l_flags &= ~NTFSLOG_NO_LAST_LSN;
1865 log->last_lsn = le64_to_cpu(cur_page->record_hdr.last_end_lsn);
1866 log->ra->current_lsn = cur_page->record_hdr.last_end_lsn;
1867
1868 if (log->record_header_len <=
1869 log->page_size -
1870 le16_to_cpu(cur_page->record_hdr.next_record_off)) {
1871 log->l_flags |= NTFSLOG_REUSE_TAIL;
1872 log->next_page = curpage_off;
1873 } else {
1874 log->l_flags &= ~NTFSLOG_REUSE_TAIL;
1875 log->next_page = nextpage_off;
1876 }
1877
1878 if (wrapped_file)
1879 log->l_flags |= NTFSLOG_WRAPPED;
1880
1881 last_ok_lsn = le64_to_cpu(cur_page->record_hdr.last_end_lsn);
1882 goto next_page_1;
1883 }
1884
1885 /*
1886 * If we are at the expected first page of a transfer check to see
e8b8e97f 1887 * if either tail copy is at this offset.
b46acd6a 1888 * If this page is the last page of a transfer, check if we wrote
e8b8e97f 1889 * a subsequent tail copy.
1890 */
1891 if (page_cnt == page_pos || page_cnt == page_pos + 1) {
1892 /*
1893 * Check if the offset matches either the first or second
e8b8e97f 1894 * tail copy. It is possible it will match both.
b46acd6a
KK
1895 */
1896 if (curpage_off == final_off)
1897 tail_page = first_tail;
1898
1899 /*
1900 * If we already matched on the first page then
1901 * check the ending lsn's.
1902 */
1903 if (curpage_off == second_off) {
1904 if (!tail_page ||
1905 (second_tail &&
1906 le64_to_cpu(second_tail->record_hdr.last_end_lsn) >
1907 le64_to_cpu(first_tail->record_hdr
1908 .last_end_lsn))) {
1909 tail_page = second_tail;
1910 }
1911 }
1912 }
1913
1914use_tail_page:
1915 if (tail_page) {
e8b8e97f 1916 /* We have a candidate for a tail copy. */
b46acd6a
KK
1917 lsn_cur = le64_to_cpu(tail_page->record_hdr.last_end_lsn);
1918
1919 if (last_ok_lsn < lsn_cur) {
1920 /*
1921 * If the sequence number is not expected,
e8b8e97f 1922 * then don't use the tail copy.
b46acd6a
KK
1923 */
1924 if (expected_seq != (lsn_cur >> log->file_data_bits))
1925 tail_page = NULL;
1926 } else if (last_ok_lsn > lsn_cur) {
1927 /*
1928 * If the last lsn is greater than the one on
e8b8e97f 1929 * this page then forget this tail.
b46acd6a
KK
1930 */
1931 tail_page = NULL;
1932 }
1933 }
1934
e8b8e97f
KA
1935 /*
1936 * If we have an error on the current page,
1937 * we will break out of this loop.
1938 */
b46acd6a
KK
1939 if (err || usa_error)
1940 goto check_tail;
1941
1942 /*
1943 * Done if the last lsn on this page doesn't match the previous known
e8b8e97f 1944 * last lsn or the sequence number is not expected.
b46acd6a
KK
1945 */
1946 lsn_cur = le64_to_cpu(page->rhdr.lsn);
1947 if (last_ok_lsn != lsn_cur &&
1948 expected_seq != (lsn_cur >> log->file_data_bits)) {
1949 goto check_tail;
1950 }
1951
1952 /*
e8b8e97f 1953 * Check that the page position and page count values are correct.
b46acd6a 1954 * If this is the first page of a transfer, the position must be 1
e8b8e97f 1955 * and the count will be unknown.
b46acd6a
KK
1956 */
1957 if (page_cnt == page_pos) {
1958 if (page->page_pos != cpu_to_le16(1) &&
1959 (!reuse_page || page->page_pos != page->page_count)) {
1960 /*
1961 * If the current page is the first page we are
1962 * looking at and we are reusing this page then
1963 * it can be either the first or last page of a
1964 * transfer. Otherwise it can only be the first.
1965 */
1966 goto check_tail;
1967 }
1968 } else if (le16_to_cpu(page->page_count) != page_cnt ||
1969 le16_to_cpu(page->page_pos) != page_pos + 1) {
1970 /*
1971 * The page position better be 1 more than the last page
e8b8e97f 1972 * position and the page count better match.
b46acd6a
KK
1973 */
1974 goto check_tail;
1975 }
1976
1977 /*
1978 * We have a valid page in the file and may have a valid page
e8b8e97f 1979 * in the tail copy area.
b46acd6a 1980 * If the tail page was written after the page in the file then
e8b8e97f 1981 * break out of the loop.
b46acd6a
KK
1982 */
1983 if (tail_page &&
1984 le64_to_cpu(tail_page->record_hdr.last_end_lsn) > lsn_cur) {
e8b8e97f 1985 /* Remember if we will replace the page. */
b46acd6a
KK
1986 replace_page = true;
1987 goto check_tail;
1988 }
1989
1990 tail_page = NULL;
1991
1992 if (is_log_record_end(page)) {
1993 /*
1994 * Since we have read this page we know the sequence number
e8b8e97f 1995 * is the same as our expected value.
b46acd6a
KK
1996 */
1997 log->seq_num = expected_seq;
1998 log->last_lsn = le64_to_cpu(page->record_hdr.last_end_lsn);
1999 log->ra->current_lsn = page->record_hdr.last_end_lsn;
2000 log->l_flags &= ~NTFSLOG_NO_LAST_LSN;
2001
2002 /*
2003 * If there is room on this page for another header then
e8b8e97f 2004 * remember we want to reuse the page.
b46acd6a
KK
2005 */
2006 if (log->record_header_len <=
2007 log->page_size -
2008 le16_to_cpu(page->record_hdr.next_record_off)) {
2009 log->l_flags |= NTFSLOG_REUSE_TAIL;
2010 log->next_page = curpage_off;
2011 } else {
2012 log->l_flags &= ~NTFSLOG_REUSE_TAIL;
2013 log->next_page = nextpage_off;
2014 }
2015
e8b8e97f 2016 /* Remember if we wrapped the log file. */
b46acd6a
KK
2017 if (wrapped_file)
2018 log->l_flags |= NTFSLOG_WRAPPED;
2019 }
2020
2021 /*
2022 * Remember the last page count and position.
e8b8e97f 2023 * Also remember the last known lsn.
b46acd6a
KK
2024 */
2025 page_cnt = le16_to_cpu(page->page_count);
2026 page_pos = le16_to_cpu(page->page_pos);
2027 last_ok_lsn = le64_to_cpu(page->rhdr.lsn);
2028
2029next_page_1:
2030
2031 if (wrapped) {
2032 expected_seq += 1;
2033 wrapped_file = 1;
2034 }
2035
2036 curpage_off = nextpage_off;
195c52bd 2037 kfree(page);
b46acd6a
KK
2038 page = NULL;
2039 reuse_page = 0;
2040 goto next_page;
2041
2042check_tail:
2043 if (tail_page) {
2044 log->seq_num = expected_seq;
2045 log->last_lsn = le64_to_cpu(tail_page->record_hdr.last_end_lsn);
2046 log->ra->current_lsn = tail_page->record_hdr.last_end_lsn;
2047 log->l_flags &= ~NTFSLOG_NO_LAST_LSN;
2048
2049 if (log->page_size -
2050 le16_to_cpu(
2051 tail_page->record_hdr.next_record_off) >=
2052 log->record_header_len) {
2053 log->l_flags |= NTFSLOG_REUSE_TAIL;
2054 log->next_page = curpage_off;
2055 } else {
2056 log->l_flags &= ~NTFSLOG_REUSE_TAIL;
2057 log->next_page = nextpage_off;
2058 }
2059
2060 if (wrapped)
2061 log->l_flags |= NTFSLOG_WRAPPED;
2062 }
2063
e8b8e97f 2064 /* Remember that the partial IO will start at the next page. */
b46acd6a
KK
2065 second_off = nextpage_off;
2066
2067 /*
2068 * If the next page is the first page of the file then update
e8b8e97f 2069 * the sequence number for log records which begin on the next page.
b46acd6a
KK
2070 */
2071 if (wrapped)
2072 expected_seq += 1;
2073
2074 /*
2075 * If we have a tail copy or are performing single page I/O we can
e8b8e97f 2076 * immediately look at the next page.
b46acd6a
KK
2077 */
2078 if (replace_page || (log->ra->flags & RESTART_SINGLE_PAGE_IO)) {
2079 page_cnt = 2;
2080 page_pos = 1;
2081 goto check_valid;
2082 }
2083
2084 if (page_pos != page_cnt)
2085 goto check_valid;
2086 /*
2087 * If the next page causes us to wrap to the beginning of the log
2088 * file then we know which page to check next.
2089 */
2090 if (wrapped) {
2091 page_cnt = 2;
2092 page_pos = 1;
2093 goto check_valid;
2094 }
2095
2096 cur_pos = 2;
2097
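	/*
	 * Probe the pages that should follow in this transfer to see
	 * whether the original multi-page IO completed; the pages that
	 * belong to a partially written IO are counted in part_io_count.
	 */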
2098next_test_page:
195c52bd 2099 kfree(tst_page);
b46acd6a
KK
2100 tst_page = NULL;
2101
e8b8e97f 2102 /* Walk through the file, reading log pages. */
b46acd6a
KK
2103 err = read_log_page(log, nextpage_off, &tst_page, &usa_error);
2104
2105 /*
2106 * If we get a USA error then assume that we correctly found
e8b8e97f 2107 * the end of the original transfer.
b46acd6a
KK
2108 */
2109 if (usa_error)
2110 goto file_is_valid;
2111
2112 /*
2113 * If we were able to read the page, we examine it to see if it
e8b8e97f 2114 * is in the same or a different IO block.
b46acd6a
KK
2115 */
2116 if (err)
2117 goto next_test_page_1;
2118
2119 if (le16_to_cpu(tst_page->page_pos) == cur_pos &&
2120 check_subseq_log_page(log, tst_page, nextpage_off, expected_seq)) {
2121 page_cnt = le16_to_cpu(tst_page->page_count) + 1;
2122 page_pos = le16_to_cpu(tst_page->page_pos);
2123 goto check_valid;
2124 } else {
2125 goto file_is_valid;
2126 }
2127
2128next_test_page_1:
2129
2130 nextpage_off = next_page_off(log, curpage_off);
2131 wrapped = nextpage_off == log->first_page;
2132
2133 if (wrapped) {
2134 expected_seq += 1;
2135 page_cnt = 2;
2136 page_pos = 1;
2137 }
2138
2139 cur_pos += 1;
2140 part_io_count += 1;
2141 if (!wrapped)
2142 goto next_test_page;
2143
2144check_valid:
e8b8e97f 2145 /* Skip over the remaining pages of this transfer. */
b46acd6a
KK
2146 remain_pages = page_cnt - page_pos - 1;
2147 part_io_count += remain_pages;
2148
2149 while (remain_pages--) {
2150 nextpage_off = next_page_off(log, curpage_off);
2151 wrapped = nextpage_off == log->first_page;
2152
2153 if (wrapped)
2154 expected_seq += 1;
2155 }
2156
e8b8e97f 2157 /* Call our routine to check this log page. */
195c52bd 2158 kfree(tst_page);
b46acd6a
KK
2159 tst_page = NULL;
2160
2161 err = read_log_page(log, nextpage_off, &tst_page, &usa_error);
2162 if (!err && !usa_error &&
2163 check_subseq_log_page(log, tst_page, nextpage_off, expected_seq)) {
2164 err = -EINVAL;
2165 goto out;
2166 }
2167
2168file_is_valid:
2169
e8b8e97f 2170 /* We have a valid file. */
b46acd6a
KK
2171 if (page_off1 || tail_page) {
2172 struct RECORD_PAGE_HDR *tmp_page;
2173
2174 if (sb_rdonly(log->ni->mi.sbi->sb)) {
2175 err = -EROFS;
2176 goto out;
2177 }
2178
2179 if (page_off1) {
2180 tmp_page = Add2Ptr(page_bufs, page_off1 - page_off);
2181 tails -= (page_off1 - page_off) / log->page_size;
2182 if (!tail_page)
2183 tails -= 1;
2184 } else {
2185 tmp_page = tail_page;
2186 tails = 1;
2187 }
2188
2189 while (tails--) {
2190 u64 off = hdr_file_off(log, tmp_page);
2191
2192 if (!page) {
195c52bd 2193 page = kmalloc(log->page_size, GFP_NOFS);
b46acd6a
KK
2194 if (!page)
2195 return -ENOMEM;
2196 }
2197
2198 /*
2199 * Correct the page: copy the data from this tail page
e8b8e97f 2200 * into it and flush it to disk.
b46acd6a
KK
2201 */
2202 memcpy(page, tmp_page, log->page_size);
2203
e8b8e97f 2204 /* Fill in the last flushed lsn value and flush the page. */
b46acd6a
KK
2205 if (log->major_ver < 2)
2206 page->rhdr.lsn = page->record_hdr.last_end_lsn;
2207 else
2208 page->file_off = 0;
2209
2210 page->page_pos = page->page_count = cpu_to_le16(1);
2211
2212 ntfs_fix_pre_write(&page->rhdr, log->page_size);
2213
2214 err = ntfs_sb_write_run(log->ni->mi.sbi,
2215 &log->ni->file.run, off, page,
63544672 2216 log->page_size, 0);
b46acd6a
KK
2217
2218 if (err)
2219 goto out;
2220
2221 if (part_io_count && second_off == off) {
2222 second_off += log->page_size;
2223 part_io_count -= 1;
2224 }
2225
2226 tmp_page = Add2Ptr(tmp_page, log->page_size);
2227 }
2228 }
2229
2230 if (part_io_count) {
2231 if (sb_rdonly(log->ni->mi.sbi->sb)) {
2232 err = -EROFS;
2233 goto out;
2234 }
2235 }
2236
2237out:
195c52bd
KA
2238 kfree(second_tail);
2239 kfree(first_tail);
2240 kfree(page);
2241 kfree(tst_page);
2242 kfree(page_bufs);
b46acd6a
KK
2243
2244 return err;
2245}
2246
2247/*
e8b8e97f 2248 * read_log_rec_buf - Copy a log record from the file to a buffer.
b46acd6a 2249 *
e8b8e97f 2250 * The log record may span several log pages and may even wrap the file.
b46acd6a
KK
2251 */
2252static int read_log_rec_buf(struct ntfs_log *log,
2253 const struct LFS_RECORD_HDR *rh, void *buffer)
2254{
2255 int err;
2256 struct RECORD_PAGE_HDR *ph = NULL;
2257 u64 lsn = le64_to_cpu(rh->this_lsn);
2258 u32 vbo = lsn_to_vbo(log, lsn) & ~log->page_mask;
2259 u32 off = lsn_to_page_off(log, lsn) + log->record_header_len;
2260 u32 data_len = le32_to_cpu(rh->client_data_len);
2261
2262 /*
2263 * While there are more bytes to transfer,
e8b8e97f 2264 * we continue to attempt to perform the read.
b46acd6a
KK
2265 */
2266 for (;;) {
2267 bool usa_error;
2268 u32 tail = log->page_size - off;
2269
2270 if (tail >= data_len)
2271 tail = data_len;
2272
2273 data_len -= tail;
2274
2275 err = read_log_page(log, vbo, &ph, &usa_error);
2276 if (err)
2277 goto out;
2278
2279 /*
2280 * The last lsn on this page better be greater or equal
e8b8e97f 2281 * to the lsn we are copying.
b46acd6a
KK
2282 */
2283 if (lsn > le64_to_cpu(ph->rhdr.lsn)) {
2284 err = -EINVAL;
2285 goto out;
2286 }
2287
2288 memcpy(buffer, Add2Ptr(ph, off), tail);
2289
e8b8e97f 2290 /* If there are no more bytes to transfer, we exit the loop. */
b46acd6a
KK
2291 if (!data_len) {
2292 if (!is_log_record_end(ph) ||
2293 lsn > le64_to_cpu(ph->record_hdr.last_end_lsn)) {
2294 err = -EINVAL;
2295 goto out;
2296 }
2297 break;
2298 }
2299
2300 if (ph->rhdr.lsn == ph->record_hdr.last_end_lsn ||
2301 lsn > le64_to_cpu(ph->rhdr.lsn)) {
2302 err = -EINVAL;
2303 goto out;
2304 }
2305
2306 vbo = next_page_off(log, vbo);
2307 off = log->data_off;
2308
2309 /*
e8b8e97f
KA
2310 * Adjust our pointer into the user's buffer to transfer
2311 * the next block to.
b46acd6a
KK
2312 */
2313 buffer = Add2Ptr(buffer, tail);
2314 }
2315
2316out:
195c52bd 2317 kfree(ph);
b46acd6a
KK
2318 return err;
2319}
2320
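/*
 * read_rst_area - Read the client restart area record.
 *
 * Reads the log record referenced by the client's restart_lsn and copies
 * its NTFS_RESTART payload into a newly allocated buffer (*rst_).
 * *lsn and *rst_ stay zero/NULL if the client has no restart area.
 */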
2321static int read_rst_area(struct ntfs_log *log, struct NTFS_RESTART **rst_,
2322 u64 *lsn)
2323{
2324 int err;
2325 struct LFS_RECORD_HDR *rh = NULL;
2326 const struct CLIENT_REC *cr =
2327 Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off));
2328 u64 lsnr, lsnc = le64_to_cpu(cr->restart_lsn);
2329 u32 len;
2330 struct NTFS_RESTART *rst;
2331
2332 *lsn = 0;
2333 *rst_ = NULL;
2334
e8b8e97f 2335 /* If the client doesn't have a restart area, go ahead and exit now. */
b46acd6a
KK
2336 if (!lsnc)
2337 return 0;
2338
2339 err = read_log_page(log, lsn_to_vbo(log, lsnc),
2340 (struct RECORD_PAGE_HDR **)&rh, NULL);
2341 if (err)
2342 return err;
2343
2344 rst = NULL;
2345 lsnr = le64_to_cpu(rh->this_lsn);
2346
2347 if (lsnc != lsnr) {
e8b8e97f 2348 /* If the lsn values don't match, then the disk is corrupt. */
b46acd6a
KK
2349 err = -EINVAL;
2350 goto out;
2351 }
2352
2353 *lsn = lsnr;
2354 len = le32_to_cpu(rh->client_data_len);
2355
2356 if (!len) {
2357 err = 0;
2358 goto out;
2359 }
2360
2361 if (len < sizeof(struct NTFS_RESTART)) {
2362 err = -EINVAL;
2363 goto out;
2364 }
2365
195c52bd 2366 rst = kmalloc(len, GFP_NOFS);
b46acd6a
KK
2367 if (!rst) {
2368 err = -ENOMEM;
2369 goto out;
2370 }
2371
e8b8e97f 2372 /* Copy the data into the 'rst' buffer. */
b46acd6a
KK
2373 err = read_log_rec_buf(log, rh, rst);
2374 if (err)
2375 goto out;
2376
2377 *rst_ = rst;
2378 rst = NULL;
2379
2380out:
195c52bd
KA
2381 kfree(rh);
2382 kfree(rst);
b46acd6a
KK
2383
2384 return err;
2385}
2386
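/*
 * find_log_rec - Read the log record at the given lsn into the lcb context.
 *
 * A record that spans several log pages is copied into an allocated buffer,
 * otherwise lcb->log_rec points directly into the page that was read.
 */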
2387static int find_log_rec(struct ntfs_log *log, u64 lsn, struct lcb *lcb)
2388{
2389 int err;
2390 struct LFS_RECORD_HDR *rh = lcb->lrh;
2391 u32 rec_len, len;
2392
e8b8e97f 2393 /* Read the record header for this lsn. */
b46acd6a
KK
2394 if (!rh) {
2395 err = read_log_page(log, lsn_to_vbo(log, lsn),
2396 (struct RECORD_PAGE_HDR **)&rh, NULL);
2397
2398 lcb->lrh = rh;
2399 if (err)
2400 return err;
2401 }
2402
2403 /*
2404 * If the lsn in the log record doesn't match the desired
e8b8e97f 2405 * lsn then the disk is corrupt.
b46acd6a
KK
2406 */
2407 if (lsn != le64_to_cpu(rh->this_lsn))
2408 return -EINVAL;
2409
2410 len = le32_to_cpu(rh->client_data_len);
2411
2412 /*
e8b8e97f
KA
2413 * Check that the length field isn't greater than the total
2414 * available space in the log file.
b46acd6a
KK
2415 */
2416 rec_len = len + log->record_header_len;
2417 if (rec_len >= log->total_avail)
2418 return -EINVAL;
2419
2420 /*
2421 * If the log record spans multiple pages, copy it into an allocated
e8b8e97f 2422 * buffer; otherwise put a pointer to the log record in the context block.
b46acd6a
KK
2423 */
2424 if (rh->flags & LOG_RECORD_MULTI_PAGE) {
195c52bd 2425 void *lr = kmalloc(len, GFP_NOFS);
b46acd6a
KK
2426
2427 if (!lr)
2428 return -ENOMEM;
2429
2430 lcb->log_rec = lr;
2431 lcb->alloc = true;
2432
e8b8e97f 2433 /* Copy the data into the buffer returned. */
b46acd6a
KK
2434 err = read_log_rec_buf(log, rh, lr);
2435 if (err)
2436 return err;
2437 } else {
e8b8e97f 2438 /* If beyond the end of the current page -> an error. */
b46acd6a
KK
2439 u32 page_off = lsn_to_page_off(log, lsn);
2440
2441 if (page_off + len + log->record_header_len > log->page_size)
2442 return -EINVAL;
2443
2444 lcb->log_rec = Add2Ptr(rh, sizeof(struct LFS_RECORD_HDR));
2445 lcb->alloc = false;
2446 }
2447
2448 return 0;
2449}
2450
2451/*
e8b8e97f 2452 * read_log_rec_lcb - Init the query operation.
b46acd6a
KK
2453 */
2454static int read_log_rec_lcb(struct ntfs_log *log, u64 lsn, u32 ctx_mode,
2455 struct lcb **lcb_)
2456{
2457 int err;
2458 const struct CLIENT_REC *cr;
2459 struct lcb *lcb;
2460
2461 switch (ctx_mode) {
2462 case lcb_ctx_undo_next:
2463 case lcb_ctx_prev:
2464 case lcb_ctx_next:
2465 break;
2466 default:
2467 return -EINVAL;
2468 }
2469
e8b8e97f 2470 /* Check that the given lsn is in the legal range for this client. */
b46acd6a
KK
2471 cr = Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off));
2472
2473 if (!verify_client_lsn(log, cr, lsn))
2474 return -EINVAL;
2475
195c52bd 2476 lcb = kzalloc(sizeof(struct lcb), GFP_NOFS);
b46acd6a
KK
2477 if (!lcb)
2478 return -ENOMEM;
2479 lcb->client = log->client_id;
2480 lcb->ctx_mode = ctx_mode;
2481
e8b8e97f 2482 /* Find the log record indicated by the given lsn. */
b46acd6a
KK
2483 err = find_log_rec(log, lsn, lcb);
2484 if (err)
2485 goto out;
2486
2487 *lcb_ = lcb;
2488 return 0;
2489
2490out:
2491 lcb_put(lcb);
2492 *lcb_ = NULL;
2493 return err;
2494}
2495
2496/*
2497 * find_client_next_lsn
2498 *
e8b8e97f 2499 * Attempt to find the next lsn to return to a client based on the context mode.
b46acd6a
KK
2500 */
2501static int find_client_next_lsn(struct ntfs_log *log, struct lcb *lcb, u64 *lsn)
2502{
2503 int err;
2504 u64 next_lsn;
2505 struct LFS_RECORD_HDR *hdr;
2506
2507 hdr = lcb->lrh;
2508 *lsn = 0;
2509
2510 if (lcb_ctx_next != lcb->ctx_mode)
2511 goto check_undo_next;
2512
e8b8e97f 2513 /* Loop as long as another lsn can be found. */
b46acd6a
KK
2514 for (;;) {
2515 u64 current_lsn;
2516
2517 err = next_log_lsn(log, hdr, &current_lsn);
2518 if (err)
2519 goto out;
2520
2521 if (!current_lsn)
2522 break;
2523
2524 if (hdr != lcb->lrh)
195c52bd 2525 kfree(hdr);
b46acd6a
KK
2526
2527 hdr = NULL;
2528 err = read_log_page(log, lsn_to_vbo(log, current_lsn),
2529 (struct RECORD_PAGE_HDR **)&hdr, NULL);
2530 if (err)
2531 goto out;
2532
2533 if (memcmp(&hdr->client, &lcb->client,
2534 sizeof(struct CLIENT_ID))) {
2535 /*err = -EINVAL; */
2536 } else if (LfsClientRecord == hdr->record_type) {
195c52bd 2537 kfree(lcb->lrh);
b46acd6a
KK
2538 lcb->lrh = hdr;
2539 *lsn = current_lsn;
2540 return 0;
2541 }
2542 }
2543
2544out:
2545 if (hdr != lcb->lrh)
195c52bd 2546 kfree(hdr);
b46acd6a
KK
2547 return err;
2548
2549check_undo_next:
2550 if (lcb_ctx_undo_next == lcb->ctx_mode)
2551 next_lsn = le64_to_cpu(hdr->client_undo_next_lsn);
2552 else if (lcb_ctx_prev == lcb->ctx_mode)
2553 next_lsn = le64_to_cpu(hdr->client_prev_lsn);
2554 else
2555 return 0;
2556
2557 if (!next_lsn)
2558 return 0;
2559
2560 if (!verify_client_lsn(
2561 log, Add2Ptr(log->ra, le16_to_cpu(log->ra->client_off)),
2562 next_lsn))
2563 return 0;
2564
2565 hdr = NULL;
2566 err = read_log_page(log, lsn_to_vbo(log, next_lsn),
2567 (struct RECORD_PAGE_HDR **)&hdr, NULL);
2568 if (err)
2569 return err;
195c52bd 2570 kfree(lcb->lrh);
b46acd6a
KK
2571 lcb->lrh = hdr;
2572
2573 *lsn = next_lsn;
2574
2575 return 0;
2576}
2577
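/*
 * read_next_log_rec - Advance the lcb to the next log record of this client
 * and read it. *lsn is set to 0 when there are no more records.
 */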
2578static int read_next_log_rec(struct ntfs_log *log, struct lcb *lcb, u64 *lsn)
2579{
2580 int err;
2581
2582 err = find_client_next_lsn(log, lcb, lsn);
2583 if (err)
2584 return err;
2585
2586 if (!*lsn)
2587 return 0;
2588
2589 if (lcb->alloc)
195c52bd 2590 kfree(lcb->log_rec);
b46acd6a
KK
2591
2592 lcb->log_rec = NULL;
2593 lcb->alloc = false;
195c52bd 2594 kfree(lcb->lrh);
b46acd6a
KK
2595 lcb->lrh = NULL;
2596
2597 return find_log_rec(log, *lsn, lcb);
2598}
2599
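/*
 * check_index_header - Sanity check an INDEX_HDR that fits in 'bytes'.
 *
 * Verifies the header offsets and walks every directory entry up to the
 * last one, checking entry sizes and the has-subnodes flag.
 */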
2600static inline bool check_index_header(const struct INDEX_HDR *hdr, size_t bytes)
2601{
2602 __le16 mask;
2603 u32 min_de, de_off, used, total;
2604 const struct NTFS_DE *e;
2605
2606 if (hdr_has_subnode(hdr)) {
2607 min_de = sizeof(struct NTFS_DE) + sizeof(u64);
2608 mask = NTFS_IE_HAS_SUBNODES;
2609 } else {
2610 min_de = sizeof(struct NTFS_DE);
2611 mask = 0;
2612 }
2613
2614 de_off = le32_to_cpu(hdr->de_off);
2615 used = le32_to_cpu(hdr->used);
2616 total = le32_to_cpu(hdr->total);
2617
2618 if (de_off > bytes - min_de || used > bytes || total > bytes ||
2619 de_off + min_de > used || used > total) {
2620 return false;
2621 }
2622
2623 e = Add2Ptr(hdr, de_off);
2624 for (;;) {
2625 u16 esize = le16_to_cpu(e->size);
2626 struct NTFS_DE *next = Add2Ptr(e, esize);
2627
2628 if (esize < min_de || PtrOffset(hdr, next) > used ||
2629 (e->flags & NTFS_IE_HAS_SUBNODES) != mask) {
2630 return false;
2631 }
2632
2633 if (de_is_last(e))
2634 break;
2635
2636 e = next;
2637 }
2638
2639 return true;
2640}
2641
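/*
 * check_index_buffer - Sanity check an INDEX_BUFFER ('INDX') block:
 * signature, fixup array and the embedded index header.
 */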
2642static inline bool check_index_buffer(const struct INDEX_BUFFER *ib, u32 bytes)
2643{
2644 u16 fo;
2645 const struct NTFS_RECORD_HEADER *r = &ib->rhdr;
2646
2647 if (r->sign != NTFS_INDX_SIGNATURE)
2648 return false;
2649
2650 fo = (SECTOR_SIZE - ((bytes >> SECTOR_SHIFT) + 1) * sizeof(short));
2651
2652 if (le16_to_cpu(r->fix_off) > fo)
2653 return false;
2654
2655 if ((le16_to_cpu(r->fix_num) - 1) * SECTOR_SIZE != bytes)
2656 return false;
2657
2658 return check_index_header(&ib->ihdr,
2659 bytes - offsetof(struct INDEX_BUFFER, ihdr));
2660}
2661
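/*
 * check_index_root - Sanity check a resident INDEX_ROOT attribute:
 * size, collation rule and index block geometry, then its index header.
 */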
2662static inline bool check_index_root(const struct ATTRIB *attr,
2663 struct ntfs_sb_info *sbi)
2664{
2665 bool ret;
2666 const struct INDEX_ROOT *root = resident_data(attr);
2667 u8 index_bits = le32_to_cpu(root->index_block_size) >= sbi->cluster_size
2668 ? sbi->cluster_bits
2669 : SECTOR_SHIFT;
2670 u8 block_clst = root->index_block_clst;
2671
2672 if (le32_to_cpu(attr->res.data_size) < sizeof(struct INDEX_ROOT) ||
2673 (root->type != ATTR_NAME && root->type != ATTR_ZERO) ||
2674 (root->type == ATTR_NAME &&
2675 root->rule != NTFS_COLLATION_TYPE_FILENAME) ||
2676 (le32_to_cpu(root->index_block_size) !=
2677 (block_clst << index_bits)) ||
2678 (block_clst != 1 && block_clst != 2 && block_clst != 4 &&
2679 block_clst != 8 && block_clst != 0x10 && block_clst != 0x20 &&
2680 block_clst != 0x40 && block_clst != 0x80)) {
2681 return false;
2682 }
2683
2684 ret = check_index_header(&root->ihdr,
2685 le32_to_cpu(attr->res.data_size) -
2686 offsetof(struct INDEX_ROOT, ihdr));
2687 return ret;
2688}
2689
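/*
 * check_attr - Sanity check a single attribute of an MFT record,
 * including the resident/non-resident specific fields and run list.
 */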
2690static inline bool check_attr(const struct MFT_REC *rec,
2691 const struct ATTRIB *attr,
2692 struct ntfs_sb_info *sbi)
2693{
2694 u32 asize = le32_to_cpu(attr->size);
2695 u32 rsize = 0;
2696 u64 dsize, svcn, evcn;
2697 u16 run_off;
2698
e8b8e97f 2699 /* Check the fixed part of the attribute record header. */
b46acd6a
KK
2700 if (asize >= sbi->record_size ||
2701 asize + PtrOffset(rec, attr) >= sbi->record_size ||
2702 (attr->name_len &&
2703 le16_to_cpu(attr->name_off) + attr->name_len * sizeof(short) >
2704 asize)) {
2705 return false;
2706 }
2707
e8b8e97f 2708 /* Check the attribute fields. */
b46acd6a
KK
2709 switch (attr->non_res) {
2710 case 0:
2711 rsize = le32_to_cpu(attr->res.data_size);
2712 if (rsize >= asize ||
2713 le16_to_cpu(attr->res.data_off) + rsize > asize) {
2714 return false;
2715 }
2716 break;
2717
2718 case 1:
2719 dsize = le64_to_cpu(attr->nres.data_size);
2720 svcn = le64_to_cpu(attr->nres.svcn);
2721 evcn = le64_to_cpu(attr->nres.evcn);
2722 run_off = le16_to_cpu(attr->nres.run_off);
2723
2724 if (svcn > evcn + 1 || run_off >= asize ||
2725 le64_to_cpu(attr->nres.valid_size) > dsize ||
2726 dsize > le64_to_cpu(attr->nres.alloc_size)) {
2727 return false;
2728 }
2729
2730 if (run_unpack(NULL, sbi, 0, svcn, evcn, svcn,
2731 Add2Ptr(attr, run_off), asize - run_off) < 0) {
2732 return false;
2733 }
2734
2735 return true;
2736
2737 default:
2738 return false;
2739 }
2740
2741 switch (attr->type) {
2742 case ATTR_NAME:
2743 if (fname_full_size(Add2Ptr(
2744 attr, le16_to_cpu(attr->res.data_off))) > asize) {
2745 return false;
2746 }
2747 break;
2748
2749 case ATTR_ROOT:
2750 return check_index_root(attr, sbi);
2751
2752 case ATTR_STD:
2753 if (rsize < sizeof(struct ATTR_STD_INFO5) &&
2754 rsize != sizeof(struct ATTR_STD_INFO)) {
2755 return false;
2756 }
2757 break;
2758
2759 case ATTR_LIST:
2760 case ATTR_ID:
2761 case ATTR_SECURE:
2762 case ATTR_LABEL:
2763 case ATTR_VOL_INFO:
2764 case ATTR_DATA:
2765 case ATTR_ALLOC:
2766 case ATTR_BITMAP:
2767 case ATTR_REPARSE:
2768 case ATTR_EA_INFO:
2769 case ATTR_EA:
2770 case ATTR_PROPERTYSET:
2771 case ATTR_LOGGED_UTILITY_STREAM:
2772 break;
2773
2774 default:
2775 return false;
2776 }
2777
2778 return true;
2779}
2780
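/*
 * check_file_record - Sanity check an MFT record header and all of
 * its attributes before a logged update is applied to it.
 */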
2781static inline bool check_file_record(const struct MFT_REC *rec,
2782 const struct MFT_REC *rec2,
2783 struct ntfs_sb_info *sbi)
2784{
2785 const struct ATTRIB *attr;
2786 u16 fo = le16_to_cpu(rec->rhdr.fix_off);
2787 u16 fn = le16_to_cpu(rec->rhdr.fix_num);
2788 u16 ao = le16_to_cpu(rec->attr_off);
2789 u32 rs = sbi->record_size;
2790
e8b8e97f 2791 /* Check the file record header for consistency. */
b46acd6a
KK
2792 if (rec->rhdr.sign != NTFS_FILE_SIGNATURE ||
2793 fo > (SECTOR_SIZE - ((rs >> SECTOR_SHIFT) + 1) * sizeof(short)) ||
2794 (fn - 1) * SECTOR_SIZE != rs || ao < MFTRECORD_FIXUP_OFFSET_1 ||
2795 ao > sbi->record_size - SIZEOF_RESIDENT || !is_rec_inuse(rec) ||
2796 le32_to_cpu(rec->total) != rs) {
2797 return false;
2798 }
2799
e8b8e97f 2800 /* Loop to check all of the attributes. */
b46acd6a
KK
2801 for (attr = Add2Ptr(rec, ao); attr->type != ATTR_END;
2802 attr = Add2Ptr(attr, le32_to_cpu(attr->size))) {
2803 if (check_attr(rec, attr, sbi))
2804 continue;
2805 return false;
2806 }
2807
2808 return true;
2809}
2810
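/*
 * check_lsn - Decide whether the logged update must be applied.
 *
 * Returns true for the undo pass (rlsn == NULL) or when the record on disk
 * carries an lsn older than *rlsn; returns false for hole records.
 */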
2811static inline int check_lsn(const struct NTFS_RECORD_HEADER *hdr,
2812 const u64 *rlsn)
2813{
2814 u64 lsn;
2815
2816 if (!rlsn)
2817 return true;
2818
2819 lsn = le64_to_cpu(hdr->lsn);
2820
2821 if (hdr->sign == NTFS_HOLE_SIGNATURE)
2822 return false;
2823
2824 if (*rlsn > lsn)
2825 return true;
2826
2827 return false;
2828}
2829
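/*
 * check_if_attr - Check that lrh->record_off lands exactly on an
 * attribute boundary inside the MFT record.
 */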
2830static inline bool check_if_attr(const struct MFT_REC *rec,
2831 const struct LOG_REC_HDR *lrh)
2832{
2833 u16 ro = le16_to_cpu(lrh->record_off);
2834 u16 o = le16_to_cpu(rec->attr_off);
2835 const struct ATTRIB *attr = Add2Ptr(rec, o);
2836
2837 while (o < ro) {
2838 u32 asize;
2839
2840 if (attr->type == ATTR_END)
2841 break;
2842
2843 asize = le32_to_cpu(attr->size);
2844 if (!asize)
2845 break;
2846
2847 o += asize;
2848 attr = Add2Ptr(attr, asize);
2849 }
2850
2851 return o == ro;
2852}
2853
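/*
 * check_if_index_root - Like check_if_attr(), but additionally requires
 * the attribute at that offset to be the index root.
 */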
2854static inline bool check_if_index_root(const struct MFT_REC *rec,
2855 const struct LOG_REC_HDR *lrh)
2856{
2857 u16 ro = le16_to_cpu(lrh->record_off);
2858 u16 o = le16_to_cpu(rec->attr_off);
2859 const struct ATTRIB *attr = Add2Ptr(rec, o);
2860
2861 while (o < ro) {
2862 u32 asize;
2863
2864 if (attr->type == ATTR_END)
2865 break;
2866
2867 asize = le32_to_cpu(attr->size);
2868 if (!asize)
2869 break;
2870
2871 o += asize;
2872 attr = Add2Ptr(attr, asize);
2873 }
2874
2875 return o == ro && attr->type == ATTR_ROOT;
2876}
2877
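/*
 * check_if_root_index - Check that lrh->attr_off lands exactly on an
 * index entry boundary inside the root index header.
 */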
2878static inline bool check_if_root_index(const struct ATTRIB *attr,
2879 const struct INDEX_HDR *hdr,
2880 const struct LOG_REC_HDR *lrh)
2881{
2882 u16 ao = le16_to_cpu(lrh->attr_off);
2883 u32 de_off = le32_to_cpu(hdr->de_off);
2884 u32 o = PtrOffset(attr, hdr) + de_off;
2885 const struct NTFS_DE *e = Add2Ptr(hdr, de_off);
2886 u32 asize = le32_to_cpu(attr->size);
2887
2888 while (o < ao) {
2889 u16 esize;
2890
2891 if (o >= asize)
2892 break;
2893
2894 esize = le16_to_cpu(e->size);
2895 if (!esize)
2896 break;
2897
2898 o += esize;
2899 e = Add2Ptr(e, esize);
2900 }
2901
2902 return o == ao;
2903}
2904
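/*
 * check_if_alloc_index - Check that attr_off lands exactly on an index
 * entry boundary inside an index allocation buffer.
 */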
2905static inline bool check_if_alloc_index(const struct INDEX_HDR *hdr,
2906 u32 attr_off)
2907{
2908 u32 de_off = le32_to_cpu(hdr->de_off);
2909 u32 o = offsetof(struct INDEX_BUFFER, ihdr) + de_off;
2910 const struct NTFS_DE *e = Add2Ptr(hdr, de_off);
2911 u32 used = le32_to_cpu(hdr->used);
2912
2913 while (o < attr_off) {
2914 u16 esize;
2915
2916 if (de_off >= used)
2917 break;
2918
2919 esize = le16_to_cpu(e->size);
2920 if (!esize)
2921 break;
2922
2923 o += esize;
2924 de_off += esize;
2925 e = Add2Ptr(e, esize);
2926 }
2927
2928 return o == attr_off;
2929}
2930
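/*
 * change_attr_size - Resize an attribute in place, moving the rest of
 * the MFT record and updating rec->used accordingly.
 */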
2931static inline void change_attr_size(struct MFT_REC *rec, struct ATTRIB *attr,
2932 u32 nsize)
2933{
2934 u32 asize = le32_to_cpu(attr->size);
2935 int dsize = nsize - asize;
2936 u8 *next = Add2Ptr(attr, asize);
2937 u32 used = le32_to_cpu(rec->used);
2938
2939 memmove(Add2Ptr(attr, nsize), next, used - PtrOffset(rec, next));
2940
2941 rec->used = cpu_to_le32(used + dsize);
2942 attr->size = cpu_to_le32(nsize);
2943}
2944
2945struct OpenAttr {
2946 struct ATTRIB *attr;
2947 struct runs_tree *run1;
2948 struct runs_tree run0;
2949 struct ntfs_inode *ni;
2950 // CLST rno;
2951};
2952
e8b8e97f
KA
2953/*
2954 * cmp_type_and_name
2955 *
2956 * Return: 0 if 'a1' and 'a2' have the same type and name.
2957 */
b46acd6a
KK
2958static inline int cmp_type_and_name(const struct ATTRIB *a1,
2959 const struct ATTRIB *a2)
2960{
2961 return a1->type != a2->type || a1->name_len != a2->name_len ||
2962 (a1->name_len && memcmp(attr_name(a1), attr_name(a2),
2963 a1->name_len * sizeof(short)));
2964}
2965
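/*
 * find_loaded_attr - Find an already opened attribute of inode 'rno'
 * with the same type and name in the open attribute table.
 */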
2966static struct OpenAttr *find_loaded_attr(struct ntfs_log *log,
2967 const struct ATTRIB *attr, CLST rno)
2968{
2969 struct OPEN_ATTR_ENRTY *oe = NULL;
2970
2971 while ((oe = enum_rstbl(log->open_attr_tbl, oe))) {
2972 struct OpenAttr *op_attr;
2973
2974 if (ino_get(&oe->ref) != rno)
2975 continue;
2976
2977 op_attr = (struct OpenAttr *)oe->ptr;
2978 if (!cmp_type_and_name(op_attr->attr, attr))
2979 return op_attr;
2980 }
2981 return NULL;
2982}
2983
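/*
 * attr_create_nonres_log - Allocate and fill a non-resident attribute
 * header (with its name) for use during log replay; no mapping pairs
 * are stored yet.
 */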
2984static struct ATTRIB *attr_create_nonres_log(struct ntfs_sb_info *sbi,
2985 enum ATTR_TYPE type, u64 size,
2986 const u16 *name, size_t name_len,
2987 __le16 flags)
2988{
2989 struct ATTRIB *attr;
fa3cacf5 2990 u32 name_size = ALIGN(name_len * sizeof(short), 8);
b46acd6a
KK
2991 bool is_ext = flags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED);
2992 u32 asize = name_size +
2993 (is_ext ? SIZEOF_NONRESIDENT_EX : SIZEOF_NONRESIDENT);
2994
195c52bd 2995 attr = kzalloc(asize, GFP_NOFS);
b46acd6a
KK
2996 if (!attr)
2997 return NULL;
2998
2999 attr->type = type;
3000 attr->size = cpu_to_le32(asize);
3001 attr->flags = flags;
3002 attr->non_res = 1;
3003 attr->name_len = name_len;
3004
3005 attr->nres.evcn = cpu_to_le64((u64)bytes_to_cluster(sbi, size) - 1);
3006 attr->nres.alloc_size = cpu_to_le64(ntfs_up_cluster(sbi, size));
3007 attr->nres.data_size = cpu_to_le64(size);
3008 attr->nres.valid_size = attr->nres.data_size;
3009 if (is_ext) {
3010 attr->name_off = SIZEOF_NONRESIDENT_EX_LE;
3011 if (is_attr_compressed(attr))
3012 attr->nres.c_unit = COMPRESSION_UNIT;
3013
3014 attr->nres.run_off =
3015 cpu_to_le16(SIZEOF_NONRESIDENT_EX + name_size);
3016 memcpy(Add2Ptr(attr, SIZEOF_NONRESIDENT_EX), name,
3017 name_len * sizeof(short));
3018 } else {
3019 attr->name_off = SIZEOF_NONRESIDENT_LE;
3020 attr->nres.run_off =
3021 cpu_to_le16(SIZEOF_NONRESIDENT + name_size);
3022 memcpy(Add2Ptr(attr, SIZEOF_NONRESIDENT), name,
3023 name_len * sizeof(short));
3024 }
3025
3026 return attr;
3027}
3028
3029/*
e8b8e97f
KA
3030 * do_action - Common routine for the Redo and Undo Passes.
3031 * @rlsn: If it is NULL then undo.
b46acd6a
KK
3032 */
3033static int do_action(struct ntfs_log *log, struct OPEN_ATTR_ENRTY *oe,
3034 const struct LOG_REC_HDR *lrh, u32 op, void *data,
3035 u32 dlen, u32 rec_len, const u64 *rlsn)
3036{
3037 int err = 0;
3038 struct ntfs_sb_info *sbi = log->ni->mi.sbi;
3039 struct inode *inode = NULL, *inode_parent;
3040 struct mft_inode *mi = NULL, *mi2_child = NULL;
3041 CLST rno = 0, rno_base = 0;
3042 struct INDEX_BUFFER *ib = NULL;
3043 struct MFT_REC *rec = NULL;
3044 struct ATTRIB *attr = NULL, *attr2;
3045 struct INDEX_HDR *hdr;
3046 struct INDEX_ROOT *root;
3047 struct NTFS_DE *e, *e1, *e2;
3048 struct NEW_ATTRIBUTE_SIZES *new_sz;
3049 struct ATTR_FILE_NAME *fname;
3050 struct OpenAttr *oa, *oa2;
3051 u32 nsize, t32, asize, used, esize, bmp_off, bmp_bits;
3052 u16 id, id2;
3053 u32 record_size = sbi->record_size;
3054 u64 t64;
3055 u16 roff = le16_to_cpu(lrh->record_off);
3056 u16 aoff = le16_to_cpu(lrh->attr_off);
3057 u64 lco = 0;
3058 u64 cbo = (u64)le16_to_cpu(lrh->cluster_off) << SECTOR_SHIFT;
3059 u64 tvo = le64_to_cpu(lrh->target_vcn) << sbi->cluster_bits;
3060 u64 vbo = cbo + tvo;
3061 void *buffer_le = NULL;
3062 u32 bytes = 0;
3063 bool a_dirty = false;
3064 u16 data_off;
3065
3066 oa = oe->ptr;
3067
e8b8e97f 3068 /* Big switch to prepare. */
b46acd6a
KK
3069 switch (op) {
3070 /* ============================================================
e8b8e97f 3071 * Process MFT records, as described by the current log record.
b46acd6a
KK
3072 * ============================================================
3073 */
3074 case InitializeFileRecordSegment:
3075 case DeallocateFileRecordSegment:
3076 case WriteEndOfFileRecordSegment:
3077 case CreateAttribute:
3078 case DeleteAttribute:
3079 case UpdateResidentValue:
3080 case UpdateMappingPairs:
3081 case SetNewAttributeSizes:
3082 case AddIndexEntryRoot:
3083 case DeleteIndexEntryRoot:
3084 case SetIndexEntryVcnRoot:
3085 case UpdateFileNameRoot:
3086 case UpdateRecordDataRoot:
3087 case ZeroEndOfFileRecord:
3088 rno = vbo >> sbi->record_bits;
3089 inode = ilookup(sbi->sb, rno);
3090 if (inode) {
3091 mi = &ntfs_i(inode)->mi;
3092 } else if (op == InitializeFileRecordSegment) {
195c52bd 3093 mi = kzalloc(sizeof(struct mft_inode), GFP_NOFS);
b46acd6a
KK
3094 if (!mi)
3095 return -ENOMEM;
3096 err = mi_format_new(mi, sbi, rno, 0, false);
3097 if (err)
3098 goto out;
3099 } else {
e8b8e97f 3100 /* Read from disk. */
b46acd6a
KK
3101 err = mi_get(sbi, rno, &mi);
3102 if (err)
3103 return err;
3104 }
3105 rec = mi->mrec;
3106
3107 if (op == DeallocateFileRecordSegment)
3108 goto skip_load_parent;
3109
3110 if (InitializeFileRecordSegment != op) {
3111 if (rec->rhdr.sign == NTFS_BAAD_SIGNATURE)
3112 goto dirty_vol;
3113 if (!check_lsn(&rec->rhdr, rlsn))
3114 goto out;
3115 if (!check_file_record(rec, NULL, sbi))
3116 goto dirty_vol;
3117 attr = Add2Ptr(rec, roff);
3118 }
3119
3120 if (is_rec_base(rec) || InitializeFileRecordSegment == op) {
3121 rno_base = rno;
3122 goto skip_load_parent;
3123 }
3124
3125 rno_base = ino_get(&rec->parent_ref);
3126 inode_parent = ntfs_iget5(sbi->sb, &rec->parent_ref, NULL);
3127 if (IS_ERR(inode_parent))
3128 goto skip_load_parent;
3129
3130 if (is_bad_inode(inode_parent)) {
3131 iput(inode_parent);
3132 goto skip_load_parent;
3133 }
3134
3135 if (ni_load_mi_ex(ntfs_i(inode_parent), rno, &mi2_child)) {
3136 iput(inode_parent);
3137 } else {
3138 if (mi2_child->mrec != mi->mrec)
3139 memcpy(mi2_child->mrec, mi->mrec,
3140 sbi->record_size);
3141
3142 if (inode)
3143 iput(inode);
3144 else if (mi)
3145 mi_put(mi);
3146
3147 inode = inode_parent;
3148 mi = mi2_child;
3149 rec = mi2_child->mrec;
3150 attr = Add2Ptr(rec, roff);
3151 }
3152
3153skip_load_parent:
3154 inode_parent = NULL;
3155 break;
3156
e8b8e97f
KA
3157 /*
3158 * Process attributes, as described by the current log record.
b46acd6a
KK
3159 */
3160 case UpdateNonresidentValue:
3161 case AddIndexEntryAllocation:
3162 case DeleteIndexEntryAllocation:
3163 case WriteEndOfIndexBuffer:
3164 case SetIndexEntryVcnAllocation:
3165 case UpdateFileNameAllocation:
3166 case SetBitsInNonresidentBitMap:
3167 case ClearBitsInNonresidentBitMap:
3168 case UpdateRecordDataAllocation:
3169 attr = oa->attr;
3170 bytes = UpdateNonresidentValue == op ? dlen : 0;
3171 lco = (u64)le16_to_cpu(lrh->lcns_follow) << sbi->cluster_bits;
3172
3173 if (attr->type == ATTR_ALLOC) {
3174 t32 = le32_to_cpu(oe->bytes_per_index);
3175 if (bytes < t32)
3176 bytes = t32;
3177 }
3178
3179 if (!bytes)
3180 bytes = lco - cbo;
3181
3182 bytes += roff;
3183 if (attr->type == ATTR_ALLOC)
3184 bytes = (bytes + 511) & ~511; // align
3185
195c52bd 3186 buffer_le = kmalloc(bytes, GFP_NOFS);
b46acd6a
KK
3187 if (!buffer_le)
3188 return -ENOMEM;
3189
3190 err = ntfs_read_run_nb(sbi, oa->run1, vbo, buffer_le, bytes,
3191 NULL);
3192 if (err)
3193 goto out;
3194
3195 if (attr->type == ATTR_ALLOC && *(int *)buffer_le)
3196 ntfs_fix_post_read(buffer_le, bytes, false);
3197 break;
3198
3199 default:
3200 WARN_ON(1);
3201 }
3202
e8b8e97f 3203 /* Big switch to do operation. */
b46acd6a
KK
3204 switch (op) {
3205 case InitializeFileRecordSegment:
3206 if (roff + dlen > record_size)
3207 goto dirty_vol;
3208
3209 memcpy(Add2Ptr(rec, roff), data, dlen);
3210 mi->dirty = true;
3211 break;
3212
3213 case DeallocateFileRecordSegment:
3214 clear_rec_inuse(rec);
3215 le16_add_cpu(&rec->seq, 1);
3216 mi->dirty = true;
3217 break;
3218
3219 case WriteEndOfFileRecordSegment:
3220 attr2 = (struct ATTRIB *)data;
3221 if (!check_if_attr(rec, lrh) || roff + dlen > record_size)
3222 goto dirty_vol;
3223
3224 memmove(attr, attr2, dlen);
fa3cacf5 3225 rec->used = cpu_to_le32(ALIGN(roff + dlen, 8));
b46acd6a
KK
3226
3227 mi->dirty = true;
3228 break;
3229
3230 case CreateAttribute:
3231 attr2 = (struct ATTRIB *)data;
3232 asize = le32_to_cpu(attr2->size);
3233 used = le32_to_cpu(rec->used);
3234
3235 if (!check_if_attr(rec, lrh) || dlen < SIZEOF_RESIDENT ||
fa3cacf5 3236 !IS_ALIGNED(asize, 8) ||
b46acd6a
KK
3237 Add2Ptr(attr2, asize) > Add2Ptr(lrh, rec_len) ||
3238 dlen > record_size - used) {
3239 goto dirty_vol;
3240 }
3241
3242 memmove(Add2Ptr(attr, asize), attr, used - roff);
3243 memcpy(attr, attr2, asize);
3244
3245 rec->used = cpu_to_le32(used + asize);
3246 id = le16_to_cpu(rec->next_attr_id);
3247 id2 = le16_to_cpu(attr2->id);
3248 if (id <= id2)
3249 rec->next_attr_id = cpu_to_le16(id2 + 1);
3250 if (is_attr_indexed(attr))
3251 le16_add_cpu(&rec->hard_links, 1);
3252
3253 oa2 = find_loaded_attr(log, attr, rno_base);
3254 if (oa2) {
195c52bd
KA
3255 void *p2 = kmemdup(attr, le32_to_cpu(attr->size),
3256 GFP_NOFS);
b46acd6a
KK
3257 if (p2) {
3258 // run_close(oa2->run1);
195c52bd 3259 kfree(oa2->attr);
b46acd6a
KK
3260 oa2->attr = p2;
3261 }
3262 }
3263
3264 mi->dirty = true;
3265 break;
3266
3267 case DeleteAttribute:
3268 asize = le32_to_cpu(attr->size);
3269 used = le32_to_cpu(rec->used);
3270
3271 if (!check_if_attr(rec, lrh))
3272 goto dirty_vol;
3273
3274 rec->used = cpu_to_le32(used - asize);
3275 if (is_attr_indexed(attr))
3276 le16_add_cpu(&rec->hard_links, -1);
3277
3278 memmove(attr, Add2Ptr(attr, asize), used - asize - roff);
3279
3280 mi->dirty = true;
3281 break;
3282
3283 case UpdateResidentValue:
3284 nsize = aoff + dlen;
3285
3286 if (!check_if_attr(rec, lrh))
3287 goto dirty_vol;
3288
3289 asize = le32_to_cpu(attr->size);
3290 used = le32_to_cpu(rec->used);
3291
3292 if (lrh->redo_len == lrh->undo_len) {
3293 if (nsize > asize)
3294 goto dirty_vol;
3295 goto move_data;
3296 }
3297
3298 if (nsize > asize && nsize - asize > record_size - used)
3299 goto dirty_vol;
3300
fa3cacf5 3301 nsize = ALIGN(nsize, 8);
b46acd6a
KK
3302 data_off = le16_to_cpu(attr->res.data_off);
3303
3304 if (nsize < asize) {
3305 memmove(Add2Ptr(attr, aoff), data, dlen);
e8b8e97f 3306 data = NULL; // To skip below memmove().
b46acd6a
KK
3307 }
3308
3309 memmove(Add2Ptr(attr, nsize), Add2Ptr(attr, asize),
3310 used - le16_to_cpu(lrh->record_off) - asize);
3311
3312 rec->used = cpu_to_le32(used + nsize - asize);
3313 attr->size = cpu_to_le32(nsize);
3314 attr->res.data_size = cpu_to_le32(aoff + dlen - data_off);
3315
3316move_data:
3317 if (data)
3318 memmove(Add2Ptr(attr, aoff), data, dlen);
3319
3320 oa2 = find_loaded_attr(log, attr, rno_base);
3321 if (oa2) {
195c52bd
KA
3322 void *p2 = kmemdup(attr, le32_to_cpu(attr->size),
3323 GFP_NOFS);
b46acd6a
KK
3324 if (p2) {
3325 // run_close(&oa2->run0);
3326 oa2->run1 = &oa2->run0;
195c52bd 3327 kfree(oa2->attr);
b46acd6a
KK
3328 oa2->attr = p2;
3329 }
3330 }
3331
3332 mi->dirty = true;
3333 break;
3334
3335 case UpdateMappingPairs:
3336 nsize = aoff + dlen;
3337 asize = le32_to_cpu(attr->size);
3338 used = le32_to_cpu(rec->used);
3339
3340 if (!check_if_attr(rec, lrh) || !attr->non_res ||
3341 aoff < le16_to_cpu(attr->nres.run_off) || aoff > asize ||
3342 (nsize > asize && nsize - asize > record_size - used)) {
3343 goto dirty_vol;
3344 }
3345
fa3cacf5 3346 nsize = ALIGN(nsize, 8);
b46acd6a
KK
3347
3348 memmove(Add2Ptr(attr, nsize), Add2Ptr(attr, asize),
3349 used - le16_to_cpu(lrh->record_off) - asize);
3350 rec->used = cpu_to_le32(used + nsize - asize);
3351 attr->size = cpu_to_le32(nsize);
3352 memmove(Add2Ptr(attr, aoff), data, dlen);
3353
3354 if (run_get_highest_vcn(le64_to_cpu(attr->nres.svcn),
3355 attr_run(attr), &t64)) {
3356 goto dirty_vol;
3357 }
3358
3359 attr->nres.evcn = cpu_to_le64(t64);
3360 oa2 = find_loaded_attr(log, attr, rno_base);
3361 if (oa2 && oa2->attr->non_res)
3362 oa2->attr->nres.evcn = attr->nres.evcn;
3363
3364 mi->dirty = true;
3365 break;
3366
3367 case SetNewAttributeSizes:
3368 new_sz = data;
3369 if (!check_if_attr(rec, lrh) || !attr->non_res)
3370 goto dirty_vol;
3371
3372 attr->nres.alloc_size = new_sz->alloc_size;
3373 attr->nres.data_size = new_sz->data_size;
3374 attr->nres.valid_size = new_sz->valid_size;
3375
3376 if (dlen >= sizeof(struct NEW_ATTRIBUTE_SIZES))
3377 attr->nres.total_size = new_sz->total_size;
3378
3379 oa2 = find_loaded_attr(log, attr, rno_base);
3380 if (oa2) {
195c52bd
KA
3381 void *p2 = kmemdup(attr, le32_to_cpu(attr->size),
3382 GFP_NOFS);
b46acd6a 3383 if (p2) {
195c52bd 3384 kfree(oa2->attr);
b46acd6a
KK
3385 oa2->attr = p2;
3386 }
3387 }
3388 mi->dirty = true;
3389 break;
3390
3391 case AddIndexEntryRoot:
3392 e = (struct NTFS_DE *)data;
3393 esize = le16_to_cpu(e->size);
3394 root = resident_data(attr);
3395 hdr = &root->ihdr;
3396 used = le32_to_cpu(hdr->used);
3397
3398 if (!check_if_index_root(rec, lrh) ||
3399 !check_if_root_index(attr, hdr, lrh) ||
3400 Add2Ptr(data, esize) > Add2Ptr(lrh, rec_len) ||
3401 esize > le32_to_cpu(rec->total) - le32_to_cpu(rec->used)) {
3402 goto dirty_vol;
3403 }
3404
3405 e1 = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
3406
3407 change_attr_size(rec, attr, le32_to_cpu(attr->size) + esize);
3408
3409 memmove(Add2Ptr(e1, esize), e1,
3410 PtrOffset(e1, Add2Ptr(hdr, used)));
3411 memmove(e1, e, esize);
3412
3413 le32_add_cpu(&attr->res.data_size, esize);
3414 hdr->used = cpu_to_le32(used + esize);
3415 le32_add_cpu(&hdr->total, esize);
3416
3417 mi->dirty = true;
3418 break;
3419
3420 case DeleteIndexEntryRoot:
3421 root = resident_data(attr);
3422 hdr = &root->ihdr;
3423 used = le32_to_cpu(hdr->used);
3424
3425 if (!check_if_index_root(rec, lrh) ||
3426 !check_if_root_index(attr, hdr, lrh)) {
3427 goto dirty_vol;
3428 }
3429
3430 e1 = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
3431 esize = le16_to_cpu(e1->size);
3432 e2 = Add2Ptr(e1, esize);
3433
3434 memmove(e1, e2, PtrOffset(e2, Add2Ptr(hdr, used)));
3435
3436 le32_sub_cpu(&attr->res.data_size, esize);
3437 hdr->used = cpu_to_le32(used - esize);
3438 le32_sub_cpu(&hdr->total, esize);
3439
3440 change_attr_size(rec, attr, le32_to_cpu(attr->size) - esize);
3441
3442 mi->dirty = true;
3443 break;
3444
3445 case SetIndexEntryVcnRoot:
3446 root = resident_data(attr);
3447 hdr = &root->ihdr;
3448
3449 if (!check_if_index_root(rec, lrh) ||
3450 !check_if_root_index(attr, hdr, lrh)) {
3451 goto dirty_vol;
3452 }
3453
3454 e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
3455
3456 de_set_vbn_le(e, *(__le64 *)data);
3457 mi->dirty = true;
3458 break;
3459
3460 case UpdateFileNameRoot:
3461 root = resident_data(attr);
3462 hdr = &root->ihdr;
3463
3464 if (!check_if_index_root(rec, lrh) ||
3465 !check_if_root_index(attr, hdr, lrh)) {
3466 goto dirty_vol;
3467 }
3468
3469 e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
3470 fname = (struct ATTR_FILE_NAME *)(e + 1);
3471 memmove(&fname->dup, data, sizeof(fname->dup));
3472 mi->dirty = true;
3473 break;
3474
3475 case UpdateRecordDataRoot:
3476 root = resident_data(attr);
3477 hdr = &root->ihdr;
3478
3479 if (!check_if_index_root(rec, lrh) ||
3480 !check_if_root_index(attr, hdr, lrh)) {
3481 goto dirty_vol;
3482 }
3483
3484 e = Add2Ptr(attr, le16_to_cpu(lrh->attr_off));
3485
3486 memmove(Add2Ptr(e, le16_to_cpu(e->view.data_off)), data, dlen);
3487
3488 mi->dirty = true;
3489 break;
3490
3491 case ZeroEndOfFileRecord:
3492 if (roff + dlen > record_size)
3493 goto dirty_vol;
3494
3495 memset(attr, 0, dlen);
3496 mi->dirty = true;
3497 break;
3498
3499 case UpdateNonresidentValue:
3500 if (lco < cbo + roff + dlen)
3501 goto dirty_vol;
3502
3503 memcpy(Add2Ptr(buffer_le, roff), data, dlen);
3504
3505 a_dirty = true;
3506 if (attr->type == ATTR_ALLOC)
3507 ntfs_fix_pre_write(buffer_le, bytes);
3508 break;
3509
3510 case AddIndexEntryAllocation:
3511 ib = Add2Ptr(buffer_le, roff);
3512 hdr = &ib->ihdr;
3513 e = data;
3514 esize = le16_to_cpu(e->size);
3515 e1 = Add2Ptr(ib, aoff);
3516
3517 if (is_baad(&ib->rhdr))
3518 goto dirty_vol;
3519 if (!check_lsn(&ib->rhdr, rlsn))
3520 goto out;
3521
3522 used = le32_to_cpu(hdr->used);
3523
3524 if (!check_index_buffer(ib, bytes) ||
3525 !check_if_alloc_index(hdr, aoff) ||
3526 Add2Ptr(e, esize) > Add2Ptr(lrh, rec_len) ||
3527 used + esize > le32_to_cpu(hdr->total)) {
3528 goto dirty_vol;
3529 }
3530
3531 memmove(Add2Ptr(e1, esize), e1,
3532 PtrOffset(e1, Add2Ptr(hdr, used)));
3533 memcpy(e1, e, esize);
3534
3535 hdr->used = cpu_to_le32(used + esize);
3536
3537 a_dirty = true;
3538
3539 ntfs_fix_pre_write(&ib->rhdr, bytes);
3540 break;
3541
3542 case DeleteIndexEntryAllocation:
3543 ib = Add2Ptr(buffer_le, roff);
3544 hdr = &ib->ihdr;
3545 e = Add2Ptr(ib, aoff);
3546 esize = le16_to_cpu(e->size);
3547
3548 if (is_baad(&ib->rhdr))
3549 goto dirty_vol;
3550 if (!check_lsn(&ib->rhdr, rlsn))
3551 goto out;
3552
3553 if (!check_index_buffer(ib, bytes) ||
3554 !check_if_alloc_index(hdr, aoff)) {
3555 goto dirty_vol;
3556 }
3557
3558 e1 = Add2Ptr(e, esize);
3559 nsize = esize;
3560 used = le32_to_cpu(hdr->used);
3561
3562 memmove(e, e1, PtrOffset(e1, Add2Ptr(hdr, used)));
3563
3564 hdr->used = cpu_to_le32(used - nsize);
3565
3566 a_dirty = true;
3567
3568 ntfs_fix_pre_write(&ib->rhdr, bytes);
3569 break;
3570
3571 case WriteEndOfIndexBuffer:
3572 ib = Add2Ptr(buffer_le, roff);
3573 hdr = &ib->ihdr;
3574 e = Add2Ptr(ib, aoff);
3575
3576 if (is_baad(&ib->rhdr))
3577 goto dirty_vol;
3578 if (!check_lsn(&ib->rhdr, rlsn))
3579 goto out;
3580 if (!check_index_buffer(ib, bytes) ||
3581 !check_if_alloc_index(hdr, aoff) ||
3582 aoff + dlen > offsetof(struct INDEX_BUFFER, ihdr) +
3583 le32_to_cpu(hdr->total)) {
3584 goto dirty_vol;
3585 }
3586
3587 hdr->used = cpu_to_le32(dlen + PtrOffset(hdr, e));
3588 memmove(e, data, dlen);
3589
3590 a_dirty = true;
3591 ntfs_fix_pre_write(&ib->rhdr, bytes);
3592 break;
3593
3594 case SetIndexEntryVcnAllocation:
3595 ib = Add2Ptr(buffer_le, roff);
3596 hdr = &ib->ihdr;
3597 e = Add2Ptr(ib, aoff);
3598
3599 if (is_baad(&ib->rhdr))
3600 goto dirty_vol;
3601
3602 if (!check_lsn(&ib->rhdr, rlsn))
3603 goto out;
3604 if (!check_index_buffer(ib, bytes) ||
3605 !check_if_alloc_index(hdr, aoff)) {
3606 goto dirty_vol;
3607 }
3608
3609 de_set_vbn_le(e, *(__le64 *)data);
3610
3611 a_dirty = true;
3612 ntfs_fix_pre_write(&ib->rhdr, bytes);
3613 break;
3614
3615 case UpdateFileNameAllocation:
3616 ib = Add2Ptr(buffer_le, roff);
3617 hdr = &ib->ihdr;
3618 e = Add2Ptr(ib, aoff);
3619
3620 if (is_baad(&ib->rhdr))
3621 goto dirty_vol;
3622
3623 if (!check_lsn(&ib->rhdr, rlsn))
3624 goto out;
3625 if (!check_index_buffer(ib, bytes) ||
3626 !check_if_alloc_index(hdr, aoff)) {
3627 goto dirty_vol;
3628 }
3629
3630 fname = (struct ATTR_FILE_NAME *)(e + 1);
3631 memmove(&fname->dup, data, sizeof(fname->dup));
3632
3633 a_dirty = true;
3634 ntfs_fix_pre_write(&ib->rhdr, bytes);
3635 break;
3636
3637 case SetBitsInNonresidentBitMap:
3638 bmp_off =
3639 le32_to_cpu(((struct BITMAP_RANGE *)data)->bitmap_off);
3640 bmp_bits = le32_to_cpu(((struct BITMAP_RANGE *)data)->bits);
3641
3642 if (cbo + (bmp_off + 7) / 8 > lco ||
3643 cbo + ((bmp_off + bmp_bits + 7) / 8) > lco) {
3644 goto dirty_vol;
3645 }
3646
3647 __bitmap_set(Add2Ptr(buffer_le, roff), bmp_off, bmp_bits);
3648 a_dirty = true;
3649 break;
3650
3651 case ClearBitsInNonresidentBitMap:
3652 bmp_off =
3653 le32_to_cpu(((struct BITMAP_RANGE *)data)->bitmap_off);
3654 bmp_bits = le32_to_cpu(((struct BITMAP_RANGE *)data)->bits);
3655
3656 if (cbo + (bmp_off + 7) / 8 > lco ||
3657 cbo + ((bmp_off + bmp_bits + 7) / 8) > lco) {
3658 goto dirty_vol;
3659 }
3660
3661 __bitmap_clear(Add2Ptr(buffer_le, roff), bmp_off, bmp_bits);
3662 a_dirty = true;
3663 break;
3664
3665 case UpdateRecordDataAllocation:
3666 ib = Add2Ptr(buffer_le, roff);
3667 hdr = &ib->ihdr;
3668 e = Add2Ptr(ib, aoff);
3669
3670 if (is_baad(&ib->rhdr))
3671 goto dirty_vol;
3672
3673 if (!check_lsn(&ib->rhdr, rlsn))
3674 goto out;
3675 if (!check_index_buffer(ib, bytes) ||
3676 !check_if_alloc_index(hdr, aoff)) {
3677 goto dirty_vol;
3678 }
3679
3680 memmove(Add2Ptr(e, le16_to_cpu(e->view.data_off)), data, dlen);
3681
3682 a_dirty = true;
3683 ntfs_fix_pre_write(&ib->rhdr, bytes);
3684 break;
3685
3686 default:
3687 WARN_ON(1);
3688 }
3689
3690 if (rlsn) {
3691 __le64 t64 = cpu_to_le64(*rlsn);
3692
3693 if (rec)
3694 rec->rhdr.lsn = t64;
3695 if (ib)
3696 ib->rhdr.lsn = t64;
3697 }
3698
3699 if (mi && mi->dirty) {
3700 err = mi_write(mi, 0);
3701 if (err)
3702 goto out;
3703 }
3704
3705 if (a_dirty) {
3706 attr = oa->attr;
63544672 3707 err = ntfs_sb_write_run(sbi, oa->run1, vbo, buffer_le, bytes, 0);
b46acd6a
KK
3708 if (err)
3709 goto out;
3710 }
3711
3712out:
3713
3714 if (inode)
3715 iput(inode);
3716 else if (mi != mi2_child)
3717 mi_put(mi);
3718
195c52bd 3719 kfree(buffer_le);
b46acd6a
KK
3720
3721 return err;
3722
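	/*
	 * The metadata on disk is inconsistent with this log record:
	 * remember that the volume must be marked dirty and skip the update.
	 */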
3723dirty_vol:
3724 log->set_dirty = true;
3725 goto out;
3726}
3727
3728/*
e8b8e97f 3729 * log_replay - Replays log and empties it.
b46acd6a 3730 *
e8b8e97f
KA
3731 * This function is called during mount operation.
3732 * It replays the log and empties it.
3733 * Initialized is set to false if the logfile contains '-1'.
b46acd6a
KK
3734 */
3735int log_replay(struct ntfs_inode *ni, bool *initialized)
3736{
3737 int err;
3738 struct ntfs_sb_info *sbi = ni->mi.sbi;
3739 struct ntfs_log *log;
3740
3741 struct restart_info rst_info, rst_info2;
3742 u64 rec_lsn, ra_lsn, checkpt_lsn = 0, rlsn = 0;
3743 struct ATTR_NAME_ENTRY *attr_names = NULL;
3744 struct ATTR_NAME_ENTRY *ane;
3745 struct RESTART_TABLE *dptbl = NULL;
3746 struct RESTART_TABLE *trtbl = NULL;
3747 const struct RESTART_TABLE *rt;
3748 struct RESTART_TABLE *oatbl = NULL;
3749 struct inode *inode;
3750 struct OpenAttr *oa;
3751 struct ntfs_inode *ni_oe;
3752 struct ATTRIB *attr = NULL;
3753 u64 size, vcn, undo_next_lsn;
3754 CLST rno, lcn, lcn0, len0, clen;
3755 void *data;
3756 struct NTFS_RESTART *rst = NULL;
3757 struct lcb *lcb = NULL;
3758 struct OPEN_ATTR_ENRTY *oe;
3759 struct TRANSACTION_ENTRY *tr;
3760 struct DIR_PAGE_ENTRY *dp;
3761 u32 i, bytes_per_attr_entry;
3762 u32 l_size = ni->vfs_inode.i_size;
3763 u32 orig_file_size = l_size;
3764 u32 page_size, vbo, tail, off, dlen;
3765 u32 saved_len, rec_len, transact_id;
3766 bool use_second_page;
3767 struct RESTART_AREA *ra2, *ra = NULL;
3768 struct CLIENT_REC *ca, *cr;
3769 __le16 client;
3770 struct RESTART_HDR *rh;
3771 const struct LFS_RECORD_HDR *frh;
3772 const struct LOG_REC_HDR *lrh;
3773 bool is_mapped;
3774 bool is_ro = sb_rdonly(sbi->sb);
3775 u64 t64;
3776 u16 t16;
3777 u32 t32;
3778
e8b8e97f 3779 /* Get the page size. NOTE: To replay we can use the default page size. */
b46acd6a
KK
3780#if PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <= DefaultLogPageSize * 2
3781 page_size = norm_file_page(PAGE_SIZE, &l_size, true);
3782#else
3783 page_size = norm_file_page(PAGE_SIZE, &l_size, false);
3784#endif
3785 if (!page_size)
3786 return -EINVAL;
3787
195c52bd 3788 log = kzalloc(sizeof(struct ntfs_log), GFP_NOFS);
b46acd6a
KK
3789 if (!log)
3790 return -ENOMEM;
3791
f26967b9
NJ
3792 memset(&rst_info, 0, sizeof(struct restart_info));
3793
b46acd6a
KK
3794 log->ni = ni;
3795 log->l_size = l_size;
195c52bd 3796 log->one_page_buf = kmalloc(page_size, GFP_NOFS);
b46acd6a
KK
3797 if (!log->one_page_buf) {
3798 err = -ENOMEM;
3799 goto out;
3800 }
3801
3802 log->page_size = page_size;
3803 log->page_mask = page_size - 1;
3804 log->page_bits = blksize_bits(page_size);
3805
e8b8e97f 3806 /* Look for a restart area on the disk. */
b46acd6a
KK
3807 err = log_read_rst(log, l_size, true, &rst_info);
3808 if (err)
3809 goto out;
3810
3811 /* Remember 'initialized'. */
3812 *initialized = rst_info.initialized;
3813
3814 if (!rst_info.restart) {
3815 if (rst_info.initialized) {
e8b8e97f 3816 /* No restart area, but the file is initialized. */
b46acd6a
KK
3817 err = -EINVAL;
3818 goto out;
3819 }
3820
3821 log_init_pg_hdr(log, page_size, page_size, 1, 1);
3822 log_create(log, l_size, 0, get_random_int(), false, false);
3823
3824 log->ra = ra;
3825
3826 ra = log_create_ra(log);
3827 if (!ra) {
3828 err = -ENOMEM;
3829 goto out;
3830 }
3831 log->ra = ra;
3832 log->init_ra = true;
3833
3834 goto process_log;
3835 }
3836
3837 /*
3838 * If the restart offset above wasn't zero then we won't
e8b8e97f 3839 * look for a second restart.
b46acd6a
KK
3840 */
3841 if (rst_info.vbo)
3842 goto check_restart_area;
3843
f26967b9 3844 memset(&rst_info2, 0, sizeof(struct restart_info));
b46acd6a
KK
3845 err = log_read_rst(log, l_size, false, &rst_info2);
3846
e8b8e97f 3847 /* Determine which restart area to use. */
b46acd6a
KK
3848 if (!rst_info2.restart || rst_info2.last_lsn <= rst_info.last_lsn)
3849 goto use_first_page;
3850
3851 use_second_page = true;
3852
3853 if (rst_info.chkdsk_was_run && page_size != rst_info.vbo) {
3854 struct RECORD_PAGE_HDR *sp = NULL;
3855 bool usa_error;
3856
3857 if (!read_log_page(log, page_size, &sp, &usa_error) &&
3858 sp->rhdr.sign == NTFS_CHKD_SIGNATURE) {
3859 use_second_page = false;
3860 }
195c52bd 3861 kfree(sp);
b46acd6a
KK
3862 }
3863
3864 if (use_second_page) {
195c52bd 3865 kfree(rst_info.r_page);
b46acd6a
KK
3866 memcpy(&rst_info, &rst_info2, sizeof(struct restart_info));
3867 rst_info2.r_page = NULL;
3868 }
3869
3870use_first_page:
195c52bd 3871 kfree(rst_info2.r_page);
b46acd6a
KK
3872
3873check_restart_area:
e8b8e97f
KA
3874 /*
3875 * If the restart area is at offset 0, we want
3876 * to write the second restart area first.
3877 */
b46acd6a
KK
3878 log->init_ra = !!rst_info.vbo;
3879
e8b8e97f 3880 /* If we have a valid page then grab a pointer to the restart area. */
b46acd6a
KK
3881 ra2 = rst_info.valid_page
3882 ? Add2Ptr(rst_info.r_page,
3883 le16_to_cpu(rst_info.r_page->ra_off))
3884 : NULL;
3885
3886 if (rst_info.chkdsk_was_run ||
3887 (ra2 && ra2->client_idx[1] == LFS_NO_CLIENT_LE)) {
3888 bool wrapped = false;
3889 bool use_multi_page = false;
3890 u32 open_log_count;
3891
e8b8e97f 3892 /* Do some checks based on whether we have a valid log page. */
b46acd6a
KK
3893 if (!rst_info.valid_page) {
3894 open_log_count = get_random_int();
3895 goto init_log_instance;
3896 }
3897 open_log_count = le32_to_cpu(ra2->open_log_count);
3898
3899 /*
3900 * If the restart page size isn't changing then we want to
e8b8e97f 3901 * check how much work we need to do.
b46acd6a
KK
3902 */
3903 if (page_size != le32_to_cpu(rst_info.r_page->sys_page_size))
3904 goto init_log_instance;
3905
3906init_log_instance:
3907 log_init_pg_hdr(log, page_size, page_size, 1, 1);
3908
3909 log_create(log, l_size, rst_info.last_lsn, open_log_count,
3910 wrapped, use_multi_page);
3911
3912 ra = log_create_ra(log);
3913 if (!ra) {
3914 err = -ENOMEM;
3915 goto out;
3916 }
3917 log->ra = ra;
3918
e8b8e97f
KA
3919 /* Put the restart areas and initialize
3920 * the log file as required.
3921 */
b46acd6a
KK
3922 goto process_log;
3923 }
3924
3925 if (!ra2) {
3926 err = -EINVAL;
3927 goto out;
3928 }
3929
3930 /*
e8b8e97f
KA
3931 * If the log page or the system page sizes have changed, we can't
3932 * use the log file. We must use the system page size instead of the
3933 * default size if there is not a clean shutdown.
b46acd6a
KK
3934 */
3935 t32 = le32_to_cpu(rst_info.r_page->sys_page_size);
3936 if (page_size != t32) {
3937 l_size = orig_file_size;
3938 page_size =
3939 norm_file_page(t32, &l_size, t32 == DefaultLogPageSize);
3940 }
3941
3942 if (page_size != t32 ||
3943 page_size != le32_to_cpu(rst_info.r_page->page_size)) {
3944 err = -EINVAL;
3945 goto out;
3946 }
3947
e8b8e97f 3948 /* If the file size has shrunk then we won't mount it. */
b46acd6a
KK
3949 if (l_size < le64_to_cpu(ra2->l_size)) {
3950 err = -EINVAL;
3951 goto out;
3952 }
3953
3954 log_init_pg_hdr(log, page_size, page_size,
3955 le16_to_cpu(rst_info.r_page->major_ver),
3956 le16_to_cpu(rst_info.r_page->minor_ver));
3957
3958 log->l_size = le64_to_cpu(ra2->l_size);
3959 log->seq_num_bits = le32_to_cpu(ra2->seq_num_bits);
3960 log->file_data_bits = sizeof(u64) * 8 - log->seq_num_bits;
3961 log->seq_num_mask = (8 << log->file_data_bits) - 1;
3962 log->last_lsn = le64_to_cpu(ra2->current_lsn);
3963 log->seq_num = log->last_lsn >> log->file_data_bits;
3964 log->ra_off = le16_to_cpu(rst_info.r_page->ra_off);
3965 log->restart_size = log->sys_page_size - log->ra_off;
3966 log->record_header_len = le16_to_cpu(ra2->rec_hdr_len);
3967 log->ra_size = le16_to_cpu(ra2->ra_len);
3968 log->data_off = le16_to_cpu(ra2->data_off);
3969 log->data_size = log->page_size - log->data_off;
3970 log->reserved = log->data_size - log->record_header_len;
3971
3972 vbo = lsn_to_vbo(log, log->last_lsn);
3973
3974 if (vbo < log->first_page) {
e8b8e97f 3975 /* This is a pseudo lsn. */
b46acd6a
KK
3976 log->l_flags |= NTFSLOG_NO_LAST_LSN;
3977 log->next_page = log->first_page;
3978 goto find_oldest;
3979 }
3980
e8b8e97f 3981 /* Find the end of this log record. */
b46acd6a
KK
3982 off = final_log_off(log, log->last_lsn,
3983 le32_to_cpu(ra2->last_lsn_data_len));
3984
e8b8e97f 3985 /* If we wrapped the file then increment the sequence number. */
b46acd6a
KK
3986 if (off <= vbo) {
3987 log->seq_num += 1;
3988 log->l_flags |= NTFSLOG_WRAPPED;
3989 }
3990
e8b8e97f 3991 /* Now compute the next log page to use. */
b46acd6a
KK
3992 vbo &= ~log->sys_page_mask;
3993 tail = log->page_size - (off & log->page_mask) - 1;
3994
e8b8e97f
KA
3995 /*
 3996	 * If we can fit another log record on the page,
 3997	 * move back a page in the log file.
3998 */
b46acd6a
KK
3999 if (tail >= log->record_header_len) {
4000 log->l_flags |= NTFSLOG_REUSE_TAIL;
4001 log->next_page = vbo;
4002 } else {
4003 log->next_page = next_page_off(log, vbo);
4004 }
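/*
 * In short: if the tail of the last written page can still hold at least
 * a record header, NTFSLOG_REUSE_TAIL means we keep appending to that
 * page; otherwise writing resumes on the following page.
 */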
4005
4006find_oldest:
e8b8e97f
KA
4007 /*
4008 * Find the oldest client lsn. Use the last
4009 * flushed lsn as a starting point.
4010 */
b46acd6a
KK
4011 log->oldest_lsn = log->last_lsn;
4012 oldest_client_lsn(Add2Ptr(ra2, le16_to_cpu(ra2->client_off)),
4013 ra2->client_idx[1], &log->oldest_lsn);
4014 log->oldest_lsn_off = lsn_to_vbo(log, log->oldest_lsn);
4015
4016 if (log->oldest_lsn_off < log->first_page)
4017 log->l_flags |= NTFSLOG_NO_OLDEST_LSN;
4018
4019 if (!(ra2->flags & RESTART_SINGLE_PAGE_IO))
4020 log->l_flags |= NTFSLOG_WRAPPED | NTFSLOG_MULTIPLE_PAGE_IO;
4021
4022 log->current_openlog_count = le32_to_cpu(ra2->open_log_count);
4023 log->total_avail_pages = log->l_size - log->first_page;
4024 log->total_avail = log->total_avail_pages >> log->page_bits;
4025 log->max_current_avail = log->total_avail * log->reserved;
4026 log->total_avail = log->total_avail * log->data_size;
4027
4028 log->current_avail = current_log_avail(log);
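/*
 * Availability math above (sketch): total_avail_pages is the byte span
 * past log->first_page, which is first converted to whole pages and then
 *
 *   max_current_avail = pages * reserved;   // payload minus a record header
 *   total_avail       = pages * data_size;  // per-page payload
 *
 * current_log_avail() then works out how much of that is still free.
 */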
4029
195c52bd 4030 ra = kzalloc(log->restart_size, GFP_NOFS);
b46acd6a
KK
4031 if (!ra) {
4032 err = -ENOMEM;
4033 goto out;
4034 }
4035 log->ra = ra;
4036
4037 t16 = le16_to_cpu(ra2->client_off);
4038 if (t16 == offsetof(struct RESTART_AREA, clients)) {
4039 memcpy(ra, ra2, log->ra_size);
4040 } else {
4041 memcpy(ra, ra2, offsetof(struct RESTART_AREA, clients));
4042 memcpy(ra->clients, Add2Ptr(ra2, t16),
4043 le16_to_cpu(ra2->ra_len) - t16);
4044
4045 log->current_openlog_count = get_random_int();
4046 ra->open_log_count = cpu_to_le32(log->current_openlog_count);
4047 log->ra_size = offsetof(struct RESTART_AREA, clients) +
4048 sizeof(struct CLIENT_REC);
4049 ra->client_off =
4050 cpu_to_le16(offsetof(struct RESTART_AREA, clients));
4051 ra->ra_len = cpu_to_le16(log->ra_size);
4052 }
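/*
 * The in-core copy in 'ra' is normalized so the client array sits
 * directly after the fixed part of struct RESTART_AREA; if the on-disk
 * layout differed, the area is shrunk to a single client record and a
 * fresh open_log_count is generated above.
 */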
4053
4054 le32_add_cpu(&ra->open_log_count, 1);
4055
e8b8e97f 4056 /* Now we need to walk through looking for the last lsn. */
b46acd6a
KK
4057 err = last_log_lsn(log);
4058 if (err)
4059 goto out;
4060
4061 log->current_avail = current_log_avail(log);
4062
e8b8e97f 4063 /* Remember which restart area to write first. */
b46acd6a
KK
4064 log->init_ra = rst_info.vbo;
4065
4066process_log:
e8b8e97f 4067	/* log->major_ver/minor_ver are short values; versions 1.0, 1.1 and 2.0 are supported. */
b46acd6a
KK
4068 switch ((log->major_ver << 16) + log->minor_ver) {
4069 case 0x10000:
4070 case 0x10001:
4071 case 0x20000:
4072 break;
4073 default:
4074 ntfs_warn(sbi->sb, "\x24LogFile version %d.%d is not supported",
4075 log->major_ver, log->minor_ver);
4076 err = -EOPNOTSUPP;
4077 log->set_dirty = true;
4078 goto out;
4079 }
4080
e8b8e97f 4081 /* One client "NTFS" per logfile. */
b46acd6a
KK
4082 ca = Add2Ptr(ra, le16_to_cpu(ra->client_off));
4083
4084 for (client = ra->client_idx[1];; client = cr->next_client) {
4085 if (client == LFS_NO_CLIENT_LE) {
e8b8e97f 4086			/* Insert the "NTFS" client into the LogFile. */
b46acd6a 4087 client = ra->client_idx[0];
e589f9b7
CJ
4088 if (client == LFS_NO_CLIENT_LE) {
4089 err = -EINVAL;
4090 goto out;
4091 }
b46acd6a
KK
4092
4093 t16 = le16_to_cpu(client);
4094 cr = ca + t16;
4095
4096 remove_client(ca, cr, &ra->client_idx[0]);
4097
4098 cr->restart_lsn = 0;
4099 cr->oldest_lsn = cpu_to_le64(log->oldest_lsn);
4100 cr->name_bytes = cpu_to_le32(8);
4101 cr->name[0] = cpu_to_le16('N');
4102 cr->name[1] = cpu_to_le16('T');
4103 cr->name[2] = cpu_to_le16('F');
4104 cr->name[3] = cpu_to_le16('S');
4105
4106 add_client(ca, t16, &ra->client_idx[1]);
4107 break;
4108 }
4109
4110 cr = ca + le16_to_cpu(client);
4111
4112 if (cpu_to_le32(8) == cr->name_bytes &&
4113 cpu_to_le16('N') == cr->name[0] &&
4114 cpu_to_le16('T') == cr->name[1] &&
4115 cpu_to_le16('F') == cr->name[2] &&
4116 cpu_to_le16('S') == cr->name[3])
4117 break;
4118 }
4119
e8b8e97f 4120 /* Update the client handle with the client block information. */
b46acd6a
KK
4121 log->client_id.seq_num = cr->seq_num;
4122 log->client_id.client_idx = client;
4123
4124 err = read_rst_area(log, &rst, &ra_lsn);
4125 if (err)
4126 goto out;
4127
4128 if (!rst)
4129 goto out;
4130
4131 bytes_per_attr_entry = !rst->major_ver ? 0x2C : 0x28;
4132
4133 checkpt_lsn = le64_to_cpu(rst->check_point_start);
4134 if (!checkpt_lsn)
4135 checkpt_lsn = ra_lsn;
4136
e8b8e97f 4137 /* Allocate and Read the Transaction Table. */
b46acd6a
KK
4138 if (!rst->transact_table_len)
4139 goto check_dirty_page_table;
4140
4141 t64 = le64_to_cpu(rst->transact_table_lsn);
4142 err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
4143 if (err)
4144 goto out;
4145
4146 lrh = lcb->log_rec;
4147 frh = lcb->lrh;
4148 rec_len = le32_to_cpu(frh->client_data_len);
4149
4150 if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
4151 bytes_per_attr_entry)) {
4152 err = -EINVAL;
4153 goto out;
4154 }
4155
4156 t16 = le16_to_cpu(lrh->redo_off);
4157
4158 rt = Add2Ptr(lrh, t16);
4159 t32 = rec_len - t16;
4160
e8b8e97f 4161 /* Now check that this is a valid restart table. */
b46acd6a
KK
4162 if (!check_rstbl(rt, t32)) {
4163 err = -EINVAL;
4164 goto out;
4165 }
4166
195c52bd 4167 trtbl = kmemdup(rt, t32, GFP_NOFS);
b46acd6a
KK
4168 if (!trtbl) {
4169 err = -ENOMEM;
4170 goto out;
4171 }
4172
4173 lcb_put(lcb);
4174 lcb = NULL;
4175
4176check_dirty_page_table:
e8b8e97f 4177 /* The next record back should be the Dirty Pages Table. */
b46acd6a
KK
4178 if (!rst->dirty_pages_len)
4179 goto check_attribute_names;
4180
4181 t64 = le64_to_cpu(rst->dirty_pages_table_lsn);
4182 err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
4183 if (err)
4184 goto out;
4185
4186 lrh = lcb->log_rec;
4187 frh = lcb->lrh;
4188 rec_len = le32_to_cpu(frh->client_data_len);
4189
4190 if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
4191 bytes_per_attr_entry)) {
4192 err = -EINVAL;
4193 goto out;
4194 }
4195
4196 t16 = le16_to_cpu(lrh->redo_off);
4197
4198 rt = Add2Ptr(lrh, t16);
4199 t32 = rec_len - t16;
4200
e8b8e97f 4201 /* Now check that this is a valid restart table. */
b46acd6a
KK
4202 if (!check_rstbl(rt, t32)) {
4203 err = -EINVAL;
4204 goto out;
4205 }
4206
195c52bd 4207 dptbl = kmemdup(rt, t32, GFP_NOFS);
b46acd6a
KK
4208 if (!dptbl) {
4209 err = -ENOMEM;
4210 goto out;
4211 }
4212
e8b8e97f 4213 /* Convert Ra version '0' into version '1'. */
b46acd6a
KK
4214 if (rst->major_ver)
4215 goto end_conv_1;
4216
4217 dp = NULL;
4218 while ((dp = enum_rstbl(dptbl, dp))) {
4219 struct DIR_PAGE_ENTRY_32 *dp0 = (struct DIR_PAGE_ENTRY_32 *)dp;
e8b8e97f 4220		// NOTE: Danger. Check for out-of-boundary access.
b46acd6a
KK
4221 memmove(&dp->vcn, &dp0->vcn_low,
4222 2 * sizeof(u64) +
4223 le32_to_cpu(dp->lcns_follow) * sizeof(u64));
4224 }
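/*
 * The memmove above rewrites each version-0 entry (struct
 * DIR_PAGE_ENTRY_32) in place into the version-1 field layout, moving
 * vcn, oldest_lsn and the lcn array to the offsets the 64-bit structure
 * expects (see the struct definitions earlier in this file).
 */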
4225
4226end_conv_1:
4227 lcb_put(lcb);
4228 lcb = NULL;
4229
e8b8e97f
KA
4230 /*
4231 * Go through the table and remove the duplicates,
4232 * remembering the oldest lsn values.
4233 */
b46acd6a
KK
4234 if (sbi->cluster_size <= log->page_size)
4235 goto trace_dp_table;
4236
4237 dp = NULL;
4238 while ((dp = enum_rstbl(dptbl, dp))) {
4239 struct DIR_PAGE_ENTRY *next = dp;
4240
4241 while ((next = enum_rstbl(dptbl, next))) {
4242 if (next->target_attr == dp->target_attr &&
4243 next->vcn == dp->vcn) {
4244 if (le64_to_cpu(next->oldest_lsn) <
4245 le64_to_cpu(dp->oldest_lsn)) {
4246 dp->oldest_lsn = next->oldest_lsn;
4247 }
4248
4249 free_rsttbl_idx(dptbl, PtrOffset(dptbl, next));
4250 }
4251 }
4252 }
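/*
 * Duplicate (target_attr, vcn) dirty-page entries can occur when the
 * cluster size exceeds the log page size; the loop above keeps a single
 * entry per page and remembers the oldest lsn among the duplicates.
 */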
4253trace_dp_table:
4254check_attribute_names:
e8b8e97f 4255 /* The next record should be the Attribute Names. */
b46acd6a
KK
4256 if (!rst->attr_names_len)
4257 goto check_attr_table;
4258
4259 t64 = le64_to_cpu(rst->attr_names_lsn);
4260 err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
4261 if (err)
4262 goto out;
4263
4264 lrh = lcb->log_rec;
4265 frh = lcb->lrh;
4266 rec_len = le32_to_cpu(frh->client_data_len);
4267
4268 if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
4269 bytes_per_attr_entry)) {
4270 err = -EINVAL;
4271 goto out;
4272 }
4273
4274 t32 = lrh_length(lrh);
4275 rec_len -= t32;
4276
195c52bd 4277 attr_names = kmemdup(Add2Ptr(lrh, t32), rec_len, GFP_NOFS);
b46acd6a
KK
4278
4279 lcb_put(lcb);
4280 lcb = NULL;
4281
4282check_attr_table:
e8b8e97f 4283 /* The next record should be the attribute Table. */
b46acd6a
KK
4284 if (!rst->open_attr_len)
4285 goto check_attribute_names2;
4286
4287 t64 = le64_to_cpu(rst->open_attr_table_lsn);
4288 err = read_log_rec_lcb(log, t64, lcb_ctx_prev, &lcb);
4289 if (err)
4290 goto out;
4291
4292 lrh = lcb->log_rec;
4293 frh = lcb->lrh;
4294 rec_len = le32_to_cpu(frh->client_data_len);
4295
4296 if (!check_log_rec(lrh, rec_len, le32_to_cpu(frh->transact_id),
4297 bytes_per_attr_entry)) {
4298 err = -EINVAL;
4299 goto out;
4300 }
4301
4302 t16 = le16_to_cpu(lrh->redo_off);
4303
4304 rt = Add2Ptr(lrh, t16);
4305 t32 = rec_len - t16;
4306
4307 if (!check_rstbl(rt, t32)) {
4308 err = -EINVAL;
4309 goto out;
4310 }
4311
195c52bd 4312 oatbl = kmemdup(rt, t32, GFP_NOFS);
b46acd6a
KK
4313 if (!oatbl) {
4314 err = -ENOMEM;
4315 goto out;
4316 }
4317
4318 log->open_attr_tbl = oatbl;
4319
e8b8e97f 4320 /* Clear all of the Attr pointers. */
b46acd6a
KK
4321 oe = NULL;
4322 while ((oe = enum_rstbl(oatbl, oe))) {
4323 if (!rst->major_ver) {
4324 struct OPEN_ATTR_ENRTY_32 oe0;
4325
e8b8e97f 4326 /* Really 'oe' points to OPEN_ATTR_ENRTY_32. */
b46acd6a
KK
4327 memcpy(&oe0, oe, SIZEOF_OPENATTRIBUTEENTRY0);
4328
4329 oe->bytes_per_index = oe0.bytes_per_index;
4330 oe->type = oe0.type;
4331 oe->is_dirty_pages = oe0.is_dirty_pages;
4332 oe->name_len = 0;
4333 oe->ref = oe0.ref;
4334 oe->open_record_lsn = oe0.open_record_lsn;
4335 }
4336
4337 oe->is_attr_name = 0;
4338 oe->ptr = NULL;
4339 }
4340
4341 lcb_put(lcb);
4342 lcb = NULL;
4343
4344check_attribute_names2:
4345 if (!rst->attr_names_len)
4346 goto trace_attribute_table;
4347
4348 ane = attr_names;
4349 if (!oatbl)
4350 goto trace_attribute_table;
4351 while (ane->off) {
4352 /* TODO: Clear table on exit! */
4353 oe = Add2Ptr(oatbl, le16_to_cpu(ane->off));
4354 t16 = le16_to_cpu(ane->name_bytes);
4355 oe->name_len = t16 / sizeof(short);
4356 oe->ptr = ane->name;
4357 oe->is_attr_name = 2;
4358 ane = Add2Ptr(ane, sizeof(struct ATTR_NAME_ENTRY) + t16);
4359 }
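/*
 * Each ATTR_NAME_ENTRY patches the open-attribute entry it refers to
 * with a pointer into the attr_names buffer; is_attr_name == 2 marks
 * these so the cleanup path does not try to free them individually.
 */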
4360
4361trace_attribute_table:
4362 /*
4363 * If the checkpt_lsn is zero, then this is a freshly
e8b8e97f 4364 * formatted disk and we have no work to do.
b46acd6a
KK
4365 */
4366 if (!checkpt_lsn) {
4367 err = 0;
4368 goto out;
4369 }
4370
4371 if (!oatbl) {
4372 oatbl = init_rsttbl(bytes_per_attr_entry, 8);
4373 if (!oatbl) {
4374 err = -ENOMEM;
4375 goto out;
4376 }
4377 }
4378
4379 log->open_attr_tbl = oatbl;
4380
4381 /* Start the analysis pass from the Checkpoint lsn. */
4382 rec_lsn = checkpt_lsn;
4383
e8b8e97f 4384 /* Read the first lsn. */
b46acd6a
KK
4385 err = read_log_rec_lcb(log, checkpt_lsn, lcb_ctx_next, &lcb);
4386 if (err)
4387 goto out;
4388
e8b8e97f 4389 /* Loop to read all subsequent records to the end of the log file. */
b46acd6a
KK
4390next_log_record_analyze:
4391 err = read_next_log_rec(log, lcb, &rec_lsn);
4392 if (err)
4393 goto out;
4394
4395 if (!rec_lsn)
4396 goto end_log_records_enumerate;
4397
4398 frh = lcb->lrh;
4399 transact_id = le32_to_cpu(frh->transact_id);
4400 rec_len = le32_to_cpu(frh->client_data_len);
4401 lrh = lcb->log_rec;
4402
4403 if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) {
4404 err = -EINVAL;
4405 goto out;
4406 }
4407
4408 /*
 4409	 * The first lsn after the previous lsn remembered by
e8b8e97f 4410	 * the checkpoint is the first candidate for the rlsn.
b46acd6a
KK
4411 */
4412 if (!rlsn)
4413 rlsn = rec_lsn;
4414
4415 if (LfsClientRecord != frh->record_type)
4416 goto next_log_record_analyze;
4417
4418 /*
e8b8e97f
KA
4419 * Now update the Transaction Table for this transaction. If there
4420 * is no entry present or it is unallocated we allocate the entry.
b46acd6a
KK
4421 */
4422 if (!trtbl) {
4423 trtbl = init_rsttbl(sizeof(struct TRANSACTION_ENTRY),
4424 INITIAL_NUMBER_TRANSACTIONS);
4425 if (!trtbl) {
4426 err = -ENOMEM;
4427 goto out;
4428 }
4429 }
4430
4431 tr = Add2Ptr(trtbl, transact_id);
4432
4433 if (transact_id >= bytes_per_rt(trtbl) ||
4434 tr->next != RESTART_ENTRY_ALLOCATED_LE) {
4435 tr = alloc_rsttbl_from_idx(&trtbl, transact_id);
4436 if (!tr) {
4437 err = -ENOMEM;
4438 goto out;
4439 }
4440 tr->transact_state = TransactionActive;
4441 tr->first_lsn = cpu_to_le64(rec_lsn);
4442 }
4443
4444 tr->prev_lsn = tr->undo_next_lsn = cpu_to_le64(rec_lsn);
4445
4446 /*
4447 * If this is a compensation log record, then change
e8b8e97f 4448 * the undo_next_lsn to be the undo_next_lsn of this record.
b46acd6a
KK
4449 */
4450 if (lrh->undo_op == cpu_to_le16(CompensationLogRecord))
4451 tr->undo_next_lsn = frh->client_undo_next_lsn;
4452
e8b8e97f 4453 /* Dispatch to handle log record depending on type. */
b46acd6a
KK
4454 switch (le16_to_cpu(lrh->redo_op)) {
4455 case InitializeFileRecordSegment:
4456 case DeallocateFileRecordSegment:
4457 case WriteEndOfFileRecordSegment:
4458 case CreateAttribute:
4459 case DeleteAttribute:
4460 case UpdateResidentValue:
4461 case UpdateNonresidentValue:
4462 case UpdateMappingPairs:
4463 case SetNewAttributeSizes:
4464 case AddIndexEntryRoot:
4465 case DeleteIndexEntryRoot:
4466 case AddIndexEntryAllocation:
4467 case DeleteIndexEntryAllocation:
4468 case WriteEndOfIndexBuffer:
4469 case SetIndexEntryVcnRoot:
4470 case SetIndexEntryVcnAllocation:
4471 case UpdateFileNameRoot:
4472 case UpdateFileNameAllocation:
4473 case SetBitsInNonresidentBitMap:
4474 case ClearBitsInNonresidentBitMap:
4475 case UpdateRecordDataRoot:
4476 case UpdateRecordDataAllocation:
4477 case ZeroEndOfFileRecord:
4478 t16 = le16_to_cpu(lrh->target_attr);
4479 t64 = le64_to_cpu(lrh->target_vcn);
4480 dp = find_dp(dptbl, t16, t64);
4481
4482 if (dp)
4483 goto copy_lcns;
4484
4485 /*
 4486		 * Calculate the number of clusters per page for the system
e8b8e97f 4487 * which wrote the checkpoint, possibly creating the table.
b46acd6a
KK
4488 */
4489 if (dptbl) {
4490 t32 = (le16_to_cpu(dptbl->size) -
4491 sizeof(struct DIR_PAGE_ENTRY)) /
4492 sizeof(u64);
4493 } else {
4494 t32 = log->clst_per_page;
195c52bd 4495 kfree(dptbl);
b46acd6a
KK
4496 dptbl = init_rsttbl(struct_size(dp, page_lcns, t32),
4497 32);
4498 if (!dptbl) {
4499 err = -ENOMEM;
4500 goto out;
4501 }
4502 }
4503
4504 dp = alloc_rsttbl_idx(&dptbl);
a1b04d38
DC
4505 if (!dp) {
4506 err = -ENOMEM;
4507 goto out;
4508 }
b46acd6a
KK
4509 dp->target_attr = cpu_to_le32(t16);
4510 dp->transfer_len = cpu_to_le32(t32 << sbi->cluster_bits);
4511 dp->lcns_follow = cpu_to_le32(t32);
4512 dp->vcn = cpu_to_le64(t64 & ~((u64)t32 - 1));
4513 dp->oldest_lsn = cpu_to_le64(rec_lsn);
4514
4515copy_lcns:
4516 /*
e8b8e97f
KA
4517 * Copy the Lcns from the log record into the Dirty Page Entry.
4518 * TODO: For different page size support, must somehow make
 4519		 * the whole routine a loop, in case the Lcns do not fit below.
b46acd6a
KK
4520 */
4521 t16 = le16_to_cpu(lrh->lcns_follow);
4522 for (i = 0; i < t16; i++) {
4523 size_t j = (size_t)(le64_to_cpu(lrh->target_vcn) -
4524 le64_to_cpu(dp->vcn));
4525 dp->page_lcns[j + i] = lrh->page_lcns[i];
4526 }
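/*
 * dp->page_lcns[] now records, for every vcn this record touches, the
 * lcn the Redo Pass will need when it replays this update.
 */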
4527
4528 goto next_log_record_analyze;
4529
4530 case DeleteDirtyClusters: {
4531 u32 range_count =
4532 le16_to_cpu(lrh->redo_len) / sizeof(struct LCN_RANGE);
4533 const struct LCN_RANGE *r =
4534 Add2Ptr(lrh, le16_to_cpu(lrh->redo_off));
4535
e8b8e97f 4536		/* Loop through all of the Lcn ranges in this log record. */
b46acd6a
KK
4537 for (i = 0; i < range_count; i++, r++) {
4538 u64 lcn0 = le64_to_cpu(r->lcn);
4539 u64 lcn_e = lcn0 + le64_to_cpu(r->len) - 1;
4540
4541 dp = NULL;
4542 while ((dp = enum_rstbl(dptbl, dp))) {
4543 u32 j;
4544
4545 t32 = le32_to_cpu(dp->lcns_follow);
4546 for (j = 0; j < t32; j++) {
4547 t64 = le64_to_cpu(dp->page_lcns[j]);
4548 if (t64 >= lcn0 && t64 <= lcn_e)
4549 dp->page_lcns[j] = 0;
4550 }
4551 }
4552 }
4553 goto next_log_record_analyze;
4555 }
4556
4557 case OpenNonresidentAttribute:
4558 t16 = le16_to_cpu(lrh->target_attr);
4559 if (t16 >= bytes_per_rt(oatbl)) {
4560 /*
4561 * Compute how big the table needs to be.
e8b8e97f 4562 * Add 10 extra entries for some cushion.
b46acd6a
KK
4563 */
4564 u32 new_e = t16 / le16_to_cpu(oatbl->size);
4565
4566 new_e += 10 - le16_to_cpu(oatbl->used);
4567
4568 oatbl = extend_rsttbl(oatbl, new_e, ~0u);
4569 log->open_attr_tbl = oatbl;
4570 if (!oatbl) {
4571 err = -ENOMEM;
4572 goto out;
4573 }
4574 }
4575
e8b8e97f 4576 /* Point to the entry being opened. */
b46acd6a
KK
4577 oe = alloc_rsttbl_from_idx(&oatbl, t16);
4578 log->open_attr_tbl = oatbl;
4579 if (!oe) {
4580 err = -ENOMEM;
4581 goto out;
4582 }
4583
e8b8e97f 4584 /* Initialize this entry from the log record. */
b46acd6a
KK
4585 t16 = le16_to_cpu(lrh->redo_off);
4586 if (!rst->major_ver) {
e8b8e97f 4587 /* Convert version '0' into version '1'. */
b46acd6a
KK
4588 struct OPEN_ATTR_ENRTY_32 *oe0 = Add2Ptr(lrh, t16);
4589
4590 oe->bytes_per_index = oe0->bytes_per_index;
4591 oe->type = oe0->type;
4592 oe->is_dirty_pages = oe0->is_dirty_pages;
4593 oe->name_len = 0; //oe0.name_len;
4594 oe->ref = oe0->ref;
4595 oe->open_record_lsn = oe0->open_record_lsn;
4596 } else {
4597 memcpy(oe, Add2Ptr(lrh, t16), bytes_per_attr_entry);
4598 }
4599
4600 t16 = le16_to_cpu(lrh->undo_len);
4601 if (t16) {
195c52bd 4602 oe->ptr = kmalloc(t16, GFP_NOFS);
b46acd6a
KK
4603 if (!oe->ptr) {
4604 err = -ENOMEM;
4605 goto out;
4606 }
4607 oe->name_len = t16 / sizeof(short);
4608 memcpy(oe->ptr,
4609 Add2Ptr(lrh, le16_to_cpu(lrh->undo_off)), t16);
4610 oe->is_attr_name = 1;
4611 } else {
4612 oe->ptr = NULL;
4613 oe->is_attr_name = 0;
4614 }
4615
4616 goto next_log_record_analyze;
4617
4618 case HotFix:
4619 t16 = le16_to_cpu(lrh->target_attr);
4620 t64 = le64_to_cpu(lrh->target_vcn);
4621 dp = find_dp(dptbl, t16, t64);
4622 if (dp) {
4623 size_t j = le64_to_cpu(lrh->target_vcn) -
4624 le64_to_cpu(dp->vcn);
4625 if (dp->page_lcns[j])
4626 dp->page_lcns[j] = lrh->page_lcns[0];
4627 }
4628 goto next_log_record_analyze;
4629
4630 case EndTopLevelAction:
4631 tr = Add2Ptr(trtbl, transact_id);
4632 tr->prev_lsn = cpu_to_le64(rec_lsn);
4633 tr->undo_next_lsn = frh->client_undo_next_lsn;
4634 goto next_log_record_analyze;
4635
4636 case PrepareTransaction:
4637 tr = Add2Ptr(trtbl, transact_id);
4638 tr->transact_state = TransactionPrepared;
4639 goto next_log_record_analyze;
4640
4641 case CommitTransaction:
4642 tr = Add2Ptr(trtbl, transact_id);
4643 tr->transact_state = TransactionCommitted;
4644 goto next_log_record_analyze;
4645
4646 case ForgetTransaction:
4647 free_rsttbl_idx(trtbl, transact_id);
4648 goto next_log_record_analyze;
4649
4650 case Noop:
4651 case OpenAttributeTableDump:
4652 case AttributeNamesDump:
4653 case DirtyPageTableDump:
4654 case TransactionTableDump:
e8b8e97f 4655		/* The following cases require no action in the Analysis Pass. */
b46acd6a
KK
4656 goto next_log_record_analyze;
4657
4658 default:
4659 /*
4660 * All codes will be explicitly handled.
e8b8e97f 4661		 * If we see a code we do not expect, then we are in trouble.
b46acd6a
KK
4662 */
4663 goto next_log_record_analyze;
4664 }
4665
4666end_log_records_enumerate:
4667 lcb_put(lcb);
4668 lcb = NULL;
4669
4670 /*
4671 * Scan the Dirty Page Table and Transaction Table for
e8b8e97f 4672 * the lowest lsn, and return it as the Redo lsn.
b46acd6a
KK
4673 */
4674 dp = NULL;
4675 while ((dp = enum_rstbl(dptbl, dp))) {
4676 t64 = le64_to_cpu(dp->oldest_lsn);
4677 if (t64 && t64 < rlsn)
4678 rlsn = t64;
4679 }
4680
4681 tr = NULL;
4682 while ((tr = enum_rstbl(trtbl, tr))) {
4683 t64 = le64_to_cpu(tr->first_lsn);
4684 if (t64 && t64 < rlsn)
4685 rlsn = t64;
4686 }
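/*
 * rlsn is now the oldest lsn still needed by any dirty page or live
 * transaction; it is where the Redo Pass below starts reading.
 */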
4687
e8b8e97f
KA
4688 /*
 4689	 * Only proceed if the Dirty Page Table or the
 4690	 * Transaction Table is not empty.
4691 */
b46acd6a
KK
4692 if ((!dptbl || !dptbl->total) && (!trtbl || !trtbl->total))
4693 goto end_reply;
4694
4695 sbi->flags |= NTFS_FLAGS_NEED_REPLAY;
4696 if (is_ro)
4697 goto out;
4698
e8b8e97f 4699 /* Reopen all of the attributes with dirty pages. */
b46acd6a
KK
4700 oe = NULL;
4701next_open_attribute:
4702
4703 oe = enum_rstbl(oatbl, oe);
4704 if (!oe) {
4705 err = 0;
4706 dp = NULL;
4707 goto next_dirty_page;
4708 }
4709
195c52bd 4710 oa = kzalloc(sizeof(struct OpenAttr), GFP_NOFS);
b46acd6a
KK
4711 if (!oa) {
4712 err = -ENOMEM;
4713 goto out;
4714 }
4715
4716 inode = ntfs_iget5(sbi->sb, &oe->ref, NULL);
4717 if (IS_ERR(inode))
4718 goto fake_attr;
4719
4720 if (is_bad_inode(inode)) {
4721 iput(inode);
4722fake_attr:
4723 if (oa->ni) {
4724 iput(&oa->ni->vfs_inode);
4725 oa->ni = NULL;
4726 }
4727
4728 attr = attr_create_nonres_log(sbi, oe->type, 0, oe->ptr,
4729 oe->name_len, 0);
4730 if (!attr) {
195c52bd 4731 kfree(oa);
b46acd6a
KK
4732 err = -ENOMEM;
4733 goto out;
4734 }
4735 oa->attr = attr;
4736 oa->run1 = &oa->run0;
4737 goto final_oe;
4738 }
4739
4740 ni_oe = ntfs_i(inode);
4741 oa->ni = ni_oe;
4742
4743 attr = ni_find_attr(ni_oe, NULL, NULL, oe->type, oe->ptr, oe->name_len,
4744 NULL, NULL);
4745
4746 if (!attr)
4747 goto fake_attr;
4748
4749 t32 = le32_to_cpu(attr->size);
195c52bd 4750 oa->attr = kmemdup(attr, t32, GFP_NOFS);
b46acd6a
KK
4751 if (!oa->attr)
4752 goto fake_attr;
4753
4754 if (!S_ISDIR(inode->i_mode)) {
4755 if (attr->type == ATTR_DATA && !attr->name_len) {
4756 oa->run1 = &ni_oe->file.run;
4757 goto final_oe;
4758 }
4759 } else {
4760 if (attr->type == ATTR_ALLOC &&
4761 attr->name_len == ARRAY_SIZE(I30_NAME) &&
4762 !memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME))) {
4763 oa->run1 = &ni_oe->dir.alloc_run;
4764 goto final_oe;
4765 }
4766 }
4767
4768 if (attr->non_res) {
4769 u16 roff = le16_to_cpu(attr->nres.run_off);
4770 CLST svcn = le64_to_cpu(attr->nres.svcn);
4771
4772 err = run_unpack(&oa->run0, sbi, inode->i_ino, svcn,
4773 le64_to_cpu(attr->nres.evcn), svcn,
4774 Add2Ptr(attr, roff), t32 - roff);
4775 if (err < 0) {
195c52bd 4776 kfree(oa->attr);
b46acd6a
KK
4777 oa->attr = NULL;
4778 goto fake_attr;
4779 }
4780 err = 0;
4781 }
4782 oa->run1 = &oa->run0;
4783 attr = oa->attr;
4784
4785final_oe:
4786 if (oe->is_attr_name == 1)
195c52bd 4787 kfree(oe->ptr);
b46acd6a
KK
4788 oe->is_attr_name = 0;
4789 oe->ptr = oa;
4790 oe->name_len = attr->name_len;
4791
4792 goto next_open_attribute;
4793
4794 /*
e8b8e97f
KA
 4795	 * Now loop through the dirty page table to extract all of the Vcn/Lcn
 4796	 * mappings that we have, and insert them into the appropriate run.
b46acd6a
KK
4797 */
4798next_dirty_page:
4799 dp = enum_rstbl(dptbl, dp);
4800 if (!dp)
4801 goto do_redo_1;
4802
4803 oe = Add2Ptr(oatbl, le32_to_cpu(dp->target_attr));
4804
4805 if (oe->next != RESTART_ENTRY_ALLOCATED_LE)
4806 goto next_dirty_page;
4807
4808 oa = oe->ptr;
4809 if (!oa)
4810 goto next_dirty_page;
4811
4812 i = -1;
4813next_dirty_page_vcn:
4814 i += 1;
4815 if (i >= le32_to_cpu(dp->lcns_follow))
4816 goto next_dirty_page;
4817
4818 vcn = le64_to_cpu(dp->vcn) + i;
4819 size = (vcn + 1) << sbi->cluster_bits;
4820
4821 if (!dp->page_lcns[i])
4822 goto next_dirty_page_vcn;
4823
4824 rno = ino_get(&oe->ref);
4825 if (rno <= MFT_REC_MIRR &&
4826 size < (MFT_REC_VOL + 1) * sbi->record_size &&
4827 oe->type == ATTR_DATA) {
4828 goto next_dirty_page_vcn;
4829 }
4830
4831 lcn = le64_to_cpu(dp->page_lcns[i]);
4832
4833 if ((!run_lookup_entry(oa->run1, vcn, &lcn0, &len0, NULL) ||
4834 lcn0 != lcn) &&
4835 !run_add_entry(oa->run1, vcn, lcn, 1, false)) {
4836 err = -ENOMEM;
4837 goto out;
4838 }
4839 attr = oa->attr;
4840 t64 = le64_to_cpu(attr->nres.alloc_size);
4841 if (size > t64) {
4842 attr->nres.valid_size = attr->nres.data_size =
4843 attr->nres.alloc_size = cpu_to_le64(size);
4844 }
4845 goto next_dirty_page_vcn;
4846
4847do_redo_1:
4848 /*
4849 * Perform the Redo Pass, to restore all of the dirty pages to the same
e8b8e97f
KA
4850 * contents that they had immediately before the crash. If the dirty
4851 * page table is empty, then we can skip the entire Redo Pass.
b46acd6a
KK
4852 */
4853 if (!dptbl || !dptbl->total)
4854 goto do_undo_action;
4855
4856 rec_lsn = rlsn;
4857
4858 /*
4859 * Read the record at the Redo lsn, before falling
e8b8e97f 4860 * into common code to handle each record.
b46acd6a
KK
4861 */
4862 err = read_log_rec_lcb(log, rlsn, lcb_ctx_next, &lcb);
4863 if (err)
4864 goto out;
4865
4866 /*
e8b8e97f
KA
4867 * Now loop to read all of our log records forwards, until
4868 * we hit the end of the file, cleaning up at the end.
b46acd6a
KK
4869 */
4870do_action_next:
4871 frh = lcb->lrh;
4872
4873 if (LfsClientRecord != frh->record_type)
4874 goto read_next_log_do_action;
4875
4876 transact_id = le32_to_cpu(frh->transact_id);
4877 rec_len = le32_to_cpu(frh->client_data_len);
4878 lrh = lcb->log_rec;
4879
4880 if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) {
4881 err = -EINVAL;
4882 goto out;
4883 }
4884
e8b8e97f 4885 /* Ignore log records that do not update pages. */
b46acd6a
KK
4886 if (lrh->lcns_follow)
4887 goto find_dirty_page;
4888
4889 goto read_next_log_do_action;
4890
4891find_dirty_page:
4892 t16 = le16_to_cpu(lrh->target_attr);
4893 t64 = le64_to_cpu(lrh->target_vcn);
4894 dp = find_dp(dptbl, t16, t64);
4895
4896 if (!dp)
4897 goto read_next_log_do_action;
4898
4899 if (rec_lsn < le64_to_cpu(dp->oldest_lsn))
4900 goto read_next_log_do_action;
4901
4902 t16 = le16_to_cpu(lrh->target_attr);
4903 if (t16 >= bytes_per_rt(oatbl)) {
4904 err = -EINVAL;
4905 goto out;
4906 }
4907
4908 oe = Add2Ptr(oatbl, t16);
4909
4910 if (oe->next != RESTART_ENTRY_ALLOCATED_LE) {
4911 err = -EINVAL;
4912 goto out;
4913 }
4914
4915 oa = oe->ptr;
4916
4917 if (!oa) {
4918 err = -EINVAL;
4919 goto out;
4920 }
4921 attr = oa->attr;
4922
4923 vcn = le64_to_cpu(lrh->target_vcn);
4924
4925 if (!run_lookup_entry(oa->run1, vcn, &lcn, NULL, NULL) ||
4926 lcn == SPARSE_LCN) {
4927 goto read_next_log_do_action;
4928 }
4929
e8b8e97f 4930 /* Point to the Redo data and get its length. */
b46acd6a
KK
4931 data = Add2Ptr(lrh, le16_to_cpu(lrh->redo_off));
4932 dlen = le16_to_cpu(lrh->redo_len);
4933
e8b8e97f 4934 /* Shorten length by any Lcns which were deleted. */
b46acd6a
KK
4935 saved_len = dlen;
4936
4937 for (i = le16_to_cpu(lrh->lcns_follow); i; i--) {
4938 size_t j;
4939 u32 alen, voff;
4940
4941 voff = le16_to_cpu(lrh->record_off) +
4942 le16_to_cpu(lrh->attr_off);
4943 voff += le16_to_cpu(lrh->cluster_off) << SECTOR_SHIFT;
4944
e8b8e97f 4945		/* If the Vcn in question is allocated, we can just get out. */
b46acd6a
KK
4946 j = le64_to_cpu(lrh->target_vcn) - le64_to_cpu(dp->vcn);
4947 if (dp->page_lcns[j + i - 1])
4948 break;
4949
4950 if (!saved_len)
4951 saved_len = 1;
4952
4953 /*
4954 * Calculate the allocated space left relative to the
e8b8e97f 4955 * log record Vcn, after removing this unallocated Vcn.
b46acd6a
KK
4956 */
4957 alen = (i - 1) << sbi->cluster_bits;
4958
4959 /*
 4960		 * If the update described in this log record goes beyond
e8b8e97f 4961 * the allocated space, then we will have to reduce the length.
b46acd6a
KK
4962 */
4963 if (voff >= alen)
4964 dlen = 0;
4965 else if (voff + dlen > alen)
4966 dlen = alen - voff;
4967 }
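/*
 * The loop above clips dlen so that it only covers Redo data landing in
 * clusters that are still allocated for this dirty page.
 */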
4968
e8b8e97f
KA
4969 /*
4970 * If the resulting dlen from above is now zero,
4971 * we can skip this log record.
4972 */
b46acd6a
KK
4973 if (!dlen && saved_len)
4974 goto read_next_log_do_action;
4975
4976 t16 = le16_to_cpu(lrh->redo_op);
4977 if (can_skip_action(t16))
4978 goto read_next_log_do_action;
4979
e8b8e97f 4980	/* Apply the Redo operation in a common routine. */
b46acd6a
KK
4981 err = do_action(log, oe, lrh, t16, data, dlen, rec_len, &rec_lsn);
4982 if (err)
4983 goto out;
4984
e8b8e97f 4985 /* Keep reading and looping back until end of file. */
b46acd6a
KK
4986read_next_log_do_action:
4987 err = read_next_log_rec(log, lcb, &rec_lsn);
4988 if (!err && rec_lsn)
4989 goto do_action_next;
4990
4991 lcb_put(lcb);
4992 lcb = NULL;
4993
4994do_undo_action:
e8b8e97f 4995 /* Scan Transaction Table. */
b46acd6a
KK
4996 tr = NULL;
4997transaction_table_next:
4998 tr = enum_rstbl(trtbl, tr);
4999 if (!tr)
5000 goto undo_action_done;
5001
5002 if (TransactionActive != tr->transact_state || !tr->undo_next_lsn) {
5003 free_rsttbl_idx(trtbl, PtrOffset(trtbl, tr));
5004 goto transaction_table_next;
5005 }
5006
5007 log->transaction_id = PtrOffset(trtbl, tr);
5008 undo_next_lsn = le64_to_cpu(tr->undo_next_lsn);
5009
5010 /*
5011 * We only have to do anything if the transaction has
e8b8e97f 5012	 * something in its undo_next_lsn field.
b46acd6a
KK
5013 */
5014 if (!undo_next_lsn)
5015 goto commit_undo;
5016
e8b8e97f 5017 /* Read the first record to be undone by this transaction. */
b46acd6a
KK
5018 err = read_log_rec_lcb(log, undo_next_lsn, lcb_ctx_undo_next, &lcb);
5019 if (err)
5020 goto out;
5021
5022 /*
 5023	 * Now loop to read all of the log records for this transaction,
e8b8e97f 5024	 * following the undo_next chain, cleaning up at the end.
b46acd6a
KK
5025 */
5026undo_action_next:
5027
5028 lrh = lcb->log_rec;
5029 frh = lcb->lrh;
5030 transact_id = le32_to_cpu(frh->transact_id);
5031 rec_len = le32_to_cpu(frh->client_data_len);
5032
5033 if (!check_log_rec(lrh, rec_len, transact_id, bytes_per_attr_entry)) {
5034 err = -EINVAL;
5035 goto out;
5036 }
5037
5038 if (lrh->undo_op == cpu_to_le16(Noop))
5039 goto read_next_log_undo_action;
5040
5041 oe = Add2Ptr(oatbl, le16_to_cpu(lrh->target_attr));
5042 oa = oe->ptr;
5043
5044 t16 = le16_to_cpu(lrh->lcns_follow);
5045 if (!t16)
5046 goto add_allocated_vcns;
5047
5048 is_mapped = run_lookup_entry(oa->run1, le64_to_cpu(lrh->target_vcn),
5049 &lcn, &clen, NULL);
5050
5051 /*
 5052	 * If the mapping isn't already in the table or the mapping
 5053	 * corresponds to a hole, we need to make sure there is
e8b8e97f 5054	 * no partial page already in memory.
b46acd6a
KK
5055 */
5056 if (is_mapped && lcn != SPARSE_LCN && clen >= t16)
5057 goto add_allocated_vcns;
5058
5059 vcn = le64_to_cpu(lrh->target_vcn);
5060 vcn &= ~(log->clst_per_page - 1);
5061
5062add_allocated_vcns:
5063 for (i = 0, vcn = le64_to_cpu(lrh->target_vcn),
5064 size = (vcn + 1) << sbi->cluster_bits;
5065 i < t16; i++, vcn += 1, size += sbi->cluster_size) {
5066 attr = oa->attr;
5067 if (!attr->non_res) {
5068 if (size > le32_to_cpu(attr->res.data_size))
5069 attr->res.data_size = cpu_to_le32(size);
5070 } else {
5071 if (size > le64_to_cpu(attr->nres.data_size))
5072 attr->nres.valid_size = attr->nres.data_size =
5073 attr->nres.alloc_size =
5074 cpu_to_le64(size);
5075 }
5076 }
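/*
 * The attribute sizes are grown here so that every vcn touched by this
 * record lies within the file before the undo action is applied.
 */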
5077
5078 t16 = le16_to_cpu(lrh->undo_op);
5079 if (can_skip_action(t16))
5080 goto read_next_log_undo_action;
5081
e8b8e97f 5082	/* Point to the Undo data and get its length. */
b46acd6a
KK
5083 data = Add2Ptr(lrh, le16_to_cpu(lrh->undo_off));
5084 dlen = le16_to_cpu(lrh->undo_len);
5085
e8b8e97f 5086 /* It is time to apply the undo action. */
b46acd6a
KK
5087 err = do_action(log, oe, lrh, t16, data, dlen, rec_len, NULL);
5088
5089read_next_log_undo_action:
5090 /*
5091 * Keep reading and looping back until we have read the
e8b8e97f 5092 * last record for this transaction.
b46acd6a
KK
5093 */
5094 err = read_next_log_rec(log, lcb, &rec_lsn);
5095 if (err)
5096 goto out;
5097
5098 if (rec_lsn)
5099 goto undo_action_next;
5100
5101 lcb_put(lcb);
5102 lcb = NULL;
5103
5104commit_undo:
5105 free_rsttbl_idx(trtbl, log->transaction_id);
5106
5107 log->transaction_id = 0;
5108
5109 goto transaction_table_next;
5110
5111undo_action_done:
5112
5113 ntfs_update_mftmirr(sbi, 0);
5114
5115 sbi->flags &= ~NTFS_FLAGS_NEED_REPLAY;
5116
5117end_reply:
5118
5119 err = 0;
5120 if (is_ro)
5121 goto out;
5122
195c52bd 5123 rh = kzalloc(log->page_size, GFP_NOFS);
b46acd6a
KK
5124 if (!rh) {
5125 err = -ENOMEM;
5126 goto out;
5127 }
5128
5129 rh->rhdr.sign = NTFS_RSTR_SIGNATURE;
5130 rh->rhdr.fix_off = cpu_to_le16(offsetof(struct RESTART_HDR, fixups));
5131 t16 = (log->page_size >> SECTOR_SHIFT) + 1;
5132 rh->rhdr.fix_num = cpu_to_le16(t16);
5133 rh->sys_page_size = cpu_to_le32(log->page_size);
5134 rh->page_size = cpu_to_le32(log->page_size);
5135
d3624466
KK
5136 t16 = ALIGN(offsetof(struct RESTART_HDR, fixups) + sizeof(short) * t16,
5137 8);
b46acd6a
KK
5138 rh->ra_off = cpu_to_le16(t16);
5139 rh->minor_ver = cpu_to_le16(1); // 0x1A:
5140 rh->major_ver = cpu_to_le16(1); // 0x1C:
5141
5142 ra2 = Add2Ptr(rh, t16);
5143 memcpy(ra2, ra, sizeof(struct RESTART_AREA));
5144
5145 ra2->client_idx[0] = 0;
5146 ra2->client_idx[1] = LFS_NO_CLIENT_LE;
5147 ra2->flags = cpu_to_le16(2);
5148
5149 le32_add_cpu(&ra2->open_log_count, 1);
5150
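/*
 * The rebuilt restart header is written twice below, at offset 0 and
 * again one log page in, presumably so that a torn write of either copy
 * still leaves a usable restart area.
 */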
5151 ntfs_fix_pre_write(&rh->rhdr, log->page_size);
5152
63544672 5153 err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rh, log->page_size, 0);
b46acd6a
KK
5154 if (!err)
5155 err = ntfs_sb_write_run(sbi, &log->ni->file.run, log->page_size,
63544672 5156 rh, log->page_size, 0);
b46acd6a 5157
195c52bd 5158 kfree(rh);
b46acd6a
KK
5159 if (err)
5160 goto out;
5161
5162out:
195c52bd 5163 kfree(rst);
b46acd6a
KK
5164 if (lcb)
5165 lcb_put(lcb);
5166
e8b8e97f
KA
5167 /*
5168 * Scan the Open Attribute Table to close all of
5169 * the open attributes.
5170 */
b46acd6a
KK
5171 oe = NULL;
5172 while ((oe = enum_rstbl(oatbl, oe))) {
5173 rno = ino_get(&oe->ref);
5174
5175 if (oe->is_attr_name == 1) {
195c52bd 5176 kfree(oe->ptr);
b46acd6a
KK
5177 oe->ptr = NULL;
5178 continue;
5179 }
5180
5181 if (oe->is_attr_name)
5182 continue;
5183
5184 oa = oe->ptr;
5185 if (!oa)
5186 continue;
5187
5188 run_close(&oa->run0);
195c52bd 5189 kfree(oa->attr);
b46acd6a
KK
5190 if (oa->ni)
5191 iput(&oa->ni->vfs_inode);
195c52bd 5192 kfree(oa);
b46acd6a
KK
5193 }
5194
195c52bd
KA
5195 kfree(trtbl);
5196 kfree(oatbl);
5197 kfree(dptbl);
5198 kfree(attr_names);
5199 kfree(rst_info.r_page);
b46acd6a 5200
195c52bd
KA
5201 kfree(ra);
5202 kfree(log->one_page_buf);
b46acd6a
KK
5203
5204 if (err)
5205 sbi->flags |= NTFS_FLAGS_NEED_REPLAY;
5206
5207 if (err == -EROFS)
5208 err = 0;
5209 else if (log->set_dirty)
5210 ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
5211
195c52bd 5212 kfree(log);
b46acd6a
KK
5213
5214 return err;
5215}