2025cf9e 1// SPDX-License-Identifier: GPL-2.0-only
d475c634
MW
2/*
3 * fs/dax.c - Direct Access filesystem code
4 * Copyright (c) 2013-2014 Intel Corporation
5 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
6 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
d475c634
MW
7 */
8
9#include <linux/atomic.h>
10#include <linux/blkdev.h>
11#include <linux/buffer_head.h>
d77e92e2 12#include <linux/dax.h>
d475c634 13#include <linux/fs.h>
f7ca90b1
MW
14#include <linux/highmem.h>
15#include <linux/memcontrol.h>
16#include <linux/mm.h>
d475c634 17#include <linux/mutex.h>
9973c98e 18#include <linux/pagevec.h>
289c6aed 19#include <linux/sched.h>
f361bf4a 20#include <linux/sched/signal.h>
d475c634 21#include <linux/uio.h>
f7ca90b1 22#include <linux/vmstat.h>
34c0fd54 23#include <linux/pfn_t.h>
0e749e54 24#include <linux/sizes.h>
4b4bb46d 25#include <linux/mmu_notifier.h>
a254e568 26#include <linux/iomap.h>
06083a09 27#include <linux/rmap.h>
11cf9d86 28#include <asm/pgalloc.h>
d475c634 29
282a8e03
RZ
30#define CREATE_TRACE_POINTS
31#include <trace/events/fs_dax.h>
32
ac401cc7
JK
33/* We choose 4096 entries - same as per-zone page wait tables */
34#define DAX_WAIT_TABLE_BITS 12
35#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
36
917f3452
RZ
 37/* The 'colour' (i.e. the low bits) of a page offset within a PMD. */
38#define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)
977fbdcd 39#define PG_PMD_NR (PMD_SIZE >> PAGE_SHIFT)
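/*
 * Worked example (illustrative, assuming 4 KiB pages and 2 MiB PMDs):
 * PMD_SIZE >> PAGE_SHIFT is 512, so PG_PMD_NR is 512 and PG_PMD_COLOUR is
 * 511 (0x1ff); masking an index with ~PG_PMD_COLOUR rounds it down to the
 * first page offset covered by its PMD.
 */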
917f3452 40
ce95ab0f 41static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
ac401cc7
JK
42
43static int __init init_dax_wait_table(void)
44{
45 int i;
46
47 for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
48 init_waitqueue_head(wait_table + i);
49 return 0;
50}
51fs_initcall(init_dax_wait_table);
52
527b19d0 53/*
3159f943
MW
54 * DAX pagecache entries use XArray value entries so they can't be mistaken
55 * for pages. We use one bit for locking, one bit for the entry size (PMD)
56 * and two more to tell us if the entry is a zero page or an empty entry that
57 * is just used for locking. In total four special bits.
527b19d0
RZ
58 *
59 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
60 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
61 * block allocation.
62 */
3159f943
MW
63#define DAX_SHIFT (4)
64#define DAX_LOCKED (1UL << 0)
65#define DAX_PMD (1UL << 1)
66#define DAX_ZERO_PAGE (1UL << 2)
67#define DAX_EMPTY (1UL << 3)
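/*
 * Illustrative encoding example (hypothetical pfn, based on the definitions
 * above): a PTE-sized entry for pfn 0x1234 is stored as the xarray value
 * (0x1234 << DAX_SHIFT), i.e. dax_make_entry(pfn, 0). Adding
 * DAX_PMD | DAX_ZERO_PAGE in the low bits instead marks a huge zero page
 * entry, and dax_to_pfn() recovers the pfn by shifting the flag bits back
 * out.
 */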
527b19d0 68
a77d19f4 69static unsigned long dax_to_pfn(void *entry)
527b19d0 70{
3159f943 71 return xa_to_value(entry) >> DAX_SHIFT;
527b19d0
RZ
72}
73
38607c62
AP
74static struct folio *dax_to_folio(void *entry)
75{
76 return page_folio(pfn_to_page(dax_to_pfn(entry)));
77}
78
9f32d221
MW
79static void *dax_make_entry(pfn_t pfn, unsigned long flags)
80{
81 return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
82}
83
cfc93c6c
MW
84static bool dax_is_locked(void *entry)
85{
86 return xa_to_value(entry) & DAX_LOCKED;
87}
88
a77d19f4 89static unsigned int dax_entry_order(void *entry)
527b19d0 90{
3159f943 91 if (xa_to_value(entry) & DAX_PMD)
cfc93c6c 92 return PMD_ORDER;
527b19d0
RZ
93 return 0;
94}
95
fda490d3 96static unsigned long dax_is_pmd_entry(void *entry)
d1a5f2b4 97{
3159f943 98 return xa_to_value(entry) & DAX_PMD;
d1a5f2b4
DW
99}
100
fda490d3 101static bool dax_is_pte_entry(void *entry)
d475c634 102{
3159f943 103 return !(xa_to_value(entry) & DAX_PMD);
d475c634
MW
104}
105
642261ac 106static int dax_is_zero_entry(void *entry)
d475c634 107{
3159f943 108 return xa_to_value(entry) & DAX_ZERO_PAGE;
d475c634
MW
109}
110
642261ac 111static int dax_is_empty_entry(void *entry)
b2e0d162 112{
3159f943 113 return xa_to_value(entry) & DAX_EMPTY;
b2e0d162
DW
114}
115
23c84eb7
MWO
116/*
 117 * Return true if the entry that was found is of a smaller order than the
 118 * entry we were looking for.
119 */
120static bool dax_is_conflict(void *entry)
121{
122 return entry == XA_RETRY_ENTRY;
123}
124
ac401cc7 125/*
a77d19f4 126 * DAX page cache entry locking
ac401cc7
JK
127 */
128struct exceptional_entry_key {
ec4907ff 129 struct xarray *xa;
63e95b5c 130 pgoff_t entry_start;
ac401cc7
JK
131};
132
133struct wait_exceptional_entry_queue {
ac6424b9 134 wait_queue_entry_t wait;
ac401cc7
JK
135 struct exceptional_entry_key key;
136};
137
698ab77a
VG
138/**
139 * enum dax_wake_mode: waitqueue wakeup behaviour
140 * @WAKE_ALL: wake all waiters in the waitqueue
141 * @WAKE_NEXT: wake only the first waiter in the waitqueue
142 */
143enum dax_wake_mode {
144 WAKE_ALL,
145 WAKE_NEXT,
146};
147
b15cd800
MW
148static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
149 void *entry, struct exceptional_entry_key *key)
63e95b5c
RZ
150{
151 unsigned long hash;
b15cd800 152 unsigned long index = xas->xa_index;
63e95b5c
RZ
153
154 /*
155 * If 'entry' is a PMD, align the 'index' that we use for the wait
156 * queue to the start of that PMD. This ensures that all offsets in
157 * the range covered by the PMD map to the same bit lock.
158 */
642261ac 159 if (dax_is_pmd_entry(entry))
917f3452 160 index &= ~PG_PMD_COLOUR;
b15cd800 161 key->xa = xas->xa;
63e95b5c
RZ
162 key->entry_start = index;
163
b15cd800 164 hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
63e95b5c
RZ
165 return wait_table + hash;
166}
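/*
 * Illustrative example (hypothetical numbers, assuming 4 KiB pages and
 * 2 MiB PMDs): for a PMD entry at page index 0x205 the index is masked
 * down to 0x200, so a waiter on any sub-page of that PMD hashes to the
 * same one of the 4096 wait queues. Entries at the same index in
 * different mappings are separated by xor-ing in the xarray pointer
 * before hashing.
 */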
167
ec4907ff
MW
168static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
169 unsigned int mode, int sync, void *keyp)
ac401cc7
JK
170{
171 struct exceptional_entry_key *key = keyp;
172 struct wait_exceptional_entry_queue *ewait =
173 container_of(wait, struct wait_exceptional_entry_queue, wait);
174
ec4907ff 175 if (key->xa != ewait->key.xa ||
63e95b5c 176 key->entry_start != ewait->key.entry_start)
ac401cc7
JK
177 return 0;
178 return autoremove_wake_function(wait, mode, sync, NULL);
179}
180
e30331ff 181/*
b93b0163
MW
182 * @entry may no longer be the entry at the index in the mapping.
183 * The important information it's conveying is whether the entry at
184 * this index used to be a PMD entry.
e30331ff 185 */
698ab77a
VG
186static void dax_wake_entry(struct xa_state *xas, void *entry,
187 enum dax_wake_mode mode)
e30331ff
RZ
188{
189 struct exceptional_entry_key key;
190 wait_queue_head_t *wq;
191
b15cd800 192 wq = dax_entry_waitqueue(xas, entry, &key);
e30331ff
RZ
193
194 /*
195 * Checking for locked entry and prepare_to_wait_exclusive() happens
b93b0163 196 * under the i_pages lock, ditto for entry handling in our callers.
e30331ff
RZ
197 * So at this point all tasks that could have seen our entry locked
198 * must be in the waitqueue and the following check will see them.
199 */
200 if (waitqueue_active(wq))
698ab77a 201 __wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
e30331ff
RZ
202}
203
cfc93c6c
MW
204/*
205 * Look up entry in page cache, wait for it to become unlocked if it
206 * is a DAX entry and return it. The caller must subsequently call
207 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
23c84eb7
MWO
208 * if it did. The entry returned may have a larger order than @order.
209 * If @order is larger than the order of the entry found in i_pages, this
210 * function returns a dax_is_conflict entry.
cfc93c6c
MW
211 *
212 * Must be called with the i_pages lock held.
213 */
6be3e21d 214static void *get_next_unlocked_entry(struct xa_state *xas, unsigned int order)
cfc93c6c
MW
215{
216 void *entry;
217 struct wait_exceptional_entry_queue ewait;
218 wait_queue_head_t *wq;
219
220 init_wait(&ewait.wait);
221 ewait.wait.func = wake_exceptional_entry_func;
222
223 for (;;) {
0e40de03 224 entry = xas_find_conflict(xas);
6370740e
DW
225 if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
226 return entry;
23c84eb7
MWO
227 if (dax_entry_order(entry) < order)
228 return XA_RETRY_ENTRY;
6370740e 229 if (!dax_is_locked(entry))
cfc93c6c
MW
230 return entry;
231
b15cd800 232 wq = dax_entry_waitqueue(xas, entry, &ewait.key);
cfc93c6c
MW
233 prepare_to_wait_exclusive(wq, &ewait.wait,
234 TASK_UNINTERRUPTIBLE);
235 xas_unlock_irq(xas);
236 xas_reset(xas);
237 schedule();
238 finish_wait(wq, &ewait.wait);
239 xas_lock_irq(xas);
240 }
241}
242
6be3e21d
AP
243/*
244 * Wait for the given entry to become unlocked. Caller must hold the i_pages
245 * lock and call either put_unlocked_entry() if it did not lock the entry or
246 * dax_unlock_entry() if it did. Returns an unlocked entry if still present.
247 */
248static void *wait_entry_unlocked_exclusive(struct xa_state *xas, void *entry)
249{
250 struct wait_exceptional_entry_queue ewait;
251 wait_queue_head_t *wq;
252
253 init_wait(&ewait.wait);
254 ewait.wait.func = wake_exceptional_entry_func;
255
256 while (unlikely(dax_is_locked(entry))) {
257 wq = dax_entry_waitqueue(xas, entry, &ewait.key);
258 prepare_to_wait_exclusive(wq, &ewait.wait,
259 TASK_UNINTERRUPTIBLE);
dd59137b 260 xas_reset(xas);
6be3e21d
AP
261 xas_unlock_irq(xas);
262 schedule();
263 finish_wait(wq, &ewait.wait);
264 xas_lock_irq(xas);
265 entry = xas_load(xas);
266 }
267
268 if (xa_is_internal(entry))
269 return NULL;
270
271 return entry;
272}
273
55e56f06
MW
274/*
275 * The only thing keeping the address space around is the i_pages lock
276 * (it's cycled in clear_inode() after removing the entries from i_pages)
277 * After we call xas_unlock_irq(), we cannot touch xas->xa.
278 */
279static void wait_entry_unlocked(struct xa_state *xas, void *entry)
280{
281 struct wait_exceptional_entry_queue ewait;
282 wait_queue_head_t *wq;
283
284 init_wait(&ewait.wait);
285 ewait.wait.func = wake_exceptional_entry_func;
286
287 wq = dax_entry_waitqueue(xas, entry, &ewait.key);
d8a70641 288 /*
6be3e21d 289 * Unlike get_next_unlocked_entry() there is no guarantee that this
d8a70641
DW
290 * path ever successfully retrieves an unlocked entry before an
291 * inode dies. Perform a non-exclusive wait in case this path
292 * never successfully performs its own wake up.
293 */
294 prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
55e56f06
MW
295 xas_unlock_irq(xas);
296 schedule();
297 finish_wait(wq, &ewait.wait);
55e56f06
MW
298}
299
4c3d043d
VG
300static void put_unlocked_entry(struct xa_state *xas, void *entry,
301 enum dax_wake_mode mode)
cfc93c6c 302{
61c30c98 303 if (entry && !dax_is_conflict(entry))
4c3d043d 304 dax_wake_entry(xas, entry, mode);
cfc93c6c
MW
305}
306
307/*
308 * We used the xa_state to get the entry, but then we locked the entry and
309 * dropped the xa_lock, so we know the xa_state is stale and must be reset
310 * before use.
311 */
312static void dax_unlock_entry(struct xa_state *xas, void *entry)
313{
314 void *old;
315
7ae2ea7d 316 BUG_ON(dax_is_locked(entry));
cfc93c6c
MW
317 xas_reset(xas);
318 xas_lock_irq(xas);
319 old = xas_store(xas, entry);
320 xas_unlock_irq(xas);
321 BUG_ON(!dax_is_locked(old));
698ab77a 322 dax_wake_entry(xas, entry, WAKE_NEXT);
cfc93c6c
MW
323}
324
325/*
326 * Return: The entry stored at this location before it was locked.
327 */
328static void *dax_lock_entry(struct xa_state *xas, void *entry)
329{
330 unsigned long v = xa_to_value(entry);
331 return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
332}
333
d2c997c0
DW
334static unsigned long dax_entry_size(void *entry)
335{
336 if (dax_is_zero_entry(entry))
337 return 0;
338 else if (dax_is_empty_entry(entry))
339 return 0;
340 else if (dax_is_pmd_entry(entry))
341 return PMD_SIZE;
342 else
343 return PAGE_SIZE;
344}
345
cbe298d8
AP
346/*
347 * A DAX folio is considered shared if it has no mapping set and ->share (which
348 * shares the ->index field) is non-zero. Note this may return false even if the
349 * page is shared between multiple files but has not yet actually been mapped
350 * into multiple address spaces.
351 */
37cd93fc 352static inline bool dax_folio_is_shared(struct folio *folio)
d2c997c0 353{
38607c62 354 return !folio->mapping && folio->share;
d2c997c0
DW
355}
356
357/*
cbe298d8
AP
358 * When it is called by dax_insert_entry(), the shared flag will indicate
359 * whether this entry is shared by multiple files. If the page has not
360 * previously been associated with any mappings the ->mapping and ->index
361 * fields will be set. If it has already been associated with a mapping
362 * the mapping will be cleared and the share count set. It's then up to
363 * reverse map users like memory_failure() to call back into the filesystem to
 364 * recover the ->mapping and ->index information, for example by
 365 * implementing dax_holder_operations.
d2c997c0 366 */
cbe298d8 367static void dax_folio_make_shared(struct folio *folio)
6061b69b 368{
cbe298d8
AP
369 /*
370 * folio is not currently shared so mark it as shared by clearing
371 * folio->mapping.
372 */
373 folio->mapping = NULL;
374
375 /*
376 * folio has previously been mapped into one address space so set the
377 * share count.
378 */
38607c62 379 folio->share = 1;
6061b69b
SR
380}
381
38607c62 382static inline unsigned long dax_folio_put(struct folio *folio)
6061b69b 383{
38607c62
AP
384 unsigned long ref;
385 int order, i;
386
387 if (!dax_folio_is_shared(folio))
388 ref = 0;
389 else
390 ref = --folio->share;
391
392 if (ref)
393 return ref;
394
395 folio->mapping = NULL;
396 order = folio_order(folio);
397 if (!order)
398 return 0;
98b1917c 399 folio_reset_order(folio);
38607c62
AP
400
401 for (i = 0; i < (1UL << order); i++) {
402 struct dev_pagemap *pgmap = page_pgmap(&folio->page);
403 struct page *page = folio_page(folio, i);
404 struct folio *new_folio = (struct folio *)page;
405
406 ClearPageHead(page);
407 clear_compound_head(page);
408
409 new_folio->mapping = NULL;
6061b69b 410 /*
38607c62
AP
411 * Reset pgmap which was over-written by
412 * prep_compound_page().
6061b69b 413 */
38607c62
AP
414 new_folio->pgmap = pgmap;
415 new_folio->share = 0;
416 WARN_ON_ONCE(folio_ref_count(new_folio));
6061b69b 417 }
38607c62
AP
418
419 return ref;
16900426
SR
420}
421
38607c62 422static void dax_folio_init(void *entry)
16900426 423{
38607c62
AP
424 struct folio *folio = dax_to_folio(entry);
425 int order = dax_entry_order(entry);
426
427 /*
428 * Folio should have been split back to order-0 pages in
429 * dax_folio_put() when they were removed from their
430 * final mapping.
431 */
432 WARN_ON_ONCE(folio_order(folio));
433
434 if (order > 0) {
435 prep_compound_page(&folio->page, order);
436 if (order > 1)
437 INIT_LIST_HEAD(&folio->_deferred_list);
438 WARN_ON_ONCE(folio_ref_count(folio));
439 }
6061b69b
SR
440}
441
73449daf 442static void dax_associate_entry(void *entry, struct address_space *mapping,
38607c62
AP
443 struct vm_area_struct *vma,
444 unsigned long address, bool shared)
d2c997c0 445{
38607c62
AP
446 unsigned long size = dax_entry_size(entry), index;
447 struct folio *folio = dax_to_folio(entry);
d2c997c0 448
98c183a4
AP
449 if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry))
450 return;
d2c997c0
DW
451
452 if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
453 return;
454
73449daf 455 index = linear_page_index(vma, address & ~(size - 1));
38607c62
AP
456 if (shared && (folio->mapping || dax_folio_is_shared(folio))) {
457 if (folio->mapping)
458 dax_folio_make_shared(folio);
d2c997c0 459
38607c62
AP
460 WARN_ON_ONCE(!folio->share);
461 WARN_ON_ONCE(dax_entry_order(entry) != folio_order(folio));
462 folio->share++;
463 } else {
464 WARN_ON_ONCE(folio->mapping);
465 dax_folio_init(entry);
466 folio = dax_to_folio(entry);
467 folio->mapping = mapping;
468 folio->index = index;
d2c997c0
DW
469 }
470}
471
472static void dax_disassociate_entry(void *entry, struct address_space *mapping,
38607c62 473 bool trunc)
d2c997c0 474{
38607c62 475 struct folio *folio = dax_to_folio(entry);
d2c997c0
DW
476
477 if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
478 return;
479
98c183a4
AP
480 if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry))
481 return;
482
38607c62 483 dax_folio_put(folio);
d2c997c0
DW
484}
485
5fac7408
DW
486static struct page *dax_busy_page(void *entry)
487{
38607c62 488 struct folio *folio = dax_to_folio(entry);
5fac7408 489
38607c62
AP
490 if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry))
491 return NULL;
5fac7408 492
38607c62
AP
493 if (folio_ref_count(folio) - folio_mapcount(folio))
494 return &folio->page;
495 else
496 return NULL;
5fac7408
DW
497}
498
91e79d22
MWO
499/**
500 * dax_lock_folio - Lock the DAX entry corresponding to a folio
501 * @folio: The folio whose entry we want to lock
c5bbd451
MW
502 *
503 * Context: Process context.
91e79d22 504 * Return: A cookie to pass to dax_unlock_folio() or 0 if the entry could
27359fd6 505 * not be locked.
c5bbd451 506 */
91e79d22 507dax_entry_t dax_lock_folio(struct folio *folio)
c2a7d2a1 508{
9f32d221
MW
509 XA_STATE(xas, NULL, 0);
510 void *entry;
c2a7d2a1 511
91e79d22 512 /* Ensure folio->mapping isn't freed while we look at it */
c5bbd451 513 rcu_read_lock();
c2a7d2a1 514 for (;;) {
91e79d22 515 struct address_space *mapping = READ_ONCE(folio->mapping);
c2a7d2a1 516
27359fd6 517 entry = NULL;
c93db7bb 518 if (!mapping || !dax_mapping(mapping))
c5bbd451 519 break;
c2a7d2a1
DW
520
521 /*
 522		 * In the device-dax case there's no need to lock: a
 523		 * struct dev_pagemap pin is sufficient to keep the
 524		 * inode alive, and we assume we hold a dev_pagemap pin,
 525		 * otherwise we would not have a valid pfn_to_page()
 526		 * translation.
527 */
27359fd6 528 entry = (void *)~0UL;
9f32d221 529 if (S_ISCHR(mapping->host->i_mode))
c5bbd451 530 break;
c2a7d2a1 531
9f32d221
MW
532 xas.xa = &mapping->i_pages;
533 xas_lock_irq(&xas);
91e79d22 534 if (mapping != folio->mapping) {
9f32d221 535 xas_unlock_irq(&xas);
c2a7d2a1
DW
536 continue;
537 }
91e79d22 538 xas_set(&xas, folio->index);
9f32d221
MW
539 entry = xas_load(&xas);
540 if (dax_is_locked(entry)) {
c5bbd451 541 rcu_read_unlock();
55e56f06 542 wait_entry_unlocked(&xas, entry);
c5bbd451 543 rcu_read_lock();
6d7cd8c1 544 continue;
c2a7d2a1 545 }
9f32d221
MW
546 dax_lock_entry(&xas, entry);
547 xas_unlock_irq(&xas);
c5bbd451 548 break;
c2a7d2a1 549 }
c5bbd451 550 rcu_read_unlock();
27359fd6 551 return (dax_entry_t)entry;
c2a7d2a1
DW
552}
553
91e79d22 554void dax_unlock_folio(struct folio *folio, dax_entry_t cookie)
c2a7d2a1 555{
91e79d22
MWO
556 struct address_space *mapping = folio->mapping;
557 XA_STATE(xas, &mapping->i_pages, folio->index);
c2a7d2a1 558
9f32d221 559 if (S_ISCHR(mapping->host->i_mode))
c2a7d2a1
DW
560 return;
561
27359fd6 562 dax_unlock_entry(&xas, (void *)cookie);
c2a7d2a1
DW
563}
564
2f437eff
SR
565/*
566 * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping
567 * @mapping: the file's mapping whose entry we want to lock
568 * @index: the offset within this file
569 * @page: output the dax page corresponding to this dax entry
570 *
571 * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry
572 * could not be locked.
573 */
574dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index,
575 struct page **page)
576{
577 XA_STATE(xas, NULL, 0);
578 void *entry;
579
580 rcu_read_lock();
581 for (;;) {
582 entry = NULL;
583 if (!dax_mapping(mapping))
584 break;
585
586 xas.xa = &mapping->i_pages;
587 xas_lock_irq(&xas);
588 xas_set(&xas, index);
589 entry = xas_load(&xas);
590 if (dax_is_locked(entry)) {
591 rcu_read_unlock();
592 wait_entry_unlocked(&xas, entry);
593 rcu_read_lock();
594 continue;
595 }
596 if (!entry ||
597 dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
598 /*
 599			 * Because we look the entry up by the file's mapping
 600			 * and index, it may not have been inserted yet, or it
 601			 * may be a zero/empty entry. We don't treat this as an
 602			 * error case, so return a special cookie and do not
 603			 * output @page.
604 */
605 entry = (void *)~0UL;
606 } else {
607 *page = pfn_to_page(dax_to_pfn(entry));
608 dax_lock_entry(&xas, entry);
609 }
610 xas_unlock_irq(&xas);
611 break;
612 }
613 rcu_read_unlock();
614 return (dax_entry_t)entry;
615}
616
617void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index,
618 dax_entry_t cookie)
619{
620 XA_STATE(xas, &mapping->i_pages, index);
621
622 if (cookie == ~0UL)
623 return;
624
625 dax_unlock_entry(&xas, (void *)cookie);
626}
627
ac401cc7 628/*
a77d19f4
MW
629 * Find page cache entry at given index. If it is a DAX entry, return it
630 * with the entry locked. If the page cache doesn't contain an entry at
631 * that index, add a locked empty entry.
ac401cc7 632 *
3159f943 633 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
b15cd800
MW
634 * either return that locked entry or will return VM_FAULT_FALLBACK.
635 * This will happen if there are any PTE entries within the PMD range
636 * that we are requesting.
642261ac 637 *
b15cd800
MW
638 * We always favor PTE entries over PMD entries. There isn't a flow where we
639 * evict PTE entries in order to 'upgrade' them to a PMD entry. A PMD
640 * insertion will fail if it finds any PTE entries already in the tree, and a
641 * PTE insertion will cause an existing PMD entry to be unmapped and
642 * downgraded to PTE entries. This happens for both PMD zero pages as
643 * well as PMD empty entries.
642261ac 644 *
b15cd800
MW
645 * The exception to this downgrade path is for PMD entries that have
646 * real storage backing them. We will leave these real PMD entries in
647 * the tree, and PTE writes will simply dirty the entire PMD entry.
642261ac 648 *
ac401cc7
JK
649 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
650 * persistent memory the benefit is doubtful. We can add that later if we can
651 * show it helps.
b15cd800
MW
652 *
653 * On error, this function does not return an ERR_PTR. Instead it returns
654 * a VM_FAULT code, encoded as an xarray internal entry. The ERR_PTR values
655 * overlap with xarray value entries.
ac401cc7 656 */
b15cd800 657static void *grab_mapping_entry(struct xa_state *xas,
23c84eb7 658 struct address_space *mapping, unsigned int order)
ac401cc7 659{
b15cd800 660 unsigned long index = xas->xa_index;
1a14e377 661 bool pmd_downgrade; /* splitting PMD entry into PTE entries? */
b15cd800 662 void *entry;
642261ac 663
b15cd800 664retry:
1a14e377 665 pmd_downgrade = false;
b15cd800 666 xas_lock_irq(xas);
6be3e21d 667 entry = get_next_unlocked_entry(xas, order);
91d25ba8 668
642261ac 669 if (entry) {
23c84eb7
MWO
670 if (dax_is_conflict(entry))
671 goto fallback;
0e40de03 672 if (!xa_is_value(entry)) {
49688e65 673 xas_set_err(xas, -EIO);
b15cd800
MW
674 goto out_unlock;
675 }
676
23c84eb7 677 if (order == 0) {
91d25ba8 678 if (dax_is_pmd_entry(entry) &&
642261ac
RZ
679 (dax_is_zero_entry(entry) ||
680 dax_is_empty_entry(entry))) {
681 pmd_downgrade = true;
682 }
683 }
684 }
685
b15cd800
MW
686 if (pmd_downgrade) {
687 /*
688 * Make sure 'entry' remains valid while we drop
689 * the i_pages lock.
690 */
691 dax_lock_entry(xas, entry);
642261ac 692
642261ac
RZ
693 /*
694 * Besides huge zero pages the only other thing that gets
695 * downgraded are empty entries which don't need to be
696 * unmapped.
697 */
b15cd800
MW
698 if (dax_is_zero_entry(entry)) {
699 xas_unlock_irq(xas);
700 unmap_mapping_pages(mapping,
701 xas->xa_index & ~PG_PMD_COLOUR,
702 PG_PMD_NR, false);
703 xas_reset(xas);
704 xas_lock_irq(xas);
e11f8b7b
RZ
705 }
706
b15cd800
MW
707 dax_disassociate_entry(entry, mapping, false);
708 xas_store(xas, NULL); /* undo the PMD join */
698ab77a 709 dax_wake_entry(xas, entry, WAKE_ALL);
7f0e07fb 710 mapping->nrpages -= PG_PMD_NR;
b15cd800
MW
711 entry = NULL;
712 xas_set(xas, index);
713 }
642261ac 714
b15cd800
MW
715 if (entry) {
716 dax_lock_entry(xas, entry);
717 } else {
23c84eb7
MWO
718 unsigned long flags = DAX_EMPTY;
719
720 if (order > 0)
721 flags |= DAX_PMD;
722 entry = dax_make_entry(pfn_to_pfn_t(0), flags);
b15cd800
MW
723 dax_lock_entry(xas, entry);
724 if (xas_error(xas))
725 goto out_unlock;
7f0e07fb 726 mapping->nrpages += 1UL << order;
ac401cc7 727 }
b15cd800
MW
728
729out_unlock:
730 xas_unlock_irq(xas);
731 if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
732 goto retry;
733 if (xas->xa_node == XA_ERROR(-ENOMEM))
734 return xa_mk_internal(VM_FAULT_OOM);
735 if (xas_error(xas))
736 return xa_mk_internal(VM_FAULT_SIGBUS);
e3ad61c6 737 return entry;
b15cd800
MW
738fallback:
739 xas_unlock_irq(xas);
740 return xa_mk_internal(VM_FAULT_FALLBACK);
ac401cc7
JK
741}
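/*
 * Illustrative caller-side sketch (assumed, condensed from the fault paths
 * that use this helper): callers tell the VM_FAULT encoding apart from a
 * real entry with xa_is_internal(), which then yields the VM_FAULT_* code:
 *
 *	entry = grab_mapping_entry(&xas, mapping, 0);
 *	if (xa_is_internal(entry))
 *		return xa_to_internal(entry);
 */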
742
5fac7408 743/**
6bbdd563 744 * dax_layout_busy_page_range - find first pinned page in @mapping
5fac7408 745 * @mapping: address space to scan for a page with ref count > 1
6bbdd563
VG
746 * @start: Starting offset. Page containing 'start' is included.
747 * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
748 * pages from 'start' till the end of file are included.
5fac7408
DW
749 *
750 * DAX requires ZONE_DEVICE mapped pages. These pages are never
751 * 'onlined' to the page allocator so they are considered idle when
752 * page->count == 1. A filesystem uses this interface to determine if
753 * any page in the mapping is busy, i.e. for DMA, or other
754 * get_user_pages() usages.
755 *
756 * It is expected that the filesystem is holding locks to block the
757 * establishment of new mappings in this address_space. I.e. it expects
758 * to be able to run unmap_mapping_range() and subsequently not race
759 * mapping_mapped() becoming true.
760 */
6bbdd563
VG
761struct page *dax_layout_busy_page_range(struct address_space *mapping,
762 loff_t start, loff_t end)
5fac7408 763{
084a8990
MW
764 void *entry;
765 unsigned int scanned = 0;
5fac7408 766 struct page *page = NULL;
6bbdd563
VG
767 pgoff_t start_idx = start >> PAGE_SHIFT;
768 pgoff_t end_idx;
769 XA_STATE(xas, &mapping->i_pages, start_idx);
5fac7408
DW
770
771 /*
772 * In the 'limited' case get_user_pages() for dax is disabled.
773 */
774 if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
775 return NULL;
776
cee91fa1 777 if (!dax_mapping(mapping))
5fac7408
DW
778 return NULL;
779
6bbdd563
VG
 780	/* If end == LLONG_MAX, all pages from start till the end of file */
781 if (end == LLONG_MAX)
782 end_idx = ULONG_MAX;
783 else
784 end_idx = end >> PAGE_SHIFT;
5fac7408
DW
785 /*
786 * If we race get_user_pages_fast() here either we'll see the
084a8990 787 * elevated page count in the iteration and wait, or
5fac7408
DW
788 * get_user_pages_fast() will see that the page it took a reference
789 * against is no longer mapped in the page tables and bail to the
790 * get_user_pages() slow path. The slow path is protected by
791 * pte_lock() and pmd_lock(). New references are not taken without
6bbdd563 792 * holding those locks, and unmap_mapping_pages() will not zero the
5fac7408
DW
793 * pte or pmd without holding the respective lock, so we are
794 * guaranteed to either see new references or prevent new
795 * references from being established.
796 */
6bbdd563 797 unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);
5fac7408 798
084a8990 799 xas_lock_irq(&xas);
6bbdd563 800 xas_for_each(&xas, entry, end_idx) {
084a8990
MW
801 if (WARN_ON_ONCE(!xa_is_value(entry)))
802 continue;
6be3e21d 803 entry = wait_entry_unlocked_exclusive(&xas, entry);
084a8990
MW
804 if (entry)
805 page = dax_busy_page(entry);
4c3d043d 806 put_unlocked_entry(&xas, entry, WAKE_NEXT);
5fac7408
DW
807 if (page)
808 break;
084a8990
MW
809 if (++scanned % XA_CHECK_SCHED)
810 continue;
811
812 xas_pause(&xas);
813 xas_unlock_irq(&xas);
814 cond_resched();
815 xas_lock_irq(&xas);
5fac7408 816 }
084a8990 817 xas_unlock_irq(&xas);
5fac7408
DW
818 return page;
819}
6bbdd563
VG
820EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);
821
822struct page *dax_layout_busy_page(struct address_space *mapping)
823{
824 return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
825}
5fac7408
DW
826EXPORT_SYMBOL_GPL(dax_layout_busy_page);
827
a77d19f4 828static int __dax_invalidate_entry(struct address_space *mapping,
38607c62 829 pgoff_t index, bool trunc)
c6dcf52c 830{
07f2d89c 831 XA_STATE(xas, &mapping->i_pages, index);
c6dcf52c
JK
832 int ret = 0;
833 void *entry;
c6dcf52c 834
07f2d89c 835 xas_lock_irq(&xas);
6be3e21d 836 entry = get_next_unlocked_entry(&xas, 0);
3159f943 837 if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
c6dcf52c
JK
838 goto out;
839 if (!trunc &&
07f2d89c
MW
840 (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
841 xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
c6dcf52c 842 goto out;
d2c997c0 843 dax_disassociate_entry(entry, mapping, trunc);
07f2d89c 844 xas_store(&xas, NULL);
7f0e07fb 845 mapping->nrpages -= 1UL << dax_entry_order(entry);
c6dcf52c
JK
846 ret = 1;
847out:
23738832 848 put_unlocked_entry(&xas, entry, WAKE_ALL);
07f2d89c 849 xas_unlock_irq(&xas);
c6dcf52c
JK
850 return ret;
851}
07f2d89c 852
f76b3a32
SR
853static int __dax_clear_dirty_range(struct address_space *mapping,
854 pgoff_t start, pgoff_t end)
855{
856 XA_STATE(xas, &mapping->i_pages, start);
857 unsigned int scanned = 0;
858 void *entry;
859
860 xas_lock_irq(&xas);
861 xas_for_each(&xas, entry, end) {
6be3e21d
AP
862 entry = wait_entry_unlocked_exclusive(&xas, entry);
863 if (!entry)
864 continue;
f76b3a32
SR
865 xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
866 xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
867 put_unlocked_entry(&xas, entry, WAKE_NEXT);
868
869 if (++scanned % XA_CHECK_SCHED)
870 continue;
871
872 xas_pause(&xas);
873 xas_unlock_irq(&xas);
874 cond_resched();
875 xas_lock_irq(&xas);
876 }
877 xas_unlock_irq(&xas);
878
879 return 0;
880}
881
ac401cc7 882/*
3159f943
MW
883 * Delete DAX entry at @index from @mapping. Wait for it
884 * to be unlocked before deleting it.
ac401cc7
JK
885 */
886int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
887{
a77d19f4 888 int ret = __dax_invalidate_entry(mapping, index, true);
ac401cc7 889
ac401cc7
JK
890 /*
 891 * This gets called from the truncate / punch_hole path. As such, the caller
892 * must hold locks protecting against concurrent modifications of the
a77d19f4 893 * page cache (usually fs-private i_mmap_sem for writing). Since the
3159f943 894 * caller has seen a DAX entry for this index, we better find it
ac401cc7
JK
895 * at that index as well...
896 */
c6dcf52c
JK
897 WARN_ON_ONCE(!ret);
898 return ret;
899}
900
bde708f1
AP
901void dax_delete_mapping_range(struct address_space *mapping,
902 loff_t start, loff_t end)
903{
904 void *entry;
905 pgoff_t start_idx = start >> PAGE_SHIFT;
906 pgoff_t end_idx;
907 XA_STATE(xas, &mapping->i_pages, start_idx);
908
 909	/* If end == LLONG_MAX, all pages from start till the end of file */
910 if (end == LLONG_MAX)
911 end_idx = ULONG_MAX;
912 else
913 end_idx = end >> PAGE_SHIFT;
914
915 xas_lock_irq(&xas);
916 xas_for_each(&xas, entry, end_idx) {
917 if (!xa_is_value(entry))
918 continue;
919 entry = wait_entry_unlocked_exclusive(&xas, entry);
920 if (!entry)
921 continue;
922 dax_disassociate_entry(entry, mapping, true);
923 xas_store(&xas, NULL);
924 mapping->nrpages -= 1UL << dax_entry_order(entry);
925 put_unlocked_entry(&xas, entry, WAKE_ALL);
926 }
927 xas_unlock_irq(&xas);
928}
929EXPORT_SYMBOL_GPL(dax_delete_mapping_range);
930
d5b3afea
AP
931static int wait_page_idle(struct page *page,
932 void (cb)(struct inode *),
933 struct inode *inode)
934{
935 return ___wait_var_event(page, dax_page_is_idle(page),
936 TASK_INTERRUPTIBLE, 0, 0, cb(inode));
937}
938
0e2f80af
AP
939static void wait_page_idle_uninterruptible(struct page *page,
940 struct inode *inode)
941{
942 ___wait_var_event(page, dax_page_is_idle(page),
943 TASK_UNINTERRUPTIBLE, 0, 0, schedule());
944}
945
d5b3afea
AP
946/*
947 * Unmaps the inode and waits for any DMA to complete prior to deleting the
948 * DAX mapping entries for the range.
bde708f1
AP
949 *
 950 * For NOWAIT behavior, pass @cb as NULL to return early on the first
 951 * busy page found.
d5b3afea
AP
952 */
953int dax_break_layout(struct inode *inode, loff_t start, loff_t end,
954 void (cb)(struct inode *))
955{
956 struct page *page;
957 int error = 0;
958
959 if (!dax_mapping(inode->i_mapping))
960 return 0;
961
962 do {
963 page = dax_layout_busy_page_range(inode->i_mapping, start, end);
964 if (!page)
965 break;
bde708f1
AP
966 if (!cb) {
967 error = -ERESTARTSYS;
968 break;
969 }
d5b3afea
AP
970
971 error = wait_page_idle(page, cb, inode);
972 } while (error == 0);
973
bde708f1
AP
974 if (!page)
975 dax_delete_mapping_range(inode->i_mapping, start, end);
976
d5b3afea
AP
977 return error;
978}
979EXPORT_SYMBOL_GPL(dax_break_layout);
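/*
 * Illustrative (hypothetical) filesystem usage, assuming a foo_wait_dax_page()
 * callback that drops and re-takes the filesystem's locks around the wait:
 *
 *	error = dax_break_layout(inode, 0, LLONG_MAX, foo_wait_dax_page);
 *	if (error)
 *		return error;
 *
 * On success every DAX page in the range is idle and its mapping entries
 * have been deleted, so the caller may go on to truncate or punch the range.
 */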
980
0e2f80af
AP
981void dax_break_layout_final(struct inode *inode)
982{
983 struct page *page;
984
985 if (!dax_mapping(inode->i_mapping))
986 return;
987
988 do {
989 page = dax_layout_busy_page_range(inode->i_mapping, 0,
990 LLONG_MAX);
991 if (!page)
992 break;
993
994 wait_page_idle_uninterruptible(page, inode);
995 } while (true);
996
38607c62
AP
997 if (!page)
998 dax_delete_mapping_range(inode->i_mapping, 0, LLONG_MAX);
0e2f80af
AP
999}
1000EXPORT_SYMBOL_GPL(dax_break_layout_final);
1001
c6dcf52c 1002/*
3159f943 1003 * Invalidate DAX entry if it is clean.
c6dcf52c
JK
1004 */
1005int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
1006 pgoff_t index)
1007{
a77d19f4 1008 return __dax_invalidate_entry(mapping, index, false);
ac401cc7
JK
1009}
1010
60696eb2 1011static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
f7ca90b1 1012{
de205114 1013 return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset);
429f8de7
CH
1014}
1015
1016static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
1017{
60696eb2 1018 pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
cccbce67 1019 void *vto, *kaddr;
cccbce67
DW
1020 long rc;
1021 int id;
1022
cccbce67 1023 id = dax_read_lock();
e511c4a3
JC
1024 rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, DAX_ACCESS,
1025 &kaddr, NULL);
cccbce67
DW
1026 if (rc < 0) {
1027 dax_read_unlock(id);
1028 return rc;
1029 }
429f8de7
CH
1030 vto = kmap_atomic(vmf->cow_page);
1031 copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
f7ca90b1 1032 kunmap_atomic(vto);
cccbce67 1033 dax_read_unlock(id);
f7ca90b1
MW
1034 return 0;
1035}
1036
e5d6df73
SR
1037/*
1038 * MAP_SYNC on a dax mapping guarantees dirty metadata is
1039 * flushed on write-faults (non-cow), but not read-faults.
1040 */
1041static bool dax_fault_is_synchronous(const struct iomap_iter *iter,
1042 struct vm_area_struct *vma)
1043{
1044 return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) &&
1045 (iter->iomap.flags & IOMAP_F_DIRTY);
1046}
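/*
 * Example (illustrative): a write fault on a VM_SYNC (MAP_SYNC) mapping whose
 * iomap carries IOMAP_F_DIRTY is synchronous: the fault handler must not make
 * the PTE writeable until the filesystem has committed its metadata, which
 * the fault paths signal back to the filesystem via VM_FAULT_NEEDDSYNC.
 */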
1047
642261ac
RZ
1048/*
1049 * By this point grab_mapping_entry() has ensured that we have a locked entry
1050 * of the appropriate size so we don't have to worry about downgrading PMDs to
1051 * PTEs. If we happen to be trying to insert a PTE and there is a PMD
1052 * already in the tree, we will skip the insertion and just dirty the PMD as
1053 * appropriate.
1054 */
e5d6df73
SR
1055static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
1056 const struct iomap_iter *iter, void *entry, pfn_t pfn,
1057 unsigned long flags)
9973c98e 1058{
e5d6df73 1059 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
b15cd800 1060 void *new_entry = dax_make_entry(pfn, flags);
c6f0b395
SR
1061 bool write = iter->flags & IOMAP_WRITE;
1062 bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma);
1063 bool shared = iter->iomap.flags & IOMAP_F_SHARED;
9973c98e 1064
f5b7b748 1065 if (dirty)
d2b2a28e 1066 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
9973c98e 1067
c6f0b395 1068 if (shared || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) {
b15cd800 1069 unsigned long index = xas->xa_index;
91d25ba8
RZ
1070 /* we are replacing a zero page with block mapping */
1071 if (dax_is_pmd_entry(entry))
977fbdcd 1072 unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
b15cd800 1073 PG_PMD_NR, false);
91d25ba8 1074 else /* pte entry */
b15cd800 1075 unmap_mapping_pages(mapping, index, 1, false);
9973c98e
RZ
1076 }
1077
b15cd800
MW
1078 xas_reset(xas);
1079 xas_lock_irq(xas);
c6f0b395 1080 if (shared || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
1571c029
JK
1081 void *old;
1082
d2c997c0 1083 dax_disassociate_entry(entry, mapping, false);
98c183a4 1084 dax_associate_entry(new_entry, mapping, vmf->vma,
38607c62
AP
1085 vmf->address, shared);
1086
642261ac 1087 /*
a77d19f4 1088 * Only swap our new entry into the page cache if the current
642261ac 1089 * entry is a zero page or an empty entry. If a normal PTE or
a77d19f4 1090 * PMD entry is already in the cache, we leave it alone. This
642261ac
RZ
1091 * means that if we are trying to insert a PTE and the
1092 * existing entry is a PMD, we will just leave the PMD in the
1093 * tree and dirty it if necessary.
1094 */
1571c029 1095 old = dax_lock_entry(xas, new_entry);
b15cd800
MW
1096 WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
1097 DAX_LOCKED));
91d25ba8 1098 entry = new_entry;
b15cd800
MW
1099 } else {
1100 xas_load(xas); /* Walk the xa_state */
9973c98e 1101 }
91d25ba8 1102
f5b7b748 1103 if (dirty)
b15cd800 1104 xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
91d25ba8 1105
c6f0b395 1106 if (write && shared)
e5d6df73
SR
1107 xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);
1108
b15cd800 1109 xas_unlock_irq(xas);
91d25ba8 1110 return entry;
9973c98e
RZ
1111}
1112
9fc747f6
MW
1113static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
1114 struct address_space *mapping, void *entry)
9973c98e 1115{
06083a09 1116 unsigned long pfn, index, count, end;
3fe0791c 1117 long ret = 0;
06083a09 1118 struct vm_area_struct *vma;
9973c98e 1119
9973c98e 1120 /*
a6abc2c0
JK
1121 * A page got tagged dirty in DAX mapping? Something is seriously
1122 * wrong.
9973c98e 1123 */
3159f943 1124 if (WARN_ON(!xa_is_value(entry)))
a6abc2c0 1125 return -EIO;
9973c98e 1126
9fc747f6
MW
1127 if (unlikely(dax_is_locked(entry))) {
1128 void *old_entry = entry;
1129
6be3e21d 1130 entry = get_next_unlocked_entry(xas, 0);
9fc747f6
MW
1131
1132 /* Entry got punched out / reallocated? */
1133 if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
1134 goto put_unlocked;
1135 /*
1136 * Entry got reallocated elsewhere? No need to writeback.
1137 * We have to compare pfns as we must not bail out due to
1138 * difference in lockbit or entry type.
1139 */
1140 if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
1141 goto put_unlocked;
1142 if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
1143 dax_is_zero_entry(entry))) {
1144 ret = -EIO;
1145 goto put_unlocked;
1146 }
1147
1148 /* Another fsync thread may have already done this entry */
1149 if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
1150 goto put_unlocked;
9973c98e
RZ
1151 }
1152
a6abc2c0 1153 /* Lock the entry to serialize with page faults */
9fc747f6
MW
1154 dax_lock_entry(xas, entry);
1155
a6abc2c0
JK
1156 /*
1157 * We can clear the tag now but we have to be careful so that concurrent
1158 * dax_writeback_one() calls for the same index cannot finish before we
1159 * actually flush the caches. This is achieved as the calls will look
b93b0163
MW
1160 * at the entry only under the i_pages lock and once they do that
1161 * they will see the entry locked and wait for it to unlock.
a6abc2c0 1162 */
9fc747f6
MW
1163 xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
1164 xas_unlock_irq(xas);
a6abc2c0 1165
642261ac 1166 /*
e4b3448b
MW
1167 * If dax_writeback_mapping_range() was given a wbc->range_start
1168 * in the middle of a PMD, the 'index' we use needs to be
1169 * aligned to the start of the PMD.
3fe0791c
DW
1170 * This allows us to flush for PMD_SIZE and not have to worry about
1171 * partial PMD writebacks.
642261ac 1172 */
a77d19f4 1173 pfn = dax_to_pfn(entry);
e4b3448b
MW
1174 count = 1UL << dax_entry_order(entry);
1175 index = xas->xa_index & ~(count - 1);
06083a09
MS
1176 end = index + count - 1;
1177
1178 /* Walk all mappings of a given index of a file and writeprotect them */
1179 i_mmap_lock_read(mapping);
1180 vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) {
1181 pfn_mkclean_range(pfn, count, index, vma);
1182 cond_resched();
1183 }
1184 i_mmap_unlock_read(mapping);
cccbce67 1185
e4b3448b 1186 dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
4b4bb46d
JK
1187 /*
1188 * After we have flushed the cache, we can clear the dirty tag. There
1189 * cannot be new dirty data in the pfn after the flush has completed as
1190 * the pfn mappings are writeprotected and fault waits for mapping
1191 * entry lock.
1192 */
9fc747f6
MW
1193 xas_reset(xas);
1194 xas_lock_irq(xas);
1195 xas_store(xas, entry);
1196 xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
698ab77a 1197 dax_wake_entry(xas, entry, WAKE_NEXT);
9fc747f6 1198
e4b3448b 1199 trace_dax_writeback_one(mapping->host, index, count);
9973c98e
RZ
1200 return ret;
1201
a6abc2c0 1202 put_unlocked:
4c3d043d 1203 put_unlocked_entry(xas, entry, WAKE_NEXT);
9973c98e
RZ
1204 return ret;
1205}
1206
1207/*
1208 * Flush the mapping to the persistent domain within the byte range of [start,
1209 * end]. This is required by data integrity operations to ensure file data is
1210 * on persistent storage prior to completion of the operation.
1211 */
7f6d5b52 1212int dax_writeback_mapping_range(struct address_space *mapping,
3f666c56 1213 struct dax_device *dax_dev, struct writeback_control *wbc)
9973c98e 1214{
9fc747f6 1215 XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
9973c98e 1216 struct inode *inode = mapping->host;
9fc747f6 1217 pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
9fc747f6
MW
1218 void *entry;
1219 int ret = 0;
1220 unsigned int scanned = 0;
9973c98e
RZ
1221
1222 if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
1223 return -EIO;
1224
7716506a 1225 if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL)
7f6d5b52
RZ
1226 return 0;
1227
9fc747f6 1228 trace_dax_writeback_range(inode, xas.xa_index, end_index);
9973c98e 1229
9fc747f6 1230 tag_pages_for_writeback(mapping, xas.xa_index, end_index);
9973c98e 1231
9fc747f6
MW
1232 xas_lock_irq(&xas);
1233 xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
1234 ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
1235 if (ret < 0) {
1236 mapping_set_error(mapping, ret);
9973c98e 1237 break;
9973c98e 1238 }
9fc747f6
MW
1239 if (++scanned % XA_CHECK_SCHED)
1240 continue;
1241
1242 xas_pause(&xas);
1243 xas_unlock_irq(&xas);
1244 cond_resched();
1245 xas_lock_irq(&xas);
9973c98e 1246 }
9fc747f6 1247 xas_unlock_irq(&xas);
9fc747f6
MW
1248 trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
1249 return ret;
9973c98e
RZ
1250}
1251EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
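/*
 * Illustrative caller sketch (assumed, not from this file): a filesystem's
 * ->writepages for DAX inodes typically forwards straight to this helper,
 * e.g. with hypothetical foo_* names:
 *
 *	static int foo_dax_writepages(struct address_space *mapping,
 *				      struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				foo_sb(mapping->host->i_sb)->s_daxdev, wbc);
 *	}
 */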
1252
e28cd3e5
SR
1253static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
1254 size_t size, void **kaddr, pfn_t *pfnp)
f7ca90b1 1255{
60696eb2 1256 pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
e28cd3e5 1257 int id, rc = 0;
5e161e40 1258 long length;
f7ca90b1 1259
cccbce67 1260 id = dax_read_lock();
5e161e40 1261 length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
e28cd3e5 1262 DAX_ACCESS, kaddr, pfnp);
5e161e40
JK
1263 if (length < 0) {
1264 rc = length;
1265 goto out;
cccbce67 1266 }
e28cd3e5
SR
1267 if (!pfnp)
1268 goto out_check_addr;
5e161e40
JK
1269 rc = -EINVAL;
1270 if (PFN_PHYS(length) < size)
1271 goto out;
1272 if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
1273 goto out;
38607c62 1274
5e161e40 1275 rc = 0;
e28cd3e5
SR
1276
1277out_check_addr:
1278 if (!kaddr)
1279 goto out;
1280 if (!*kaddr)
1281 rc = -EFAULT;
5e161e40 1282out:
cccbce67 1283 dax_read_unlock(id);
5e161e40 1284 return rc;
0e3b210c 1285}
0e3b210c 1286
ff17b8df 1287/**
708dfad2
SR
1288 * dax_iomap_copy_around - Prepare for an unaligned write to a shared/cow page
1289 * by copying the data before and after the range to be written.
ff17b8df
SR
1290 * @pos: address to do copy from.
1291 * @length: size of copy operation.
1292 * @align_size: aligned w.r.t align_size (either PMD_SIZE or PAGE_SIZE)
1293 * @srcmap: iomap srcmap
1294 * @daddr: destination address to copy to.
1295 *
1296 * This can be called from two places. Either during a DAX write fault (page
1297 * aligned), to copy @length bytes of data to @daddr; or, during a normal DAX
1298 * write operation, when dax_iomap_iter() calls it to copy an unaligned start
1299 * or end of the range. In the latter case the copy of the remaining aligned
1300 * ranges is taken care of by dax_iomap_iter() itself.
1301 * If the srcmap contains invalid data, such as HOLE and UNWRITTEN, zero the
1302 * area to make sure no old data remains.
ff17b8df 1303 */
708dfad2 1304static int dax_iomap_copy_around(loff_t pos, uint64_t length, size_t align_size,
ff17b8df
SR
1305 const struct iomap *srcmap, void *daddr)
1306{
1307 loff_t head_off = pos & (align_size - 1);
1308 size_t size = ALIGN(head_off + length, align_size);
1309 loff_t end = pos + length;
1310 loff_t pg_end = round_up(end, align_size);
708dfad2 1311 /* copy_all is usually in page fault case */
ff17b8df 1312 bool copy_all = head_off == 0 && end == pg_end;
708dfad2
SR
1313 /* zero the edges if srcmap is a HOLE or IOMAP_UNWRITTEN */
1314 bool zero_edge = srcmap->flags & IOMAP_F_SHARED ||
1315 srcmap->type == IOMAP_UNWRITTEN;
297945d9 1316 void *saddr = NULL;
ff17b8df
SR
1317 int ret = 0;
1318
708dfad2
SR
1319 if (!zero_edge) {
1320 ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL);
1321 if (ret)
1ea7ca1b 1322 return dax_mem2blk_err(ret);
708dfad2 1323 }
ff17b8df
SR
1324
1325 if (copy_all) {
708dfad2
SR
1326 if (zero_edge)
1327 memset(daddr, 0, size);
1328 else
1329 ret = copy_mc_to_kernel(daddr, saddr, length);
1330 goto out;
ff17b8df
SR
1331 }
1332
1333 /* Copy the head part of the range */
1334 if (head_off) {
708dfad2
SR
1335 if (zero_edge)
1336 memset(daddr, 0, head_off);
1337 else {
1338 ret = copy_mc_to_kernel(daddr, saddr, head_off);
1339 if (ret)
1340 return -EIO;
1341 }
ff17b8df
SR
1342 }
1343
1344 /* Copy the tail part of the range */
1345 if (end < pg_end) {
1346 loff_t tail_off = head_off + length;
1347 loff_t tail_len = pg_end - end;
1348
708dfad2
SR
1349 if (zero_edge)
1350 memset(daddr + tail_off, 0, tail_len);
1351 else {
1352 ret = copy_mc_to_kernel(daddr + tail_off,
1353 saddr + tail_off, tail_len);
1354 if (ret)
1355 return -EIO;
1356 }
ff17b8df 1357 }
708dfad2
SR
1358out:
1359 if (zero_edge)
1360 dax_flush(srcmap->dax_dev, daddr, size);
1361 return ret ? -EIO : 0;
ff17b8df
SR
1362}
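/*
 * Worked example (illustrative numbers): a 1024-byte write at file offset
 * 4608 with align_size == PAGE_SIZE (4096) gives head_off == 512 and
 * pg_end == 8192, so the head copy fills daddr bytes [0, 512) (file range
 * [4096, 4608)) and the tail copy fills daddr bytes [1536, 4096) (file
 * range [5632, 8192)), leaving only the caller's new data to be written
 * in between.
 */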
1363
e30331ff 1364/*
91d25ba8
RZ
1365 * The user has performed a load from a hole in the file. Allocating a new
1366 * page in the file would cause excessive storage usage for workloads with
1367 * sparse files. Instead we insert a read-only mapping of the 4k zero page.
1368 * If this page is ever written to we will re-fault and change the mapping to
1369 * point to real DAX storage instead.
e30331ff 1370 */
e5d6df73
SR
1371static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1372 const struct iomap_iter *iter, void **entry)
e30331ff 1373{
e5d6df73 1374 struct inode *inode = iter->inode;
91d25ba8 1375 unsigned long vaddr = vmf->address;
b90ca5cc
MW
1376 pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
1377 vm_fault_t ret;
e30331ff 1378
e5d6df73 1379 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
3159f943 1380
38607c62 1381 ret = vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), false);
e30331ff
RZ
1382 trace_dax_load_hole(inode, vmf, ret);
1383 return ret;
1384}
1385
c2436190
SR
1386#ifdef CONFIG_FS_DAX_PMD
1387static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
e5d6df73 1388 const struct iomap_iter *iter, void **entry)
c2436190
SR
1389{
1390 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1391 unsigned long pmd_addr = vmf->address & PMD_MASK;
1392 struct vm_area_struct *vma = vmf->vma;
1393 struct inode *inode = mapping->host;
1394 pgtable_t pgtable = NULL;
c93012d8 1395 struct folio *zero_folio;
c2436190
SR
1396 spinlock_t *ptl;
1397 pmd_t pmd_entry;
1398 pfn_t pfn;
1399
c93012d8 1400 zero_folio = mm_get_huge_zero_folio(vmf->vma->vm_mm);
c2436190 1401
c93012d8 1402 if (unlikely(!zero_folio))
c2436190
SR
1403 goto fallback;
1404
c93012d8 1405 pfn = page_to_pfn_t(&zero_folio->page);
e5d6df73
SR
1406 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
1407 DAX_PMD | DAX_ZERO_PAGE);
c2436190
SR
1408
1409 if (arch_needs_pgtable_deposit()) {
1410 pgtable = pte_alloc_one(vma->vm_mm);
1411 if (!pgtable)
1412 return VM_FAULT_OOM;
1413 }
1414
1415 ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1416 if (!pmd_none(*(vmf->pmd))) {
1417 spin_unlock(ptl);
1418 goto fallback;
1419 }
1420
1421 if (pgtable) {
1422 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
1423 mm_inc_nr_ptes(vma->vm_mm);
1424 }
e3981db4 1425 pmd_entry = folio_mk_pmd(zero_folio, vmf->vma->vm_page_prot);
c2436190
SR
1426 set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1427 spin_unlock(ptl);
c93012d8 1428 trace_dax_pmd_load_hole(inode, vmf, zero_folio, *entry);
c2436190
SR
1429 return VM_FAULT_NOPAGE;
1430
1431fallback:
1432 if (pgtable)
1433 pte_free(vma->vm_mm, pgtable);
c93012d8 1434 trace_dax_pmd_load_hole_fallback(inode, vmf, zero_folio, *entry);
c2436190
SR
1435 return VM_FAULT_FALLBACK;
1436}
1437#else
1438static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
e5d6df73 1439 const struct iomap_iter *iter, void **entry)
c2436190
SR
1440{
1441 return VM_FAULT_FALLBACK;
1442}
1443#endif /* CONFIG_FS_DAX_PMD */
1444
edd3e3b7 1445static int dax_unshare_iter(struct iomap_iter *iter)
d984648e
SR
1446{
1447 struct iomap *iomap = &iter->iomap;
1448 const struct iomap *srcmap = iomap_iter_srcmap(iter);
50793801
DW
1449 loff_t copy_pos = iter->pos;
1450 u64 copy_len = iomap_length(iter);
1451 u32 mod;
d984648e 1452 int id = 0;
d79c9cc5 1453 s64 ret;
d984648e
SR
1454 void *daddr = NULL, *saddr = NULL;
1455
6ef6a0e8 1456 if (!iomap_want_unshare_iter(iter))
d79c9cc5 1457 return iomap_iter_advance_full(iter);
50793801
DW
1458
1459 /*
1460 * Extend the file range to be aligned to fsblock/pagesize, because
1461 * we need to copy entire blocks, not just the byte range specified.
1462 * Invalidate the mapping because we're about to CoW.
1463 */
1464 mod = offset_in_page(copy_pos);
1465 if (mod) {
1466 copy_len += mod;
1467 copy_pos -= mod;
1468 }
1469
1470 mod = offset_in_page(copy_pos + copy_len);
1471 if (mod)
1472 copy_len += PAGE_SIZE - mod;
1473
1474 invalidate_inode_pages2_range(iter->inode->i_mapping,
1475 copy_pos >> PAGE_SHIFT,
1476 (copy_pos + copy_len - 1) >> PAGE_SHIFT);
d984648e
SR
1477
1478 id = dax_read_lock();
50793801 1479 ret = dax_iomap_direct_access(iomap, copy_pos, copy_len, &daddr, NULL);
d984648e
SR
1480 if (ret < 0)
1481 goto out_unlock;
1482
50793801 1483 ret = dax_iomap_direct_access(srcmap, copy_pos, copy_len, &saddr, NULL);
d984648e
SR
1484 if (ret < 0)
1485 goto out_unlock;
1486
d79c9cc5 1487 if (copy_mc_to_kernel(daddr, saddr, copy_len) != 0)
d984648e
SR
1488 ret = -EIO;
1489
1490out_unlock:
1491 dax_read_unlock(id);
9ba439cb
BF
1492 if (ret < 0)
1493 return dax_mem2blk_err(ret);
d79c9cc5 1494 return iomap_iter_advance_full(iter);
d984648e
SR
1495}
1496
1497int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
1498 const struct iomap_ops *ops)
1499{
1500 struct iomap_iter iter = {
1501 .inode = inode,
1502 .pos = pos,
d984648e
SR
1503 .flags = IOMAP_WRITE | IOMAP_UNSHARE | IOMAP_DAX,
1504 };
a311a08a 1505 loff_t size = i_size_read(inode);
d984648e
SR
1506 int ret;
1507
a311a08a
DW
1508 if (pos < 0 || pos >= size)
1509 return 0;
1510
1511 iter.len = min(len, size - pos);
d984648e 1512 while ((ret = iomap_iter(&iter, ops)) > 0)
edd3e3b7 1513 iter.status = dax_unshare_iter(&iter);
d984648e
SR
1514 return ret;
1515}
1516EXPORT_SYMBOL_GPL(dax_file_unshare);
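/*
 * Illustrative usage sketch (assumed, hypothetical names): a filesystem's
 * FALLOC_FL_UNSHARE_RANGE path would call this for DAX inodes instead of the
 * buffered-IO unshare helper, passing its own iomap ops:
 *
 *	if (IS_DAX(inode))
 *		error = dax_file_unshare(inode, offset, len,
 *				&foo_dax_write_iomap_ops);
 */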
1517
8dbfc76d 1518static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size)
e5c71954 1519{
8dbfc76d
SR
1520 const struct iomap *iomap = &iter->iomap;
1521 const struct iomap *srcmap = iomap_iter_srcmap(iter);
1522 unsigned offset = offset_in_page(pos);
1523 pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
e5c71954
CH
1524 void *kaddr;
1525 long ret;
1526
8dbfc76d
SR
1527 ret = dax_direct_access(iomap->dax_dev, pgoff, 1, DAX_ACCESS, &kaddr,
1528 NULL);
1529 if (ret < 0)
1ea7ca1b
JC
1530 return dax_mem2blk_err(ret);
1531
8dbfc76d 1532 memset(kaddr + offset, 0, size);
708dfad2
SR
1533 if (iomap->flags & IOMAP_F_SHARED)
1534 ret = dax_iomap_copy_around(pos, size, PAGE_SIZE, srcmap,
1535 kaddr);
1536 else
8dbfc76d 1537 dax_flush(iomap->dax_dev, kaddr + offset, size);
e5c71954
CH
1538 return ret;
1539}
1540
edd3e3b7 1541static int dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
679c8bd3 1542{
c6f40468
CH
1543 const struct iomap *iomap = &iter->iomap;
1544 const struct iomap *srcmap = iomap_iter_srcmap(iter);
c6f40468 1545 u64 length = iomap_length(iter);
edd3e3b7 1546 int ret;
c6f40468
CH
1547
1548 /* already zeroed? we're done. */
1549 if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
80fce305 1550 return iomap_iter_advance(iter, &length);
c6f40468 1551
f80e1668
SR
1552 /*
1553 * invalidate the pages whose sharing state is to be changed
1554 * because of CoW.
1555 */
1556 if (iomap->flags & IOMAP_F_SHARED)
1557 invalidate_inode_pages2_range(iter->inode->i_mapping,
80fce305
BF
1558 iter->pos >> PAGE_SHIFT,
1559 (iter->pos + length - 1) >> PAGE_SHIFT);
f80e1668 1560
c6f40468 1561 do {
80fce305 1562 loff_t pos = iter->pos;
c6f40468 1563 unsigned offset = offset_in_page(pos);
c6f40468 1564 pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
c6f40468
CH
1565 int id;
1566
80fce305
BF
1567 length = min_t(u64, PAGE_SIZE - offset, length);
1568
c6f40468 1569 id = dax_read_lock();
80fce305
BF
1570 if (IS_ALIGNED(pos, PAGE_SIZE) && length == PAGE_SIZE)
1571 ret = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
c6f40468 1572 else
80fce305 1573 ret = dax_memzero(iter, pos, length);
c6f40468 1574 dax_read_unlock(id);
cccbce67 1575
80fce305
BF
1576 if (ret < 0)
1577 return ret;
1578
1579 ret = iomap_iter_advance(iter, &length);
1580 if (ret)
1581 return ret;
c6f40468 1582 } while (length > 0);
e5c71954 1583
f8189d5d
KX
1584 if (did_zero)
1585 *did_zero = true;
80fce305 1586 return ret;
c6f40468
CH
1587}
1588
1589int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
1590 const struct iomap_ops *ops)
1591{
1592 struct iomap_iter iter = {
1593 .inode = inode,
1594 .pos = pos,
1595 .len = len,
952da063 1596 .flags = IOMAP_DAX | IOMAP_ZERO,
c6f40468
CH
1597 };
1598 int ret;
1599
1600 while ((ret = iomap_iter(&iter, ops)) > 0)
edd3e3b7 1601 iter.status = dax_zero_iter(&iter, did_zero);
c6f40468
CH
1602 return ret;
1603}
1604EXPORT_SYMBOL_GPL(dax_zero_range);
1605
1606int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
1607 const struct iomap_ops *ops)
1608{
1609 unsigned int blocksize = i_blocksize(inode);
1610 unsigned int off = pos & (blocksize - 1);
1611
1612 /* Block boundary? Nothing to do */
1613 if (!off)
1614 return 0;
1615 return dax_zero_range(inode, pos, blocksize - off, did_zero, ops);
679c8bd3 1616}
c6f40468 1617EXPORT_SYMBOL_GPL(dax_truncate_page);
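/*
 * Worked example (illustrative): with a 4096-byte block size, truncating to
 * i_size 5000 leaves off == 904, so dax_truncate_page() zeroes the remaining
 * 3192 bytes of the partial block, i.e. the file range [5000, 8192).
 */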
679c8bd3 1618
e1dae77b 1619static int dax_iomap_iter(struct iomap_iter *iomi, struct iov_iter *iter)
a254e568 1620{
ca289e0b 1621 const struct iomap *iomap = &iomi->iomap;
f80e1668 1622 const struct iomap *srcmap = iomap_iter_srcmap(iomi);
ca289e0b
CH
1623 loff_t length = iomap_length(iomi);
1624 loff_t pos = iomi->pos;
cccbce67 1625 struct dax_device *dax_dev = iomap->dax_dev;
a254e568 1626 loff_t end = pos + length, done = 0;
ff17b8df 1627 bool write = iov_iter_rw(iter) == WRITE;
f80e1668 1628 bool cow = write && iomap->flags & IOMAP_F_SHARED;
a254e568 1629 ssize_t ret = 0;
a77d4786 1630 size_t xfer;
cccbce67 1631 int id;
a254e568 1632
ff17b8df 1633 if (!write) {
ca289e0b 1634 end = min(end, i_size_read(iomi->inode));
a254e568
CH
1635 if (pos >= end)
1636 return 0;
1637
e1dae77b
BF
1638 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN) {
1639 done = iov_iter_zero(min(length, end - pos), iter);
1640 return iomap_iter_advance(iomi, &done);
1641 }
a254e568
CH
1642 }
1643
ff17b8df
SR
1644 /*
1645 * In DAX mode, enforce either pure overwrites of written extents, or
1646 * writes to unwritten extents as part of a copy-on-write operation.
1647 */
1648 if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED &&
1649 !(iomap->flags & IOMAP_F_SHARED)))
a254e568
CH
1650 return -EIO;
1651
e3fce68c
JK
1652 /*
 1653	 * A write can allocate a block for an area which has a hole page mapped
1654 * into page tables. We have to tear down these mappings so that data
1655 * written by write(2) is visible in mmap.
1656 */
f80e1668 1657 if (iomap->flags & IOMAP_F_NEW || cow) {
f76b3a32
SR
1658 /*
 1659		 * The filesystem allows CoW on non-shared extents. The source
 1660		 * extents may have been mmapped and marked dirty before. To be
 1661		 * able to invalidate their dax entries, we need to clear the
 1662		 * dirty mark in advance.
1663 */
1664 if (cow)
1665 __dax_clear_dirty_range(iomi->inode->i_mapping,
1666 pos >> PAGE_SHIFT,
1667 (end - 1) >> PAGE_SHIFT);
ca289e0b 1668 invalidate_inode_pages2_range(iomi->inode->i_mapping,
e3fce68c
JK
1669 pos >> PAGE_SHIFT,
1670 (end - 1) >> PAGE_SHIFT);
1671 }
1672
cccbce67 1673 id = dax_read_lock();
e1dae77b 1674 while ((pos = iomi->pos) < end) {
a254e568 1675 unsigned offset = pos & (PAGE_SIZE - 1);
cccbce67 1676 const size_t size = ALIGN(length + offset, PAGE_SIZE);
60696eb2 1677 pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
a254e568 1678 ssize_t map_len;
047218ec 1679 bool recovery = false;
cccbce67 1680 void *kaddr;
a254e568 1681
d1908f52
MH
1682 if (fatal_signal_pending(current)) {
1683 ret = -EINTR;
1684 break;
1685 }
1686
cccbce67 1687 map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
e511c4a3 1688 DAX_ACCESS, &kaddr, NULL);
1ea7ca1b 1689 if (map_len == -EHWPOISON && iov_iter_rw(iter) == WRITE) {
047218ec
JC
1690 map_len = dax_direct_access(dax_dev, pgoff,
1691 PHYS_PFN(size), DAX_RECOVERY_WRITE,
1692 &kaddr, NULL);
1693 if (map_len > 0)
1694 recovery = true;
1695 }
a254e568 1696 if (map_len < 0) {
1ea7ca1b 1697 ret = dax_mem2blk_err(map_len);
a254e568
CH
1698 break;
1699 }
1700
f80e1668 1701 if (cow) {
708dfad2
SR
1702 ret = dax_iomap_copy_around(pos, length, PAGE_SIZE,
1703 srcmap, kaddr);
ff17b8df
SR
1704 if (ret)
1705 break;
1706 }
1707
cccbce67
DW
1708 map_len = PFN_PHYS(map_len);
1709 kaddr += offset;
a254e568
CH
1710 map_len -= offset;
1711 if (map_len > end - pos)
1712 map_len = end - pos;
1713
047218ec
JC
1714 if (recovery)
1715 xfer = dax_recovery_write(dax_dev, pgoff, kaddr,
1716 map_len, iter);
ff17b8df 1717 else if (write)
a77d4786 1718 xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
fec53774 1719 map_len, iter);
a254e568 1720 else
a77d4786 1721 xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
b3a9a0c3 1722 map_len, iter);
a254e568 1723
e1dae77b
BF
1724 length = xfer;
1725 ret = iomap_iter_advance(iomi, &length);
1726 if (!ret && xfer == 0)
a77d4786
DW
1727 ret = -EFAULT;
1728 if (xfer < map_len)
1729 break;
a254e568 1730 }
cccbce67 1731 dax_read_unlock(id);
a254e568 1732
e1dae77b 1733 return ret;
a254e568
CH
1734}
1735
1736/**
11c59c92 1737 * dax_iomap_rw - Perform I/O to a DAX file
a254e568
CH
1738 * @iocb: The control block for this I/O
1739 * @iter: The addresses to do I/O from or to
1740 * @ops: iomap ops passed from the file system
1741 *
1742 * This function performs read and write operations to directly mapped
1743 * persistent memory. The caller needs to take care of read/write exclusion
1744 * and evicting any page cache pages in the region under I/O.
1745 */
1746ssize_t
11c59c92 1747dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
8ff6daa1 1748 const struct iomap_ops *ops)
a254e568 1749{
ca289e0b
CH
1750 struct iomap_iter iomi = {
1751 .inode = iocb->ki_filp->f_mapping->host,
1752 .pos = iocb->ki_pos,
1753 .len = iov_iter_count(iter),
952da063 1754 .flags = IOMAP_DAX,
ca289e0b
CH
1755 };
1756 loff_t done = 0;
1757 int ret;
a254e568 1758
17d9c15c
LJ
1759 if (!iomi.len)
1760 return 0;
1761
168316db 1762 if (iov_iter_rw(iter) == WRITE) {
ca289e0b
CH
1763 lockdep_assert_held_write(&iomi.inode->i_rwsem);
1764 iomi.flags |= IOMAP_WRITE;
168316db 1765 } else {
ca289e0b 1766 lockdep_assert_held(&iomi.inode->i_rwsem);
168316db 1767 }
a254e568 1768
96222d53 1769 if (iocb->ki_flags & IOCB_NOWAIT)
ca289e0b 1770 iomi.flags |= IOMAP_NOWAIT;
96222d53 1771
e1dae77b 1772 while ((ret = iomap_iter(&iomi, ops)) > 0)
edd3e3b7 1773 iomi.status = dax_iomap_iter(&iomi, iter);
a254e568 1774
ca289e0b
CH
1775 done = iomi.pos - iocb->ki_pos;
1776 iocb->ki_pos = iomi.pos;
a254e568
CH
1777 return done ? done : ret;
1778}
11c59c92 1779EXPORT_SYMBOL_GPL(dax_iomap_rw);
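
/*
 * Illustrative sketch, not part of fs/dax.c: a minimal ->write_iter for a
 * DAX-capable filesystem built on dax_iomap_rw(). The names my_dax_write_iter
 * and my_iomap_ops are assumptions of this example; a real implementation
 * also handles O_DSYNC, IOCB_NOWAIT locking and the like.
 *
 *	static ssize_t my_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = dax_iomap_rw(iocb, from, &my_iomap_ops);
 *		inode_unlock(inode);
 *		return ret;
 *	}
 */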
a7d73fe6 1780
ab77dab4 1781static vm_fault_t dax_fault_return(int error)
9f141d6e
JK
1782{
1783 if (error == 0)
1784 return VM_FAULT_NOPAGE;
c9aed74e 1785 return vmf_error(error);
9f141d6e
JK
1786}
1787
55f81639
SR
1788/*
1789 * When handling a synchronous page fault and the inode needs a fsync, we can
1790 * insert the PTE/PMD into the page tables only after that fsync has happened.
1791 * Skip the insertion for now and return the pfn so that the caller can insert
1792 * it after the fsync is done.
1793 */
1794static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
1795{
1796 if (WARN_ON_ONCE(!pfnp))
1797 return VM_FAULT_SIGBUS;
1798 *pfnp = pfn;
1799 return VM_FAULT_NEEDDSYNC;
1800}
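
/*
 * The deferred insertion signalled by VM_FAULT_NEEDDSYNC is typically carried
 * out by dax_finish_sync_fault() below, which the filesystem's fault handler
 * calls with the returned pfn: that helper fsyncs the faulted range and then
 * installs the PTE/PMD.
 */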
1801
65dd814a
CH
1802static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
1803 const struct iomap_iter *iter)
55f81639 1804{
55f81639
SR
1805 vm_fault_t ret;
1806 int error = 0;
1807
65dd814a 1808 switch (iter->iomap.type) {
55f81639
SR
1809 case IOMAP_HOLE:
1810 case IOMAP_UNWRITTEN:
429f8de7 1811 clear_user_highpage(vmf->cow_page, vmf->address);
55f81639
SR
1812 break;
1813 case IOMAP_MAPPED:
429f8de7 1814 error = copy_cow_page_dax(vmf, iter);
55f81639
SR
1815 break;
1816 default:
1817 WARN_ON_ONCE(1);
1818 error = -EIO;
1819 break;
1820 }
1821
1822 if (error)
1823 return dax_fault_return(error);
1824
1825 __SetPageUptodate(vmf->cow_page);
1826 ret = finish_fault(vmf);
1827 if (!ret)
1828 return VM_FAULT_DONE_COW;
1829 return ret;
1830}
1831
c2436190 1832/**
65dd814a 1833 * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault.
c2436190 1834 * @vmf: vm fault instance
65dd814a 1835 * @iter: iomap iter
c2436190
SR
1836 * @pfnp: pfn to be returned
1837 * @xas: the dax mapping tree of a file
1838 * @entry: an unlocked dax entry to be inserted
1839 * @pmd: distinguish whether it is a pmd fault
c2436190 1840 */
65dd814a
CH
1841static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
1842 const struct iomap_iter *iter, pfn_t *pfnp,
1843 struct xa_state *xas, void **entry, bool pmd)
c2436190 1844{
65dd814a 1845 const struct iomap *iomap = &iter->iomap;
708dfad2 1846 const struct iomap *srcmap = iomap_iter_srcmap(iter);
c2436190
SR
1847 size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
1848 loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
e5d6df73 1849 bool write = iter->flags & IOMAP_WRITE;
c2436190 1850 unsigned long entry_flags = pmd ? DAX_PMD : 0;
38607c62
AP
1851 struct folio *folio;
1852 int ret, err = 0;
c2436190 1853 pfn_t pfn;
ff17b8df 1854 void *kaddr;
c2436190 1855
65dd814a
CH
1856 if (!pmd && vmf->cow_page)
1857 return dax_fault_cow_page(vmf, iter);
1858
c2436190
SR
1859	/* If we are reading UNWRITTEN or HOLE, return a hole. */
1860 if (!write &&
1861 (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
1862 if (!pmd)
e5d6df73
SR
1863 return dax_load_hole(xas, vmf, iter, entry);
1864 return dax_pmd_load_hole(xas, vmf, iter, entry);
c2436190
SR
1865 }
1866
ff17b8df 1867 if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) {
c2436190
SR
1868 WARN_ON_ONCE(1);
1869 return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
1870 }
1871
ff17b8df 1872 err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn);
c2436190
SR
1873 if (err)
1874 return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);
1875
e5d6df73 1876 *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags);
c2436190 1877
708dfad2
SR
1878 if (write && iomap->flags & IOMAP_F_SHARED) {
1879 err = dax_iomap_copy_around(pos, size, size, srcmap, kaddr);
ff17b8df
SR
1880 if (err)
1881 return dax_fault_return(err);
1882 }
c2436190 1883
38607c62 1884 folio = dax_to_folio(*entry);
e5d6df73 1885 if (dax_fault_is_synchronous(iter, vmf->vma))
c2436190
SR
1886 return dax_fault_synchronous_pfnp(pfnp, pfn);
1887
38607c62 1888 folio_ref_inc(folio);
c2436190 1889 if (pmd)
38607c62
AP
1890 ret = vmf_insert_folio_pmd(vmf, pfn_folio(pfn_t_to_pfn(pfn)),
1891 write);
1892 else
1893 ret = vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), write);
1894 folio_put(folio);
c2436190 1895
38607c62 1896 return ret;
c2436190
SR
1897}
1898
ab77dab4 1899static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
c0b24625 1900 int *iomap_errp, const struct iomap_ops *ops)
a7d73fe6 1901{
65dd814a 1902 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
b15cd800 1903 XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
65dd814a
CH
1904 struct iomap_iter iter = {
1905 .inode = mapping->host,
1906 .pos = (loff_t)vmf->pgoff << PAGE_SHIFT,
1907 .len = PAGE_SIZE,
952da063 1908 .flags = IOMAP_DAX | IOMAP_FAULT,
65dd814a 1909 };
ab77dab4 1910 vm_fault_t ret = 0;
a7d73fe6 1911 void *entry;
65dd814a 1912 int error;
a7d73fe6 1913
65dd814a 1914 trace_dax_pte_fault(iter.inode, vmf, ret);
a7d73fe6
CH
1915 /*
1916	 * Check that the offset isn't beyond the end of the file now. The caller is
1917	 * supposed to hold locks serializing us with truncate / punch hole, so this
1918	 * is a reliable test.
1919 */
65dd814a 1920 if (iter.pos >= i_size_read(iter.inode)) {
ab77dab4 1921 ret = VM_FAULT_SIGBUS;
a9c42b33
RZ
1922 goto out;
1923 }
a7d73fe6 1924
65dd814a
CH
1925 if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
1926 iter.flags |= IOMAP_WRITE;
a7d73fe6 1927
b15cd800
MW
1928 entry = grab_mapping_entry(&xas, mapping, 0);
1929 if (xa_is_internal(entry)) {
1930 ret = xa_to_internal(entry);
13e451fd
JK
1931 goto out;
1932 }
1933
e2093926
RZ
1934 /*
1935 * It is possible, particularly with mixed reads & writes to private
1936 * mappings, that we have raced with a PMD fault that overlaps with
1937 * the PTE we need to set up. If so just return and the fault will be
1938 * retried.
1939 */
1940 if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
ab77dab4 1941 ret = VM_FAULT_NOPAGE;
e2093926
RZ
1942 goto unlock_entry;
1943 }
1944
65dd814a
CH
1945 while ((error = iomap_iter(&iter, ops)) > 0) {
1946 if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
edd3e3b7 1947 iter.status = -EIO; /* fs corruption? */
65dd814a 1948 continue;
a7d73fe6
CH
1949 }
1950
65dd814a
CH
1951 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false);
1952 if (ret != VM_FAULT_SIGBUS &&
1953 (iter.iomap.flags & IOMAP_F_NEW)) {
a7d73fe6 1954 count_vm_event(PGMAJFAULT);
65dd814a
CH
1955 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
1956 ret |= VM_FAULT_MAJOR;
a7d73fe6 1957 }
1b5a1cb2 1958
6fe32fe1
BF
1959 if (!(ret & VM_FAULT_ERROR)) {
1960 u64 length = PAGE_SIZE;
edd3e3b7 1961 iter.status = iomap_iter_advance(&iter, &length);
6fe32fe1 1962 }
a7d73fe6
CH
1963 }
1964
65dd814a
CH
1965 if (iomap_errp)
1966 *iomap_errp = error;
1967 if (!ret && error)
1968 ret = dax_fault_return(error);
9f141d6e 1969
c2436190 1970unlock_entry:
b15cd800 1971 dax_unlock_entry(&xas, entry);
c2436190 1972out:
65dd814a
CH
1973 trace_dax_pte_fault_done(iter.inode, vmf, ret);
1974 return ret;
a7d73fe6 1975}
642261ac
RZ
1976
1977#ifdef CONFIG_FS_DAX_PMD
55f81639
SR
1978static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
1979 pgoff_t max_pgoff)
642261ac 1980{
f4200391 1981 unsigned long pmd_addr = vmf->address & PMD_MASK;
55f81639 1982 bool write = vmf->flags & FAULT_FLAG_WRITE;
642261ac 1983
55f81639
SR
1984 /*
1985 * Make sure that the faulting address's PMD offset (color) matches
1986 * the PMD offset from the start of the file. This is necessary so
1987 * that a PMD range in the page table overlaps exactly with a PMD
1988 * range in the page cache.
1989 */
1990 if ((vmf->pgoff & PG_PMD_COLOUR) !=
1991 ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1992 return true;
642261ac 1993
55f81639
SR
1994 /* Fall back to PTEs if we're going to COW */
1995 if (write && !(vmf->vma->vm_flags & VM_SHARED))
1996 return true;
11cf9d86 1997
55f81639
SR
1998 /* If the PMD would extend outside the VMA */
1999 if (pmd_addr < vmf->vma->vm_start)
2000 return true;
2001 if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
2002 return true;
642261ac 2003
55f81639
SR
2004 /* If the PMD would extend beyond the file size */
2005 if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)
2006 return true;
653b2ea3 2007
55f81639 2008 return false;
642261ac
RZ
2009}
2010
ab77dab4 2011static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
a2d58167 2012 const struct iomap_ops *ops)
642261ac 2013{
65dd814a 2014 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
b15cd800 2015 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
65dd814a
CH
2016 struct iomap_iter iter = {
2017 .inode = mapping->host,
2018 .len = PMD_SIZE,
952da063 2019 .flags = IOMAP_DAX | IOMAP_FAULT,
65dd814a 2020 };
c2436190 2021 vm_fault_t ret = VM_FAULT_FALLBACK;
b15cd800 2022 pgoff_t max_pgoff;
642261ac 2023 void *entry;
642261ac 2024
65dd814a
CH
2025 if (vmf->flags & FAULT_FLAG_WRITE)
2026 iter.flags |= IOMAP_WRITE;
642261ac 2027
282a8e03
RZ
2028 /*
2029	 * Check that the offset isn't beyond the end of the file now. The caller
2030	 * is supposed to hold locks serializing us with truncate / punch hole, so
2031	 * this is a reliable test.
2032 */
65dd814a 2033 max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE);
fffa281b 2034
65dd814a 2035 trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0);
642261ac 2036
b15cd800 2037 if (xas.xa_index >= max_pgoff) {
c2436190 2038 ret = VM_FAULT_SIGBUS;
282a8e03
RZ
2039 goto out;
2040 }
642261ac 2041
55f81639 2042 if (dax_fault_check_fallback(vmf, &xas, max_pgoff))
642261ac
RZ
2043 goto fallback;
2044
876f2946 2045 /*
b15cd800
MW
2046 * grab_mapping_entry() will make sure we get an empty PMD entry,
2047 * a zero PMD entry or a DAX PMD. If it can't (because a PTE
2048 * entry is already in the array, for instance), it will return
2049 * VM_FAULT_FALLBACK.
876f2946 2050 */
23c84eb7 2051 entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
b15cd800 2052 if (xa_is_internal(entry)) {
c2436190 2053 ret = xa_to_internal(entry);
876f2946 2054 goto fallback;
b15cd800 2055 }
876f2946 2056
e2093926
RZ
2057 /*
2058 * It is possible, particularly with mixed reads & writes to private
2059 * mappings, that we have raced with a PTE fault that overlaps with
2060 * the PMD we need to set up. If so just return and the fault will be
2061 * retried.
2062 */
2063 if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
2064 !pmd_devmap(*vmf->pmd)) {
c2436190 2065 ret = 0;
e2093926
RZ
2066 goto unlock_entry;
2067 }
2068
65dd814a 2069 iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT;
dd0c6425 2070 while (iomap_iter(&iter, ops) > 0) {
65dd814a
CH
2071 if (iomap_length(&iter) < PMD_SIZE)
2072 continue; /* actually breaks out of the loop */
caa51d26 2073
65dd814a 2074 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
6fe32fe1
BF
2075 if (ret != VM_FAULT_FALLBACK) {
2076 u64 length = PMD_SIZE;
edd3e3b7 2077 iter.status = iomap_iter_advance(&iter, &length);
6fe32fe1 2078 }
642261ac
RZ
2079 }
2080
c2436190 2081unlock_entry:
b15cd800 2082 dax_unlock_entry(&xas, entry);
c2436190
SR
2083fallback:
2084 if (ret == VM_FAULT_FALLBACK) {
65dd814a 2085 split_huge_pmd(vmf->vma, vmf->pmd, vmf->address);
642261ac
RZ
2086 count_vm_event(THP_FAULT_FALLBACK);
2087 }
282a8e03 2088out:
65dd814a 2089 trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret);
c2436190 2090 return ret;
642261ac 2091}
a2d58167 2092#else
ab77dab4 2093static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
01cddfe9 2094 const struct iomap_ops *ops)
a2d58167
DJ
2095{
2096 return VM_FAULT_FALLBACK;
2097}
642261ac 2098#endif /* CONFIG_FS_DAX_PMD */
a2d58167
DJ
2099
2100/**
2101 * dax_iomap_fault - handle a page fault on a DAX file
2102 * @vmf: The description of the fault
1d024e7a 2103 * @order: Order of the page to fault in
9a0dd422 2104 * @pfnp: PFN to insert for synchronous faults if fsync is required
c0b24625 2105 * @iomap_errp: Storage for detailed error code in case of error
cec04e8c 2106 * @ops: Iomap ops passed from the file system
a2d58167
DJ
2107 *
2108 * When a page fault occurs, filesystems may call this helper in
2109 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
2110 * has done all the necessary locking for the page fault to proceed
2111 * successfully.
2112 */
1d024e7a 2113vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
c0b24625 2114 pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
a2d58167 2115{
1d024e7a 2116 if (order == 0)
c0b24625 2117 return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1d024e7a 2118 else if (order == PMD_ORDER)
9a0dd422 2119 return dax_iomap_pmd_fault(vmf, pfnp, ops);
1d024e7a 2120 else
a2d58167 2121 return VM_FAULT_FALLBACK;
a2d58167
DJ
2122}
2123EXPORT_SYMBOL_GPL(dax_iomap_fault);
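
/*
 * Illustrative sketch, not part of fs/dax.c: a minimal DAX ->fault handler
 * for a filesystem that does not support MAP_SYNC faults, so the pfn pointer
 * can be NULL. "my_iomap_ops" is a placeholder; a real handler additionally
 * deals with write faults (sb_start_pagefault(), file_update_time(), ...).
 *
 *	static vm_fault_t my_dax_fault(struct vm_fault *vmf)
 *	{
 *		struct address_space *mapping = vmf->vma->vm_file->f_mapping;
 *		vm_fault_t ret;
 *
 *		filemap_invalidate_lock_shared(mapping);
 *		ret = dax_iomap_fault(vmf, 0, NULL, NULL, &my_iomap_ops);
 *		filemap_invalidate_unlock_shared(mapping);
 *		return ret;
 *	}
 */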
71eab6df 2124
a77d19f4 2125/*
71eab6df
JK
2126 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
2127 * @vmf: The description of the fault
71eab6df 2128 * @pfn: PFN to insert
cfc93c6c 2129 * @order: Order of entry to insert.
71eab6df 2130 *
a77d19f4
MW
2131 * This function inserts a writeable PTE or PMD entry into the page tables
2132 * for an mmapped DAX file. It also marks the page cache entry as dirty.
71eab6df 2133 */
cfc93c6c
MW
2134static vm_fault_t
2135dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
71eab6df
JK
2136{
2137 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
cfc93c6c 2138 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
38607c62 2139 struct folio *folio;
cfc93c6c 2140 void *entry;
ab77dab4 2141 vm_fault_t ret;
71eab6df 2142
cfc93c6c 2143 xas_lock_irq(&xas);
6be3e21d 2144 entry = get_next_unlocked_entry(&xas, order);
71eab6df 2145 /* Did we race with someone splitting entry or so? */
23c84eb7
MWO
2146 if (!entry || dax_is_conflict(entry) ||
2147 (order == 0 && !dax_is_pte_entry(entry))) {
4c3d043d 2148 put_unlocked_entry(&xas, entry, WAKE_NEXT);
cfc93c6c 2149 xas_unlock_irq(&xas);
71eab6df
JK
2150 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
2151 VM_FAULT_NOPAGE);
2152 return VM_FAULT_NOPAGE;
2153 }
cfc93c6c
MW
2154 xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
2155 dax_lock_entry(&xas, entry);
2156 xas_unlock_irq(&xas);
38607c62
AP
2157 folio = pfn_folio(pfn_t_to_pfn(pfn));
2158 folio_ref_inc(folio);
cfc93c6c 2159 if (order == 0)
38607c62 2160 ret = vmf_insert_page_mkwrite(vmf, &folio->page, true);
71eab6df 2161#ifdef CONFIG_FS_DAX_PMD
cfc93c6c 2162 else if (order == PMD_ORDER)
38607c62 2163 ret = vmf_insert_folio_pmd(vmf, folio, FAULT_FLAG_WRITE);
71eab6df 2164#endif
cfc93c6c 2165 else
ab77dab4 2166 ret = VM_FAULT_FALLBACK;
38607c62 2167 folio_put(folio);
cfc93c6c 2168 dax_unlock_entry(&xas, entry);
ab77dab4
SJ
2169 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
2170 return ret;
71eab6df
JK
2171}
2172
2173/**
2174 * dax_finish_sync_fault - finish synchronous page fault
2175 * @vmf: The description of the fault
1d024e7a 2176 * @order: Order of entry to be inserted
71eab6df
JK
2177 * @pfn: PFN to insert
2178 *
2179 * This function ensures that the file range touched by the page fault is
2180 * stored persistently on the media and handles insertion of the appropriate
2181 * page table entry.
2182 */
1d024e7a
MWO
2183vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, unsigned int order,
2184 pfn_t pfn)
71eab6df
JK
2185{
2186 int err;
2187 loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
cfc93c6c 2188 size_t len = PAGE_SIZE << order;
71eab6df 2189
71eab6df
JK
2190 err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
2191 if (err)
2192 return VM_FAULT_SIGBUS;
cfc93c6c 2193 return dax_insert_pfn_mkwrite(vmf, pfn, order);
71eab6df
JK
2194}
2195EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
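
/*
 * Illustrative sketch, not part of fs/dax.c: how a fault handler that does
 * support synchronous (MAP_SYNC) faults would consume VM_FAULT_NEEDDSYNC.
 * "order" and "my_iomap_ops" are placeholders supplied by the caller.
 *
 *	pfn_t pfn;
 *	vm_fault_t ret;
 *
 *	ret = dax_iomap_fault(vmf, order, &pfn, NULL, &my_iomap_ops);
 *	if (ret & VM_FAULT_NEEDDSYNC)
 *		ret = dax_finish_sync_fault(vmf, order, pfn);
 */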
6f7db389 2196
39eb0511 2197static int dax_range_compare_iter(struct iomap_iter *it_src,
6f7db389
SR
2198 struct iomap_iter *it_dest, u64 len, bool *same)
2199{
2200 const struct iomap *smap = &it_src->iomap;
2201 const struct iomap *dmap = &it_dest->iomap;
2202 loff_t pos1 = it_src->pos, pos2 = it_dest->pos;
39eb0511 2203 u64 dest_len;
6f7db389
SR
2204 void *saddr, *daddr;
2205 int id, ret;
2206
2207 len = min(len, min(smap->length, dmap->length));
2208
2209 if (smap->type == IOMAP_HOLE && dmap->type == IOMAP_HOLE) {
2210 *same = true;
39eb0511 2211 goto advance;
6f7db389
SR
2212 }
2213
2214 if (smap->type == IOMAP_HOLE || dmap->type == IOMAP_HOLE) {
2215 *same = false;
2216 return 0;
2217 }
2218
2219 id = dax_read_lock();
2220 ret = dax_iomap_direct_access(smap, pos1, ALIGN(pos1 + len, PAGE_SIZE),
2221 &saddr, NULL);
2222 if (ret < 0)
2223 goto out_unlock;
2224
2225 ret = dax_iomap_direct_access(dmap, pos2, ALIGN(pos2 + len, PAGE_SIZE),
2226 &daddr, NULL);
2227 if (ret < 0)
2228 goto out_unlock;
2229
2230 *same = !memcmp(saddr, daddr, len);
2231 if (!*same)
2232 len = 0;
2233 dax_read_unlock(id);
39eb0511
BF
2234
2235advance:
2236 dest_len = len;
2237 ret = iomap_iter_advance(it_src, &len);
2238 if (!ret)
2239 ret = iomap_iter_advance(it_dest, &dest_len);
2240 return ret;
6f7db389
SR
2241
2242out_unlock:
2243 dax_read_unlock(id);
2244 return -EIO;
2245}
2246
2247int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
2248 struct inode *dst, loff_t dstoff, loff_t len, bool *same,
2249 const struct iomap_ops *ops)
2250{
2251 struct iomap_iter src_iter = {
2252 .inode = src,
2253 .pos = srcoff,
2254 .len = len,
2255 .flags = IOMAP_DAX,
2256 };
2257 struct iomap_iter dst_iter = {
2258 .inode = dst,
2259 .pos = dstoff,
2260 .len = len,
2261 .flags = IOMAP_DAX,
2262 };
39eb0511 2263 int ret, status;
6f7db389 2264
0e79e373
SR
2265 while ((ret = iomap_iter(&src_iter, ops)) > 0 &&
2266 (ret = iomap_iter(&dst_iter, ops)) > 0) {
39eb0511 2267 status = dax_range_compare_iter(&src_iter, &dst_iter,
e900ba10 2268 min(src_iter.len, dst_iter.len), same);
39eb0511 2269 if (status < 0)
0e79e373 2270 return ret;
edd3e3b7 2271 src_iter.status = dst_iter.status = status;
6f7db389
SR
2272 }
2273 return ret;
2274}
2275
2276int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in,
2277 struct file *file_out, loff_t pos_out,
2278 loff_t *len, unsigned int remap_flags,
2279 const struct iomap_ops *ops)
2280{
2281 return __generic_remap_file_range_prep(file_in, pos_in, file_out,
2282 pos_out, len, remap_flags, ops);
2283}
2284EXPORT_SYMBOL_GPL(dax_remap_file_range_prep);
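
/*
 * Illustrative sketch, not part of fs/dax.c: a reflink-capable filesystem
 * might dispatch its remap preparation on IS_DAX(), using this helper for DAX
 * inodes and the generic one otherwise. "my_iomap_ops" is a placeholder for
 * the filesystem's read iomap_ops.
 *
 *	if (IS_DAX(inode_in) || IS_DAX(inode_out))
 *		ret = dax_remap_file_range_prep(file_in, pos_in, file_out,
 *				pos_out, &len, remap_flags, &my_iomap_ops);
 *	else
 *		ret = generic_remap_file_range_prep(file_in, pos_in, file_out,
 *				pos_out, &len, remap_flags);
 */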