// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include <asm/pgalloc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

static inline unsigned int pe_order(enum page_entry_size pe_size)
{
	if (pe_size == PE_SIZE_PTE)
		return PAGE_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PUD)
		return PUD_SHIFT - PAGE_SHIFT;
	return ~0;
}

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

/* The order of a PMD entry */
#define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * DAX pagecache entries use XArray value entries so they can't be mistaken
 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
 * and two more to tell us if the entry is a zero page or an empty entry that
 * is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define DAX_SHIFT	(4)
#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)
#define DAX_ZERO_PAGE	(1UL << 2)
#define DAX_EMPTY	(1UL << 3)
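
/*
 * Illustrative example (not part of the original source): dax_make_entry()
 * below packs the pfn above these flag bits, so a locked PMD-sized entry
 * for pfn 0x1234 is stored as the XArray value
 * (0x1234 << DAX_SHIFT) | DAX_PMD | DAX_LOCKED.
 */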

static unsigned long dax_to_pfn(void *entry)
{
	return xa_to_value(entry) >> DAX_SHIFT;
}

static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}

static bool dax_is_locked(void *entry)
{
	return xa_to_value(entry) & DAX_LOCKED;
}

static unsigned int dax_entry_order(void *entry)
{
	if (xa_to_value(entry) & DAX_PMD)
		return PMD_ORDER;
	return 0;
}

static unsigned long dax_is_pmd_entry(void *entry)
{
	return xa_to_value(entry) & DAX_PMD;
}

static bool dax_is_pte_entry(void *entry)
{
	return !(xa_to_value(entry) & DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return xa_to_value(entry) & DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return xa_to_value(entry) & DAX_EMPTY;
}

/*
 * true if the entry that was found is of a smaller order than the entry
 * we were looking for
 */
static bool dax_is_conflict(void *entry)
{
	return entry == XA_RETRY_ENTRY;
}

/*
 * DAX page cache entry locking
 */
struct exceptional_entry_key {
	struct xarray *xa;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

/**
 * enum dax_wake_mode: waitqueue wakeup behaviour
 * @WAKE_ALL: wake all waiters in the waitqueue
 * @WAKE_NEXT: wake only the first waiter in the waitqueue
 */
enum dax_wake_mode {
	WAKE_ALL,
	WAKE_NEXT,
};

static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
		void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;
	unsigned long index = xas->xa_index;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;
	key->xa = xas->xa;
	key->entry_start = index;

	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}
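
/*
 * Worked example (illustrative, assuming 4k pages and 2M PMDs, so
 * PG_PMD_COLOUR == 0x1ff): for a PMD entry, waiters on any index in
 * 0x200-0x3ff all hash with index 0x200 and therefore share one wait
 * queue, which is what lets a single wake-up cover the whole PMD range.
 */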

static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->xa != ewait->key.xa ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_entry(struct xa_state *xas, void *entry,
			   enum dax_wake_mode mode)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(xas, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
}

/*
 * Look up entry in page cache, wait for it to become unlocked if it
 * is a DAX entry and return it.  The caller must subsequently call
 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 * if it did.  The entry returned may have a larger order than @order.
 * If @order is larger than the order of the entry found in i_pages, this
 * function returns a dax_is_conflict entry.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
{
	void *entry;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = xas_find_conflict(xas);
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			return entry;
		if (dax_entry_order(entry) < order)
			return XA_RETRY_ENTRY;
		if (!dax_is_locked(entry))
			return entry;

		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xas_unlock_irq(xas);
		xas_reset(xas);
		schedule();
		finish_wait(wq, &ewait.wait);
		xas_lock_irq(xas);
	}
}

/*
 * The only thing keeping the address space around is the i_pages lock
 * (it's cycled in clear_inode() after removing the entries from i_pages)
 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 */
static void wait_entry_unlocked(struct xa_state *xas, void *entry)
{
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
	/*
	 * Unlike get_unlocked_entry() there is no guarantee that this
	 * path ever successfully retrieves an unlocked entry before an
	 * inode dies. Perform a non-exclusive wait in case this path
	 * never successfully performs its own wake up.
	 */
	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
	xas_unlock_irq(xas);
	schedule();
	finish_wait(wq, &ewait.wait);
}

static void put_unlocked_entry(struct xa_state *xas, void *entry,
			       enum dax_wake_mode mode)
{
	if (entry && !dax_is_conflict(entry))
		dax_wake_entry(xas, entry, mode);
}

/*
 * We used the xa_state to get the entry, but then we locked the entry and
 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 * before use.
 */
static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
	void *old;

	BUG_ON(dax_is_locked(entry));
	xas_reset(xas);
	xas_lock_irq(xas);
	old = xas_store(xas, entry);
	xas_unlock_irq(xas);
	BUG_ON(!dax_is_locked(old));
	dax_wake_entry(xas, entry, WAKE_NEXT);
}

/*
 * Return: The entry stored at this location before it was locked.
 */
static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
	unsigned long v = xa_to_value(entry);
	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}
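
/*
 * Typical locking sequence used throughout this file (summary added for
 * readability, not part of the original source):
 *
 *	xas_lock_irq(&xas);
 *	entry = get_unlocked_entry(&xas, order);  // waits if entry is locked
 *	...                                       // inspect or replace entry
 *	dax_lock_entry(&xas, entry);              // sets DAX_LOCKED
 *	xas_unlock_irq(&xas);
 *	...                                       // work without the xa_lock
 *	dax_unlock_entry(&xas, entry);            // stores unlocked value, wakes
 */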

static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_end_pfn(void *entry)
{
	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_to_pfn(entry); \
			pfn < dax_end_pfn(entry); pfn++)
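
/*
 * Usage sketch (illustrative only): a PTE-sized entry visits exactly one
 * pfn, a PMD-sized entry visits PMD_SIZE / PAGE_SIZE consecutive pfns:
 *
 *	unsigned long pfn;
 *
 *	for_each_mapped_pfn(entry, pfn)
 *		do_something(pfn_to_page(pfn));
 */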

/*
 * TODO: for reflink+dax we need a way to associate a single page with
 * multiple address_space instances at different linear_page_index()
 * offsets.
 */
static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address)
{
	unsigned long size = dax_entry_size(entry), pfn, index;
	int i = 0;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(page->mapping);
		page->mapping = mapping;
		page->index = index + i++;
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
		page->index = 0;
	}
}

static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}

/*
 * dax_lock_page - Lock the DAX entry corresponding to a page
 * @page: The page whose entry we want to lock
 *
 * Context: Process context.
 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
 * not be locked.
 */
dax_entry_t dax_lock_page(struct page *page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	/* Ensure page->mapping isn't freed while we look at it */
	rcu_read_lock();
	for (;;) {
		struct address_space *mapping = READ_ONCE(page->mapping);

		entry = NULL;
		if (!mapping || !dax_mapping(mapping))
			break;

		/*
		 * In the device-dax case there's no need to lock, a
		 * struct dev_pagemap pin is sufficient to keep the
		 * inode alive, and we assume we have dev_pagemap pin
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
		 */
		entry = (void *)~0UL;
		if (S_ISCHR(mapping->host->i_mode))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		if (mapping != page->mapping) {
			xas_unlock_irq(&xas);
			continue;
		}
		xas_set(&xas, page->index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		dax_lock_entry(&xas, entry);
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}

void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
	struct address_space *mapping = page->mapping;
	XA_STATE(xas, &mapping->i_pages, page->index);

	if (S_ISCHR(mapping->host->i_mode))
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}
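
/*
 * Usage sketch (illustrative, not from the original source): callers such as
 * the memory-failure handling path pin the entry around an operation on @page:
 *
 *	dax_entry_t cookie = dax_lock_page(page);
 *
 *	if (cookie) {
 *		... operate on page while its DAX entry cannot change ...
 *		dax_unlock_page(page, cookie);
 *	}
 */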

/*
 * Find page cache entry at given index. If it is a DAX entry, return it
 * with the entry locked. If the page cache doesn't contain an entry at
 * that index, add a locked empty entry.
 *
 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return VM_FAULT_FALLBACK.
 * This will happen if there are any PTE entries within the PMD range
 * that we are requesting.
 *
 * We always favor PTE entries over PMD entries. There isn't a flow where we
 * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
 * insertion will fail if it finds any PTE entries already in the tree, and a
 * PTE insertion will cause an existing PMD entry to be unmapped and
 * downgraded to PTE entries.  This happens for both PMD zero pages as
 * well as PMD empty entries.
 *
 * The exception to this downgrade path is for PMD entries that have
 * real storage backing them.  We will leave these real PMD entries in
 * the tree, and PTE writes will simply dirty the entire PMD entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 *
 * On error, this function does not return an ERR_PTR.  Instead it returns
 * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
 * overlap with xarray value entries.
 */
static void *grab_mapping_entry(struct xa_state *xas,
		struct address_space *mapping, unsigned int order)
{
	unsigned long index = xas->xa_index;
	bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
	void *entry;

retry:
	xas_lock_irq(xas);
	entry = get_unlocked_entry(xas, order);

	if (entry) {
		if (dax_is_conflict(entry))
			goto fallback;
		if (!xa_is_value(entry)) {
			xas_set_err(xas, -EIO);
			goto out_unlock;
		}

		if (order == 0) {
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	if (pmd_downgrade) {
		/*
		 * Make sure 'entry' remains valid while we drop
		 * the i_pages lock.
		 */
		dax_lock_entry(xas, entry);

		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (dax_is_zero_entry(entry)) {
			xas_unlock_irq(xas);
			unmap_mapping_pages(mapping,
					xas->xa_index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
			xas_reset(xas);
			xas_lock_irq(xas);
		}

		dax_disassociate_entry(entry, mapping, false);
		xas_store(xas, NULL);	/* undo the PMD join */
		dax_wake_entry(xas, entry, WAKE_ALL);
		mapping->nrpages -= PG_PMD_NR;
		entry = NULL;
		xas_set(xas, index);
	}

	if (entry) {
		dax_lock_entry(xas, entry);
	} else {
		unsigned long flags = DAX_EMPTY;

		if (order > 0)
			flags |= DAX_PMD;
		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
		dax_lock_entry(xas, entry);
		if (xas_error(xas))
			goto out_unlock;
		mapping->nrpages += 1UL << order;
	}

out_unlock:
	xas_unlock_irq(xas);
	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
		goto retry;
	if (xas->xa_node == XA_ERROR(-ENOMEM))
		return xa_mk_internal(VM_FAULT_OOM);
	if (xas_error(xas))
		return xa_mk_internal(VM_FAULT_SIGBUS);
	return entry;
fallback:
	xas_unlock_irq(xas);
	return xa_mk_internal(VM_FAULT_FALLBACK);
}

/**
 * dax_layout_busy_page_range - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 * @start: Starting offset. Page containing 'start' is included.
 * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
 *       pages from 'start' till the end of file are included.
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page_range(struct address_space *mapping,
					loff_t start, loff_t end)
{
	void *entry;
	unsigned int scanned = 0;
	struct page *page = NULL;
	pgoff_t start_idx = start >> PAGE_SHIFT;
	pgoff_t end_idx;
	XA_STATE(xas, &mapping->i_pages, start_idx);

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	/* If end == LLONG_MAX, all pages from start to till end of file */
	if (end == LLONG_MAX)
		end_idx = ULONG_MAX;
	else
		end_idx = end >> PAGE_SHIFT;
	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the iteration and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_pages() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, end_idx) {
		if (WARN_ON_ONCE(!xa_is_value(entry)))
			continue;
		if (unlikely(dax_is_locked(entry)))
			entry = get_unlocked_entry(&xas, 0);
		if (entry)
			page = dax_busy_page(entry);
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
		if (page)
			break;
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);

struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);
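
/*
 * Usage sketch (illustrative; the caller is not part of this file):
 * filesystems typically call this before a truncate or hole punch and wait
 * for the returned page to become idle before retrying, e.g.:
 *
 *	struct page *page = dax_layout_busy_page(inode->i_mapping);
 *
 *	if (page)
 *		... wait for page_ref_count(page) to drop to 1, then retry ...
 */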

static int __dax_invalidate_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	XA_STATE(xas, &mapping->i_pages, index);
	int ret = 0;
	void *entry;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, 0);
	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
		goto out;
	if (!trunc &&
	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	xas_store(&xas, NULL);
	mapping->nrpages -= 1UL << dax_entry_order(entry);
	ret = 1;
out:
	put_unlocked_entry(&xas, entry, WAKE_ALL);
	xas_unlock_irq(&xas);
	return ret;
}

/*
 * Delete DAX entry at @index from @mapping.  Wait for it
 * to be unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * page cache (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen a DAX entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_entry(mapping, index, false);
}

static int copy_cow_page_dax(struct block_device *bdev, struct dax_device *dax_dev,
			     sector_t sector, struct page *to, unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(PAGE_SIZE), &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_entry(struct xa_state *xas,
		struct address_space *mapping, struct vm_fault *vmf,
		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
{
	void *new_entry = dax_make_entry(pfn, flags);

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
		unsigned long index = xas->xa_index;
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, index, 1, false);
	}

	xas_reset(xas);
	xas_lock_irq(xas);
	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		void *old;

		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
		/*
		 * Only swap our new entry into the page cache if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the cache, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		old = dax_lock_entry(xas, new_entry);
		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
					DAX_LOCKED));
		entry = new_entry;
	} else {
		xas_load(xas);	/* Walk the xa_state */
	}

	if (dirty)
		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);

	xas_unlock_irq(xas);
	return entry;
}

static inline
unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
		unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		struct mmu_notifier_range range;
		unsigned long address;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * follow_invalidate_pte() will use the range to call
		 * mmu_notifier_invalidate_range_start() on our behalf before
		 * taking any lock.
		 */
		if (follow_invalidate_pte(vma->vm_mm, address, &range, &ptep,
					  &pmdp, &ptl))
			continue;

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_invalidate(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
#endif
			spin_unlock(ptl);
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(&range);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
		struct address_space *mapping, void *entry)
{
	unsigned long pfn, index, count;
	long ret = 0;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!xa_is_value(entry)))
		return -EIO;

	if (unlikely(dax_is_locked(entry))) {
		void *old_entry = entry;

		entry = get_unlocked_entry(xas, 0);

		/* Entry got punched out / reallocated? */
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			goto put_unlocked;
		/*
		 * Entry got reallocated elsewhere? No need to writeback.
		 * We have to compare pfns as we must not bail out due to
		 * difference in lockbit or entry type.
		 */
		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
			goto put_unlocked;
		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
					dax_is_zero_entry(entry))) {
			ret = -EIO;
			goto put_unlocked;
		}

		/* Another fsync thread may have already done this entry */
		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
			goto put_unlocked;
	}

	/* Lock the entry to serialize with page faults */
	dax_lock_entry(xas, entry);

	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
	xas_unlock_irq(xas);

	/*
	 * If dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we use needs to be
	 * aligned to the start of the PMD.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_to_pfn(entry);
	count = 1UL << dax_entry_order(entry);
	index = xas->xa_index & ~(count - 1);

	dax_entry_mkclean(mapping, index, pfn);
	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
	dax_wake_entry(xas, entry, WAKE_NEXT);

	trace_dax_writeback_one(mapping->host, index, count);
	return ret;

 put_unlocked:
	put_unlocked_entry(xas, entry, WAKE_NEXT);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
	struct inode *inode = mapping->host;
	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
	void *entry;
	int ret = 0;
	unsigned int scanned = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	trace_dax_writeback_range(inode, xas.xa_index, end_index);

	tag_pages_for_writeback(mapping, xas.xa_index, end_index);

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
		if (ret < 0) {
			mapping_set_error(mapping, ret);
			break;
		}
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
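
/*
 * Usage sketch (illustrative; foo_dax_writepages and sbi->s_daxdev are
 * placeholders for a filesystem's own ->writepages implementation and its
 * cached dax_device, neither of which is part of this file):
 *
 *	static int foo_dax_writepages(struct address_space *mapping,
 *				      struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
 *	}
 */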

static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}
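
/*
 * Worked example (illustrative values only): with iomap->offset == 0x8000,
 * iomap->addr == 0x10000 and pos == 0x9234, the page-aligned pos is 0x9000,
 * so the byte address is 0x10000 + 0x9000 - 0x8000 = 0x11000 and the
 * returned 512-byte sector is 0x11000 >> 9 = 0x88.
 */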

static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	const sector_t sector = dax_iomap_sector(iomap, pos);
	pgoff_t pgoff;
	int id, rc;
	long length;

	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
	if (rc)
		return rc;
	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   NULL, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct xa_state *xas,
		struct address_space *mapping, void **entry,
		struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
	vm_fault_t ret;

	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_ZERO_PAGE, false);

	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

1067
81ee8e52 1068s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
679c8bd3 1069{
4f3b4f16 1070 sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
0a23f9ff
VG
1071 pgoff_t pgoff;
1072 long rc, id;
1073 void *kaddr;
1074 bool page_aligned = false;
81ee8e52
MWO
1075 unsigned offset = offset_in_page(pos);
1076 unsigned size = min_t(u64, PAGE_SIZE - offset, length);
cccbce67 1077
0a23f9ff 1078 if (IS_ALIGNED(sector << SECTOR_SHIFT, PAGE_SIZE) &&
81ee8e52 1079 (size == PAGE_SIZE))
0a23f9ff 1080 page_aligned = true;
cccbce67 1081
4f3b4f16 1082 rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff);
0a23f9ff
VG
1083 if (rc)
1084 return rc;
1085
1086 id = dax_read_lock();
1087
1088 if (page_aligned)
81ee8e52 1089 rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
0a23f9ff 1090 else
4f3b4f16 1091 rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL);
0a23f9ff
VG
1092 if (rc < 0) {
1093 dax_read_unlock(id);
1094 return rc;
1095 }
1096
1097 if (!page_aligned) {
81f55870 1098 memset(kaddr + offset, 0, size);
4f3b4f16 1099 dax_flush(iomap->dax_dev, kaddr + offset, size);
4b0228fa 1100 }
0a23f9ff 1101 dax_read_unlock(id);
81ee8e52 1102 return size;
679c8bd3 1103}
679c8bd3 1104
static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap, struct iomap *srcmap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	size_t xfer;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, NULL);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		/*
		 * The userspace address for the memory copy has already been
		 * validated via access_ok() in either vfs_read() or
		 * vfs_write(), depending on which operation we are doing.
		 */
		if (iov_iter_rw(iter) == WRITE)
			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
					map_len, iter);

		pos += xfer;
		length -= xfer;
		done += xfer;

		if (xfer == 0)
			ret = -EFAULT;
		if (xfer < map_len)
			break;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @iter: The addresses to do I/O from or to
 * @ops: iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_write(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	if (iocb->ki_flags & IOCB_NOWAIT)
		flags |= IOMAP_NOWAIT;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
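
/*
 * Usage sketch (illustrative; the foo_* names and foo_iomap_ops are
 * placeholders for a filesystem's own ->read_iter implementation, not part
 * of this file):
 *
 *	static ssize_t foo_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		ssize_t ret;
 *
 *		inode_lock_shared(file_inode(iocb->ki_filp));
 *		ret = dax_iomap_rw(iocb, to, &foo_iomap_ops);
 *		inode_unlock_shared(file_inode(iocb->ki_filp));
 *		return ret;
 *	}
 */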

static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	return vmf_error(error);
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
		struct vm_area_struct *vma, struct iomap *iomap)
{
	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
		&& (iomap->flags & IOMAP_F_DIRTY);
}

static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       int *iomap_errp, const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	struct iomap iomap = { .type = IOMAP_HOLE };
	struct iomap srcmap = { .type = IOMAP_HOLE };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	vm_fault_t ret = 0;
	void *entry;
	pfn_t pfn;

	trace_dax_pte_fault(inode, vmf, ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (write && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(&xas, mapping, 0);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the file system block size to be equal to the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap, &srcmap);
	if (iomap_errp)
		*iomap_errp = error;
	if (error) {
		ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	if (vmf->cow_page) {
		sector_t sector = dax_iomap_sector(&iomap, pos);

		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_cow_page_dax(iomap.bdev, iomap.dax_dev,
						  sector, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		ret = finish_fault(vmf);
		if (!ret)
			ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	sync = dax_fault_is_synchronous(flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
		if (error < 0)
			goto error_finish_iomap;

		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
						 0, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PTE into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp)) {
				error = -EIO;
				goto error_finish_iomap;
			}
			*pfnp = pfn;
			ret = VM_FAULT_NEEDDSYNC | major;
			goto finish_iomap;
		}
		trace_dax_insert_mapping(inode, vmf, entry);
		if (write)
			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
		else
			ret = vmf_insert_mixed(vma, vaddr, pfn);

		goto finish_iomap;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!write) {
			ret = dax_load_hole(&xas, mapping, &entry, vmf);
			goto finish_iomap;
		}
		fallthrough;
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 error_finish_iomap:
	ret = dax_fault_return(error);
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
 unlock_entry:
	dax_unlock_entry(&xas, entry);
 out:
	trace_dax_pte_fault_done(inode, vmf, ret);
	return ret | major;
}

#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		struct iomap *iomap, void **entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = mapping->host;
	pgtable_t pgtable = NULL;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_PMD | DAX_ZERO_PAGE, false);

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		mm_inc_nr_ptes(vma->vm_mm);
	}
	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
	return VM_FAULT_NOPAGE;

fallback:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
	return VM_FAULT_FALLBACK;
}

static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	vm_fault_t result = VM_FAULT_FALLBACK;
	struct iomap iomap = { .type = IOMAP_HOLE };
	struct iomap srcmap = { .type = IOMAP_HOLE };
	pgoff_t max_pgoff;
	void *entry;
	loff_t pos;
	int error;
	pfn_t pfn;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the page cache.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (xas.xa_index >= max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get an empty PMD entry,
	 * a zero PMD entry or a DAX PMD.  If it can't (because a PTE
	 * entry is already in the array, for instance), it will return
	 * VM_FAULT_FALLBACK.
	 */
	entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
	if (xa_is_internal(entry)) {
		result = xa_to_internal(entry);
		goto fallback;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)xas.xa_index << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap,
			&srcmap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
		if (error < 0)
			goto finish_iomap;

		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
						DAX_PMD, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PMD into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp))
				goto finish_iomap;
			*pfnp = pfn;
			result = VM_FAULT_NEEDDSYNC;
			goto finish_iomap;
		}

		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
		result = vmf_insert_pfn_pmd(vmf, pfn, write);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
 unlock_entry:
	dax_unlock_entry(&xas, entry);
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
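
/*
 * Usage sketch (illustrative; foo_fault and foo_iomap_ops are placeholders
 * for a filesystem's own vm_operations_struct handler, not part of this file):
 *
 *	static vm_fault_t foo_fault(struct vm_fault *vmf, enum page_entry_size pe_size)
 *	{
 *		pfn_t pfn;
 *		vm_fault_t ret;
 *
 *		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &foo_iomap_ops);
 *		if (ret & VM_FAULT_NEEDDSYNC)
 *			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 *		return ret;
 *	}
 */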

/*
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pfn: PFN to insert
 * @order: Order of entry to insert.
 *
 * This function inserts a writeable PTE or PMD entry into the page tables
 * for an mmaped DAX file.  It also marks the page cache entry as dirty.
 */
static vm_fault_t
dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
	void *entry;
	vm_fault_t ret;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, order);
	/* Did we race with someone splitting entry or so? */
	if (!entry || dax_is_conflict(entry) ||
	    (order == 0 && !dax_is_pte_entry(entry))) {
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
		xas_unlock_irq(&xas);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
	dax_lock_entry(&xas, entry);
	xas_unlock_irq(&xas);
	if (order == 0)
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_FS_DAX_PMD
	else if (order == PMD_ORDER)
		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
#endif
	else
		ret = VM_FAULT_FALLBACK;
	dax_unlock_entry(&xas, entry);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}

/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media and handles inserting of appropriate page
 * table entry.
 */
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	unsigned int order = pe_order(pe_size);
	size_t len = PAGE_SIZE << order;

	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pfn, order);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
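
/*
 * Note (summary added for readability, not part of the original source):
 * this is the second half of the MAP_SYNC write-fault flow.  dax_iomap_fault()
 * returns VM_FAULT_NEEDDSYNC together with the pfn; the filesystem then calls
 * dax_finish_sync_fault(), which fsyncs the byte range covering the fault and
 * installs the writeable PTE or PMD via dax_insert_pfn_mkwrite().
 */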