fsdax: replace mmap entry in case of CoW
[linux-block.git] / fs / dax.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include <linux/rmap.h>
#include <asm/pgalloc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

static inline unsigned int pe_order(enum page_entry_size pe_size)
{
	if (pe_size == PE_SIZE_PTE)
		return PAGE_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PUD)
		return PUD_SHIFT - PAGE_SHIFT;
	return ~0;
}
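/*
 * Illustrative note (not part of the original file): with the common
 * x86-64 configuration (4KiB base pages, 2MiB PMDs, 1GiB PUDs),
 * pe_order() returns 0 for PE_SIZE_PTE, 9 for PE_SIZE_PMD
 * (2MiB = 2^9 pages) and 18 for PE_SIZE_PUD (1GiB = 2^18 pages).
 */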

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

/* The order of a PMD entry */
#define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * DAX pagecache entries use XArray value entries so they can't be mistaken
 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
 * and two more to tell us if the entry is a zero page or an empty entry that
 * is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define DAX_SHIFT	(4)
#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)
#define DAX_ZERO_PAGE	(1UL << 2)
#define DAX_EMPTY	(1UL << 3)

static unsigned long dax_to_pfn(void *entry)
{
	return xa_to_value(entry) >> DAX_SHIFT;
}

static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}
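/*
 * Worked example (not part of the original file): for pfn 0x1000,
 * dax_make_entry(pfn, DAX_PMD) yields the XArray value
 * (0x1000 << DAX_SHIFT) | DAX_PMD = 0x10002; dax_lock_entry() then ORs
 * in DAX_LOCKED (giving 0x10003), and dax_to_pfn() shifts DAX_SHIFT
 * bits back out to recover 0x1000.
 */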

static bool dax_is_locked(void *entry)
{
	return xa_to_value(entry) & DAX_LOCKED;
}

static unsigned int dax_entry_order(void *entry)
{
	if (xa_to_value(entry) & DAX_PMD)
		return PMD_ORDER;
	return 0;
}

static unsigned long dax_is_pmd_entry(void *entry)
{
	return xa_to_value(entry) & DAX_PMD;
}

static bool dax_is_pte_entry(void *entry)
{
	return !(xa_to_value(entry) & DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return xa_to_value(entry) & DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return xa_to_value(entry) & DAX_EMPTY;
}

/*
 * true if the entry that was found is of a smaller order than the entry
 * we were looking for
 */
static bool dax_is_conflict(void *entry)
{
	return entry == XA_RETRY_ENTRY;
}

/*
 * DAX page cache entry locking
 */
struct exceptional_entry_key {
	struct xarray *xa;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

/**
 * enum dax_wake_mode: waitqueue wakeup behaviour
 * @WAKE_ALL: wake all waiters in the waitqueue
 * @WAKE_NEXT: wake only the first waiter in the waitqueue
 */
enum dax_wake_mode {
	WAKE_ALL,
	WAKE_NEXT,
};

static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
		void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;
	unsigned long index = xas->xa_index;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;
	key->xa = xas->xa;
	key->entry_start = index;

	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->xa != ewait->key.xa ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_entry(struct xa_state *xas, void *entry,
			   enum dax_wake_mode mode)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(xas, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
}

/*
 * Look up entry in page cache, wait for it to become unlocked if it
 * is a DAX entry and return it.  The caller must subsequently call
 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 * if it did.  The entry returned may have a larger order than @order.
 * If @order is larger than the order of the entry found in i_pages, this
 * function returns a dax_is_conflict entry.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
{
	void *entry;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = xas_find_conflict(xas);
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			return entry;
		if (dax_entry_order(entry) < order)
			return XA_RETRY_ENTRY;
		if (!dax_is_locked(entry))
			return entry;

		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xas_unlock_irq(xas);
		xas_reset(xas);
		schedule();
		finish_wait(wq, &ewait.wait);
		xas_lock_irq(xas);
	}
}

/*
 * The only thing keeping the address space around is the i_pages lock
 * (it's cycled in clear_inode() after removing the entries from i_pages)
 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 */
static void wait_entry_unlocked(struct xa_state *xas, void *entry)
{
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
	/*
	 * Unlike get_unlocked_entry() there is no guarantee that this
	 * path ever successfully retrieves an unlocked entry before an
	 * inode dies. Perform a non-exclusive wait in case this path
	 * never successfully performs its own wake up.
	 */
	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
	xas_unlock_irq(xas);
	schedule();
	finish_wait(wq, &ewait.wait);
}

static void put_unlocked_entry(struct xa_state *xas, void *entry,
			       enum dax_wake_mode mode)
{
	if (entry && !dax_is_conflict(entry))
		dax_wake_entry(xas, entry, mode);
}

/*
 * We used the xa_state to get the entry, but then we locked the entry and
 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 * before use.
 */
static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
	void *old;

	BUG_ON(dax_is_locked(entry));
	xas_reset(xas);
	xas_lock_irq(xas);
	old = xas_store(xas, entry);
	xas_unlock_irq(xas);
	BUG_ON(!dax_is_locked(old));
	dax_wake_entry(xas, entry, WAKE_NEXT);
}

/*
 * Return: The entry stored at this location before it was locked.
 */
static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
	unsigned long v = xa_to_value(entry);
	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}
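/*
 * Illustrative usage sketch (not part of the original file), mirroring
 * the pattern used by callers such as __dax_invalidate_entry() below:
 *
 *	xas_lock_irq(&xas);
 *	entry = get_unlocked_entry(&xas, 0);
 *	if (entry)
 *		...			// inspect or modify the entry
 *	put_unlocked_entry(&xas, entry, WAKE_NEXT);
 *	xas_unlock_irq(&xas);
 *
 * Callers that take ownership instead call dax_lock_entry() and later
 * dax_unlock_entry(), which wakes the next waiter.
 */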

static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_end_pfn(void *entry)
{
	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_to_pfn(entry); \
			pfn < dax_end_pfn(entry); pfn++)

static inline bool dax_mapping_is_cow(struct address_space *mapping)
{
	return (unsigned long)mapping == PAGE_MAPPING_DAX_COW;
}

/*
 * Set page->mapping to PAGE_MAPPING_DAX_COW and use page->index as a
 * refcount of the shared mappings.
 */
static inline void dax_mapping_set_cow(struct page *page)
{
	if ((uintptr_t)page->mapping != PAGE_MAPPING_DAX_COW) {
		/*
		 * Reset the index if the page was already mapped
		 * regularly before.
		 */
		if (page->mapping)
			page->index = 1;
		page->mapping = (void *)PAGE_MAPPING_DAX_COW;
	}
	page->index++;
}

/*
 * When called from dax_insert_entry(), the cow flag indicates whether this
 * entry is shared by multiple files.  If so, set page->mapping to
 * PAGE_MAPPING_DAX_COW and use page->index as a refcount.
 */
static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address, bool cow)
{
	unsigned long size = dax_entry_size(entry), pfn, index;
	int i = 0;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (cow) {
			dax_mapping_set_cow(page);
		} else {
			WARN_ON_ONCE(page->mapping);
			page->mapping = mapping;
			page->index = index + i++;
		}
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		if (dax_mapping_is_cow(page->mapping)) {
			/* keep the CoW flag if this page is still shared */
			if (page->index-- > 0)
				continue;
		} else
			WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
		page->index = 0;
	}
}

static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}

/*
 * dax_lock_page - Lock the DAX entry corresponding to a page
 * @page: The page whose entry we want to lock
 *
 * Context: Process context.
 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
 * not be locked.
 */
dax_entry_t dax_lock_page(struct page *page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	/* Ensure page->mapping isn't freed while we look at it */
	rcu_read_lock();
	for (;;) {
		struct address_space *mapping = READ_ONCE(page->mapping);

		entry = NULL;
		if (!mapping || !dax_mapping(mapping))
			break;

		/*
		 * In the device-dax case there's no need to lock, a
		 * struct dev_pagemap pin is sufficient to keep the
		 * inode alive, and we assume we have dev_pagemap pin
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
		 */
		entry = (void *)~0UL;
		if (S_ISCHR(mapping->host->i_mode))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		if (mapping != page->mapping) {
			xas_unlock_irq(&xas);
			continue;
		}
		xas_set(&xas, page->index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		dax_lock_entry(&xas, entry);
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}

void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
	struct address_space *mapping = page->mapping;
	XA_STATE(xas, &mapping->i_pages, page->index);

	if (S_ISCHR(mapping->host->i_mode))
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}
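/*
 * Illustrative usage sketch (not part of the original file), e.g. for a
 * memory-failure style caller that needs page->mapping and page->index
 * to stay stable while it examines a poisoned page:
 *
 *	dax_entry_t cookie = dax_lock_page(page);
 *	if (cookie) {
 *		...	// entry is locked; mapping/index are stable
 *		dax_unlock_page(page, cookie);
 *	}
 */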

/*
 * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping
 * @mapping: the file's mapping whose entry we want to lock
 * @index: the offset within this file
 * @page: output the dax page corresponding to this dax entry
 *
 * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry
 * could not be locked.
 */
dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index,
		struct page **page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	rcu_read_lock();
	for (;;) {
		entry = NULL;
		if (!dax_mapping(mapping))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		xas_set(&xas, index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		if (!entry ||
		    dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
			/*
			 * Because we are looking up the entry via the file's
			 * mapping and index, the entry may not have been
			 * inserted yet, or may be a zero/empty entry.  We
			 * don't treat these as errors, so return a special
			 * value and do not output @page.
			 */
			entry = (void *)~0UL;
		} else {
			*page = pfn_to_page(dax_to_pfn(entry));
			dax_lock_entry(&xas, entry);
		}
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}

void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index,
		dax_entry_t cookie)
{
	XA_STATE(xas, &mapping->i_pages, index);

	if (cookie == ~0UL)
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}

/*
 * Find page cache entry at given index. If it is a DAX entry, return it
 * with the entry locked. If the page cache doesn't contain an entry at
 * that index, add a locked empty entry.
 *
 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return VM_FAULT_FALLBACK.
 * This will happen if there are any PTE entries within the PMD range
 * that we are requesting.
 *
 * We always favor PTE entries over PMD entries. There isn't a flow where we
 * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
 * insertion will fail if it finds any PTE entries already in the tree, and a
 * PTE insertion will cause an existing PMD entry to be unmapped and
 * downgraded to PTE entries.  This happens for both PMD zero pages as
 * well as PMD empty entries.
 *
 * The exception to this downgrade path is for PMD entries that have
 * real storage backing them.  We will leave these real PMD entries in
 * the tree, and PTE writes will simply dirty the entire PMD entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 *
 * On error, this function does not return an ERR_PTR.  Instead it returns
 * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
 * overlap with xarray value entries.
 */
static void *grab_mapping_entry(struct xa_state *xas,
		struct address_space *mapping, unsigned int order)
{
	unsigned long index = xas->xa_index;
	bool pmd_downgrade;	/* splitting PMD entry into PTE entries? */
	void *entry;

retry:
	pmd_downgrade = false;
	xas_lock_irq(xas);
	entry = get_unlocked_entry(xas, order);

	if (entry) {
		if (dax_is_conflict(entry))
			goto fallback;
		if (!xa_is_value(entry)) {
			xas_set_err(xas, -EIO);
			goto out_unlock;
		}

		if (order == 0) {
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	if (pmd_downgrade) {
		/*
		 * Make sure 'entry' remains valid while we drop
		 * the i_pages lock.
		 */
		dax_lock_entry(xas, entry);

		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (dax_is_zero_entry(entry)) {
			xas_unlock_irq(xas);
			unmap_mapping_pages(mapping,
					xas->xa_index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
			xas_reset(xas);
			xas_lock_irq(xas);
		}

		dax_disassociate_entry(entry, mapping, false);
		xas_store(xas, NULL);	/* undo the PMD join */
		dax_wake_entry(xas, entry, WAKE_ALL);
		mapping->nrpages -= PG_PMD_NR;
		entry = NULL;
		xas_set(xas, index);
	}

	if (entry) {
		dax_lock_entry(xas, entry);
	} else {
		unsigned long flags = DAX_EMPTY;

		if (order > 0)
			flags |= DAX_PMD;
		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
		dax_lock_entry(xas, entry);
		if (xas_error(xas))
			goto out_unlock;
		mapping->nrpages += 1UL << order;
	}

out_unlock:
	xas_unlock_irq(xas);
	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
		goto retry;
	if (xas->xa_node == XA_ERROR(-ENOMEM))
		return xa_mk_internal(VM_FAULT_OOM);
	if (xas_error(xas))
		return xa_mk_internal(VM_FAULT_SIGBUS);
	return entry;
fallback:
	xas_unlock_irq(xas);
	return xa_mk_internal(VM_FAULT_FALLBACK);
}
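/*
 * Illustrative note (not part of the original file): callers such as
 * dax_iomap_pte_fault() below distinguish the error encoding with
 * xa_is_internal():
 *
 *	entry = grab_mapping_entry(&xas, mapping, 0);
 *	if (xa_is_internal(entry))
 *		return xa_to_internal(entry);	// a VM_FAULT_* code
 */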

/**
 * dax_layout_busy_page_range - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 * @start: Starting offset. Page containing 'start' is included.
 * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
 *       pages from 'start' till the end of file are included.
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page_range(struct address_space *mapping,
					loff_t start, loff_t end)
{
	void *entry;
	unsigned int scanned = 0;
	struct page *page = NULL;
	pgoff_t start_idx = start >> PAGE_SHIFT;
	pgoff_t end_idx;
	XA_STATE(xas, &mapping->i_pages, start_idx);

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	/* If end == LLONG_MAX, all pages from start to till end of file */
	if (end == LLONG_MAX)
		end_idx = ULONG_MAX;
	else
		end_idx = end >> PAGE_SHIFT;
	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the iteration and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_pages() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, end_idx) {
		if (WARN_ON_ONCE(!xa_is_value(entry)))
			continue;
		if (unlikely(dax_is_locked(entry)))
			entry = get_unlocked_entry(&xas, 0);
		if (entry)
			page = dax_busy_page(entry);
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
		if (page)
			break;
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);

struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);
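/*
 * Illustrative usage sketch (not part of the original file): before
 * truncating or punching a hole, a filesystem typically loops on this
 * helper, dropping locks and waiting for the page's refcount to drop to
 * one before retrying, roughly:
 *
 *	while ((page = dax_layout_busy_page(mapping))) {
 *		...	// wait for page_ref_count(page) == 1
 *	}
 */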

static int __dax_invalidate_entry(struct address_space *mapping,
				  pgoff_t index, bool trunc)
{
	XA_STATE(xas, &mapping->i_pages, index);
	int ret = 0;
	void *entry;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, 0);
	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
		goto out;
	if (!trunc &&
	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	xas_store(&xas, NULL);
	mapping->nrpages -= 1UL << dax_entry_order(entry);
	ret = 1;
out:
	put_unlocked_entry(&xas, entry, WAKE_ALL);
	xas_unlock_irq(&xas);
	return ret;
}

/*
 * Delete DAX entry at @index from @mapping.  Wait for it
 * to be unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * page cache (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen a DAX entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_entry(mapping, index, false);
}

static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
{
	return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset);
}

static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
{
	pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
	void *vto, *kaddr;
	long rc;
	int id;

	id = dax_read_lock();
	rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, DAX_ACCESS,
			       &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(vmf->cow_page);
	copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(const struct iomap_iter *iter,
		struct vm_area_struct *vma)
{
	return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) &&
		(iter->iomap.flags & IOMAP_F_DIRTY);
}

static bool dax_fault_is_cow(const struct iomap_iter *iter)
{
	return (iter->flags & IOMAP_WRITE) &&
		(iter->iomap.flags & IOMAP_F_SHARED);
}
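/*
 * Illustrative note (not part of the original file): IOMAP_F_SHARED is
 * set by the filesystem when the extent is shared (e.g. reflinked), so
 * a write fault on it must allocate a new block and copy the old
 * contents; that is the copy-on-write case handled below.
 */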

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap_iter *iter, void *entry, pfn_t pfn,
		unsigned long flags)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	void *new_entry = dax_make_entry(pfn, flags);
	bool dirty = !dax_fault_is_synchronous(iter, vmf->vma);
	bool cow = dax_fault_is_cow(iter);

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (cow || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) {
		unsigned long index = xas->xa_index;
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, index, 1, false);
	}

	xas_reset(xas);
	xas_lock_irq(xas);
	if (cow || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		void *old;

		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
				cow);
		/*
		 * Only swap our new entry into the page cache if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the cache, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		old = dax_lock_entry(xas, new_entry);
		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
					DAX_LOCKED));
		entry = new_entry;
	} else {
		xas_load(xas);	/* Walk the xa_state */
	}

	if (dirty)
		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);

	if (cow)
		xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);

	xas_unlock_irq(xas);
	return entry;
}

static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
		struct address_space *mapping, void *entry)
{
	unsigned long pfn, index, count, end;
	long ret = 0;
	struct vm_area_struct *vma;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!xa_is_value(entry)))
		return -EIO;

	if (unlikely(dax_is_locked(entry))) {
		void *old_entry = entry;

		entry = get_unlocked_entry(xas, 0);

		/* Entry got punched out / reallocated? */
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			goto put_unlocked;
		/*
		 * Entry got reallocated elsewhere? No need to writeback.
		 * We have to compare pfns as we must not bail out due to
		 * difference in lockbit or entry type.
		 */
		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
			goto put_unlocked;
		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
					dax_is_zero_entry(entry))) {
			ret = -EIO;
			goto put_unlocked;
		}

		/* Another fsync thread may have already done this entry */
		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
			goto put_unlocked;
	}

	/* Lock the entry to serialize with page faults */
	dax_lock_entry(xas, entry);

	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
	xas_unlock_irq(xas);

	/*
	 * If dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we use needs to be
	 * aligned to the start of the PMD.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_to_pfn(entry);
	count = 1UL << dax_entry_order(entry);
	index = xas->xa_index & ~(count - 1);
	end = index + count - 1;

	/* Walk all mappings of a given index of a file and writeprotect them */
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) {
		pfn_mkclean_range(pfn, count, index, vma);
		cond_resched();
	}
	i_mmap_unlock_read(mapping);

	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
	dax_wake_entry(xas, entry, WAKE_NEXT);

	trace_dax_writeback_one(mapping->host, index, count);
	return ret;

 put_unlocked:
	put_unlocked_entry(xas, entry, WAKE_NEXT);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
	struct inode *inode = mapping->host;
	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
	void *entry;
	int ret = 0;
	unsigned int scanned = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	trace_dax_writeback_range(inode, xas.xa_index, end_index);

	tag_pages_for_writeback(mapping, xas.xa_index, end_index);

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
		if (ret < 0) {
			mapping_set_error(mapping, ret);
			break;
		}
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
		size_t size, void **kaddr, pfn_t *pfnp)
{
	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
	int id, rc = 0;
	long length;

	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   DAX_ACCESS, kaddr, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	if (!pfnp)
		goto out_check_addr;
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;

out_check_addr:
	if (!kaddr)
		goto out;
	if (!*kaddr)
		rc = -EFAULT;
out:
	dax_read_unlock(id);
	return rc;
}

/**
 * dax_iomap_cow_copy - Copy the data from source to destination before write
 * @pos: address to do copy from.
 * @length: size of copy operation.
 * @align_size: aligned w.r.t align_size (either PMD_SIZE or PAGE_SIZE)
 * @srcmap: iomap srcmap
 * @daddr: destination address to copy to.
 *
 * This can be called from two places. Either during DAX write fault (page
 * aligned), to copy the length size data to daddr. Or, while doing normal DAX
 * write operation, dax_iomap_actor() might call this to do the copy of either
 * start or end unaligned address. In the latter case the rest of the copy of
 * aligned ranges is taken care by dax_iomap_actor() itself.
 */
static int dax_iomap_cow_copy(loff_t pos, uint64_t length, size_t align_size,
		const struct iomap *srcmap, void *daddr)
{
	loff_t head_off = pos & (align_size - 1);
	size_t size = ALIGN(head_off + length, align_size);
	loff_t end = pos + length;
	loff_t pg_end = round_up(end, align_size);
	bool copy_all = head_off == 0 && end == pg_end;
	void *saddr = NULL;
	int ret = 0;

	ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL);
	if (ret)
		return ret;

	if (copy_all) {
		ret = copy_mc_to_kernel(daddr, saddr, length);
		return ret ? -EIO : 0;
	}

	/* Copy the head part of the range */
	if (head_off) {
		ret = copy_mc_to_kernel(daddr, saddr, head_off);
		if (ret)
			return -EIO;
	}

	/* Copy the tail part of the range */
	if (end < pg_end) {
		loff_t tail_off = head_off + length;
		loff_t tail_len = pg_end - end;

		ret = copy_mc_to_kernel(daddr + tail_off, saddr + tail_off,
					tail_len);
		if (ret)
			return -EIO;
	}
	return 0;
}
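/*
 * Worked example (not part of the original file): for an unaligned
 * write at pos = 0x1100 of length = 0x200 with align_size = PAGE_SIZE
 * (0x1000): head_off = 0x100, end = 0x1300 and pg_end = 0x2000, so the
 * head copy covers file bytes [0x1000, 0x1100) and the tail copy covers
 * [0x1300, 0x2000), leaving the middle [0x1100, 0x1300) for the write
 * itself.
 */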

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap_iter *iter, void **entry)
{
	struct inode *inode = iter->inode;
	unsigned long vaddr = vmf->address;
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
	vm_fault_t ret;

	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);

	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap_iter *iter, void **entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = mapping->host;
	pgtable_t pgtable = NULL;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
				  DAX_PMD | DAX_ZERO_PAGE);

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		mm_inc_nr_ptes(vma->vm_mm);
	}
	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
	return VM_FAULT_NOPAGE;

fallback:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
	return VM_FAULT_FALLBACK;
}
#else
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap_iter *iter, void **entry)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff,
		unsigned int offset, size_t size)
{
	void *kaddr;
	long ret;

	ret = dax_direct_access(dax_dev, pgoff, 1, DAX_ACCESS, &kaddr, NULL);
	if (ret > 0) {
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
	}
	return ret;
}

static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
	const struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	u64 length = iomap_length(iter);
	s64 written = 0;

	/* already zeroed?  we're done. */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		unsigned offset = offset_in_page(pos);
		unsigned size = min_t(u64, PAGE_SIZE - offset, length);
		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
		long rc;
		int id;

		id = dax_read_lock();
		if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
			rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
		else
			rc = dax_memzero(iomap->dax_dev, pgoff, offset, size);
		dax_read_unlock(id);

		if (rc < 0)
			return rc;
		pos += size;
		length -= size;
		written += size;
		if (did_zero)
			*did_zero = true;
	} while (length > 0);

	return written;
}

int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_DAX | IOMAP_ZERO,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = dax_zero_iter(&iter, did_zero);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_zero_range);

int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return dax_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
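/*
 * Illustrative usage sketch (not part of the original file): on a size
 * change, a filesystem zeroes the partial tail block of the new EOF,
 * e.g.
 *
 *	error = dax_truncate_page(inode, newsize, NULL, &fs_iomap_ops);
 *
 * where fs_iomap_ops is a placeholder for the filesystem's own
 * struct iomap_ops.
 */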

static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
		struct iov_iter *iter)
{
	const struct iomap *iomap = &iomi->iomap;
	const struct iomap *srcmap = &iomi->srcmap;
	loff_t length = iomap_length(iomi);
	loff_t pos = iomi->pos;
	struct dax_device *dax_dev = iomap->dax_dev;
	loff_t end = pos + length, done = 0;
	bool write = iov_iter_rw(iter) == WRITE;
	ssize_t ret = 0;
	size_t xfer;
	int id;

	if (!write) {
		end = min(end, i_size_read(iomi->inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	/*
	 * In DAX mode, enforce either pure overwrites of written extents, or
	 * writes to unwritten extents as part of a copy-on-write operation.
	 */
	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED &&
			!(iomap->flags & IOMAP_F_SHARED)))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(iomi->inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
		ssize_t map_len;
		bool recovery = false;
		void *kaddr;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				DAX_ACCESS, &kaddr, NULL);
		if (map_len == -EIO && iov_iter_rw(iter) == WRITE) {
			map_len = dax_direct_access(dax_dev, pgoff,
					PHYS_PFN(size), DAX_RECOVERY_WRITE,
					&kaddr, NULL);
			if (map_len > 0)
				recovery = true;
		}
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		if (write &&
		    srcmap->type != IOMAP_HOLE && srcmap->addr != iomap->addr) {
			ret = dax_iomap_cow_copy(pos, length, PAGE_SIZE, srcmap,
						 kaddr);
			if (ret)
				break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (recovery)
			xfer = dax_recovery_write(dax_dev, pgoff, kaddr,
					map_len, iter);
		else if (write)
			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
					map_len, iter);

		pos += xfer;
		length -= xfer;
		done += xfer;

		if (xfer == 0)
			ret = -EFAULT;
		if (xfer < map_len)
			break;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @iter: The addresses to do I/O from or to
 * @ops: iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct iomap_iter iomi = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(iter),
		.flags		= IOMAP_DAX,
	};
	loff_t done = 0;
	int ret;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_write(&iomi.inode->i_rwsem);
		iomi.flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&iomi.inode->i_rwsem);
	}

	if (iocb->ki_flags & IOCB_NOWAIT)
		iomi.flags |= IOMAP_NOWAIT;

	while ((ret = iomap_iter(&iomi, ops)) > 0)
		iomi.processed = dax_iomap_iter(&iomi, iter);

	done = iomi.pos - iocb->ki_pos;
	iocb->ki_pos = iomi.pos;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
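/*
 * Illustrative usage sketch (not part of the original file): a DAX
 * aware filesystem's ->read_iter()/->write_iter() typically wraps this
 * helper with the inode lock held, e.g.
 *
 *	inode_lock(inode);
 *	ret = dax_iomap_rw(iocb, from, &fs_iomap_ops);
 *	inode_unlock(inode);
 *
 * where fs_iomap_ops is a placeholder for the filesystem's own
 * struct iomap_ops.
 */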

static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	return vmf_error(error);
}

/*
 * When handling a synchronous page fault and the inode needs an fsync, we
 * can insert the PTE/PMD into page tables only after that fsync happened.
 * Skip insertion for now and return the pfn so that the caller can insert it
 * after the fsync is done.
 */
static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
{
	if (WARN_ON_ONCE(!pfnp))
		return VM_FAULT_SIGBUS;
	*pfnp = pfn;
	return VM_FAULT_NEEDDSYNC;
}

static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
		const struct iomap_iter *iter)
{
	vm_fault_t ret;
	int error = 0;

	switch (iter->iomap.type) {
	case IOMAP_HOLE:
	case IOMAP_UNWRITTEN:
		clear_user_highpage(vmf->cow_page, vmf->address);
		break;
	case IOMAP_MAPPED:
		error = copy_cow_page_dax(vmf, iter);
		break;
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

	if (error)
		return dax_fault_return(error);

	__SetPageUptodate(vmf->cow_page);
	ret = finish_fault(vmf);
	if (!ret)
		return VM_FAULT_DONE_COW;
	return ret;
}

/**
 * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault.
 * @vmf: vm fault instance
 * @iter: iomap iter
 * @pfnp: pfn to be returned
 * @xas: the dax mapping tree of a file
 * @entry: an unlocked dax entry to be inserted
 * @pmd: distinguish whether it is a pmd fault
 */
static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
		const struct iomap_iter *iter, pfn_t *pfnp,
		struct xa_state *xas, void **entry, bool pmd)
{
	const struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = &iter->srcmap;
	size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
	loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
	bool write = iter->flags & IOMAP_WRITE;
	unsigned long entry_flags = pmd ? DAX_PMD : 0;
	int err = 0;
	pfn_t pfn;
	void *kaddr;

	if (!pmd && vmf->cow_page)
		return dax_fault_cow_page(vmf, iter);

	/* if we are reading UNWRITTEN and HOLE, return a hole. */
	if (!write &&
	    (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) {
		if (!pmd)
			return dax_load_hole(xas, vmf, iter, entry);
		return dax_pmd_load_hole(xas, vmf, iter, entry);
	}

	if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) {
		WARN_ON_ONCE(1);
		return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
	}

	err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn);
	if (err)
		return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);

	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags);

	if (write &&
	    srcmap->type != IOMAP_HOLE && srcmap->addr != iomap->addr) {
		err = dax_iomap_cow_copy(pos, size, size, srcmap, kaddr);
		if (err)
			return dax_fault_return(err);
	}

	if (dax_fault_is_synchronous(iter, vmf->vma))
		return dax_fault_synchronous_pfnp(pfnp, pfn);

	/* insert PMD pfn */
	if (pmd)
		return vmf_insert_pfn_pmd(vmf, pfn, write);

	/* insert PTE pfn */
	if (write)
		return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
	return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
}

static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       int *iomap_errp, const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
	struct iomap_iter iter = {
		.inode		= mapping->host,
		.pos		= (loff_t)vmf->pgoff << PAGE_SHIFT,
		.len		= PAGE_SIZE,
		.flags		= IOMAP_DAX | IOMAP_FAULT,
	};
	vm_fault_t ret = 0;
	void *entry;
	int error;

	trace_dax_pte_fault(iter.inode, vmf, ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (iter.pos >= i_size_read(iter.inode)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		iter.flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(&xas, mapping, 0);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	while ((error = iomap_iter(&iter, ops)) > 0) {
		if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) {
			iter.processed = -EIO;	/* fs corruption? */
			continue;
		}

		ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false);
		if (ret != VM_FAULT_SIGBUS &&
		    (iter.iomap.flags & IOMAP_F_NEW)) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
			ret |= VM_FAULT_MAJOR;
		}

		if (!(ret & VM_FAULT_ERROR))
			iter.processed = PAGE_SIZE;
	}

	if (iomap_errp)
		*iomap_errp = error;
	if (!ret && error)
		ret = dax_fault_return(error);

unlock_entry:
	dax_unlock_entry(&xas, entry);
out:
	trace_dax_pte_fault_done(iter.inode, vmf, ret);
	return ret;
}
642261ac
RZ
1647
1648#ifdef CONFIG_FS_DAX_PMD
55f81639
SR
1649static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,
1650 pgoff_t max_pgoff)
642261ac 1651{
f4200391 1652 unsigned long pmd_addr = vmf->address & PMD_MASK;
55f81639 1653 bool write = vmf->flags & FAULT_FLAG_WRITE;
642261ac 1654
55f81639
SR
1655 /*
1656 * Make sure that the faulting address's PMD offset (color) matches
1657 * the PMD offset from the start of the file. This is necessary so
1658 * that a PMD range in the page table overlaps exactly with a PMD
1659 * range in the page cache.
1660 */
1661 if ((vmf->pgoff & PG_PMD_COLOUR) !=
1662 ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1663 return true;
642261ac 1664
55f81639
SR
1665 /* Fall back to PTEs if we're going to COW */
1666 if (write && !(vmf->vma->vm_flags & VM_SHARED))
1667 return true;
11cf9d86 1668
55f81639
SR
1669 /* If the PMD would extend outside the VMA */
1670 if (pmd_addr < vmf->vma->vm_start)
1671 return true;
1672 if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
1673 return true;
642261ac 1674
55f81639
SR
1675 /* If the PMD would extend beyond the file size */
1676 if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff)
1677 return true;
653b2ea3 1678
55f81639 1679 return false;
642261ac
RZ
1680}
1681
ab77dab4 1682static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
a2d58167 1683 const struct iomap_ops *ops)
642261ac 1684{
65dd814a 1685 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
b15cd800 1686 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
65dd814a
CH
1687 struct iomap_iter iter = {
1688 .inode = mapping->host,
1689 .len = PMD_SIZE,
952da063 1690 .flags = IOMAP_DAX | IOMAP_FAULT,
65dd814a 1691 };
c2436190 1692 vm_fault_t ret = VM_FAULT_FALLBACK;
b15cd800 1693 pgoff_t max_pgoff;
642261ac 1694 void *entry;
642261ac
RZ
1695 int error;
1696
65dd814a
CH
1697 if (vmf->flags & FAULT_FLAG_WRITE)
1698 iter.flags |= IOMAP_WRITE;
642261ac 1699
282a8e03
RZ
1700 /*
1701 * Check whether the offset isn't beyond the end of the file now. The
1702 * caller is supposed to hold locks serializing us with truncate / punch
1703 * hole, so this is a reliable test.
1704 */
65dd814a 1705 max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE);
fffa281b 1706
65dd814a 1707 trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0);
642261ac 1708
b15cd800 1709 if (xas.xa_index >= max_pgoff) {
c2436190 1710 ret = VM_FAULT_SIGBUS;
282a8e03
RZ
1711 goto out;
1712 }
642261ac 1713
55f81639 1714 if (dax_fault_check_fallback(vmf, &xas, max_pgoff))
642261ac
RZ
1715 goto fallback;
1716
876f2946 1717 /*
b15cd800
MW
1718 * grab_mapping_entry() will make sure we get an empty PMD entry,
1719 * a zero PMD entry, or a DAX PMD entry. If it can't (because a PTE
1720 * entry is already in the array, for instance), it will return
1721 * VM_FAULT_FALLBACK.
876f2946 1722 */
23c84eb7 1723 entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
b15cd800 1724 if (xa_is_internal(entry)) {
c2436190 1725 ret = xa_to_internal(entry);
876f2946 1726 goto fallback;
b15cd800 1727 }
876f2946 1728
e2093926
RZ
1729 /*
1730 * It is possible, particularly with mixed reads & writes to private
1731 * mappings, that we have raced with a PTE fault that overlaps with
1732 * the PMD we need to set up. If so, just return and the fault will be
1733 * retried.
1734 */
1735 if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1736 !pmd_devmap(*vmf->pmd)) {
c2436190 1737 ret = 0;
e2093926
RZ
1738 goto unlock_entry;
1739 }
1740
65dd814a
CH
1741 iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT;
1742 while ((error = iomap_iter(&iter, ops)) > 0) {
1743 if (iomap_length(&iter) < PMD_SIZE)
1744 continue; /* processed == 0 makes iomap_iter() end the loop */
caa51d26 1745
65dd814a
CH
1746 ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true);
1747 if (ret != VM_FAULT_FALLBACK)
1748 iter.processed = PMD_SIZE;
642261ac
RZ
1749 }
1750
c2436190 1751unlock_entry:
b15cd800 1752 dax_unlock_entry(&xas, entry);
c2436190
SR
1753fallback:
1754 if (ret == VM_FAULT_FALLBACK) {
65dd814a 1755 split_huge_pmd(vmf->vma, vmf->pmd, vmf->address);
642261ac
RZ
1756 count_vm_event(THP_FAULT_FALLBACK);
1757 }
282a8e03 1758out:
65dd814a 1759 trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret);
c2436190 1760 return ret;
642261ac 1761}
a2d58167 1762#else
ab77dab4 1763static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
01cddfe9 1764 const struct iomap_ops *ops)
a2d58167
DJ
1765{
1766 return VM_FAULT_FALLBACK;
1767}
642261ac 1768#endif /* CONFIG_FS_DAX_PMD */
a2d58167
DJ
1769
1770/**
1771 * dax_iomap_fault - handle a page fault on a DAX file
1772 * @vmf: The description of the fault
cec04e8c 1773 * @pe_size: Size of the page to fault in
9a0dd422 1774 * @pfnp: PFN to insert for synchronous faults if fsync is required
c0b24625 1775 * @iomap_errp: Storage for detailed error code in case of error
cec04e8c 1776 * @ops: Iomap ops passed from the file system
a2d58167
DJ
1777 *
1778 * When a page fault occurs, filesystems may call this helper in
1779 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1780 * has done all the necessary locking for the page fault to proceed
1781 * successfully.
1782 */
ab77dab4 1783vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
c0b24625 1784 pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
a2d58167 1785{
c791ace1
DJ
1786 switch (pe_size) {
1787 case PE_SIZE_PTE:
c0b24625 1788 return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
c791ace1 1789 case PE_SIZE_PMD:
9a0dd422 1790 return dax_iomap_pmd_fault(vmf, pfnp, ops);
a2d58167
DJ
1791 default:
1792 return VM_FAULT_FALLBACK;
1793 }
1794}
1795EXPORT_SYMBOL_GPL(dax_iomap_fault);
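/*
 * A minimal caller sketch (illustrative, not part of this file): a typical
 * filesystem routes both its ->fault and ->huge_fault handlers through
 * dax_iomap_fault(), holding a lock that serializes against truncate.
 * The handler name and "example_iomap_ops" are assumptions; real users
 * (ext4, xfs) supply their own iomap_ops and locking.
 */
static vm_fault_t example_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pfn_t pfn;
	vm_fault_t ret;

	/* Serialize against truncate / punch hole, as required above. */
	filemap_invalidate_lock_shared(mapping);
	ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &example_iomap_ops);
	filemap_invalidate_unlock_shared(mapping);

	return ret;
}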
71eab6df 1796
a77d19f4 1797/*
71eab6df
JK
1798 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1799 * @vmf: The description of the fault
71eab6df 1800 * @pfn: PFN to insert
cfc93c6c 1801 * @order: Order of entry to insert.
71eab6df 1802 *
a77d19f4
MW
1803 * This function inserts a writeable PTE or PMD entry into the page tables
1804 * for an mmapped DAX file. It also marks the page cache entry as dirty.
71eab6df 1805 */
cfc93c6c
MW
1806static vm_fault_t
1807dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
71eab6df
JK
1808{
1809 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
cfc93c6c
MW
1810 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
1811 void *entry;
ab77dab4 1812 vm_fault_t ret;
71eab6df 1813
cfc93c6c 1814 xas_lock_irq(&xas);
23c84eb7 1815 entry = get_unlocked_entry(&xas, order);
71eab6df 1816 /* Did we race with someone splitting the entry or similar? */
23c84eb7
MWO
1817 if (!entry || dax_is_conflict(entry) ||
1818 (order == 0 && !dax_is_pte_entry(entry))) {
4c3d043d 1819 put_unlocked_entry(&xas, entry, WAKE_NEXT);
cfc93c6c 1820 xas_unlock_irq(&xas);
71eab6df
JK
1821 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1822 VM_FAULT_NOPAGE);
1823 return VM_FAULT_NOPAGE;
1824 }
cfc93c6c
MW
1825 xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
1826 dax_lock_entry(&xas, entry);
1827 xas_unlock_irq(&xas);
1828 if (order == 0)
ab77dab4 1829 ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
71eab6df 1830#ifdef CONFIG_FS_DAX_PMD
cfc93c6c 1831 else if (order == PMD_ORDER)
fce86ff5 1832 ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
71eab6df 1833#endif
cfc93c6c 1834 else
ab77dab4 1835 ret = VM_FAULT_FALLBACK;
cfc93c6c 1836 dax_unlock_entry(&xas, entry);
ab77dab4
SJ
1837 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
1838 return ret;
71eab6df
JK
1839}
1840
1841/**
1842 * dax_finish_sync_fault - finish synchronous page fault
1843 * @vmf: The description of the fault
1844 * @pe_size: Size of entry to be inserted
1845 * @pfn: PFN to insert
1846 *
1847 * This function ensures that the file range touched by the page fault is
1848 * stored persistently on the media and handles inserting of appropriate page
1849 * table entry.
1850 */
ab77dab4
SJ
1851vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
1852 enum page_entry_size pe_size, pfn_t pfn)
71eab6df
JK
1853{
1854 int err;
1855 loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
cfc93c6c
MW
1856 unsigned int order = pe_order(pe_size);
1857 size_t len = PAGE_SIZE << order;
71eab6df 1858
71eab6df
JK
1859 err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
1860 if (err)
1861 return VM_FAULT_SIGBUS;
cfc93c6c 1862 return dax_insert_pfn_mkwrite(vmf, pfn, order);
71eab6df
JK
1863}
1864EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
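/*
 * A usage sketch for the synchronous-fault path (illustrative): a write
 * fault on a MAP_SYNC mapping comes back from dax_iomap_fault() with
 * VM_FAULT_NEEDDSYNC set, and the filesystem must then persist the range
 * and install the writeable entry via dax_finish_sync_fault(). The handler
 * and iomap_ops names are assumptions; truncate serialization is elided
 * for brevity.
 */
static vm_fault_t example_dax_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	pfn_t pfn;
	vm_fault_t ret;

	ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &example_iomap_ops);
	if (ret & VM_FAULT_NEEDDSYNC)
		/* fsyncs the faulted range, then maps the PTE/PMD writeable */
		ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	return ret;
}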