1 /*
2  * fs/dax.c - Direct Access filesystem code
3  * Copyright (c) 2013-2014 Intel Corporation
4  * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
5  * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  */
16
17 #include <linux/atomic.h>
18 #include <linux/blkdev.h>
19 #include <linux/buffer_head.h>
20 #include <linux/dax.h>
21 #include <linux/fs.h>
22 #include <linux/genhd.h>
23 #include <linux/highmem.h>
24 #include <linux/memcontrol.h>
25 #include <linux/mm.h>
26 #include <linux/mutex.h>
27 #include <linux/pagevec.h>
28 #include <linux/sched.h>
29 #include <linux/sched/signal.h>
30 #include <linux/uio.h>
31 #include <linux/vmstat.h>
32 #include <linux/pfn_t.h>
33 #include <linux/sizes.h>
34 #include <linux/mmu_notifier.h>
35 #include <linux/iomap.h>
36 #include "internal.h"
37
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/fs_dax.h>
40
41 /* We choose 4096 entries - same as per-zone page wait tables */
42 #define DAX_WAIT_TABLE_BITS 12
43 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
44
45 /* The 'colour' (i.e. low bits) within a PMD of a page offset.  */
46 #define PG_PMD_COLOUR   ((PMD_SIZE >> PAGE_SHIFT) - 1)
47 #define PG_PMD_NR       (PMD_SIZE >> PAGE_SHIFT)
48
49 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
50
51 static int __init init_dax_wait_table(void)
52 {
53         int i;
54
55         for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
56                 init_waitqueue_head(wait_table + i);
57         return 0;
58 }
59 fs_initcall(init_dax_wait_table);
60
61 /*
62  * We use the lowest available bit in the exceptional entry for locking, one
63  * bit for the entry size (PMD) and two more to tell us if the entry is a zero
64  * page or an empty entry that is just used for locking.  In total four special bits.
65  *
66  * If the PMD bit isn't set, the entry has size PAGE_SIZE, and if the ZERO_PAGE
67  * and EMPTY bits aren't set, the entry is a normal DAX entry with a filesystem
68  * block allocation.
69  */
70 #define RADIX_DAX_SHIFT         (RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
71 #define RADIX_DAX_ENTRY_LOCK    (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
72 #define RADIX_DAX_PMD           (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
73 #define RADIX_DAX_ZERO_PAGE     (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
74 #define RADIX_DAX_EMPTY         (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
75
76 static unsigned long dax_radix_pfn(void *entry)
77 {
78         return (unsigned long)entry >> RADIX_DAX_SHIFT;
79 }
80
81 static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags)
82 {
83         return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
84                         (pfn << RADIX_DAX_SHIFT) | RADIX_DAX_ENTRY_LOCK);
85 }
86
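/*
 * A worked example of the encoding above (assuming the usual
 * RADIX_TREE_EXCEPTIONAL_SHIFT of 2, so RADIX_DAX_SHIFT is 6): a locked,
 * block-backed PMD entry for pfn 0x1000 would be built as
 *
 *	dax_radix_locked_entry(0x1000, RADIX_DAX_PMD)
 *		== (0x1000 << 6) | RADIX_DAX_PMD | RADIX_DAX_ENTRY_LOCK
 *		   | RADIX_TREE_EXCEPTIONAL_ENTRY
 *
 * i.e. the pfn always lives above the DAX flag bits, which is what lets
 * dax_radix_pfn() recover it with a plain shift.
 */
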
87 static unsigned int dax_radix_order(void *entry)
88 {
89         if ((unsigned long)entry & RADIX_DAX_PMD)
90                 return PMD_SHIFT - PAGE_SHIFT;
91         return 0;
92 }
93
94 static int dax_is_pmd_entry(void *entry)
95 {
96         return (unsigned long)entry & RADIX_DAX_PMD;
97 }
98
99 static int dax_is_pte_entry(void *entry)
100 {
101         return !((unsigned long)entry & RADIX_DAX_PMD);
102 }
103
104 static int dax_is_zero_entry(void *entry)
105 {
106         return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
107 }
108
109 static int dax_is_empty_entry(void *entry)
110 {
111         return (unsigned long)entry & RADIX_DAX_EMPTY;
112 }
113
114 /*
115  * DAX radix tree locking
116  */
117 struct exceptional_entry_key {
118         struct address_space *mapping;
119         pgoff_t entry_start;
120 };
121
122 struct wait_exceptional_entry_queue {
123         wait_queue_entry_t wait;
124         struct exceptional_entry_key key;
125 };
126
127 static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
128                 pgoff_t index, void *entry, struct exceptional_entry_key *key)
129 {
130         unsigned long hash;
131
132         /*
133          * If 'entry' is a PMD, align the 'index' that we use for the wait
134          * queue to the start of that PMD.  This ensures that all offsets in
135          * the range covered by the PMD map to the same bit lock.
136          */
137         if (dax_is_pmd_entry(entry))
138                 index &= ~PG_PMD_COLOUR;
139
140         key->mapping = mapping;
141         key->entry_start = index;
142
143         hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
144         return wait_table + hash;
145 }
146
147 static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
148                                        int sync, void *keyp)
149 {
150         struct exceptional_entry_key *key = keyp;
151         struct wait_exceptional_entry_queue *ewait =
152                 container_of(wait, struct wait_exceptional_entry_queue, wait);
153
154         if (key->mapping != ewait->key.mapping ||
155             key->entry_start != ewait->key.entry_start)
156                 return 0;
157         return autoremove_wake_function(wait, mode, sync, NULL);
158 }
159
160 /*
161  * @entry may no longer be the entry at the index in the mapping.
162  * The important information it's conveying is whether the entry at
163  * this index used to be a PMD entry.
164  */
165 static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
166                 pgoff_t index, void *entry, bool wake_all)
167 {
168         struct exceptional_entry_key key;
169         wait_queue_head_t *wq;
170
171         wq = dax_entry_waitqueue(mapping, index, entry, &key);
172
173         /*
174          * Checking for locked entry and prepare_to_wait_exclusive() happens
175          * under the i_pages lock, ditto for entry handling in our callers.
176          * So at this point all tasks that could have seen our entry locked
177          * must be in the waitqueue and the following check will see them.
178          */
179         if (waitqueue_active(wq))
180                 __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
181 }
182
183 /*
184  * Check whether the given slot is locked.  Must be called with the i_pages
185  * lock held.
186  */
187 static inline int slot_locked(struct address_space *mapping, void **slot)
188 {
189         unsigned long entry = (unsigned long)
190                 radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
191         return entry & RADIX_DAX_ENTRY_LOCK;
192 }
193
194 /*
195  * Mark the given slot as locked.  Must be called with the i_pages lock held.
196  */
197 static inline void *lock_slot(struct address_space *mapping, void **slot)
198 {
199         unsigned long entry = (unsigned long)
200                 radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
201
202         entry |= RADIX_DAX_ENTRY_LOCK;
203         radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
204         return (void *)entry;
205 }
206
207 /*
208  * Mark the given slot as unlocked.  Must be called with the i_pages lock held.
209  */
210 static inline void *unlock_slot(struct address_space *mapping, void **slot)
211 {
212         unsigned long entry = (unsigned long)
213                 radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
214
215         entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
216         radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
217         return (void *)entry;
218 }
219
220 /*
221  * Look up the entry in the radix tree and, if it is an exceptional entry,
222  * wait for it to become unlocked before returning it. The caller must call
223  * put_unlocked_mapping_entry() if it decides not to lock the entry, or
224  * put_locked_mapping_entry() once it has locked the entry and wants to
225  * unlock it again.
226  *
227  * Must be called with the i_pages lock held.
228  */
229 static void *get_unlocked_mapping_entry(struct address_space *mapping,
230                                         pgoff_t index, void ***slotp)
231 {
232         void *entry, **slot;
233         struct wait_exceptional_entry_queue ewait;
234         wait_queue_head_t *wq;
235
236         init_wait(&ewait.wait);
237         ewait.wait.func = wake_exceptional_entry_func;
238
239         for (;;) {
240                 entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
241                                           &slot);
242                 if (!entry ||
243                     WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
244                     !slot_locked(mapping, slot)) {
245                         if (slotp)
246                                 *slotp = slot;
247                         return entry;
248                 }
249
250                 wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
251                 prepare_to_wait_exclusive(wq, &ewait.wait,
252                                           TASK_UNINTERRUPTIBLE);
253                 xa_unlock_irq(&mapping->i_pages);
254                 schedule();
255                 finish_wait(wq, &ewait.wait);
256                 xa_lock_irq(&mapping->i_pages);
257         }
258 }
259
260 static void dax_unlock_mapping_entry(struct address_space *mapping,
261                                      pgoff_t index)
262 {
263         void *entry, **slot;
264
265         xa_lock_irq(&mapping->i_pages);
266         entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot);
267         if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
268                          !slot_locked(mapping, slot))) {
269                 xa_unlock_irq(&mapping->i_pages);
270                 return;
271         }
272         unlock_slot(mapping, slot);
273         xa_unlock_irq(&mapping->i_pages);
274         dax_wake_mapping_entry_waiter(mapping, index, entry, false);
275 }
276
277 static void put_locked_mapping_entry(struct address_space *mapping,
278                 pgoff_t index)
279 {
280         dax_unlock_mapping_entry(mapping, index);
281 }
282
283 /*
284  * Called when we are done with a radix tree entry we looked up via
285  * get_unlocked_mapping_entry() and which we didn't lock in the end.
286  */
287 static void put_unlocked_mapping_entry(struct address_space *mapping,
288                                        pgoff_t index, void *entry)
289 {
290         if (!entry)
291                 return;
292
293         /* We have to wake up next waiter for the radix tree entry lock */
294         dax_wake_mapping_entry_waiter(mapping, index, entry, false);
295 }
296
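/*
 * A minimal sketch of how the helpers above are meant to be combined (the
 * predicate "want_to_modify" is hypothetical; real callers such as
 * dax_writeback_one() do their own checks):
 *
 *	xa_lock_irq(&mapping->i_pages);
 *	entry = get_unlocked_mapping_entry(mapping, index, &slot);
 *	if (entry && want_to_modify(entry)) {
 *		entry = lock_slot(mapping, slot);
 *		xa_unlock_irq(&mapping->i_pages);
 *		...			// work on the locked entry
 *		put_locked_mapping_entry(mapping, index);
 *	} else {
 *		put_unlocked_mapping_entry(mapping, index, entry);
 *		xa_unlock_irq(&mapping->i_pages);
 *	}
 */
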
297 static unsigned long dax_entry_size(void *entry)
298 {
299         if (dax_is_zero_entry(entry))
300                 return 0;
301         else if (dax_is_empty_entry(entry))
302                 return 0;
303         else if (dax_is_pmd_entry(entry))
304                 return PMD_SIZE;
305         else
306                 return PAGE_SIZE;
307 }
308
309 static unsigned long dax_radix_end_pfn(void *entry)
310 {
311         return dax_radix_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
312 }
313
314 /*
315  * Iterate through all mapped pfns represented by an entry, i.e. skip
316  * 'empty' and 'zero' entries.
317  */
318 #define for_each_mapped_pfn(entry, pfn) \
319         for (pfn = dax_radix_pfn(entry); \
320                         pfn < dax_radix_end_pfn(entry); pfn++)
321
322 static void dax_associate_entry(void *entry, struct address_space *mapping)
323 {
324         unsigned long pfn;
325
326         if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
327                 return;
328
329         for_each_mapped_pfn(entry, pfn) {
330                 struct page *page = pfn_to_page(pfn);
331
332                 WARN_ON_ONCE(page->mapping);
333                 page->mapping = mapping;
334         }
335 }
336
337 static void dax_disassociate_entry(void *entry, struct address_space *mapping,
338                 bool trunc)
339 {
340         unsigned long pfn;
341
342         if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
343                 return;
344
345         for_each_mapped_pfn(entry, pfn) {
346                 struct page *page = pfn_to_page(pfn);
347
348                 WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
349                 WARN_ON_ONCE(page->mapping && page->mapping != mapping);
350                 page->mapping = NULL;
351         }
352 }
353
354 static struct page *dax_busy_page(void *entry)
355 {
356         unsigned long pfn;
357
358         for_each_mapped_pfn(entry, pfn) {
359                 struct page *page = pfn_to_page(pfn);
360
361                 if (page_ref_count(page) > 1)
362                         return page;
363         }
364         return NULL;
365 }
366
367 /*
368  * Find the radix tree entry at the given index. If it points to an
369  * exceptional entry, return it with the radix tree entry locked. If the
370  * radix tree doesn't contain the given index, create an empty exceptional
371  * entry for the index and return it locked.
372  *
373  * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
374  * either return that locked entry or will return an error.  This error will
375  * happen if there are any 4k entries within the 2MiB range that we are
376  * requesting.
377  *
378  * We always favor 4k entries over 2MiB entries. There isn't a flow where we
379  * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
380  * insertion will fail if it finds any 4k entries already in the tree, and a
381  * 4k insertion will cause an existing 2MiB entry to be unmapped and
382  * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
383  * well as 2MiB empty entries.
384  *
385  * The exception to this downgrade path is for 2MiB DAX PMD entries that have
386  * real storage backing them.  We will leave these real 2MiB DAX entries in
387  * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
388  *
389  * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
390  * persistent memory the benefit is doubtful. We can add that later if we can
391  * show it helps.
392  */
393 static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
394                 unsigned long size_flag)
395 {
396         bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
397         void *entry, **slot;
398
399 restart:
400         xa_lock_irq(&mapping->i_pages);
401         entry = get_unlocked_mapping_entry(mapping, index, &slot);
402
403         if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
404                 entry = ERR_PTR(-EIO);
405                 goto out_unlock;
406         }
407
408         if (entry) {
409                 if (size_flag & RADIX_DAX_PMD) {
410                         if (dax_is_pte_entry(entry)) {
411                                 put_unlocked_mapping_entry(mapping, index,
412                                                 entry);
413                                 entry = ERR_PTR(-EEXIST);
414                                 goto out_unlock;
415                         }
416                 } else { /* trying to grab a PTE entry */
417                         if (dax_is_pmd_entry(entry) &&
418                             (dax_is_zero_entry(entry) ||
419                              dax_is_empty_entry(entry))) {
420                                 pmd_downgrade = true;
421                         }
422                 }
423         }
424
425         /* No entry for given index? Make sure radix tree is big enough. */
426         if (!entry || pmd_downgrade) {
427                 int err;
428
429                 if (pmd_downgrade) {
430                         /*
431                          * Make sure 'entry' remains valid while we drop
432                          * the i_pages lock.
433                          */
434                         entry = lock_slot(mapping, slot);
435                 }
436
437                 xa_unlock_irq(&mapping->i_pages);
438                 /*
439                  * Besides huge zero pages, the only other things that get
440                  * downgraded are empty entries, which don't need to be
441                  * unmapped.
442                  */
443                 if (pmd_downgrade && dax_is_zero_entry(entry))
444                         unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
445                                                         PG_PMD_NR, false);
446
447                 err = radix_tree_preload(
448                                 mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
449                 if (err) {
450                         if (pmd_downgrade)
451                                 put_locked_mapping_entry(mapping, index);
452                         return ERR_PTR(err);
453                 }
454                 xa_lock_irq(&mapping->i_pages);
455
456                 if (!entry) {
457                         /*
458                          * We needed to drop the i_pages lock while calling
459                          * radix_tree_preload() and we didn't have an entry to
460                          * lock.  See if another thread inserted an entry at
461                          * our index during this time.
462                          */
463                         entry = __radix_tree_lookup(&mapping->i_pages, index,
464                                         NULL, &slot);
465                         if (entry) {
466                                 radix_tree_preload_end();
467                                 xa_unlock_irq(&mapping->i_pages);
468                                 goto restart;
469                         }
470                 }
471
472                 if (pmd_downgrade) {
473                         dax_disassociate_entry(entry, mapping, false);
474                         radix_tree_delete(&mapping->i_pages, index);
475                         mapping->nrexceptional--;
476                         dax_wake_mapping_entry_waiter(mapping, index, entry,
477                                         true);
478                 }
479
480                 entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);
481
482                 err = __radix_tree_insert(&mapping->i_pages, index,
483                                 dax_radix_order(entry), entry);
484                 radix_tree_preload_end();
485                 if (err) {
486                         xa_unlock_irq(&mapping->i_pages);
487                         /*
488                          * Our insertion of a DAX entry failed, most likely
489                          * because we were inserting a PMD entry and it
490                          * collided with a PTE sized entry at a different
491                          * index in the PMD range.  We haven't inserted
492                          * anything into the radix tree and have no waiters to
493                          * wake.
494                          */
495                         return ERR_PTR(err);
496                 }
497                 /* Good, we have inserted empty locked entry into the tree. */
498                 mapping->nrexceptional++;
499                 xa_unlock_irq(&mapping->i_pages);
500                 return entry;
501         }
502         entry = lock_slot(mapping, slot);
503  out_unlock:
504         xa_unlock_irq(&mapping->i_pages);
505         return entry;
506 }
507
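/*
 * Callers pass either 0 (for a PTE-sized entry, as the PTE fault path below
 * does) or RADIX_DAX_PMD as @size_flag, e.g.:
 *
 *	entry = grab_mapping_entry(mapping, vmf->pgoff, RADIX_DAX_PMD);
 *	if (IS_ERR(entry))
 *		goto fallback;
 */
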
508 /**
509  * dax_layout_busy_page - find first pinned page in @mapping
510  * @mapping: address space to scan for a page with ref count > 1
511  *
512  * DAX requires ZONE_DEVICE mapped pages. These pages are never
513  * 'onlined' to the page allocator so they are considered idle when
514  * page->count == 1. A filesystem uses this interface to determine if
515  * any page in the mapping is busy, i.e. for DMA, or other
516  * get_user_pages() usages.
517  *
518  * It is expected that the filesystem is holding locks to block the
519  * establishment of new mappings in this address_space. I.e. it expects
520  * to be able to run unmap_mapping_range() and subsequently not race
521  * mapping_mapped() becoming true.
522  */
523 struct page *dax_layout_busy_page(struct address_space *mapping)
524 {
525         pgoff_t indices[PAGEVEC_SIZE];
526         struct page *page = NULL;
527         struct pagevec pvec;
528         pgoff_t index, end;
529         unsigned i;
530
531         /*
532          * In the 'limited' case get_user_pages() for dax is disabled.
533          */
534         if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
535                 return NULL;
536
537         if (!dax_mapping(mapping) || !mapping_mapped(mapping))
538                 return NULL;
539
540         pagevec_init(&pvec);
541         index = 0;
542         end = -1;
543
544         /*
545          * If we race get_user_pages_fast() here either we'll see the
546          * elevated page count in the pagevec_lookup and wait, or
547          * get_user_pages_fast() will see that the page it took a reference
548          * against is no longer mapped in the page tables and bail to the
549          * get_user_pages() slow path.  The slow path is protected by
550          * pte_lock() and pmd_lock(). New references are not taken without
551          * holding those locks, and unmap_mapping_range() will not zero the
552          * pte or pmd without holding the respective lock, so we are
553          * guaranteed to either see new references or prevent new
554          * references from being established.
555          */
556         unmap_mapping_range(mapping, 0, 0, 1);
557
558         while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
559                                 min(end - index, (pgoff_t)PAGEVEC_SIZE),
560                                 indices)) {
561                 for (i = 0; i < pagevec_count(&pvec); i++) {
562                         struct page *pvec_ent = pvec.pages[i];
563                         void *entry;
564
565                         index = indices[i];
566                         if (index >= end)
567                                 break;
568
569                         if (WARN_ON_ONCE(
570                              !radix_tree_exceptional_entry(pvec_ent)))
571                                 continue;
572
573                         xa_lock_irq(&mapping->i_pages);
574                         entry = get_unlocked_mapping_entry(mapping, index, NULL);
575                         if (entry)
576                                 page = dax_busy_page(entry);
577                         put_unlocked_mapping_entry(mapping, index, entry);
578                         xa_unlock_irq(&mapping->i_pages);
579                         if (page)
580                                 break;
581                 }
582
583                 /*
584                  * We don't expect normal struct page entries to exist in our
585                  * tree, but we keep these pagevec calls so that this code is
586                  * consistent with the common pattern for handling pagevecs
587                  * throughout the kernel.
588                  */
589                 pagevec_remove_exceptionals(&pvec);
590                 pagevec_release(&pvec);
591                 index++;
592
593                 if (page)
594                         break;
595         }
596         return page;
597 }
598 EXPORT_SYMBOL_GPL(dax_layout_busy_page);
599
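/*
 * A rough sketch of the intended caller pattern (modelled on how a
 * filesystem would break DAX layouts before truncate; the retry loop and
 * the wait primitive are left to the caller):
 *
 *	do {
 *		struct page *page = dax_layout_busy_page(mapping);
 *
 *		if (!page)
 *			break;
 *		// wait for page_ref_count(page) to drop back to 1, then retry
 *	} while (1);
 */
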
600 static int __dax_invalidate_mapping_entry(struct address_space *mapping,
601                                           pgoff_t index, bool trunc)
602 {
603         int ret = 0;
604         void *entry;
605         struct radix_tree_root *pages = &mapping->i_pages;
606
607         xa_lock_irq(pages);
608         entry = get_unlocked_mapping_entry(mapping, index, NULL);
609         if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
610                 goto out;
611         if (!trunc &&
612             (radix_tree_tag_get(pages, index, PAGECACHE_TAG_DIRTY) ||
613              radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE)))
614                 goto out;
615         dax_disassociate_entry(entry, mapping, trunc);
616         radix_tree_delete(pages, index);
617         mapping->nrexceptional--;
618         ret = 1;
619 out:
620         put_unlocked_mapping_entry(mapping, index, entry);
621         xa_unlock_irq(pages);
622         return ret;
623 }
624 /*
625  * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
626  * entry to get unlocked before deleting it.
627  */
628 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
629 {
630         int ret = __dax_invalidate_mapping_entry(mapping, index, true);
631
632         /*
633          * This gets called from the truncate / punch_hole path. As such, the
634          * caller must hold locks protecting against concurrent modifications of
635          * the radix tree (usually the fs-private i_mmap_sem for writing). Since
636          * the caller has seen an exceptional entry for this index, we had better
637          * find it at that index as well...
638          */
639         WARN_ON_ONCE(!ret);
640         return ret;
641 }
642
643 /*
644  * Invalidate exceptional DAX entry if it is clean.
645  */
646 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
647                                       pgoff_t index)
648 {
649         return __dax_invalidate_mapping_entry(mapping, index, false);
650 }
651
652 static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
653                 sector_t sector, size_t size, struct page *to,
654                 unsigned long vaddr)
655 {
656         void *vto, *kaddr;
657         pgoff_t pgoff;
658         pfn_t pfn;
659         long rc;
660         int id;
661
662         rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
663         if (rc)
664                 return rc;
665
666         id = dax_read_lock();
667         rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
668         if (rc < 0) {
669                 dax_read_unlock(id);
670                 return rc;
671         }
672         vto = kmap_atomic(to);
673         copy_user_page(vto, (void __force *)kaddr, vaddr, to);
674         kunmap_atomic(vto);
675         dax_read_unlock(id);
676         return 0;
677 }
678
679 /*
680  * By this point grab_mapping_entry() has ensured that we have a locked entry
681  * of the appropriate size so we don't have to worry about downgrading PMDs to
682  * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
683  * already in the tree, we will skip the insertion and just dirty the PMD as
684  * appropriate.
685  */
686 static void *dax_insert_mapping_entry(struct address_space *mapping,
687                                       struct vm_fault *vmf,
688                                       void *entry, pfn_t pfn_t,
689                                       unsigned long flags, bool dirty)
690 {
691         struct radix_tree_root *pages = &mapping->i_pages;
692         unsigned long pfn = pfn_t_to_pfn(pfn_t);
693         pgoff_t index = vmf->pgoff;
694         void *new_entry;
695
696         if (dirty)
697                 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
698
699         if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
700                 /* we are replacing a zero page with block mapping */
701                 if (dax_is_pmd_entry(entry))
702                         unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
703                                                         PG_PMD_NR, false);
704                 else /* pte entry */
705                         unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
706         }
707
708         xa_lock_irq(pages);
709         new_entry = dax_radix_locked_entry(pfn, flags);
710         if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
711                 dax_disassociate_entry(entry, mapping, false);
712                 dax_associate_entry(new_entry, mapping);
713         }
714
715         if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
716                 /*
717                  * Only swap our new entry into the radix tree if the current
718                  * entry is a zero page or an empty entry.  If a normal PTE or
719                  * PMD entry is already in the tree, we leave it alone.  This
720                  * means that if we are trying to insert a PTE and the
721                  * existing entry is a PMD, we will just leave the PMD in the
722                  * tree and dirty it if necessary.
723                  */
724                 struct radix_tree_node *node;
725                 void **slot;
726                 void *ret;
727
728                 ret = __radix_tree_lookup(pages, index, &node, &slot);
729                 WARN_ON_ONCE(ret != entry);
730                 __radix_tree_replace(pages, node, slot,
731                                      new_entry, NULL);
732                 entry = new_entry;
733         }
734
735         if (dirty)
736                 radix_tree_tag_set(pages, index, PAGECACHE_TAG_DIRTY);
737
738         xa_unlock_irq(pages);
739         return entry;
740 }
741
742 static inline unsigned long
743 pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
744 {
745         unsigned long address;
746
747         address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
748         VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
749         return address;
750 }
751
752 /* Walk all mappings of a given index of a file and writeprotect them */
753 static void dax_mapping_entry_mkclean(struct address_space *mapping,
754                                       pgoff_t index, unsigned long pfn)
755 {
756         struct vm_area_struct *vma;
757         pte_t pte, *ptep = NULL;
758         pmd_t *pmdp = NULL;
759         spinlock_t *ptl;
760
761         i_mmap_lock_read(mapping);
762         vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
763                 unsigned long address, start, end;
764
765                 cond_resched();
766
767                 if (!(vma->vm_flags & VM_SHARED))
768                         continue;
769
770                 address = pgoff_address(index, vma);
771
772                 /*
773                  * Note because we provide start/end to follow_pte_pmd it will
774                  * call mmu_notifier_invalidate_range_start() on our behalf
775                  * before taking any lock.
776                  */
777                 if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
778                         continue;
779
780                 /*
781                  * No need to call mmu_notifier_invalidate_range() as we are
782                  * downgrading page table protection not changing it to point
783                  * to a new page.
784                  *
785                  * See Documentation/vm/mmu_notifier.rst
786                  */
787                 if (pmdp) {
788 #ifdef CONFIG_FS_DAX_PMD
789                         pmd_t pmd;
790
791                         if (pfn != pmd_pfn(*pmdp))
792                                 goto unlock_pmd;
793                         if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
794                                 goto unlock_pmd;
795
796                         flush_cache_page(vma, address, pfn);
797                         pmd = pmdp_huge_clear_flush(vma, address, pmdp);
798                         pmd = pmd_wrprotect(pmd);
799                         pmd = pmd_mkclean(pmd);
800                         set_pmd_at(vma->vm_mm, address, pmdp, pmd);
801 unlock_pmd:
802 #endif
803                         spin_unlock(ptl);
804                 } else {
805                         if (pfn != pte_pfn(*ptep))
806                                 goto unlock_pte;
807                         if (!pte_dirty(*ptep) && !pte_write(*ptep))
808                                 goto unlock_pte;
809
810                         flush_cache_page(vma, address, pfn);
811                         pte = ptep_clear_flush(vma, address, ptep);
812                         pte = pte_wrprotect(pte);
813                         pte = pte_mkclean(pte);
814                         set_pte_at(vma->vm_mm, address, ptep, pte);
815 unlock_pte:
816                         pte_unmap_unlock(ptep, ptl);
817                 }
818
819                 mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
820         }
821         i_mmap_unlock_read(mapping);
822 }
823
824 static int dax_writeback_one(struct dax_device *dax_dev,
825                 struct address_space *mapping, pgoff_t index, void *entry)
826 {
827         struct radix_tree_root *pages = &mapping->i_pages;
828         void *entry2, **slot;
829         unsigned long pfn;
830         long ret = 0;
831         size_t size;
832
833         /*
834          * A page got tagged dirty in DAX mapping? Something is seriously
835          * wrong.
836          */
837         if (WARN_ON(!radix_tree_exceptional_entry(entry)))
838                 return -EIO;
839
840         xa_lock_irq(pages);
841         entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
842         /* Entry got punched out / reallocated? */
843         if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
844                 goto put_unlocked;
845         /*
846          * Entry got reallocated elsewhere? No need to write it back. We have to
847          * compare pfns as we must not bail out due to a difference in the lock
848          * bit or entry type.
849          */
850         if (dax_radix_pfn(entry2) != dax_radix_pfn(entry))
851                 goto put_unlocked;
852         if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
853                                 dax_is_zero_entry(entry))) {
854                 ret = -EIO;
855                 goto put_unlocked;
856         }
857
858         /* Another fsync thread may have already written back this entry */
859         if (!radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE))
860                 goto put_unlocked;
861         /* Lock the entry to serialize with page faults */
862         entry = lock_slot(mapping, slot);
863         /*
864          * We can clear the tag now but we have to be careful so that concurrent
865          * dax_writeback_one() calls for the same index cannot finish before we
866          * actually flush the caches. This is achieved as the calls will look
867          * at the entry only under the i_pages lock and once they do that
868          * they will see the entry locked and wait for it to unlock.
869          */
870         radix_tree_tag_clear(pages, index, PAGECACHE_TAG_TOWRITE);
871         xa_unlock_irq(pages);
872
873         /*
874          * Even if dax_writeback_mapping_range() was given a wbc->range_start
875          * in the middle of a PMD, the 'index' we are given will be aligned to
876          * the start index of the PMD, as will the pfn we pull from 'entry'.
877          * This allows us to flush for PMD_SIZE and not have to worry about
878          * partial PMD writebacks.
879          */
880         pfn = dax_radix_pfn(entry);
881         size = PAGE_SIZE << dax_radix_order(entry);
882
883         dax_mapping_entry_mkclean(mapping, index, pfn);
884         dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
885         /*
886          * After we have flushed the cache, we can clear the dirty tag. There
887          * cannot be new dirty data in the pfn after the flush has completed as
888          * the pfn mappings are writeprotected and fault waits for mapping
889          * entry lock.
890          */
891         xa_lock_irq(pages);
892         radix_tree_tag_clear(pages, index, PAGECACHE_TAG_DIRTY);
893         xa_unlock_irq(pages);
894         trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
895         put_locked_mapping_entry(mapping, index);
896         return ret;
897
898  put_unlocked:
899         put_unlocked_mapping_entry(mapping, index, entry2);
900         xa_unlock_irq(pages);
901         return ret;
902 }
903
904 /*
905  * Flush the mapping to the persistent domain within the byte range of [start,
906  * end]. This is required by data integrity operations to ensure file data is
907  * on persistent storage prior to completion of the operation.
908  */
909 int dax_writeback_mapping_range(struct address_space *mapping,
910                 struct block_device *bdev, struct writeback_control *wbc)
911 {
912         struct inode *inode = mapping->host;
913         pgoff_t start_index, end_index;
914         pgoff_t indices[PAGEVEC_SIZE];
915         struct dax_device *dax_dev;
916         struct pagevec pvec;
917         bool done = false;
918         int i, ret = 0;
919
920         if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
921                 return -EIO;
922
923         if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
924                 return 0;
925
926         dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
927         if (!dax_dev)
928                 return -EIO;
929
930         start_index = wbc->range_start >> PAGE_SHIFT;
931         end_index = wbc->range_end >> PAGE_SHIFT;
932
933         trace_dax_writeback_range(inode, start_index, end_index);
934
935         tag_pages_for_writeback(mapping, start_index, end_index);
936
937         pagevec_init(&pvec);
938         while (!done) {
939                 pvec.nr = find_get_entries_tag(mapping, start_index,
940                                 PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
941                                 pvec.pages, indices);
942
943                 if (pvec.nr == 0)
944                         break;
945
946                 for (i = 0; i < pvec.nr; i++) {
947                         if (indices[i] > end_index) {
948                                 done = true;
949                                 break;
950                         }
951
952                         ret = dax_writeback_one(dax_dev, mapping, indices[i],
953                                         pvec.pages[i]);
954                         if (ret < 0) {
955                                 mapping_set_error(mapping, ret);
956                                 goto out;
957                         }
958                 }
959                 start_index = indices[pvec.nr - 1] + 1;
960         }
961 out:
962         put_dax(dax_dev);
963         trace_dax_writeback_range_done(inode, start_index, end_index);
964         return (ret < 0 ? ret : 0);
965 }
966 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
967
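/*
 * A minimal sketch of how a filesystem's ->writepages() typically drives
 * this ("my_dax_writepages" is a stand-in for the fs-specific hook):
 *
 *	static int my_dax_writepages(struct address_space *mapping,
 *				     struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				mapping->host->i_sb->s_bdev, wbc);
 *	}
 */
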
968 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
969 {
970         return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
971 }
972
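/*
 * Worked example for the conversion above (the numbers are made up): with
 * iomap->offset == 0x200000, iomap->addr == 0x10000000 and pos == 0x201234,
 * the faulting page starts at file offset 0x201000, which lives at disk byte
 * 0x10001000, i.e. sector 0x80008.
 */
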
973 static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
974                          pfn_t *pfnp)
975 {
976         const sector_t sector = dax_iomap_sector(iomap, pos);
977         pgoff_t pgoff;
978         void *kaddr;
979         int id, rc;
980         long length;
981
982         rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
983         if (rc)
984                 return rc;
985         id = dax_read_lock();
986         length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
987                                    &kaddr, pfnp);
988         if (length < 0) {
989                 rc = length;
990                 goto out;
991         }
992         rc = -EINVAL;
993         if (PFN_PHYS(length) < size)
994                 goto out;
995         if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
996                 goto out;
997         /* For larger pages we need devmap */
998         if (length > 1 && !pfn_t_devmap(*pfnp))
999                 goto out;
1000         rc = 0;
1001 out:
1002         dax_read_unlock(id);
1003         return rc;
1004 }
1005
1006 /*
1007  * The user has performed a load from a hole in the file.  Allocating a new
1008  * page in the file would cause excessive storage usage for workloads with
1009  * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
1010  * If this page is ever written to we will re-fault and change the mapping to
1011  * point to real DAX storage instead.
1012  */
1013 static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
1014                          struct vm_fault *vmf)
1015 {
1016         struct inode *inode = mapping->host;
1017         unsigned long vaddr = vmf->address;
1018         vm_fault_t ret = VM_FAULT_NOPAGE;
1019         struct page *zero_page;
1020         pfn_t pfn;
1021
1022         zero_page = ZERO_PAGE(0);
1023         if (unlikely(!zero_page)) {
1024                 ret = VM_FAULT_OOM;
1025                 goto out;
1026         }
1027
1028         pfn = page_to_pfn_t(zero_page);
1029         dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
1030                         false);
1031         ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
1032 out:
1033         trace_dax_load_hole(inode, vmf, ret);
1034         return ret;
1035 }
1036
1037 static bool dax_range_is_aligned(struct block_device *bdev,
1038                                  unsigned int offset, unsigned int length)
1039 {
1040         unsigned short sector_size = bdev_logical_block_size(bdev);
1041
1042         if (!IS_ALIGNED(offset, sector_size))
1043                 return false;
1044         if (!IS_ALIGNED(length, sector_size))
1045                 return false;
1046
1047         return true;
1048 }
1049
1050 int __dax_zero_page_range(struct block_device *bdev,
1051                 struct dax_device *dax_dev, sector_t sector,
1052                 unsigned int offset, unsigned int size)
1053 {
1054         if (dax_range_is_aligned(bdev, offset, size)) {
1055                 sector_t start_sector = sector + (offset >> 9);
1056
1057                 return blkdev_issue_zeroout(bdev, start_sector,
1058                                 size >> 9, GFP_NOFS, 0);
1059         } else {
1060                 pgoff_t pgoff;
1061                 long rc, id;
1062                 void *kaddr;
1063                 pfn_t pfn;
1064
1065                 rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
1066                 if (rc)
1067                         return rc;
1068
1069                 id = dax_read_lock();
1070                 rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
1071                                 &pfn);
1072                 if (rc < 0) {
1073                         dax_read_unlock(id);
1074                         return rc;
1075                 }
1076                 memset(kaddr + offset, 0, size);
1077                 dax_flush(dax_dev, kaddr + offset, size);
1078                 dax_read_unlock(id);
1079         }
1080         return 0;
1081 }
1082 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
1083
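/*
 * A sketch of the typical caller (the iomap zeroing path does roughly this;
 * "pos", "offset" and "bytes" are the usual iomap actor arguments):
 *
 *	__dax_zero_page_range(iomap->bdev, iomap->dax_dev,
 *			dax_iomap_sector(iomap, pos), offset, bytes);
 */
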
1084 static loff_t
1085 dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1086                 struct iomap *iomap)
1087 {
1088         struct block_device *bdev = iomap->bdev;
1089         struct dax_device *dax_dev = iomap->dax_dev;
1090         struct iov_iter *iter = data;
1091         loff_t end = pos + length, done = 0;
1092         ssize_t ret = 0;
1093         size_t xfer;
1094         int id;
1095
1096         if (iov_iter_rw(iter) == READ) {
1097                 end = min(end, i_size_read(inode));
1098                 if (pos >= end)
1099                         return 0;
1100
1101                 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1102                         return iov_iter_zero(min(length, end - pos), iter);
1103         }
1104
1105         if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
1106                 return -EIO;
1107
1108         /*
1109          * A write can allocate a block for an area which has a hole page mapped
1110          * into the page tables. We have to tear down these mappings so that data
1111          * written by write(2) is visible in mmap.
1112          */
1113         if (iomap->flags & IOMAP_F_NEW) {
1114                 invalidate_inode_pages2_range(inode->i_mapping,
1115                                               pos >> PAGE_SHIFT,
1116                                               (end - 1) >> PAGE_SHIFT);
1117         }
1118
1119         id = dax_read_lock();
1120         while (pos < end) {
1121                 unsigned offset = pos & (PAGE_SIZE - 1);
1122                 const size_t size = ALIGN(length + offset, PAGE_SIZE);
1123                 const sector_t sector = dax_iomap_sector(iomap, pos);
1124                 ssize_t map_len;
1125                 pgoff_t pgoff;
1126                 void *kaddr;
1127                 pfn_t pfn;
1128
1129                 if (fatal_signal_pending(current)) {
1130                         ret = -EINTR;
1131                         break;
1132                 }
1133
1134                 ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
1135                 if (ret)
1136                         break;
1137
1138                 map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1139                                 &kaddr, &pfn);
1140                 if (map_len < 0) {
1141                         ret = map_len;
1142                         break;
1143                 }
1144
1145                 map_len = PFN_PHYS(map_len);
1146                 kaddr += offset;
1147                 map_len -= offset;
1148                 if (map_len > end - pos)
1149                         map_len = end - pos;
1150
1151                 /*
1152                  * The userspace address for the memory copy has already been
1153                  * validated via access_ok() in either vfs_read() or
1154                  * vfs_write(), depending on which operation we are doing.
1155                  */
1156                 if (iov_iter_rw(iter) == WRITE)
1157                         xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1158                                         map_len, iter);
1159                 else
1160                         xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
1161                                         map_len, iter);
1162
1163                 pos += xfer;
1164                 length -= xfer;
1165                 done += xfer;
1166
1167                 if (xfer == 0)
1168                         ret = -EFAULT;
1169                 if (xfer < map_len)
1170                         break;
1171         }
1172         dax_read_unlock(id);
1173
1174         return done ? done : ret;
1175 }
1176
1177 /**
1178  * dax_iomap_rw - Perform I/O to a DAX file
1179  * @iocb:       The control block for this I/O
1180  * @iter:       The addresses to do I/O from or to
1181  * @ops:        iomap ops passed from the file system
1182  *
1183  * This function performs read and write operations to directly mapped
1184  * persistent memory.  The caller needs to take care of read/write exclusion
1185  * and of evicting any page cache pages in the region under I/O.
1186  */
1187 ssize_t
1188 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1189                 const struct iomap_ops *ops)
1190 {
1191         struct address_space *mapping = iocb->ki_filp->f_mapping;
1192         struct inode *inode = mapping->host;
1193         loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1194         unsigned flags = 0;
1195
1196         if (iov_iter_rw(iter) == WRITE) {
1197                 lockdep_assert_held_exclusive(&inode->i_rwsem);
1198                 flags |= IOMAP_WRITE;
1199         } else {
1200                 lockdep_assert_held(&inode->i_rwsem);
1201         }
1202
1203         while (iov_iter_count(iter)) {
1204                 ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1205                                 iter, dax_iomap_actor);
1206                 if (ret <= 0)
1207                         break;
1208                 pos += ret;
1209                 done += ret;
1210         }
1211
1212         iocb->ki_pos += done;
1213         return done ? done : ret;
1214 }
1215 EXPORT_SYMBOL_GPL(dax_iomap_rw);
1216
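/*
 * A minimal sketch of a filesystem ->read_iter() built on top of
 * dax_iomap_rw() ("my_iomap_ops" stands in for the fs's real iomap_ops;
 * real implementations also handle IOCB_NOWAIT and zero-length reads):
 *
 *	static ssize_t my_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &my_iomap_ops);
 *		inode_unlock_shared(inode);
 *
 *		file_accessed(iocb->ki_filp);
 *		return ret;
 *	}
 */
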
1217 static vm_fault_t dax_fault_return(int error)
1218 {
1219         if (error == 0)
1220                 return VM_FAULT_NOPAGE;
1221         if (error == -ENOMEM)
1222                 return VM_FAULT_OOM;
1223         return VM_FAULT_SIGBUS;
1224 }
1225
1226 /*
1227  * MAP_SYNC on a dax mapping guarantees dirty metadata is
1228  * flushed on write-faults (non-cow), but not read-faults.
1229  */
1230 static bool dax_fault_is_synchronous(unsigned long flags,
1231                 struct vm_area_struct *vma, struct iomap *iomap)
1232 {
1233         return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
1234                 && (iomap->flags & IOMAP_F_DIRTY);
1235 }
1236
1237 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
1238                                int *iomap_errp, const struct iomap_ops *ops)
1239 {
1240         struct vm_area_struct *vma = vmf->vma;
1241         struct address_space *mapping = vma->vm_file->f_mapping;
1242         struct inode *inode = mapping->host;
1243         unsigned long vaddr = vmf->address;
1244         loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1245         struct iomap iomap = { 0 };
1246         unsigned flags = IOMAP_FAULT;
1247         int error, major = 0;
1248         bool write = vmf->flags & FAULT_FLAG_WRITE;
1249         bool sync;
1250         vm_fault_t ret = 0;
1251         void *entry;
1252         pfn_t pfn;
1253
1254         trace_dax_pte_fault(inode, vmf, ret);
1255         /*
1256          * Check whether offset isn't beyond end of file now. Caller is supposed
1257          * to hold locks serializing us with truncate / punch hole so this is
1258          * a reliable test.
1259          */
1260         if (pos >= i_size_read(inode)) {
1261                 ret = VM_FAULT_SIGBUS;
1262                 goto out;
1263         }
1264
1265         if (write && !vmf->cow_page)
1266                 flags |= IOMAP_WRITE;
1267
1268         entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
1269         if (IS_ERR(entry)) {
1270                 ret = dax_fault_return(PTR_ERR(entry));
1271                 goto out;
1272         }
1273
1274         /*
1275          * It is possible, particularly with mixed reads & writes to private
1276          * mappings, that we have raced with a PMD fault that overlaps with
1277          * the PTE we need to set up.  If so just return and the fault will be
1278          * retried.
1279          */
1280         if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1281                 ret = VM_FAULT_NOPAGE;
1282                 goto unlock_entry;
1283         }
1284
1285         /*
1286          * Note that we don't bother to use iomap_apply here: DAX requires
1287          * the file system block size to be equal to the page size, which means
1288          * that we never have to deal with more than a single extent here.
1289          */
1290         error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1291         if (iomap_errp)
1292                 *iomap_errp = error;
1293         if (error) {
1294                 ret = dax_fault_return(error);
1295                 goto unlock_entry;
1296         }
1297         if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1298                 error = -EIO;   /* fs corruption? */
1299                 goto error_finish_iomap;
1300         }
1301
1302         if (vmf->cow_page) {
1303                 sector_t sector = dax_iomap_sector(&iomap, pos);
1304
1305                 switch (iomap.type) {
1306                 case IOMAP_HOLE:
1307                 case IOMAP_UNWRITTEN:
1308                         clear_user_highpage(vmf->cow_page, vaddr);
1309                         break;
1310                 case IOMAP_MAPPED:
1311                         error = copy_user_dax(iomap.bdev, iomap.dax_dev,
1312                                         sector, PAGE_SIZE, vmf->cow_page, vaddr);
1313                         break;
1314                 default:
1315                         WARN_ON_ONCE(1);
1316                         error = -EIO;
1317                         break;
1318                 }
1319
1320                 if (error)
1321                         goto error_finish_iomap;
1322
1323                 __SetPageUptodate(vmf->cow_page);
1324                 ret = finish_fault(vmf);
1325                 if (!ret)
1326                         ret = VM_FAULT_DONE_COW;
1327                 goto finish_iomap;
1328         }
1329
1330         sync = dax_fault_is_synchronous(flags, vma, &iomap);
1331
1332         switch (iomap.type) {
1333         case IOMAP_MAPPED:
1334                 if (iomap.flags & IOMAP_F_NEW) {
1335                         count_vm_event(PGMAJFAULT);
1336                         count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
1337                         major = VM_FAULT_MAJOR;
1338                 }
1339                 error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
1340                 if (error < 0)
1341                         goto error_finish_iomap;
1342
1343                 entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
1344                                                  0, write && !sync);
1345
1346                 /*
1347                  * If we are doing synchronous page fault and inode needs fsync,
1348                  * we can insert PTE into page tables only after that happens.
1349                  * Skip insertion for now and return the pfn so that caller can
1350                  * insert it after fsync is done.
1351                  */
1352                 if (sync) {
1353                         if (WARN_ON_ONCE(!pfnp)) {
1354                                 error = -EIO;
1355                                 goto error_finish_iomap;
1356                         }
1357                         *pfnp = pfn;
1358                         ret = VM_FAULT_NEEDDSYNC | major;
1359                         goto finish_iomap;
1360                 }
1361                 trace_dax_insert_mapping(inode, vmf, entry);
1362                 if (write)
1363                         ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
1364                 else
1365                         ret = vmf_insert_mixed(vma, vaddr, pfn);
1366
1367                 goto finish_iomap;
1368         case IOMAP_UNWRITTEN:
1369         case IOMAP_HOLE:
1370                 if (!write) {
1371                         ret = dax_load_hole(mapping, entry, vmf);
1372                         goto finish_iomap;
1373                 }
1374                 /*FALLTHRU*/
1375         default:
1376                 WARN_ON_ONCE(1);
1377                 error = -EIO;
1378                 break;
1379         }
1380
1381  error_finish_iomap:
1382         ret = dax_fault_return(error);
1383  finish_iomap:
1384         if (ops->iomap_end) {
1385                 int copied = PAGE_SIZE;
1386
1387                 if (ret & VM_FAULT_ERROR)
1388                         copied = 0;
1389                 /*
1390                  * The fault is done by now and there's no way back (another
1391                  * thread may already be happily using the PTE we have installed).
1392                  * Just ignore error from ->iomap_end since we cannot do much
1393                  * with it.
1394                  */
1395                 ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1396         }
1397  unlock_entry:
1398         put_locked_mapping_entry(mapping, vmf->pgoff);
1399  out:
1400         trace_dax_pte_fault_done(inode, vmf, ret);
1401         return ret | major;
1402 }
1403
1404 #ifdef CONFIG_FS_DAX_PMD
1405 static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
1406                 void *entry)
1407 {
1408         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1409         unsigned long pmd_addr = vmf->address & PMD_MASK;
1410         struct inode *inode = mapping->host;
1411         struct page *zero_page;
1412         void *ret = NULL;
1413         spinlock_t *ptl;
1414         pmd_t pmd_entry;
1415         pfn_t pfn;
1416
1417         zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1418
1419         if (unlikely(!zero_page))
1420                 goto fallback;
1421
1422         pfn = page_to_pfn_t(zero_page);
1423         ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
1424                         RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);
1425
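        /*
         * Install the zero page PMD under the PMD lock.  If somebody else
         * installed a PMD for us in the meantime, give up and let the fault
         * be retried with PTEs.
         */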
1426         ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1427         if (!pmd_none(*(vmf->pmd))) {
1428                 spin_unlock(ptl);
1429                 goto fallback;
1430         }
1431
1432         pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1433         pmd_entry = pmd_mkhuge(pmd_entry);
1434         set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1435         spin_unlock(ptl);
1436         trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
1437         return VM_FAULT_NOPAGE;
1438
1439 fallback:
1440         trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
1441         return VM_FAULT_FALLBACK;
1442 }
1443
1444 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1445                                const struct iomap_ops *ops)
1446 {
1447         struct vm_area_struct *vma = vmf->vma;
1448         struct address_space *mapping = vma->vm_file->f_mapping;
1449         unsigned long pmd_addr = vmf->address & PMD_MASK;
1450         bool write = vmf->flags & FAULT_FLAG_WRITE;
1451         bool sync;
1452         unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1453         struct inode *inode = mapping->host;
1454         vm_fault_t result = VM_FAULT_FALLBACK;
1455         struct iomap iomap = { 0 };
1456         pgoff_t max_pgoff, pgoff;
1457         void *entry;
1458         loff_t pos;
1459         int error;
1460         pfn_t pfn;
1461
1462         /*
1463          * Check whether the offset is beyond the end of the file now. The
1464          * caller is supposed to hold locks serializing us with truncate /
1465          * punch hole, so this is a reliable test.
1466          */
1467         pgoff = linear_page_index(vma, pmd_addr);
1468         max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1469
1470         trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1471
1472         /*
1473          * Make sure that the faulting address's PMD offset (colour) matches
1474          * the PMD offset from the start of the file.  This is necessary so
1475          * that a PMD range in the page table overlaps exactly with a PMD
1476          * range in the radix tree.
1477          */
1478         if ((vmf->pgoff & PG_PMD_COLOUR) !=
1479             ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1480                 goto fallback;
1481
1482         /* Fall back to PTEs if we're going to COW */
1483         if (write && !(vma->vm_flags & VM_SHARED))
1484                 goto fallback;
1485
1486         /* If the PMD would extend outside the VMA */
1487         if (pmd_addr < vma->vm_start)
1488                 goto fallback;
1489         if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1490                 goto fallback;
1491
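        /* The faulting page is at or beyond the end of the file. */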
1492         if (pgoff >= max_pgoff) {
1493                 result = VM_FAULT_SIGBUS;
1494                 goto out;
1495         }
1496
1497         /* If the PMD would extend beyond the file size */
1498         if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
1499                 goto fallback;
1500
1501         /*
1502          * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
1503          * 2MiB zero page entry or a DAX PMD entry.  If it can't (because a
1504          * 4KiB entry is already in the tree, for instance), it will return
1505          * -EEXIST and we just fall back to 4KiB entries.
1506          */
1507         entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
1508         if (IS_ERR(entry))
1509                 goto fallback;
1510
1511         /*
1512          * It is possible, particularly with mixed reads & writes to private
1513          * mappings, that we have raced with a PTE fault that overlaps with
1514          * the PMD we need to set up.  If so, just return and the fault will be
1515          * retried.
1516          */
1517         if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1518                         !pmd_devmap(*vmf->pmd)) {
1519                 result = 0;
1520                 goto unlock_entry;
1521         }
1522
1523         /*
1524          * Note that we don't use iomap_apply here.  We aren't doing I/O, only
1525          * setting up a mapping, so really we're using iomap_begin() as a way
1526          * to look up our filesystem block.
1527          */
1528         pos = (loff_t)pgoff << PAGE_SHIFT;
1529         error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1530         if (error)
1531                 goto unlock_entry;
1532
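        /*
         * If the extent returned by the filesystem does not cover the whole
         * PMD, we cannot map it with a huge page; fall back to PTE faults.
         */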
1533         if (iomap.offset + iomap.length < pos + PMD_SIZE)
1534                 goto finish_iomap;
1535
1536         sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);
1537
1538         switch (iomap.type) {
1539         case IOMAP_MAPPED:
1540                 error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
1541                 if (error < 0)
1542                         goto finish_iomap;
1543
1544                 entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
1545                                                 RADIX_DAX_PMD, write && !sync);
1546
1547                 /*
1548                  * If we are doing a synchronous page fault and the inode needs fsync,
1549                  * we can insert the PMD into the page tables only after that happens.
1550                  * Skip the insertion for now and return the pfn so that the caller
1551                  * can insert it after fsync is done.
1552                  */
1553                 if (sync) {
1554                         if (WARN_ON_ONCE(!pfnp))
1555                                 goto finish_iomap;
1556                         *pfnp = pfn;
1557                         result = VM_FAULT_NEEDDSYNC;
1558                         goto finish_iomap;
1559                 }
1560
1561                 trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
1562                 result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
1563                                             write);
1564                 break;
1565         case IOMAP_UNWRITTEN:
1566         case IOMAP_HOLE:
1567                 if (WARN_ON_ONCE(write))
1568                         break;
1569                 result = dax_pmd_load_hole(vmf, &iomap, entry);
1570                 break;
1571         default:
1572                 WARN_ON_ONCE(1);
1573                 break;
1574         }
1575
1576  finish_iomap:
1577         if (ops->iomap_end) {
1578                 int copied = PMD_SIZE;
1579
1580                 if (result == VM_FAULT_FALLBACK)
1581                         copied = 0;
1582                 /*
1583                  * The fault is done by now and there's no way back (another
1584                  * thread may already be using the PMD we have installed).
1585                  * Just ignore any error from ->iomap_end since we cannot do
1586                  * much with it.
1587                  */
1588                 ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1589                                 &iomap);
1590         }
1591  unlock_entry:
1592         put_locked_mapping_entry(mapping, pgoff);
1593  fallback:
1594         if (result == VM_FAULT_FALLBACK) {
1595                 split_huge_pmd(vma, vmf->pmd, vmf->address);
1596                 count_vm_event(THP_FAULT_FALLBACK);
1597         }
1598 out:
1599         trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1600         return result;
1601 }
1602 #else
1603 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1604                                const struct iomap_ops *ops)
1605 {
1606         return VM_FAULT_FALLBACK;
1607 }
1608 #endif /* CONFIG_FS_DAX_PMD */
1609
1610 /**
1611  * dax_iomap_fault - handle a page fault on a DAX file
1612  * @vmf: The description of the fault
1613  * @pe_size: Size of the page to fault in
1614  * @pfnp: PFN to insert for synchronous faults if fsync is required
1615  * @iomap_errp: Storage for detailed error code in case of error
1616  * @ops: Iomap ops passed from the file system
1617  *
1618  * When a page fault occurs, filesystems may call this helper in
1619  * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1620  * has done all the necessary locking for the page fault to proceed
1621  * successfully.
1622  */
1623 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1624                     pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
1625 {
1626         switch (pe_size) {
1627         case PE_SIZE_PTE:
1628                 return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1629         case PE_SIZE_PMD:
1630                 return dax_iomap_pmd_fault(vmf, pfnp, ops);
1631         default:
1632                 return VM_FAULT_FALLBACK;
1633         }
1634 }
1635 EXPORT_SYMBOL_GPL(dax_iomap_fault);
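
/*
 * Example (illustrative only): a rough sketch of how a filesystem's DAX
 * fault handler might wire dax_iomap_fault() and dax_finish_sync_fault()
 * together.  The function name, the foo_iomap_ops name, and the locking
 * placeholder are assumptions; the details differ per filesystem.
 *
 *	static vm_fault_t foo_dax_huge_fault(struct vm_fault *vmf,
 *					     enum page_entry_size pe_size)
 *	{
 *		bool write = vmf->flags & FAULT_FLAG_WRITE;
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		vm_fault_t ret;
 *		pfn_t pfn;
 *
 *		if (write) {
 *			sb_start_pagefault(inode->i_sb);
 *			file_update_time(vmf->vma->vm_file);
 *		}
 *		// take the lock serializing faults against truncate here
 *		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &foo_iomap_ops);
 *		// drop that lock here
 *		if (ret & VM_FAULT_NEEDDSYNC)
 *			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 *		if (write)
 *			sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}
 */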
1636
1637 /**
1638  * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1639  * @vmf: The description of the fault
1640  * @pe_size: Size of entry to be inserted
1641  * @pfn: PFN to insert
1642  *
1643  * This function inserts a writeable PTE or PMD entry into the page tables
1644  * for an mmapped DAX file.  It also takes care of marking the corresponding
1645  * radix tree entry as dirty.
1646  */
1647 static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
1648                                   enum page_entry_size pe_size,
1649                                   pfn_t pfn)
1650 {
1651         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1652         void *entry, **slot;
1653         pgoff_t index = vmf->pgoff;
1654         vm_fault_t ret;
1655
1656         xa_lock_irq(&mapping->i_pages);
1657         entry = get_unlocked_mapping_entry(mapping, index, &slot);
1658         /* Did we race with someone splitting the entry, or similar? */
1659         if (!entry ||
1660             (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) ||
1661             (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) {
1662                 put_unlocked_mapping_entry(mapping, index, entry);
1663                 xa_unlock_irq(&mapping->i_pages);
1664                 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1665                                                       VM_FAULT_NOPAGE);
1666                 return VM_FAULT_NOPAGE;
1667         }
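        /*
         * Mark the radix tree entry dirty before installing the writeable
         * mapping so that a later flush (fsync/msync) knows to write the
         * data back to the media.
         */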
1668         radix_tree_tag_set(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY);
1669         entry = lock_slot(mapping, slot);
1670         xa_unlock_irq(&mapping->i_pages);
1671         switch (pe_size) {
1672         case PE_SIZE_PTE:
1673                 ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1674                 break;
1675 #ifdef CONFIG_FS_DAX_PMD
1676         case PE_SIZE_PMD:
1677                 ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
1678                         pfn, true);
1679                 break;
1680 #endif
1681         default:
1682                 ret = VM_FAULT_FALLBACK;
1683         }
1684         put_locked_mapping_entry(mapping, index);
1685         trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
1686         return ret;
1687 }
1688
1689 /**
1690  * dax_finish_sync_fault - finish synchronous page fault
1691  * @vmf: The description of the fault
1692  * @pe_size: Size of entry to be inserted
1693  * @pfn: PFN to insert
1694  *
1695  * This function ensures that the file range touched by the page fault is
1696  * stored persistently on the media and then inserts the appropriate page
1697  * table entry.
1698  */
1699 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
1700                 enum page_entry_size pe_size, pfn_t pfn)
1701 {
1702         int err;
1703         loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
1704         size_t len = 0;
1705
1706         if (pe_size == PE_SIZE_PTE)
1707                 len = PAGE_SIZE;
1708         else if (pe_size == PE_SIZE_PMD)
1709                 len = PMD_SIZE;
1710         else
1711                 WARN_ON_ONCE(1);
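        /*
         * Make sure the data touched by the fault is durable on the media
         * before installing a writeable page table entry for it.
         */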
1712         err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
1713         if (err)
1714                 return VM_FAULT_SIGBUS;
1715         return dax_insert_pfn_mkwrite(vmf, pe_size, pfn);
1716 }
1717 EXPORT_SYMBOL_GPL(dax_finish_sync_fault);