// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include <linux/rmap.h>
#include <asm/pgalloc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

static inline unsigned int pe_order(enum page_entry_size pe_size)
{
	if (pe_size == PE_SIZE_PTE)
		return PAGE_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PUD)
		return PUD_SHIFT - PAGE_SHIFT;
	return ~0;
}
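/*
 * For instance, with 4K pages and 2MB PMDs (as on x86-64) the orders
 * above work out to 0 (PTE), 9 (PMD) and 18 (PUD); architectures with
 * other page-table geometries get correspondingly different values.
 */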
/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);
/*
 * DAX pagecache entries use XArray value entries so they can't be mistaken
 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
 * and two more to tell us if the entry is a zero page or an empty entry that
 * is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define DAX_SHIFT	(4)
#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)
#define DAX_ZERO_PAGE	(1UL << 2)
#define DAX_EMPTY	(1UL << 3)

static unsigned long dax_to_pfn(void *entry)
{
	return xa_to_value(entry) >> DAX_SHIFT;
}

static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}
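/*
 * Example: a locked PMD entry for pfn 0x1000 is stored as the XArray
 * value (0x1000 << DAX_SHIFT) | DAX_PMD | DAX_LOCKED; dax_to_pfn()
 * recovers the pfn by shifting the four flag bits back out.
 */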
static bool dax_is_locked(void *entry)
{
	return xa_to_value(entry) & DAX_LOCKED;
}

static unsigned int dax_entry_order(void *entry)
{
	if (xa_to_value(entry) & DAX_PMD)
		return PMD_ORDER;
	return 0;
}

static unsigned long dax_is_pmd_entry(void *entry)
{
	return xa_to_value(entry) & DAX_PMD;
}

static bool dax_is_pte_entry(void *entry)
{
	return !(xa_to_value(entry) & DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return xa_to_value(entry) & DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return xa_to_value(entry) & DAX_EMPTY;
}

/*
 * true if the entry that was found is of a smaller order than the entry
 * we were looking for
 */
static bool dax_is_conflict(void *entry)
{
	return entry == XA_RETRY_ENTRY;
}
/*
 * DAX page cache entry locking
 */
struct exceptional_entry_key {
	struct xarray *xa;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

/**
 * enum dax_wake_mode: waitqueue wakeup behaviour
 * @WAKE_ALL: wake all waiters in the waitqueue
 * @WAKE_NEXT: wake only the first waiter in the waitqueue
 */
enum dax_wake_mode {
	WAKE_ALL,
	WAKE_NEXT,
};
static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
		void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;
	unsigned long index = xas->xa_index;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;
	key->xa = xas->xa;
	key->entry_start = index;

	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}
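/*
 * The xarray pointer is XORed into the hash above, so equal indices in
 * different files usually land on different waitqueues; hash collisions
 * are harmless because wake_exceptional_entry_func() re-checks the key.
 */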
static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->xa != ewait->key.xa ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_entry(struct xa_state *xas, void *entry,
			   enum dax_wake_mode mode)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(xas, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
}
/*
 * Look up entry in page cache, wait for it to become unlocked if it
 * is a DAX entry and return it.  The caller must subsequently call
 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 * if it did.  The entry returned may have a larger order than @order.
 * If @order is larger than the order of the entry found in i_pages, this
 * function returns a dax_is_conflict entry.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
{
	void *entry;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = xas_find_conflict(xas);
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			return entry;
		if (dax_entry_order(entry) < order)
			return XA_RETRY_ENTRY;
		if (!dax_is_locked(entry))
			return entry;

		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xas_unlock_irq(xas);
		xas_reset(xas);
		schedule();
		finish_wait(wq, &ewait.wait);
		xas_lock_irq(xas);
	}
}
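/*
 * On return the i_pages lock is held again and the xa_state has been
 * re-walked, so the caller can immediately dax_lock_entry() the result
 * or release it with put_unlocked_entry().
 */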
/*
 * The only thing keeping the address space around is the i_pages lock
 * (it's cycled in clear_inode() after removing the entries from i_pages)
 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 */
static void wait_entry_unlocked(struct xa_state *xas, void *entry)
{
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
	/*
	 * Unlike get_unlocked_entry() there is no guarantee that this
	 * path ever successfully retrieves an unlocked entry before an
	 * inode dies.  Perform a non-exclusive wait in case this path
	 * never successfully performs its own wake up.
	 */
	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
	xas_unlock_irq(xas);
	schedule();
	finish_wait(wq, &ewait.wait);
}
static void put_unlocked_entry(struct xa_state *xas, void *entry,
			       enum dax_wake_mode mode)
{
	if (entry && !dax_is_conflict(entry))
		dax_wake_entry(xas, entry, mode);
}

/*
 * We used the xa_state to get the entry, but then we locked the entry and
 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 * before use.
 */
static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
	void *old;

	BUG_ON(dax_is_locked(entry));
	xas_reset(xas);
	xas_lock_irq(xas);
	old = xas_store(xas, entry);
	xas_unlock_irq(xas);
	BUG_ON(!dax_is_locked(old));
	dax_wake_entry(xas, entry, WAKE_NEXT);
}

/*
 * Return: The entry stored at this location before it was locked.
 */
static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
	unsigned long v = xa_to_value(entry);
	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}
static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_end_pfn(void *entry)
{
	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_to_pfn(entry); \
			pfn < dax_end_pfn(entry); pfn++)
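/*
 * For a PMD entry this visits PG_PMD_NR pfns (512 with 4K pages and 2MB
 * PMDs); zero and empty entries report a size of 0 above, so the loop
 * body never runs for them.
 */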
static inline bool dax_page_is_shared(struct page *page)
{
	return page->mapping == PAGE_MAPPING_DAX_SHARED;
}

/*
 * Set page->mapping to PAGE_MAPPING_DAX_SHARED and increase the
 * refcount.
 */
static inline void dax_page_share_get(struct page *page)
{
	if (page->mapping != PAGE_MAPPING_DAX_SHARED) {
		/*
		 * Reset the index if the page was already mapped
		 * regularly before.
		 */
		if (page->mapping)
			page->share = 1;
		page->mapping = PAGE_MAPPING_DAX_SHARED;
	}
	page->share++;
}

static inline unsigned long dax_page_share_put(struct page *page)
{
	return --page->share;
}
/*
 * When called from dax_insert_entry(), the shared flag indicates whether
 * this entry is shared by multiple files.  If so, set page->mapping to
 * PAGE_MAPPING_DAX_SHARED and use page->share as the refcount.
 */
static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address, bool shared)
{
	unsigned long size = dax_entry_size(entry), pfn, index;
	int i = 0;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (shared) {
			dax_page_share_get(page);
		} else {
			WARN_ON_ONCE(page->mapping);
			page->mapping = mapping;
			page->index = index + i++;
		}
	}
}
static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		if (dax_page_is_shared(page)) {
			/* keep the shared flag if this page is still shared */
			if (dax_page_share_put(page) > 0)
				continue;
		} else
			WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
		page->index = 0;
	}
}
static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}
/*
 * dax_lock_page - Lock the DAX entry corresponding to a page
 * @page: The page whose entry we want to lock
 *
 * Context: Process context.
 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
 * not be locked.
 */
dax_entry_t dax_lock_page(struct page *page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	/* Ensure page->mapping isn't freed while we look at it */
	rcu_read_lock();
	for (;;) {
		struct address_space *mapping = READ_ONCE(page->mapping);

		entry = NULL;
		if (!mapping || !dax_mapping(mapping))
			break;

		/*
		 * In the device-dax case there's no need to lock, a
		 * struct dev_pagemap pin is sufficient to keep the
		 * inode alive, and we assume we have dev_pagemap pin
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
		 */
		entry = (void *)~0UL;
		if (S_ISCHR(mapping->host->i_mode))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		if (mapping != page->mapping) {
			xas_unlock_irq(&xas);
			continue;
		}
		xas_set(&xas, page->index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		dax_lock_entry(&xas, entry);
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}
void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
	struct address_space *mapping = page->mapping;
	XA_STATE(xas, &mapping->i_pages, page->index);

	if (S_ISCHR(mapping->host->i_mode))
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}
/*
 * dax_lock_mapping_entry - Lock the DAX entry corresponding to a mapping
 * @mapping: the file's mapping whose entry we want to lock
 * @index: the offset within this file
 * @page: output the dax page corresponding to this dax entry
 *
 * Return: A cookie to pass to dax_unlock_mapping_entry() or 0 if the entry
 * could not be locked.
 */
dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, pgoff_t index,
		struct page **page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	rcu_read_lock();
	for (;;) {
		entry = NULL;
		if (!dax_mapping(mapping))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		xas_set(&xas, index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		if (!entry ||
		    dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
			/*
			 * Because we are looking up the entry by the file's
			 * mapping and index, it may not have been inserted
			 * yet, or may be a zero/empty entry.  Neither is an
			 * error case, so return a special value and do not
			 * output @page.
			 */
			entry = (void *)~0UL;
		} else {
			*page = pfn_to_page(dax_to_pfn(entry));
			dax_lock_entry(&xas, entry);
		}
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}

void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index,
		dax_entry_t cookie)
{
	XA_STATE(xas, &mapping->i_pages, index);

	if (cookie == ~0UL)
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}
/*
 * Find page cache entry at given index. If it is a DAX entry, return it
 * with the entry locked. If the page cache doesn't contain an entry at
 * that index, add a locked empty entry.
 *
 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return VM_FAULT_FALLBACK.
 * This will happen if there are any PTE entries within the PMD range
 * that we are requesting.
 *
 * We always favor PTE entries over PMD entries. There isn't a flow where we
 * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
 * insertion will fail if it finds any PTE entries already in the tree, and a
 * PTE insertion will cause an existing PMD entry to be unmapped and
 * downgraded to PTE entries.  This happens for both PMD zero pages as
 * well as PMD empty entries.
 *
 * The exception to this downgrade path is for PMD entries that have
 * real storage backing them.  We will leave these real PMD entries in
 * the tree, and PTE writes will simply dirty the entire PMD entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 *
 * On error, this function does not return an ERR_PTR.  Instead it returns
 * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
 * overlap with xarray value entries.
 */
static void *grab_mapping_entry(struct xa_state *xas,
		struct address_space *mapping, unsigned int order)
{
	unsigned long index = xas->xa_index;
	bool pmd_downgrade;	/* splitting PMD entry into PTE entries? */
	void *entry;

retry:
	pmd_downgrade = false;
	xas_lock_irq(xas);
	entry = get_unlocked_entry(xas, order);

	if (entry) {
		if (dax_is_conflict(entry))
			goto fallback;
		if (!xa_is_value(entry)) {
			xas_set_err(xas, -EIO);
			goto out_unlock;
		}

		if (order == 0) {
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	if (pmd_downgrade) {
		/*
		 * Make sure 'entry' remains valid while we drop
		 * the i_pages lock.
		 */
		dax_lock_entry(xas, entry);

		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (dax_is_zero_entry(entry)) {
			xas_unlock_irq(xas);
			unmap_mapping_pages(mapping,
					xas->xa_index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
			xas_reset(xas);
			xas_lock_irq(xas);
		}

		dax_disassociate_entry(entry, mapping, false);
		xas_store(xas, NULL);	/* undo the PMD join */
		dax_wake_entry(xas, entry, WAKE_ALL);
		mapping->nrpages -= PG_PMD_NR;
		entry = NULL;
		xas_set(xas, index);
	}

	if (entry) {
		dax_lock_entry(xas, entry);
	} else {
		unsigned long flags = DAX_EMPTY;

		if (order > 0)
			flags |= DAX_PMD;
		entry = dax_make_entry(pfn_to_pfn_t(0), flags);
		dax_lock_entry(xas, entry);
		if (xas_error(xas))
			goto out_unlock;
		mapping->nrpages += 1UL << order;
	}

out_unlock:
	xas_unlock_irq(xas);
	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
		goto retry;
	if (xas->xa_node == XA_ERROR(-ENOMEM))
		return xa_mk_internal(VM_FAULT_OOM);
	if (xas_error(xas))
		return xa_mk_internal(VM_FAULT_SIGBUS);
	return entry;
fallback:
	xas_unlock_irq(xas);
	return xa_mk_internal(VM_FAULT_FALLBACK);
}
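/*
 * A sketch of how the fault paths are expected to consume the return
 * value: internal entries encode a VM_FAULT code rather than an entry.
 *
 *	entry = grab_mapping_entry(&xas, mapping, 0);
 *	if (xa_is_internal(entry))
 *		return xa_to_internal(entry);
 */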
/**
 * dax_layout_busy_page_range - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 * @start: Starting offset. Page containing 'start' is included.
 * @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
 *       pages from 'start' till the end of file are included.
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page_range(struct address_space *mapping,
					loff_t start, loff_t end)
{
	void *entry;
	unsigned int scanned = 0;
	struct page *page = NULL;
	pgoff_t start_idx = start >> PAGE_SHIFT;
	pgoff_t end_idx;
	XA_STATE(xas, &mapping->i_pages, start_idx);

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	/* If end == LLONG_MAX, all pages from start to till end of file */
	if (end == LLONG_MAX)
		end_idx = ULONG_MAX;
	else
		end_idx = end >> PAGE_SHIFT;
	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the iteration and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_pages() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_pages(mapping, start_idx, end_idx - start_idx + 1, 0);

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, end_idx) {
		if (WARN_ON_ONCE(!xa_is_value(entry)))
			continue;
		if (unlikely(dax_is_locked(entry)))
			entry = get_unlocked_entry(&xas, 0);
		if (entry)
			page = dax_busy_page(entry);
		put_unlocked_entry(&xas, entry, WAKE_NEXT);
		if (page)
			break;
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);

struct page *dax_layout_busy_page(struct address_space *mapping)
{
	return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);
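/*
 * Filesystems typically call dax_layout_busy_page() before truncating
 * or punching a hole in a DAX file and, if a busy page is returned,
 * wait for its refcount to drop before retrying (XFS's break-layouts
 * path works this way, for example).
 */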
static int __dax_invalidate_entry(struct address_space *mapping,
				  pgoff_t index, bool trunc)
{
	XA_STATE(xas, &mapping->i_pages, index);
	int ret = 0;
	void *entry;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas, 0);
	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
		goto out;
	if (!trunc &&
	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	xas_store(&xas, NULL);
	mapping->nrpages -= 1UL << dax_entry_order(entry);
	ret = 1;
out:
	put_unlocked_entry(&xas, entry, WAKE_ALL);
	xas_unlock_irq(&xas);
	return ret;
}
static int __dax_clear_dirty_range(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	unsigned int scanned = 0;
	void *entry;

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, end) {
		entry = get_unlocked_entry(&xas, 0);
		xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
		xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
		put_unlocked_entry(&xas, entry, WAKE_NEXT);

		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);

	return 0;
}
/*
 * Delete DAX entry at @index from @mapping.  Wait for it
 * to be unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * page cache (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen a DAX entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_entry(mapping, index, false);
}
static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
{
	return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset);
}
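/*
 * Copy the contents of the DAX-backed source page into the regular page
 * allocated for a copy-on-write fault, so the fault handler can install
 * a private copy of the data.
 */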
static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
{
	pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
	void *vto, *kaddr;
	long rc;
	int id;

	id = dax_read_lock();
	rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, DAX_ACCESS,
			       &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(vmf->cow_page);
	copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}
/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(const struct iomap_iter *iter,
		struct vm_area_struct *vma)
{
	return (iter->flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC) &&
		(iter->iomap.flags & IOMAP_F_DIRTY);
}
/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap_iter *iter, void *entry, pfn_t pfn,
		unsigned long flags)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	void *new_entry = dax_make_entry(pfn, flags);
	bool write = iter->flags & IOMAP_WRITE;
	bool dirty = write && !dax_fault_is_synchronous(iter, vmf->vma);
	bool shared = iter->iomap.flags & IOMAP_F_SHARED;

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (shared || (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE))) {
		unsigned long index = xas->xa_index;
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, index, 1, false);
	}

	xas_reset(xas);
	xas_lock_irq(xas);
	if (shared || dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		void *old;

		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
				shared);
		/*
		 * Only swap our new entry into the page cache if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the cache, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		old = dax_lock_entry(xas, new_entry);
		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
					DAX_LOCKED));
		entry = new_entry;
	} else {
		xas_load(xas);	/* Walk the xa_state */
	}

	if (dirty)
		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);

	if (write && shared)
		xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);

	xas_unlock_irq(xas);
	return entry;
}
static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
		struct address_space *mapping, void *entry)
{
	unsigned long pfn, index, count, end;
	long ret = 0;
	struct vm_area_struct *vma;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!xa_is_value(entry)))
		return -EIO;

	if (unlikely(dax_is_locked(entry))) {
		void *old_entry = entry;

		entry = get_unlocked_entry(xas, 0);

		/* Entry got punched out / reallocated? */
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			goto put_unlocked;
		/*
		 * Entry got reallocated elsewhere? No need to writeback.
		 * We have to compare pfns as we must not bail out due to
		 * difference in lockbit or entry type.
		 */
		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
			goto put_unlocked;
		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
					dax_is_zero_entry(entry))) {
			ret = -EIO;
			goto put_unlocked;
		}

		/* Another fsync thread may have already done this entry */
		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
			goto put_unlocked;
	}

	/* Lock the entry to serialize with page faults */
	dax_lock_entry(xas, entry);

	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
	xas_unlock_irq(xas);

	/*
	 * If dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we use needs to be
	 * aligned to the start of the PMD.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_to_pfn(entry);
	count = 1UL << dax_entry_order(entry);
	index = xas->xa_index & ~(count - 1);
	end = index + count - 1;

	/* Walk all mappings of a given index of a file and writeprotect them */
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, end) {
		pfn_mkclean_range(pfn, count, index, vma);
		cond_resched();
	}
	i_mmap_unlock_read(mapping);

	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
	dax_wake_entry(xas, entry, WAKE_NEXT);

	trace_dax_writeback_one(mapping->host, index, count);
	return ret;

 put_unlocked:
	put_unlocked_entry(xas, entry, WAKE_NEXT);
	return ret;
}
/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
	struct inode *inode = mapping->host;
	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
	void *entry;
	int ret = 0;
	unsigned int scanned = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (mapping_empty(mapping) || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	trace_dax_writeback_range(inode, xas.xa_index, end_index);

	tag_pages_for_writeback(mapping, xas.xa_index, end_index);

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
		if (ret < 0) {
			mapping_set_error(mapping, ret);
			break;
		}
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
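/*
 * Filesystems call this from their ->writepages method for DAX mappings
 * (ext4 and XFS do, for example), passing the inode's dax_device.
 * Entries tagged TOWRITE are flushed and their mappings write-protected
 * by dax_writeback_one() above.
 */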
static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
		size_t size, void **kaddr, pfn_t *pfnp)
{
	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
	int id, rc = 0;
	long length;

	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   DAX_ACCESS, kaddr, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	if (!pfnp)
		goto out_check_addr;
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;

out_check_addr:
	if (!kaddr)
		goto out;
	if (!*kaddr)
		rc = -EFAULT;
out:
	dax_read_unlock(id);
	return rc;
}
/**
 * dax_iomap_copy_around - Prepare for an unaligned write to a shared/cow page
 * by copying the data before and after the range to be written.
 * @pos:	address to do copy from.
 * @length:	size of copy operation.
 * @align_size:	aligned w.r.t align_size (either PMD_SIZE or PAGE_SIZE)
 * @srcmap:	iomap srcmap
 * @daddr:	destination address to copy to.
 *
 * This can be called from two places.  Either during a DAX write fault (page
 * aligned), to copy @length bytes of data to @daddr.  Or, during a normal DAX
 * write, dax_iomap_iter() may call this to copy the unaligned head or tail of
 * the range; in that case the copy of the aligned middle is handled by
 * dax_iomap_iter() itself.
 * If the srcmap contains invalid data, such as HOLE and UNWRITTEN, zero the
 * area to make sure no old data remains.
 */
static int dax_iomap_copy_around(loff_t pos, uint64_t length, size_t align_size,
		const struct iomap *srcmap, void *daddr)
{
	loff_t head_off = pos & (align_size - 1);
	size_t size = ALIGN(head_off + length, align_size);
	loff_t end = pos + length;
	loff_t pg_end = round_up(end, align_size);
	/* copy_all is usually true in the page-fault case */
	bool copy_all = head_off == 0 && end == pg_end;
	/* zero the edges if srcmap is a HOLE or IOMAP_UNWRITTEN */
	bool zero_edge = srcmap->flags & IOMAP_F_SHARED ||
			 srcmap->type == IOMAP_UNWRITTEN;
	void *saddr = NULL;
	int ret = 0;

	if (!zero_edge) {
		ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL);
		if (ret)
			return dax_mem2blk_err(ret);
	}

	if (copy_all) {
		if (zero_edge)
			memset(daddr, 0, size);
		else
			ret = copy_mc_to_kernel(daddr, saddr, length);
		goto out;
	}

	/* Copy the head part of the range */
	if (head_off) {
		if (zero_edge)
			memset(daddr, 0, head_off);
		else {
			ret = copy_mc_to_kernel(daddr, saddr, head_off);
			if (ret)
				return -EIO;
		}
	}

	/* Copy the tail part of the range */
	if (end < pg_end) {
		loff_t tail_off = head_off + length;
		loff_t tail_len = pg_end - end;

		if (zero_edge)
			memset(daddr + tail_off, 0, tail_len);
		else {
			ret = copy_mc_to_kernel(daddr + tail_off,
					saddr + tail_off, tail_len);
			if (ret)
				return -EIO;
		}
	}
out:
	if (zero_edge)
		dax_flush(srcmap->dax_dev, daddr, size);
	return ret ? -EIO : 0;
}
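/*
 * Worked example: pos = 512, length = 1024, align_size = 4096.  Then
 * head_off = 512 and pg_end = 4096, so the head copy fills daddr[0,512)
 * and the tail copy fills daddr[1536,4096), leaving the caller to write
 * the middle [512,1536) itself.
 */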
/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap_iter *iter, void **entry)
{
	struct inode *inode = iter->inode;
	unsigned long vaddr = vmf->address;
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
	vm_fault_t ret;

	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);

	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}
#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap_iter *iter, void **entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = mapping->host;
	pgtable_t pgtable = NULL;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
				  DAX_PMD | DAX_ZERO_PAGE);

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		mm_inc_nr_ptes(vma->vm_mm);
	}
	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
	return VM_FAULT_NOPAGE;

fallback:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
	return VM_FAULT_FALLBACK;
}
#else
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		const struct iomap_iter *iter, void **entry)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */
static s64 dax_unshare_iter(struct iomap_iter *iter)
{
	struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	int id = 0;
	s64 ret = 0;
	void *daddr = NULL, *saddr = NULL;

	/* don't bother with blocks that are not shared to start with */
	if (!(iomap->flags & IOMAP_F_SHARED))
		return length;

	id = dax_read_lock();
	ret = dax_iomap_direct_access(iomap, pos, length, &daddr, NULL);
	if (ret < 0)
		goto out_unlock;

	/* zero the range if srcmap is a HOLE or UNWRITTEN */
	if (srcmap->flags & IOMAP_F_SHARED || srcmap->type == IOMAP_UNWRITTEN) {
		memset(daddr, 0, length);
		dax_flush(iomap->dax_dev, daddr, length);
		ret = length;
		goto out_unlock;
	}

	ret = dax_iomap_direct_access(srcmap, pos, length, &saddr, NULL);
	if (ret < 0)
		goto out_unlock;

	if (copy_mc_to_kernel(daddr, saddr, length) == 0)
		ret = length;
	else
		ret = -EIO;

out_unlock:
	dax_read_unlock(id);
	return dax_mem2blk_err(ret);
}

int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_WRITE | IOMAP_UNSHARE | IOMAP_DAX,
	};
	int ret;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = dax_unshare_iter(&iter);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_file_unshare);
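/*
 * Reflink-capable filesystems are expected to call dax_file_unshare()
 * when breaking sharing, e.g. from an fallocate(FALLOC_FL_UNSHARE)
 * implementation, so that shared blocks are copied out to newly
 * allocated ones.
 */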
static int dax_memzero(struct iomap_iter *iter, loff_t pos, size_t size)
{
	const struct iomap *iomap = &iter->iomap;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	unsigned offset = offset_in_page(pos);
	pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
	void *kaddr;
	long ret;

	ret = dax_direct_access(iomap->dax_dev, pgoff, 1, DAX_ACCESS, &kaddr,
				NULL);
	if (ret < 0)
		return dax_mem2blk_err(ret);

	memset(kaddr + offset, 0, size);
	if (iomap->flags & IOMAP_F_SHARED)
		ret = dax_iomap_copy_around(pos, size, PAGE_SIZE, srcmap,
					    kaddr);
	else
		dax_flush(iomap->dax_dev, kaddr + offset, size);
	return ret;
}
c6f40468 | 1353 | static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero) |
679c8bd3 | 1354 | { |
c6f40468 CH |
1355 | const struct iomap *iomap = &iter->iomap; |
1356 | const struct iomap *srcmap = iomap_iter_srcmap(iter); | |
1357 | loff_t pos = iter->pos; | |
1358 | u64 length = iomap_length(iter); | |
1359 | s64 written = 0; | |
1360 | ||
1361 | /* already zeroed? we're done. */ | |
1362 | if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) | |
1363 | return length; | |
1364 | ||
f80e1668 SR |
1365 | /* |
1366 | * invalidate the pages whose sharing state is to be changed | |
1367 | * because of CoW. | |
1368 | */ | |
1369 | if (iomap->flags & IOMAP_F_SHARED) | |
1370 | invalidate_inode_pages2_range(iter->inode->i_mapping, | |
1371 | pos >> PAGE_SHIFT, | |
1372 | (pos + length - 1) >> PAGE_SHIFT); | |
1373 | ||
c6f40468 CH |
1374 | do { |
1375 | unsigned offset = offset_in_page(pos); | |
1376 | unsigned size = min_t(u64, PAGE_SIZE - offset, length); | |
1377 | pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); | |
1378 | long rc; | |
1379 | int id; | |
1380 | ||
1381 | id = dax_read_lock(); | |
1382 | if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE) | |
1383 | rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1); | |
1384 | else | |
8dbfc76d | 1385 | rc = dax_memzero(iter, pos, size); |
c6f40468 | 1386 | dax_read_unlock(id); |
cccbce67 | 1387 | |
c6f40468 CH |
1388 | if (rc < 0) |
1389 | return rc; | |
1390 | pos += size; | |
1391 | length -= size; | |
1392 | written += size; | |
c6f40468 | 1393 | } while (length > 0); |
e5c71954 | 1394 | |
f8189d5d KX |
1395 | if (did_zero) |
1396 | *did_zero = true; | |
c6f40468 CH |
1397 | return written; |
1398 | } | |
1399 | ||
1400 | int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, | |
1401 | const struct iomap_ops *ops) | |
1402 | { | |
1403 | struct iomap_iter iter = { | |
1404 | .inode = inode, | |
1405 | .pos = pos, | |
1406 | .len = len, | |
952da063 | 1407 | .flags = IOMAP_DAX | IOMAP_ZERO, |
c6f40468 CH |
1408 | }; |
1409 | int ret; | |
1410 | ||
1411 | while ((ret = iomap_iter(&iter, ops)) > 0) | |
1412 | iter.processed = dax_zero_iter(&iter, did_zero); | |
1413 | return ret; | |
1414 | } | |
1415 | EXPORT_SYMBOL_GPL(dax_zero_range); | |
1416 | ||
1417 | int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, | |
1418 | const struct iomap_ops *ops) | |
1419 | { | |
1420 | unsigned int blocksize = i_blocksize(inode); | |
1421 | unsigned int off = pos & (blocksize - 1); | |
1422 | ||
1423 | /* Block boundary? Nothing to do */ | |
1424 | if (!off) | |
1425 | return 0; | |
1426 | return dax_zero_range(inode, pos, blocksize - off, did_zero, ops); | |
679c8bd3 | 1427 | } |
c6f40468 | 1428 | EXPORT_SYMBOL_GPL(dax_truncate_page); |
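/*
 * Editorial note: a hedged usage sketch, not part of fs/dax.c. On a
 * size-reducing truncate a filesystem would typically zero the partial
 * block beyond the new EOF so stale data cannot leak back through mmap;
 * dax_truncate_page() is a no-op when the new size is block-aligned. The
 * myfs_* names and myfs_iomap_ops are hypothetical.
 */
static int myfs_dax_setsize(struct inode *inode, loff_t newsize)
{
	int ret;

	ret = dax_truncate_page(inode, newsize, NULL, &myfs_iomap_ops);
	if (ret)
		return ret;
	truncate_setsize(inode, newsize);
	return 0;
}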
679c8bd3 | 1429 | |
ca289e0b CH |
1430 | static loff_t dax_iomap_iter(const struct iomap_iter *iomi, |
1431 | struct iov_iter *iter) | |
a254e568 | 1432 | { |
ca289e0b | 1433 | const struct iomap *iomap = &iomi->iomap; |
f80e1668 | 1434 | const struct iomap *srcmap = iomap_iter_srcmap(iomi); |
ca289e0b CH |
1435 | loff_t length = iomap_length(iomi); |
1436 | loff_t pos = iomi->pos; | |
cccbce67 | 1437 | struct dax_device *dax_dev = iomap->dax_dev; |
a254e568 | 1438 | loff_t end = pos + length, done = 0; |
ff17b8df | 1439 | bool write = iov_iter_rw(iter) == WRITE; |
f80e1668 | 1440 | bool cow = write && iomap->flags & IOMAP_F_SHARED; |
a254e568 | 1441 | ssize_t ret = 0; |
a77d4786 | 1442 | size_t xfer; |
cccbce67 | 1443 | int id; |
a254e568 | 1444 | |
ff17b8df | 1445 | if (!write) { |
ca289e0b | 1446 | end = min(end, i_size_read(iomi->inode)); |
a254e568 CH |
1447 | if (pos >= end) |
1448 | return 0; | |
1449 | ||
1450 | if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN) | |
1451 | return iov_iter_zero(min(length, end - pos), iter); | |
1452 | } | |
1453 | ||
ff17b8df SR |
1454 | /* |
1455 | * In DAX mode, enforce either pure overwrites of written extents, or | |
1456 | * writes to unwritten extents as part of a copy-on-write operation. | |
1457 | */ | |
1458 | if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED && | |
1459 | !(iomap->flags & IOMAP_F_SHARED))) | |
a254e568 CH |
1460 | return -EIO; |
1461 | ||
e3fce68c JK |
1462 | /* |
1463 | * A write can allocate a block for an area which has a hole page mapped
1464 | * into page tables. We have to tear down these mappings so that data | |
1465 | * written by write(2) is visible in mmap. | |
1466 | */ | |
f80e1668 | 1467 | if (iomap->flags & IOMAP_F_NEW || cow) { |
f76b3a32 SR |
1468 | /* |
1469 | * The filesystem allows CoW on non-shared extents. The src extents
1470 | * may have been mmapped and marked dirty before. To be able to
1471 | * invalidate their DAX entries, we need to clear the dirty mark
1472 | * in advance.
1473 | */ | |
1474 | if (cow) | |
1475 | __dax_clear_dirty_range(iomi->inode->i_mapping, | |
1476 | pos >> PAGE_SHIFT, | |
1477 | (end - 1) >> PAGE_SHIFT); | |
ca289e0b | 1478 | invalidate_inode_pages2_range(iomi->inode->i_mapping, |
e3fce68c JK |
1479 | pos >> PAGE_SHIFT, |
1480 | (end - 1) >> PAGE_SHIFT); | |
1481 | } | |
1482 | ||
cccbce67 | 1483 | id = dax_read_lock(); |
a254e568 CH |
1484 | while (pos < end) { |
1485 | unsigned offset = pos & (PAGE_SIZE - 1); | |
cccbce67 | 1486 | const size_t size = ALIGN(length + offset, PAGE_SIZE); |
60696eb2 | 1487 | pgoff_t pgoff = dax_iomap_pgoff(iomap, pos); |
a254e568 | 1488 | ssize_t map_len; |
047218ec | 1489 | bool recovery = false; |
cccbce67 | 1490 | void *kaddr; |
a254e568 | 1491 | |
d1908f52 MH |
1492 | if (fatal_signal_pending(current)) { |
1493 | ret = -EINTR; | |
1494 | break; | |
1495 | } | |
1496 | ||
cccbce67 | 1497 | map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), |
e511c4a3 | 1498 | DAX_ACCESS, &kaddr, NULL); |
1ea7ca1b | 1499 | if (map_len == -EHWPOISON && iov_iter_rw(iter) == WRITE) { |
047218ec JC |
1500 | map_len = dax_direct_access(dax_dev, pgoff, |
1501 | PHYS_PFN(size), DAX_RECOVERY_WRITE, | |
1502 | &kaddr, NULL); | |
1503 | if (map_len > 0) | |
1504 | recovery = true; | |
1505 | } | |
a254e568 | 1506 | if (map_len < 0) { |
1ea7ca1b | 1507 | ret = dax_mem2blk_err(map_len); |
a254e568 CH |
1508 | break; |
1509 | } | |
1510 | ||
f80e1668 | 1511 | if (cow) { |
708dfad2 SR |
1512 | ret = dax_iomap_copy_around(pos, length, PAGE_SIZE, |
1513 | srcmap, kaddr); | |
ff17b8df SR |
1514 | if (ret) |
1515 | break; | |
1516 | } | |
1517 | ||
cccbce67 DW |
1518 | map_len = PFN_PHYS(map_len); |
1519 | kaddr += offset; | |
a254e568 CH |
1520 | map_len -= offset; |
1521 | if (map_len > end - pos) | |
1522 | map_len = end - pos; | |
1523 | ||
047218ec JC |
1524 | if (recovery) |
1525 | xfer = dax_recovery_write(dax_dev, pgoff, kaddr, | |
1526 | map_len, iter); | |
ff17b8df | 1527 | else if (write) |
a77d4786 | 1528 | xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr, |
fec53774 | 1529 | map_len, iter); |
a254e568 | 1530 | else |
a77d4786 | 1531 | xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr, |
b3a9a0c3 | 1532 | map_len, iter); |
a254e568 | 1533 | |
a77d4786 DW |
1534 | pos += xfer; |
1535 | length -= xfer; | |
1536 | done += xfer; | |
1537 | ||
1538 | if (xfer == 0) | |
1539 | ret = -EFAULT; | |
1540 | if (xfer < map_len) | |
1541 | break; | |
a254e568 | 1542 | } |
cccbce67 | 1543 | dax_read_unlock(id); |
a254e568 CH |
1544 | |
1545 | return done ? done : ret; | |
1546 | } | |
1547 | ||
1548 | /** | |
11c59c92 | 1549 | * dax_iomap_rw - Perform I/O to a DAX file |
a254e568 CH |
1550 | * @iocb: The control block for this I/O |
1551 | * @iter: The addresses to do I/O from or to | |
1552 | * @ops: iomap ops passed from the file system | |
1553 | * | |
1554 | * This function performs read and write operations to directly mapped | |
1555 | * persistent memory. The caller needs to take care of read/write exclusion
1556 | * and evicting any page cache pages in the region under I/O. | |
1557 | */ | |
1558 | ssize_t | |
11c59c92 | 1559 | dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, |
8ff6daa1 | 1560 | const struct iomap_ops *ops) |
a254e568 | 1561 | { |
ca289e0b CH |
1562 | struct iomap_iter iomi = { |
1563 | .inode = iocb->ki_filp->f_mapping->host, | |
1564 | .pos = iocb->ki_pos, | |
1565 | .len = iov_iter_count(iter), | |
952da063 | 1566 | .flags = IOMAP_DAX, |
ca289e0b CH |
1567 | }; |
1568 | loff_t done = 0; | |
1569 | int ret; | |
a254e568 | 1570 | |
17d9c15c LJ |
1571 | if (!iomi.len) |
1572 | return 0; | |
1573 | ||
168316db | 1574 | if (iov_iter_rw(iter) == WRITE) { |
ca289e0b CH |
1575 | lockdep_assert_held_write(&iomi.inode->i_rwsem); |
1576 | iomi.flags |= IOMAP_WRITE; | |
168316db | 1577 | } else { |
ca289e0b | 1578 | lockdep_assert_held(&iomi.inode->i_rwsem); |
168316db | 1579 | } |
a254e568 | 1580 | |
96222d53 | 1581 | if (iocb->ki_flags & IOCB_NOWAIT) |
ca289e0b | 1582 | iomi.flags |= IOMAP_NOWAIT; |
96222d53 | 1583 | |
ca289e0b CH |
1584 | while ((ret = iomap_iter(&iomi, ops)) > 0) |
1585 | iomi.processed = dax_iomap_iter(&iomi, iter); | |
a254e568 | 1586 | |
ca289e0b CH |
1587 | done = iomi.pos - iocb->ki_pos; |
1588 | iocb->ki_pos = iomi.pos; | |
a254e568 CH |
1589 | return done ? done : ret; |
1590 | } | |
11c59c92 | 1591 | EXPORT_SYMBOL_GPL(dax_iomap_rw); |
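/*
 * Editorial note: a hedged usage sketch, not part of fs/dax.c. A minimal
 * ->read_iter for a DAX file in the style of ext2/xfs: dax_iomap_rw()
 * asserts that i_rwsem is held, so the wrapper must take it (shared for
 * reads, exclusive for writes). "myfs_iomap_ops" is hypothetical.
 */
static ssize_t myfs_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!iov_iter_count(to))
		return 0;	/* skip atime update on zero-length reads */

	inode_lock_shared(inode);
	ret = dax_iomap_rw(iocb, to, &myfs_iomap_ops);
	inode_unlock_shared(inode);

	file_accessed(iocb->ki_filp);
	return ret;
}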
a7d73fe6 | 1592 | |
ab77dab4 | 1593 | static vm_fault_t dax_fault_return(int error) |
9f141d6e JK |
1594 | { |
1595 | if (error == 0) | |
1596 | return VM_FAULT_NOPAGE; | |
c9aed74e | 1597 | return vmf_error(error); |
9f141d6e JK |
1598 | } |
1599 | ||
55f81639 SR |
1600 | /* |
1601 | * When handling a synchronous page fault and the inode needs fsync, we can
1602 | * insert the PTE/PMD into page tables only after that fsync has happened. Skip
1603 | * insertion for now and return the pfn so that the caller can insert it after the
1604 | * fsync is done. | |
1605 | */ | |
1606 | static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn) | |
1607 | { | |
1608 | if (WARN_ON_ONCE(!pfnp)) | |
1609 | return VM_FAULT_SIGBUS; | |
1610 | *pfnp = pfn; | |
1611 | return VM_FAULT_NEEDDSYNC; | |
1612 | } | |
1613 | ||
65dd814a CH |
1614 | static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf, |
1615 | const struct iomap_iter *iter) | |
55f81639 | 1616 | { |
55f81639 SR |
1617 | vm_fault_t ret; |
1618 | int error = 0; | |
1619 | ||
65dd814a | 1620 | switch (iter->iomap.type) { |
55f81639 SR |
1621 | case IOMAP_HOLE: |
1622 | case IOMAP_UNWRITTEN: | |
429f8de7 | 1623 | clear_user_highpage(vmf->cow_page, vmf->address); |
55f81639 SR |
1624 | break; |
1625 | case IOMAP_MAPPED: | |
429f8de7 | 1626 | error = copy_cow_page_dax(vmf, iter); |
55f81639 SR |
1627 | break; |
1628 | default: | |
1629 | WARN_ON_ONCE(1); | |
1630 | error = -EIO; | |
1631 | break; | |
1632 | } | |
1633 | ||
1634 | if (error) | |
1635 | return dax_fault_return(error); | |
1636 | ||
1637 | __SetPageUptodate(vmf->cow_page); | |
1638 | ret = finish_fault(vmf); | |
1639 | if (!ret) | |
1640 | return VM_FAULT_DONE_COW; | |
1641 | return ret; | |
1642 | } | |
1643 | ||
c2436190 | 1644 | /** |
65dd814a | 1645 | * dax_fault_iter - Common actor to handle pfn insertion in PTE/PMD fault. |
c2436190 | 1646 | * @vmf: vm fault instance |
65dd814a | 1647 | * @iter: iomap iter |
c2436190 SR |
1648 | * @pfnp: pfn to be returned |
1649 | * @xas: the dax mapping tree of a file | |
1650 | * @entry: an unlocked dax entry to be inserted | |
1651 | * @pmd: distinguish whether it is a PMD fault
c2436190 | 1652 | */ |
65dd814a CH |
1653 | static vm_fault_t dax_fault_iter(struct vm_fault *vmf, |
1654 | const struct iomap_iter *iter, pfn_t *pfnp, | |
1655 | struct xa_state *xas, void **entry, bool pmd) | |
c2436190 | 1656 | { |
65dd814a | 1657 | const struct iomap *iomap = &iter->iomap; |
708dfad2 | 1658 | const struct iomap *srcmap = iomap_iter_srcmap(iter); |
c2436190 SR |
1659 | size_t size = pmd ? PMD_SIZE : PAGE_SIZE; |
1660 | loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT; | |
e5d6df73 | 1661 | bool write = iter->flags & IOMAP_WRITE; |
c2436190 SR |
1662 | unsigned long entry_flags = pmd ? DAX_PMD : 0; |
1663 | int err = 0; | |
1664 | pfn_t pfn; | |
ff17b8df | 1665 | void *kaddr; |
c2436190 | 1666 | |
65dd814a CH |
1667 | if (!pmd && vmf->cow_page) |
1668 | return dax_fault_cow_page(vmf, iter); | |
1669 | ||
c2436190 SR |
1670 | /* If we are reading UNWRITTEN or HOLE, return a hole. */
1671 | if (!write && | |
1672 | (iomap->type == IOMAP_UNWRITTEN || iomap->type == IOMAP_HOLE)) { | |
1673 | if (!pmd) | |
e5d6df73 SR |
1674 | return dax_load_hole(xas, vmf, iter, entry); |
1675 | return dax_pmd_load_hole(xas, vmf, iter, entry); | |
c2436190 SR |
1676 | } |
1677 | ||
ff17b8df | 1678 | if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) { |
c2436190 SR |
1679 | WARN_ON_ONCE(1); |
1680 | return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS; | |
1681 | } | |
1682 | ||
ff17b8df | 1683 | err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn); |
c2436190 SR |
1684 | if (err) |
1685 | return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err); | |
1686 | ||
e5d6df73 | 1687 | *entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags); |
c2436190 | 1688 | |
708dfad2 SR |
1689 | if (write && iomap->flags & IOMAP_F_SHARED) { |
1690 | err = dax_iomap_copy_around(pos, size, size, srcmap, kaddr); | |
ff17b8df SR |
1691 | if (err) |
1692 | return dax_fault_return(err); | |
1693 | } | |
c2436190 | 1694 | |
e5d6df73 | 1695 | if (dax_fault_is_synchronous(iter, vmf->vma)) |
c2436190 SR |
1696 | return dax_fault_synchronous_pfnp(pfnp, pfn); |
1697 | ||
1698 | /* insert PMD pfn */ | |
1699 | if (pmd) | |
1700 | return vmf_insert_pfn_pmd(vmf, pfn, write); | |
1701 | ||
1702 | /* insert PTE pfn */ | |
1703 | if (write) | |
1704 | return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); | |
1705 | return vmf_insert_mixed(vmf->vma, vmf->address, pfn); | |
1706 | } | |
1707 | ||
ab77dab4 | 1708 | static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, |
c0b24625 | 1709 | int *iomap_errp, const struct iomap_ops *ops) |
a7d73fe6 | 1710 | { |
65dd814a | 1711 | struct address_space *mapping = vmf->vma->vm_file->f_mapping; |
b15cd800 | 1712 | XA_STATE(xas, &mapping->i_pages, vmf->pgoff); |
65dd814a CH |
1713 | struct iomap_iter iter = { |
1714 | .inode = mapping->host, | |
1715 | .pos = (loff_t)vmf->pgoff << PAGE_SHIFT, | |
1716 | .len = PAGE_SIZE, | |
952da063 | 1717 | .flags = IOMAP_DAX | IOMAP_FAULT, |
65dd814a | 1718 | }; |
ab77dab4 | 1719 | vm_fault_t ret = 0; |
a7d73fe6 | 1720 | void *entry; |
65dd814a | 1721 | int error; |
a7d73fe6 | 1722 | |
65dd814a | 1723 | trace_dax_pte_fault(iter.inode, vmf, ret); |
a7d73fe6 CH |
1724 | /* |
1725 | * Check that the offset isn't beyond the end of the file now. The caller is
1726 | * supposed to hold locks serializing us with truncate / punch hole, so this
1727 | * is a reliable test.
1728 | */ | |
65dd814a | 1729 | if (iter.pos >= i_size_read(iter.inode)) { |
ab77dab4 | 1730 | ret = VM_FAULT_SIGBUS; |
a9c42b33 RZ |
1731 | goto out; |
1732 | } | |
a7d73fe6 | 1733 | |
65dd814a CH |
1734 | if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page) |
1735 | iter.flags |= IOMAP_WRITE; | |
a7d73fe6 | 1736 | |
b15cd800 MW |
1737 | entry = grab_mapping_entry(&xas, mapping, 0); |
1738 | if (xa_is_internal(entry)) { | |
1739 | ret = xa_to_internal(entry); | |
13e451fd JK |
1740 | goto out; |
1741 | } | |
1742 | ||
e2093926 RZ |
1743 | /* |
1744 | * It is possible, particularly with mixed reads & writes to private | |
1745 | * mappings, that we have raced with a PMD fault that overlaps with | |
1746 | * the PTE we need to set up. If so just return and the fault will be | |
1747 | * retried. | |
1748 | */ | |
1749 | if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) { | |
ab77dab4 | 1750 | ret = VM_FAULT_NOPAGE; |
e2093926 RZ |
1751 | goto unlock_entry; |
1752 | } | |
1753 | ||
65dd814a CH |
1754 | while ((error = iomap_iter(&iter, ops)) > 0) { |
1755 | if (WARN_ON_ONCE(iomap_length(&iter) < PAGE_SIZE)) { | |
1756 | iter.processed = -EIO; /* fs corruption? */ | |
1757 | continue; | |
a7d73fe6 CH |
1758 | } |
1759 | ||
65dd814a CH |
1760 | ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, false); |
1761 | if (ret != VM_FAULT_SIGBUS && | |
1762 | (iter.iomap.flags & IOMAP_F_NEW)) { | |
a7d73fe6 | 1763 | count_vm_event(PGMAJFAULT); |
65dd814a CH |
1764 | count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); |
1765 | ret |= VM_FAULT_MAJOR; | |
a7d73fe6 | 1766 | } |
1b5a1cb2 | 1767 | |
65dd814a CH |
1768 | if (!(ret & VM_FAULT_ERROR)) |
1769 | iter.processed = PAGE_SIZE; | |
a7d73fe6 CH |
1770 | } |
1771 | ||
65dd814a CH |
1772 | if (iomap_errp) |
1773 | *iomap_errp = error; | |
1774 | if (!ret && error) | |
1775 | ret = dax_fault_return(error); | |
9f141d6e | 1776 | |
c2436190 | 1777 | unlock_entry: |
b15cd800 | 1778 | dax_unlock_entry(&xas, entry); |
c2436190 | 1779 | out: |
65dd814a CH |
1780 | trace_dax_pte_fault_done(iter.inode, vmf, ret); |
1781 | return ret; | |
a7d73fe6 | 1782 | } |
642261ac RZ |
1783 | |
1784 | #ifdef CONFIG_FS_DAX_PMD | |
55f81639 SR |
1785 | static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas, |
1786 | pgoff_t max_pgoff) | |
642261ac | 1787 | { |
f4200391 | 1788 | unsigned long pmd_addr = vmf->address & PMD_MASK; |
55f81639 | 1789 | bool write = vmf->flags & FAULT_FLAG_WRITE; |
642261ac | 1790 | |
55f81639 SR |
1791 | /* |
1792 | * Make sure that the faulting address's PMD offset (color) matches | |
1793 | * the PMD offset from the start of the file. This is necessary so | |
1794 | * that a PMD range in the page table overlaps exactly with a PMD | |
1795 | * range in the page cache. | |
1796 | */ | |
1797 | if ((vmf->pgoff & PG_PMD_COLOUR) != | |
1798 | ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR)) | |
1799 | return true; | |
642261ac | 1800 | |
55f81639 SR |
1801 | /* Fall back to PTEs if we're going to COW */ |
1802 | if (write && !(vmf->vma->vm_flags & VM_SHARED)) | |
1803 | return true; | |
11cf9d86 | 1804 | |
55f81639 SR |
1805 | /* If the PMD would extend outside the VMA */ |
1806 | if (pmd_addr < vmf->vma->vm_start) | |
1807 | return true; | |
1808 | if ((pmd_addr + PMD_SIZE) > vmf->vma->vm_end) | |
1809 | return true; | |
642261ac | 1810 | |
55f81639 SR |
1811 | /* If the PMD would extend beyond the file size */ |
1812 | if ((xas->xa_index | PG_PMD_COLOUR) >= max_pgoff) | |
1813 | return true; | |
653b2ea3 | 1814 | |
55f81639 | 1815 | return false; |
642261ac RZ |
1816 | } |
1817 | ||
ab77dab4 | 1818 | static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, |
a2d58167 | 1819 | const struct iomap_ops *ops) |
642261ac | 1820 | { |
65dd814a | 1821 | struct address_space *mapping = vmf->vma->vm_file->f_mapping; |
b15cd800 | 1822 | XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); |
65dd814a CH |
1823 | struct iomap_iter iter = { |
1824 | .inode = mapping->host, | |
1825 | .len = PMD_SIZE, | |
952da063 | 1826 | .flags = IOMAP_DAX | IOMAP_FAULT, |
65dd814a | 1827 | }; |
c2436190 | 1828 | vm_fault_t ret = VM_FAULT_FALLBACK; |
b15cd800 | 1829 | pgoff_t max_pgoff; |
642261ac | 1830 | void *entry; |
642261ac | 1831 | |
65dd814a CH |
1832 | if (vmf->flags & FAULT_FLAG_WRITE) |
1833 | iter.flags |= IOMAP_WRITE; | |
642261ac | 1834 | |
282a8e03 RZ |
1835 | /* |
1836 | * Check that the offset isn't beyond the end of the file now. The caller
1837 | * is supposed to hold locks serializing us with truncate / punch hole, so
1838 | * this is a reliable test.
1839 | */ | |
65dd814a | 1840 | max_pgoff = DIV_ROUND_UP(i_size_read(iter.inode), PAGE_SIZE); |
fffa281b | 1841 | |
65dd814a | 1842 | trace_dax_pmd_fault(iter.inode, vmf, max_pgoff, 0); |
642261ac | 1843 | |
b15cd800 | 1844 | if (xas.xa_index >= max_pgoff) { |
c2436190 | 1845 | ret = VM_FAULT_SIGBUS; |
282a8e03 RZ |
1846 | goto out; |
1847 | } | |
642261ac | 1848 | |
55f81639 | 1849 | if (dax_fault_check_fallback(vmf, &xas, max_pgoff)) |
642261ac RZ |
1850 | goto fallback; |
1851 | ||
876f2946 | 1852 | /* |
b15cd800 MW |
1853 | * grab_mapping_entry() will make sure we get an empty PMD entry, |
1854 | * a zero PMD entry or a DAX PMD. If it can't (because a PTE | |
1855 | * entry is already in the array, for instance), it will return | |
1856 | * VM_FAULT_FALLBACK. | |
876f2946 | 1857 | */ |
23c84eb7 | 1858 | entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); |
b15cd800 | 1859 | if (xa_is_internal(entry)) { |
c2436190 | 1860 | ret = xa_to_internal(entry); |
876f2946 | 1861 | goto fallback; |
b15cd800 | 1862 | } |
876f2946 | 1863 | |
e2093926 RZ |
1864 | /* |
1865 | * It is possible, particularly with mixed reads & writes to private | |
1866 | * mappings, that we have raced with a PTE fault that overlaps with | |
1867 | * the PMD we need to set up. If so just return and the fault will be | |
1868 | * retried. | |
1869 | */ | |
1870 | if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) && | |
1871 | !pmd_devmap(*vmf->pmd)) { | |
c2436190 | 1872 | ret = 0; |
e2093926 RZ |
1873 | goto unlock_entry; |
1874 | } | |
1875 | ||
65dd814a | 1876 | iter.pos = (loff_t)xas.xa_index << PAGE_SHIFT; |
dd0c6425 | 1877 | while (iomap_iter(&iter, ops) > 0) { |
65dd814a CH |
1878 | if (iomap_length(&iter) < PMD_SIZE) |
1879 | continue; /* actually breaks out of the loop */ | |
caa51d26 | 1880 | |
65dd814a CH |
1881 | ret = dax_fault_iter(vmf, &iter, pfnp, &xas, &entry, true); |
1882 | if (ret != VM_FAULT_FALLBACK) | |
1883 | iter.processed = PMD_SIZE; | |
642261ac RZ |
1884 | } |
1885 | ||
c2436190 | 1886 | unlock_entry: |
b15cd800 | 1887 | dax_unlock_entry(&xas, entry); |
c2436190 SR |
1888 | fallback: |
1889 | if (ret == VM_FAULT_FALLBACK) { | |
65dd814a | 1890 | split_huge_pmd(vmf->vma, vmf->pmd, vmf->address); |
642261ac RZ |
1891 | count_vm_event(THP_FAULT_FALLBACK); |
1892 | } | |
282a8e03 | 1893 | out: |
65dd814a | 1894 | trace_dax_pmd_fault_done(iter.inode, vmf, max_pgoff, ret); |
c2436190 | 1895 | return ret; |
642261ac | 1896 | } |
a2d58167 | 1897 | #else |
ab77dab4 | 1898 | static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp, |
01cddfe9 | 1899 | const struct iomap_ops *ops) |
a2d58167 DJ |
1900 | { |
1901 | return VM_FAULT_FALLBACK; | |
1902 | } | |
642261ac | 1903 | #endif /* CONFIG_FS_DAX_PMD */ |
a2d58167 DJ |
1904 | |
1905 | /** | |
1906 | * dax_iomap_fault - handle a page fault on a DAX file | |
1907 | * @vmf: The description of the fault | |
cec04e8c | 1908 | * @pe_size: Size of the page to fault in |
9a0dd422 | 1909 | * @pfnp: PFN to insert for synchronous faults if fsync is required |
c0b24625 | 1910 | * @iomap_errp: Storage for detailed error code in case of error |
cec04e8c | 1911 | * @ops: Iomap ops passed from the file system |
a2d58167 DJ |
1912 | * |
1913 | * When a page fault occurs, filesystems may call this helper in | |
1914 | * their fault handler for DAX files. dax_iomap_fault() assumes the caller | |
1915 | * has done all the necessary locking for the page fault to proceed
1916 | * successfully. | |
1917 | */ | |
ab77dab4 | 1918 | vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, |
c0b24625 | 1919 | pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops) |
a2d58167 | 1920 | { |
c791ace1 DJ |
1921 | switch (pe_size) { |
1922 | case PE_SIZE_PTE: | |
c0b24625 | 1923 | return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops); |
c791ace1 | 1924 | case PE_SIZE_PMD: |
9a0dd422 | 1925 | return dax_iomap_pmd_fault(vmf, pfnp, ops); |
a2d58167 DJ |
1926 | default: |
1927 | return VM_FAULT_FALLBACK; | |
1928 | } | |
1929 | } | |
1930 | EXPORT_SYMBOL_GPL(dax_iomap_fault); | |
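/*
 * Editorial note: a hedged usage sketch, not part of fs/dax.c. It wires
 * dax_iomap_fault() into a vm_operations_struct. Passing a NULL pfnp
 * assumes the filesystem does not advertise MAP_SYNC; synchronous faults
 * need the fuller pattern shown after dax_finish_sync_fault() below. The
 * myfs_* names and myfs_iomap_ops are hypothetical.
 */
static vm_fault_t myfs_dax_fault(struct vm_fault *vmf)
{
	return dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &myfs_iomap_ops);
}

static vm_fault_t myfs_dax_huge_fault(struct vm_fault *vmf,
				      enum page_entry_size pe_size)
{
	return dax_iomap_fault(vmf, pe_size, NULL, NULL, &myfs_iomap_ops);
}

static const struct vm_operations_struct myfs_dax_vm_ops = {
	.fault		= myfs_dax_fault,
	.huge_fault	= myfs_dax_huge_fault,
	.page_mkwrite	= myfs_dax_fault,
	.pfn_mkwrite	= myfs_dax_fault,
};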
71eab6df | 1931 | |
a77d19f4 | 1932 | /* |
71eab6df JK |
1933 | * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables |
1934 | * @vmf: The description of the fault | |
71eab6df | 1935 | * @pfn: PFN to insert |
cfc93c6c | 1936 | * @order: Order of entry to insert. |
71eab6df | 1937 | * |
a77d19f4 MW |
1938 | * This function inserts a writeable PTE or PMD entry into the page tables |
1939 | * for an mmapped DAX file. It also marks the page cache entry as dirty.
71eab6df | 1940 | */ |
cfc93c6c MW |
1941 | static vm_fault_t |
1942 | dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order) | |
71eab6df JK |
1943 | { |
1944 | struct address_space *mapping = vmf->vma->vm_file->f_mapping; | |
cfc93c6c MW |
1945 | XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); |
1946 | void *entry; | |
ab77dab4 | 1947 | vm_fault_t ret; |
71eab6df | 1948 | |
cfc93c6c | 1949 | xas_lock_irq(&xas); |
23c84eb7 | 1950 | entry = get_unlocked_entry(&xas, order); |
71eab6df | 1951 | /* Did we race with someone splitting entry or so? */ |
23c84eb7 MWO |
1952 | if (!entry || dax_is_conflict(entry) || |
1953 | (order == 0 && !dax_is_pte_entry(entry))) { | |
4c3d043d | 1954 | put_unlocked_entry(&xas, entry, WAKE_NEXT); |
cfc93c6c | 1955 | xas_unlock_irq(&xas); |
71eab6df JK |
1956 | trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, |
1957 | VM_FAULT_NOPAGE); | |
1958 | return VM_FAULT_NOPAGE; | |
1959 | } | |
cfc93c6c MW |
1960 | xas_set_mark(&xas, PAGECACHE_TAG_DIRTY); |
1961 | dax_lock_entry(&xas, entry); | |
1962 | xas_unlock_irq(&xas); | |
1963 | if (order == 0) | |
ab77dab4 | 1964 | ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn); |
71eab6df | 1965 | #ifdef CONFIG_FS_DAX_PMD |
cfc93c6c | 1966 | else if (order == PMD_ORDER) |
fce86ff5 | 1967 | ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE); |
71eab6df | 1968 | #endif |
cfc93c6c | 1969 | else |
ab77dab4 | 1970 | ret = VM_FAULT_FALLBACK; |
cfc93c6c | 1971 | dax_unlock_entry(&xas, entry); |
ab77dab4 SJ |
1972 | trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); |
1973 | return ret; | |
71eab6df JK |
1974 | } |
1975 | ||
1976 | /** | |
1977 | * dax_finish_sync_fault - finish synchronous page fault | |
1978 | * @vmf: The description of the fault | |
1979 | * @pe_size: Size of entry to be inserted | |
1980 | * @pfn: PFN to insert | |
1981 | * | |
1982 | * This function ensures that the file range touched by the page fault is | |
1983 | * stored persistently on the media and handles insertion of the appropriate
1984 | * page table entry.
1985 | */ | |
ab77dab4 SJ |
1986 | vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, |
1987 | enum page_entry_size pe_size, pfn_t pfn) | |
71eab6df JK |
1988 | { |
1989 | int err; | |
1990 | loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT; | |
cfc93c6c MW |
1991 | unsigned int order = pe_order(pe_size); |
1992 | size_t len = PAGE_SIZE << order; | |
71eab6df | 1993 | |
71eab6df JK |
1994 | err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1); |
1995 | if (err) | |
1996 | return VM_FAULT_SIGBUS; | |
cfc93c6c | 1997 | return dax_insert_pfn_mkwrite(vmf, pfn, order); |
71eab6df JK |
1998 | } |
1999 | EXPORT_SYMBOL_GPL(dax_finish_sync_fault); | |
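/*
 * Editorial note: a hedged usage sketch, not part of fs/dax.c. A fault
 * handler that supports MAP_SYNC: dax_iomap_fault() returns
 * VM_FAULT_NEEDDSYNC with the pfn filled in but not yet inserted;
 * dax_finish_sync_fault() then fsyncs the faulted range and inserts the
 * entry. The myfs_* names and myfs_iomap_ops are hypothetical.
 */
static vm_fault_t myfs_dax_sync_fault(struct vm_fault *vmf,
				      enum page_entry_size pe_size)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
		     (vmf->vma->vm_flags & VM_SHARED);
	vm_fault_t ret;
	pfn_t pfn;

	if (write) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}
	ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL, &myfs_iomap_ops);
	if (ret & VM_FAULT_NEEDDSYNC)
		ret = dax_finish_sync_fault(vmf, pe_size, pfn);
	if (write)
		sb_end_pagefault(inode->i_sb);
	return ret;
}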
6f7db389 SR |
2000 | |
2001 | static loff_t dax_range_compare_iter(struct iomap_iter *it_src, | |
2002 | struct iomap_iter *it_dest, u64 len, bool *same) | |
2003 | { | |
2004 | const struct iomap *smap = &it_src->iomap; | |
2005 | const struct iomap *dmap = &it_dest->iomap; | |
2006 | loff_t pos1 = it_src->pos, pos2 = it_dest->pos; | |
2007 | void *saddr, *daddr; | |
2008 | int id, ret; | |
2009 | ||
2010 | len = min(len, min(smap->length, dmap->length)); | |
2011 | ||
2012 | if (smap->type == IOMAP_HOLE && dmap->type == IOMAP_HOLE) { | |
2013 | *same = true; | |
2014 | return len; | |
2015 | } | |
2016 | ||
2017 | if (smap->type == IOMAP_HOLE || dmap->type == IOMAP_HOLE) { | |
2018 | *same = false; | |
2019 | return 0; | |
2020 | } | |
2021 | ||
2022 | id = dax_read_lock(); | |
2023 | ret = dax_iomap_direct_access(smap, pos1, ALIGN(pos1 + len, PAGE_SIZE), | |
2024 | &saddr, NULL); | |
2025 | if (ret < 0) | |
2026 | goto out_unlock; | |
2027 | ||
2028 | ret = dax_iomap_direct_access(dmap, pos2, ALIGN(pos2 + len, PAGE_SIZE), | |
2029 | &daddr, NULL); | |
2030 | if (ret < 0) | |
2031 | goto out_unlock; | |
2032 | ||
2033 | *same = !memcmp(saddr, daddr, len); | |
2034 | if (!*same) | |
2035 | len = 0; | |
2036 | dax_read_unlock(id); | |
2037 | return len; | |
2038 | ||
2039 | out_unlock: | |
2040 | dax_read_unlock(id); | |
2041 | return -EIO; | |
2042 | } | |
2043 | ||
2044 | int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff, | |
2045 | struct inode *dst, loff_t dstoff, loff_t len, bool *same, | |
2046 | const struct iomap_ops *ops) | |
2047 | { | |
2048 | struct iomap_iter src_iter = { | |
2049 | .inode = src, | |
2050 | .pos = srcoff, | |
2051 | .len = len, | |
2052 | .flags = IOMAP_DAX, | |
2053 | }; | |
2054 | struct iomap_iter dst_iter = { | |
2055 | .inode = dst, | |
2056 | .pos = dstoff, | |
2057 | .len = len, | |
2058 | .flags = IOMAP_DAX, | |
2059 | }; | |
0e79e373 | 2060 | int ret, compared = 0; |
6f7db389 | 2061 | |
0e79e373 SR |
2062 | while ((ret = iomap_iter(&src_iter, ops)) > 0 && |
2063 | (ret = iomap_iter(&dst_iter, ops)) > 0) { | |
e900ba10 SR |
2064 | compared = dax_range_compare_iter(&src_iter, &dst_iter, |
2065 | min(src_iter.len, dst_iter.len), same); | |
0e79e373 SR |
2066 | if (compared < 0) |
2067 | return ret; | |
2068 | src_iter.processed = dst_iter.processed = compared; | |
6f7db389 SR |
2069 | } |
2070 | return ret; | |
2071 | } | |
2072 | ||
2073 | int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in, | |
2074 | struct file *file_out, loff_t pos_out, | |
2075 | loff_t *len, unsigned int remap_flags, | |
2076 | const struct iomap_ops *ops) | |
2077 | { | |
2078 | return __generic_remap_file_range_prep(file_in, pos_in, file_out, | |
2079 | pos_out, len, remap_flags, ops); | |
2080 | } | |
2081 | EXPORT_SYMBOL_GPL(dax_remap_file_range_prep); |
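/*
 * Editorial note: a hedged usage sketch, not part of fs/dax.c. A
 * ->remap_file_range handler could call dax_remap_file_range_prep() to
 * validate and trim the request before doing its own extent remapping; for
 * FIDEDUPERANGE the generic prep compares the source and destination bytes,
 * which for DAX files is expected to reach dax_dedupe_file_range_compare()
 * above. The extent-remapping step is elided; myfs_iomap_ops is
 * hypothetical.
 */
static loff_t myfs_dax_remap_file_range(struct file *file_in, loff_t pos_in,
		struct file *file_out, loff_t pos_out, loff_t len,
		unsigned int remap_flags)
{
	int ret;

	ret = dax_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
					&len, remap_flags, &myfs_iomap_ops);
	if (ret < 0 || len == 0)
		return ret;

	/* ... filesystem-specific extent remapping would go here ... */

	return len;
}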