/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>

int dax_clear_blocks(struct inode *inode, sector_t block, long size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	sector_t sector = block << (inode->i_blkbits - 9);

	might_sleep();
	do {
		void __pmem *addr;
		unsigned long pfn;
		long count;

		count = bdev_direct_access(bdev, sector, &addr, &pfn, size);
		if (count < 0)
			return count;
		BUG_ON(size < count);
		while (count > 0) {
			unsigned pgsz = PAGE_SIZE - offset_in_page(addr);
			if (pgsz > count)
				pgsz = count;
			clear_pmem(addr, pgsz);
			addr += pgsz;
			size -= pgsz;
			count -= pgsz;
			BUG_ON(pgsz & 511);
			sector += pgsz / 512;
			cond_resched();
		}
	} while (size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);
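
/*
 * Example (a sketch, not part of this file): a filesystem can call
 * dax_clear_blocks() when it allocates blocks without zeroing them, so
 * that a subsequent mmap load doesn't see stale media contents.  With a
 * hypothetical allocator that just returned 'block':
 *
 *	err = dax_clear_blocks(inode, block, 1 << inode->i_blkbits);
 *	if (err)
 *		goto cleanup;
 */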

static long dax_get_addr(struct buffer_head *bh, void __pmem **addr,
		unsigned blkbits)
{
	unsigned long pfn;
	sector_t sector = bh->b_blocknr << (blkbits - 9);
	return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size);
}

/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
		loff_t pos, loff_t end)
{
	loff_t final = end - pos + first; /* The final byte of the buffer */

	if (first > 0)
		clear_pmem(addr, first);
	if (final < size)
		clear_pmem(addr + final, size - final);
}

static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}

static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		loff_t start, loff_t end, get_block_t get_block,
		struct buffer_head *bh)
{
	ssize_t retval = 0;
	loff_t pos = start;
	loff_t max = start;
	loff_t bh_max = start;
	void __pmem *addr;
	bool hole = false;
	bool need_wmb = false;

	if (iov_iter_rw(iter) != WRITE)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			sector_t block = pos >> blkbits;
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				retval = get_block(inode, block, bh,
						iov_iter_rw(iter) == WRITE);
				if (retval)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh);
			if (hole) {
				addr = NULL;
				size = bh->b_size - first;
			} else {
				retval = dax_get_addr(bh, &addr, blkbits);
				if (retval < 0)
					break;
				if (buffer_unwritten(bh) || buffer_new(bh)) {
					dax_new_buf(addr, retval, first, pos,
							end);
					need_wmb = true;
				}
				addr += first;
				size = retval - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *)addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len)
			break;

		pos += len;
		addr += len;
	}

	if (need_wmb)
		wmb_pmem();

	return (pos == start) ? retval : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		struct iov_iter *iter, loff_t pos, get_block_t get_block,
		dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		mutex_lock(&inode->i_mutex);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		mutex_unlock(&inode->i_mutex);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
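
/*
 * Example (a sketch): a filesystem's ->direct_IO method can route DAX I/O
 * here rather than through the block layer.  foo_get_block is a
 * hypothetical get_block_t; DIO_LOCKING selects the locking scheme
 * described above:
 *
 *	static ssize_t foo_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
 *				     loff_t offset)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		if (IS_DAX(inode))
 *			return dax_do_io(iocb, inode, iter, offset,
 *					foo_get_block, NULL, DIO_LOCKING);
 *		return blockdev_direct_IO(iocb, inode, iter, offset,
 *					foo_get_block);
 *	}
 */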

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
			struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}

static int copy_user_bh(struct page *to, struct buffer_head *bh,
			unsigned blkbits, unsigned long vaddr)
{
	void __pmem *vfrom;
	void *vto;

	if (dax_get_addr(bh, &vfrom, blkbits) < 0)
		return -EIO;
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)vfrom, vaddr, to);
	kunmap_atomic(vto);
	return 0;
}

static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	void __pmem *addr;
	unsigned long pfn;
	pgoff_t size;
	int error;

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file.  We can't tell the filesystem to free it because we can't
	 * take i_mutex here.  In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	error = bdev_direct_access(bh->b_bdev, sector, &addr, &pfn, bh->b_size);
	if (error < 0)
		goto out;
	if (error < PAGE_SIZE) {
		error = -EIO;
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh)) {
		clear_pmem(addr, PAGE_SIZE);
		wmb_pmem();
	}

	error = vm_insert_mixed(vma, vaddr, pfn);

 out:
	return error;
}

/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed.  This is required
 *	by write faults for filesystems that will return unwritten extent
 *	mappings from @get_block, but it is optional for reads as
 *	dax_insert_mapping() will always zero unwritten blocks.  If the fs
 *	does not support unwritten extents, it should pass NULL.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.  __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock;
		}
	} else {
		i_mmap_lock_write(mapping);
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock;
		} else {
			i_mmap_unlock_write(mapping);
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, &bh, blkbits, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock;
		vmf->page = page;
		if (!page) {
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				error = -EIO;
				goto unlock;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_CACHE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent.  If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released.  We
	 * indicate what the callback should do via the uptodate variable, same
	 * as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh)) {
		if (complete_unwritten)
			complete_unwritten(&bh, !error);
		else
			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
	}

	if (!page)
		i_mmap_unlock_write(mapping);
 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	} else {
		i_mmap_unlock_write(mapping);
	}

	goto out;
}
EXPORT_SYMBOL(__dax_fault);

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed (see __dax_fault();
 *	may be NULL if the filesystem never returns unwritten extents)
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
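
/*
 * Example (a sketch): a filesystem typically exposes dax_fault() through a
 * thin wrapper in its vm_operations_struct.  foo_get_block is hypothetical;
 * passing NULL for @complete_unwritten assumes the filesystem never returns
 * unwritten extents from its get_block:
 *
 *	static int foo_dax_fault(struct vm_area_struct *vma,
 *				 struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, foo_get_block, NULL);
 *	}
 */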

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

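/*
 * Worked example (assuming x86-64 defaults, not spelled out here): with
 * 4KiB pages and 2MiB PMDs, PMD_SIZE >> PAGE_SHIFT is 512, so
 * PG_PMD_COLOUR is 511 (0x1ff).  It is used below both to check that a
 * full PMD's worth of pages lies within the file (the
 * (pgoff | PG_PMD_COLOUR) >= size tests) and that the pfn returned by the
 * block device is PMD-aligned (the pfn & PG_PMD_COLOUR test).
 */
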
int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block,
		dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	long length;
	void __pmem *kaddr;
	pgoff_t size, pgoff;
	sector_t block, sector;
	unsigned long pfn;
	int result = 0;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		return VM_FAULT_FALLBACK;
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		return VM_FAULT_FALLBACK;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		return VM_FAULT_FALLBACK;

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size)
		return VM_FAULT_FALLBACK;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;
	i_mmap_lock_write(mapping);
	length = get_block(inode, block, &bh, write);
	if (length) {
		/* must not return with i_mmap_rwsem still held */
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs.  Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
		goto fallback;

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (buffer_new(&bh)) {
		i_mmap_unlock_write(mapping);
		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
		i_mmap_lock_write(mapping);
	}

	/*
	 * If a truncate happened while we were allocating blocks, we may
	 * leave blocks allocated to the file that are beyond EOF.  We can't
	 * take i_mutex here, so just leave them hanging; they'll be freed
	 * when the file is deleted.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}
	if ((pgoff | PG_PMD_COLOUR) >= size)
		goto fallback;

	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page))
			goto fallback;

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			goto fallback;
		}

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		sector = bh.b_blocknr << (blkbits - 9);
		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
						bh.b_size);
		if (length < 0) {
			result = VM_FAULT_SIGBUS;
			goto out;
		}
		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
			goto fallback;

		/*
		 * Zero newly allocated or unwritten storage before mapping
		 * it into userspace; kaddr is only valid once
		 * bdev_direct_access() has succeeded.
		 */
		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
			clear_pmem(kaddr, PMD_SIZE);
			wmb_pmem();
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			result |= VM_FAULT_MAJOR;
		}

		result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
	}

 out:
	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));

	i_mmap_unlock_write(mapping);

	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);
650
651/**
652 * dax_pmd_fault - handle a PMD fault on a DAX file
653 * @vma: The virtual memory area where the fault occurred
654 * @vmf: The description of the fault
655 * @get_block: The filesystem method used to translate file offsets to blocks
656 *
657 * When a page fault occurs, filesystems may call this helper in their
658 * pmd_fault handler for DAX files.
659 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags, get_block_t get_block,
			dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
				complete_unwritten);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
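
/*
 * Example (a sketch): the matching ->pmd_fault wrapper, with the same
 * hypothetical foo_get_block as above:
 *
 *	static int foo_dax_pmd_fault(struct vm_area_struct *vma,
 *				     unsigned long addr, pmd_t *pmd,
 *				     unsigned int flags)
 *	{
 *		return dax_pmd_fault(vma, addr, pmd, flags, foo_get_block,
 *					NULL);
 *	}
 */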
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	sb_end_pagefault(sb);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
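
/*
 * Example (a sketch): wiring the helpers above into a file's mmap path.
 * The foo_* wrappers are the hypothetical ones sketched earlier; whether
 * ->page_mkwrite can reuse the fault wrapper depends on the filesystem.
 * The VMA must be marked VM_MIXEDMAP for vm_insert_mixed(), and VM_HUGEPAGE
 * if PMD faults are used:
 *
 *	static const struct vm_operations_struct foo_dax_vm_ops = {
 *		.fault		= foo_dax_fault,
 *		.pmd_fault	= foo_dax_pmd_fault,
 *		.page_mkwrite	= foo_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 *
 *	static int foo_file_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		file_accessed(file);
 *		vma->vm_ops = &foo_dax_vm_ops;
 *		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
 *		return 0;
 *	}
 */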

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file in which the range is being zeroed
 * @from: The file offset at which zeroing starts
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		void __pmem *addr;
		err = dax_get_addr(&bh, &addr, inode->i_blkbits);
		if (err < 0)
			return err;
		clear_pmem(addr + offset, length);
		wmb_pmem();
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
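
/*
 * Example (a sketch): a filesystem shrinking an inode would zero the new
 * partial tail page before updating i_size, e.g. from its setattr path
 * (foo_get_block is hypothetical):
 *
 *	error = dax_truncate_page(inode, newsize, foo_get_block);
 *	if (error)
 *		return error;
 *	truncate_setsize(inode, newsize);
 */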