mm: xip fix fault vs sparse page invalidate race
mm/filemap_xip.c
/*
 * linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <cotte@de.ibm.com>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/mutex.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

/*
 * We use our own zeroed page instead of ZERO_PAGE() to avoid
 * interfering with its other users, such as /dev/zero.
 */
static DEFINE_MUTEX(xip_sparse_mutex);
static seqcount_t xip_sparse_seq = SEQCNT_ZERO;
static struct page *__xip_sparse_page;

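/*
 * Synchronization scheme (per this commit's subject, "xip fix fault vs
 * sparse page invalidate race"): xip_sparse_mutex serializes everyone
 * who allocates or inserts the shared sparse page, while xip_sparse_seq
 * lets __xip_unmap() detect a fault handler that raced with it and
 * retry under the mutex, so a stale sparse pte is never left mapped.
 */
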
/* called under xip_sparse_mutex */
static struct page *xip_sparse_page(void)
{
	if (!__xip_sparse_page) {
		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);

		if (page)
			__xip_sparse_page = page;
	}
	return __xip_sparse_page;
}

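/*
 * For reference, the ->get_xip_mem() hook used throughout this file has
 * the following shape in struct address_space_operations (the parameter
 * names here are added for illustration; the types follow from the call
 * sites below):
 *
 *	int (*get_xip_mem)(struct address_space *mapping, pgoff_t pgoff,
 *			   int create, void **kmem, unsigned long *pfn);
 *
 * It returns 0 and fills in the kernel address and pfn backing pgoff,
 * or -ENODATA for a hole; with create != 0 the filesystem is expected
 * to allocate backing store instead of reporting a hole.
 */
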
/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_mem() function for the actual low-level
 * access.
 *
 * Note the struct file* is not used at all. It may be NULL.
 */
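/*
 * The file_ra_state argument (_ra) mirrors the generic read path's
 * signature but is never used below: XIP data is directly addressable,
 * so there is no page cache and nothing to read ahead into.
 */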
static ssize_t
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    char __user *buf,
		    size_t len,
		    loff_t *ppos)
{
	struct inode *inode = mapping->host;
	pgoff_t index, end_index;
	unsigned long offset;
	loff_t isize, pos;
	size_t copied = 0, error = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	pos = *ppos;
	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	do {
		unsigned long nr, left;
		void *xip_mem;
		unsigned long xip_pfn;
		int zero = 0;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				goto out;
			}
		}
		nr = nr - offset;
		if (nr > len)
			nr = len;

		error = mapping->a_ops->get_xip_mem(mapping, index, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(error)) {
			if (error == -ENODATA) {
				/* sparse */
				zero = 1;
			} else
				goto out;
		}

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			/* address based flush */ ;

		/*
		 * Ok, we have the mem, so now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		if (!zero)
			left = __copy_to_user(buf+copied, xip_mem+offset, nr);
		else
			left = __clear_user(buf + copied, nr);

		if (left) {
			error = -EFAULT;
			goto out;
		}

		copied += (nr - left);
		offset += (nr - left);
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
	} while (copied < len);

out:
	*ppos = pos + copied;
	if (filp)
		file_accessed(filp);

	return (copied ? copied : error);
}

ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
			    buf, len, ppos);
}
EXPORT_SYMBOL_GPL(xip_file_read);

/*
 * __xip_unmap is invoked from xip_file_fault and
 * __xip_file_write
 *
 * This function walks all vmas of the address_space and unmaps the
 * __xip_sparse_page when found at pgoff.
 */
static void
__xip_unmap(struct address_space *mapping,
		unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct prio_tree_iter iter;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned count;
	int locked = 0;

	count = read_seqcount_begin(&xip_sparse_seq);

	page = __xip_sparse_page;
	if (!page)
		return;

retry:
	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		mm = vma->vm_mm;
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		pte = page_check_address(page, mm, address, &ptl, 1);
		if (pte) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			pteval = ptep_clear_flush_notify(vma, address, pte);
			page_remove_rmap(page, vma);
			dec_mm_counter(mm, file_rss);
			BUG_ON(pte_dirty(pteval));
			pte_unmap_unlock(pte, ptl);
			page_cache_release(page);
		}
	}
	spin_unlock(&mapping->i_mmap_lock);

	if (locked) {
		mutex_unlock(&xip_sparse_mutex);
	} else if (read_seqcount_retry(&xip_sparse_seq, count)) {
		mutex_lock(&xip_sparse_mutex);
		locked = 1;
		goto retry;
	}
}

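/*
 * Note on the retry logic above: the first pass runs unlocked and only
 * samples xip_sparse_seq. If a concurrent fault handler was inside its
 * write_seqcount section (inserting the sparse page somewhere), the
 * read_seqcount_retry() check fires and the walk is redone holding
 * xip_sparse_mutex, which excludes further insertions until it is done.
 */
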
/*
 * xip_file_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_fault, but used for execute in place
 */
static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t size;
	void *xip_mem;
	unsigned long xip_pfn;
	struct page *page;
	int error;

	/* XXX: are VM_FAULT_ codes OK? */
again:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
						&xip_mem, &xip_pfn);
	if (likely(!error))
		goto found;
	if (error != -ENODATA)
		return VM_FAULT_OOM;

	/* sparse block */
	if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		int err;

		/* maybe shared writable, allocate new block */
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
							&xip_mem, &xip_pfn);
		if (error)
			return VM_FAULT_SIGBUS;
		/* unmap sparse mappings at pgoff from all other vmas */
		__xip_unmap(mapping, vmf->pgoff);

found:
		err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
							xip_pfn);
		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		BUG_ON(err);
		return VM_FAULT_NOPAGE;
	} else {
		int err, ret = VM_FAULT_OOM;

		mutex_lock(&xip_sparse_mutex);
		write_seqcount_begin(&xip_sparse_seq);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(!error)) {
			write_seqcount_end(&xip_sparse_seq);
			mutex_unlock(&xip_sparse_mutex);
			goto again;
		}
		if (error != -ENODATA)
			goto out;
		/* not shared and writable, use xip_sparse_page() */
		page = xip_sparse_page();
		if (!page)
			goto out;
		err = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
							page);
		if (err == -ENOMEM)
			goto out;

		ret = VM_FAULT_NOPAGE;
out:
		write_seqcount_end(&xip_sparse_seq);
		mutex_unlock(&xip_sparse_mutex);

		return ret;
	}
}

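/*
 * Note on the read-only branch above: the write_seqcount section spans
 * both the re-check of get_xip_mem() and vm_insert_page(), so any
 * concurrent __xip_unmap() either observes the seqcount change and
 * retries under xip_sparse_mutex, or the fault itself restarts via
 * "goto again" once it sees that the hole has meanwhile been filled.
 */
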
static struct vm_operations_struct xip_file_vm_ops = {
	.fault	= xip_file_fault,
};

int xip_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_mem);

	file_accessed(file);
	vma->vm_ops = &xip_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR | VM_MIXEDMAP;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);

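/*
 * A filesystem opts into this machinery by pointing its file_operations
 * at the xip_* helpers and implementing ->get_xip_mem in its
 * address_space_operations. A minimal sketch, modelled on ext2's XIP
 * support (the "example_" names are illustrative, not from this file):
 *
 *	const struct file_operations example_xip_file_operations = {
 *		.llseek	= generic_file_llseek,
 *		.read	= xip_file_read,
 *		.write	= xip_file_write,
 *		.mmap	= xip_file_mmap,
 *	};
 *
 * together with .get_xip_mem = example_get_xip_mem in the aops,
 * returning the kernel address and pfn of the directly addressable
 * block for a given page offset.
 */
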
static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		  size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	long status = 0;
	size_t bytes;
	ssize_t written = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;
		void *xip_mem;
		unsigned long xip_pfn;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
		if (status == -ENODATA) {
			/* hole: allocate a new block and unmap the sparse page */
			status = a_ops->get_xip_mem(mapping, index, 1,
							&xip_mem, &xip_pfn);
			if (!status)
				/* unmap page at pgoff from all other vmas */
				__xip_unmap(mapping, index);
		}

		if (status)
			break;

		copied = bytes -
			__copy_from_user_nocache(xip_mem + offset, buf, bytes);

		if (likely(copied > 0)) {
			status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return written ? written : status;
}

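/*
 * Note on the copy in __xip_file_write(): __copy_from_user_nocache()
 * is used because the destination is the XIP backing store itself, not
 * the page cache; the cache-bypassing copy minimizes CPU cache
 * pollution for what is essentially a streaming store.
 */
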
ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = file_remove_suid(filp);
	if (ret)
		goto out_backing;

	file_update_time(filp);

	ret = __xip_file_write(filp, buf, count, pos, ppos);

 out_backing:
	current->backing_dev_info = NULL;
 out_up:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);

/*
 * truncate a page used for execute in place
 * functionality is analogous to block_truncate_page but uses
 * get_xip_mem() to get at the data instead of the page cache
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize;
	unsigned length;
	void *xip_mem;
	unsigned long xip_pfn;
	int err;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;

	err = mapping->a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
	if (unlikely(err)) {
		if (err == -ENODATA)
			/* Hole? No need to truncate */
			return 0;
		else
			return err;
	}
	memset(xip_mem + offset, 0, length);
	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);
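
/*
 * Usage note: a filesystem's truncate path is expected to call
 * xip_truncate_page() before shrinking i_size, so the tail of the
 * now-last block is zeroed and stale data beyond EOF is never exposed
 * through an XIP mapping. (ext2, for instance, does this for XIP
 * mounts when truncating.)
 */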