/*
 * linux/drivers/video/fb_defio.c
 *
 * Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

/* return the page backing the given byte offset into the framebuffer,
   whether the fb memory is vmalloc-ed or physically contiguous */
static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
	void *screen_base = (void __force *) info->screen_base;
	struct page *page;

	if (is_vmalloc_addr(screen_base + offs))
		page = vmalloc_to_page(screen_base + offs);
	else
		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

	return page;
}

/* vm_ops->fault handler: find and return the fb page for the faulting offset */
static int fb_deferred_io_fault(struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vmf->vma->vm_private_data;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;

	page = fb_deferred_io_page(info, offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);

	if (vmf->vma->vm_file)
		page->mapping = vmf->vma->vm_file->f_mapping;
	else
		printk(KERN_ERR "no mapping available\n");

	BUG_ON(!page->mapping);
	page->index = vmf->pgoff;

	vmf->page = page;
	return 0;
}

int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct fb_info *info = file->private_data;
	struct inode *inode = file_inode(file);
	int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (err)
		return err;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	inode_lock(inode);
	/* Kill off the delayed work */
	cancel_delayed_work_sync(&info->deferred_work);

	/* Run it immediately */
	schedule_delayed_work(&info->deferred_work, 0);
	inode_unlock(inode);

	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
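
/*
 * Editor's usage sketch (not part of the original file): the fsync path
 * above means a userspace client can force pending deferred IO to run
 * immediately by calling fsync() on the fbdev node after writing to the
 * mmap'ed framebuffer. "/dev/fb0" and the single-page size are assumptions
 * for illustration; error checks are omitted.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void fb_flush_example(void)
{
	int fd = open("/dev/fb0", O_RDWR);
	unsigned char *fb = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				 MAP_SHARED, fd, 0);

	memset(fb, 0xff, 4096);	/* dirty one page; triggers ->page_mkwrite */
	fsync(fd);		/* cancel the delayed work and run it now */

	munmap(fb, 4096);
	close(fd);
}
#endif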

/* vm_ops->page_mkwrite handler */
static int fb_deferred_io_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct fb_info *info = vmf->vma->vm_private_data;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *cur;

	/* this is a callback we get when userspace first tries to
	write to the page. we schedule a workqueue. that workqueue
	will eventually mkclean the touched pages and execute the
	deferred framebuffer IO. then if userspace touches a page
	again, we repeat the same scheme */

	file_update_time(vmf->vma->vm_file);

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);

	/* first write in this cycle, notify the driver */
	if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
		fbdefio->first_io(info);

	/*
	 * We want the page to remain locked from ->page_mkwrite until
	 * the PTE is marked dirty to avoid page_mkclean() being called
	 * before the PTE is updated, which would leave the page ignored
	 * by defio.
	 * Do this by locking the page here and informing the caller
	 * about it with VM_FAULT_LOCKED.
	 */
	lock_page(page);

	/* we loop through the pagelist before adding in order
	to keep the pagelist sorted */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		/* this check catches the case where a new
		process starts writing to the same page
		through a new pte. this new access can trigger
		a mkwrite even while the original process's pte
		is already marked writable */
		if (unlikely(cur == page))
			goto page_already_added;
		else if (cur->index > page->index)
			break;
	}

	list_add_tail(&page->lru, &cur->lru);

page_already_added:
	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};

static int fb_deferred_io_set_page_dirty(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}

static const struct address_space_operations fb_deferred_io_aops = {
	.set_page_dirty = fb_deferred_io_set_page_dirty,
};

int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_ops = &fb_deferred_io_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	if (!(info->flags & FBINFO_VIRTFB))
		vma->vm_flags |= VM_IO;
	vma->vm_private_data = info;
	return 0;
}
EXPORT_SYMBOL(fb_deferred_io_mmap);

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info,
					    deferred_work.work);
	struct list_head *node, *next;
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		lock_page(cur);
		page_mkclean(cur);
		unlock_page(cur);
	}

	/* driver's callback with pagelist */
	fbdefio->deferred_io(info, &fbdefio->pagelist);

	/* clear the list */
	list_for_each_safe(node, next, &fbdefio->pagelist) {
		list_del(node);
	}
	mutex_unlock(&fbdefio->lock);
}
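
/*
 * Editor's sketch of a driver-side ->deferred_io callback (hypothetical,
 * for illustration): fb_deferred_io_work() above hands the driver the
 * sorted, mkclean-ed pagelist, and a typical implementation pushes each
 * dirty page out to the device. my_write_page() is an assumed
 * device-specific helper, not a real API.
 */
#if 0
static void my_deferred_io(struct fb_info *info, struct list_head *pagelist)
{
	struct page *cur;

	/* pages arrive sorted by ->index, i.e. in framebuffer order */
	list_for_each_entry(cur, pagelist, lru)
		my_write_page(info, cur->index << PAGE_SHIFT);
}
#endif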

void fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	BUG_ON(!fbdefio);
	mutex_init(&fbdefio->lock);
	info->fbops->fb_mmap = fb_deferred_io_mmap;
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagelist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
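
/*
 * Editor's sketch of how a driver wires up deferred IO (hypothetical
 * names): provide a struct fb_deferred_io with a flush delay and the
 * ->deferred_io callback, point info->fbdefio at it, and call
 * fb_deferred_io_init() before register_framebuffer(). The matching
 * teardown is fb_deferred_io_cleanup() before the fb memory is freed.
 */
#if 0
static struct fb_deferred_io my_defio = {
	.delay		= HZ / 10,		/* flush at most ten times per second */
	.deferred_io	= my_deferred_io,	/* callback sketched above */
};

static int my_probe_snippet(struct fb_info *info)
{
	info->fbdefio = &my_defio;
	fb_deferred_io_init(info);
	return register_framebuffer(info);
}
#endif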

void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *page;
	int i;

	BUG_ON(!fbdefio);
	cancel_delayed_work_sync(&info->deferred_work);

	/* clear out the mapping that we set up */
	for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
		page = fb_deferred_io_page(info, i);
		page->mapping = NULL;
	}

	info->fbops->fb_mmap = NULL;
	mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);