Commit | Line | Data |
---|---|---|
2874c5fd | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
642fb4d1 DH |
2 | /* file-nommu.c: no-MMU version of ramfs |
3 | * | |
4 | * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. | |
5 | * Written by David Howells (dhowells@redhat.com) | |
642fb4d1 DH |
6 | */ |
7 | ||
8 | #include <linux/module.h> | |
9 | #include <linux/fs.h> | |
131612df | 10 | #include <linux/mm.h> |
642fb4d1 DH |
11 | #include <linux/pagemap.h> |
12 | #include <linux/highmem.h> | |
13 | #include <linux/init.h> | |
14 | #include <linux/string.h> | |
642fb4d1 DH |
15 | #include <linux/backing-dev.h> |
16 | #include <linux/ramfs.h> | |
642fb4d1 DH |
17 | #include <linux/pagevec.h> |
18 | #include <linux/mman.h> | |
5c805365 | 19 | #include <linux/sched.h> |
5a0e3ad6 | 20 | #include <linux/slab.h> |
642fb4d1 | 21 | |
7c0f6ba6 | 22 | #include <linux/uaccess.h> |
642fb4d1 DH |
23 | #include "internal.h" |
24 | ||
/* Forward declarations for the no-MMU-specific operations implemented below
 * and wired into the ops tables that follow.
 */
static int ramfs_nommu_setattr(struct mnt_idmap *, struct dentry *, struct iattr *);
static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
						   unsigned long addr,
						   unsigned long len,
						   unsigned long pgoff,
						   unsigned long flags);
static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma);
642fb4d1 | 32 | |
b4caecd4 CH |
33 | static unsigned ramfs_mmap_capabilities(struct file *file) |
34 | { | |
35 | return NOMMU_MAP_DIRECT | NOMMU_MAP_COPY | NOMMU_MAP_READ | | |
36 | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC; | |
37 | } | |
38 | ||
/* File operations for regular ramfs files on no-MMU: generic page-cache
 * read/write/splice paths, plus the mmap hooks defined in this file that
 * hand out directly-addressable, physically-contiguous backing pages.
 */
const struct file_operations ramfs_file_operations = {
	.mmap_capabilities	= ramfs_mmap_capabilities,
	.mmap			= ramfs_nommu_mmap,
	.get_unmapped_area	= ramfs_nommu_get_unmapped_area,
	.read_iter		= generic_file_read_iter,
	.write_iter		= generic_file_write_iter,
	.fsync			= noop_fsync,
	.splice_read		= generic_file_splice_read,
	.splice_write		= iter_file_splice_write,
	.llseek			= generic_file_llseek,
};
50 | ||
/* Inode operations for regular ramfs files on no-MMU; ->setattr is
 * intercepted so that size changes can populate or trim the contiguous
 * page run backing shared mappings.
 */
const struct inode_operations ramfs_file_inode_operations = {
	.setattr	= ramfs_nommu_setattr,
	.getattr	= simple_getattr,
};
55 | ||
56 | /*****************************************************************************/ | |
57 | /* | |
58 | * add a contiguous set of pages into a ramfs inode when it's truncated from | |
59 | * size 0 on the assumption that it's going to be used for an mmap of shared | |
60 | * memory | |
61 | */ | |
/**
 * ramfs_nommu_expand_for_mapping - populate an empty inode with contiguous pages
 * @inode: the ramfs inode being expanded from size 0
 * @newsize: the requested new file size in bytes
 *
 * Allocate a physically contiguous run of pages large enough to back
 * @newsize bytes, zero them, and attach them to the inode's page cache so
 * that a later shared mmap can map them directly (no MMU available to
 * stitch discontiguous pages together).
 *
 * Returns 0 on success, -EFBIG if the size needs too high an allocation
 * order, -ENOMEM on allocation failure, or the error from
 * inode_newsize_ok()/add_to_page_cache_lru().
 */
int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
{
	unsigned long npages, xpages, loop;
	struct page *pages;
	unsigned order;
	void *data;
	int ret;
	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);

	/* make various checks */
	order = get_order(newsize);
	if (unlikely(order >= MAX_ORDER))
		return -EFBIG;

	ret = inode_newsize_ok(inode, newsize);
	if (ret)
		return ret;

	i_size_write(inode, newsize);

	/* allocate enough contiguous pages to be able to satisfy the
	 * request */
	pages = alloc_pages(gfp, order);
	if (!pages)
		return -ENOMEM;

	/* split the high-order page into an array of single pages so the
	 * surplus tail can be returned and each page refcounted on its own */
	xpages = 1UL << order;
	npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;

	split_page(pages, order);

	/* trim off any pages we don't actually require */
	for (loop = npages; loop < xpages; loop++)
		__free_page(pages + loop);

	/* clear the memory we allocated */
	newsize = PAGE_SIZE * npages;
	data = page_address(pages);
	memset(data, 0, newsize);

	/* attach all the pages to the inode's address space */
	for (loop = 0; loop < npages; loop++) {
		struct page *page = pages + loop;

		ret = add_to_page_cache_lru(page, inode->i_mapping, loop,
					    gfp);
		if (ret < 0)
			goto add_error;

		/* prevent the page from being discarded on memory pressure */
		SetPageDirty(page);
		SetPageUptodate(page);

		/* drop the local reference; the page cache now holds the
		 * page and keeps it alive */
		unlock_page(page);
		put_page(page);
	}

	return 0;

add_error:
	/* free only the pages not yet handed to the page cache; the ones
	 * already attached (indices < loop) are owned by the mapping now */
	while (loop < npages)
		__free_page(pages + loop++);
	return ret;
}
127 | ||
642fb4d1 DH |
128 | /*****************************************************************************/ |
129 | /* | |
130 | * | |
131 | */ | |
132 | static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size) | |
133 | { | |
134 | int ret; | |
135 | ||
136 | /* assume a truncate from zero size is going to be for the purposes of | |
137 | * shared mmap */ | |
138 | if (size == 0) { | |
139 | if (unlikely(newsize >> 32)) | |
140 | return -EFBIG; | |
141 | ||
142 | return ramfs_nommu_expand_for_mapping(inode, newsize); | |
143 | } | |
144 | ||
145 | /* check that a decrease in size doesn't cut off any shared mappings */ | |
146 | if (newsize < size) { | |
7e660872 | 147 | ret = nommu_shrink_inode_mappings(inode, size, newsize); |
642fb4d1 DH |
148 | if (ret < 0) |
149 | return ret; | |
150 | } | |
151 | ||
2c27c65e CH |
152 | truncate_setsize(inode, newsize); |
153 | return 0; | |
642fb4d1 DH |
154 | } |
155 | ||
156 | /*****************************************************************************/ | |
157 | /* | |
158 | * handle a change of attributes | |
159 | * - we're specifically interested in a change of size | |
160 | */ | |
/*
 * Handle a change of attributes on a ramfs inode; we're specifically
 * interested in a change of size, which is routed to ramfs_nommu_resize().
 *
 * Returns 0 on success or a negative errno from setattr_prepare() /
 * ramfs_nommu_resize().
 */
static int ramfs_nommu_setattr(struct mnt_idmap *idmap,
			       struct dentry *dentry, struct iattr *ia)
{
	struct inode *inode = d_inode(dentry);
	/* ia_valid is mutated below, so preserve the caller's view of it */
	unsigned int old_ia_valid = ia->ia_valid;
	int ret = 0;

	/* POSIX UID/GID verification for setting inode attributes */
	ret = setattr_prepare(&nop_mnt_idmap, dentry, ia);
	if (ret)
		return ret;

	/* pick out size-changing events */
	if (ia->ia_valid & ATTR_SIZE) {
		loff_t size = inode->i_size;

		if (ia->ia_size != size) {
			ret = ramfs_nommu_resize(inode, ia->ia_size, size);
			/* skip setattr_copy() on error, or when the size was
			 * the only attribute being changed */
			if (ret < 0 || ia->ia_valid == ATTR_SIZE)
				goto out;
		} else {
			/* we skipped the truncate but must still update
			 * timestamps
			 */
			ia->ia_valid |= ATTR_MTIME|ATTR_CTIME;
		}
	}

	setattr_copy(&nop_mnt_idmap, inode, ia);
 out:
	/* restore the flags so the caller sees ia unmodified */
	ia->ia_valid = old_ia_valid;
	return ret;
}
194 | ||
195 | /*****************************************************************************/ | |
196 | /* | |
197 | * try to determine where a shared mapping can be made | |
198 | * - we require that: | |
199 | * - the pages to be mapped must exist | |
200 | * - the pages be physically contiguous in sequence | |
201 | */ | |
/*
 * try to determine where a shared mapping can be made
 * - we require that:
 *   - the pages to be mapped must exist
 *   - the pages be physically contiguous in sequence
 *
 * Returns the kernel address of the first backing page on success (on
 * no-MMU this doubles as the mapping address), or -ENOSYS if the file's
 * pages are missing or not physically contiguous over the requested range.
 */
static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
					    unsigned long addr, unsigned long len,
					    unsigned long pgoff, unsigned long flags)
{
	unsigned long maxpages, lpages, nr_folios, loop, ret, nr_pages, pfn;
	struct inode *inode = file_inode(file);
	struct folio_batch fbatch;
	loff_t isize;

	/* the mapping mustn't extend beyond the EOF */
	lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	isize = i_size_read(inode);

	/* -ENOSYS also serves as the "no address found yet" sentinel for
	 * the first-batch test below */
	ret = -ENOSYS;
	maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= maxpages)
		goto out;

	if (maxpages - pgoff < lpages)
		goto out;

	/* gang-find the pages; filemap_get_folios_contig() advances pgoff
	 * past each batch, so repeated calls continue where we left off */
	folio_batch_init(&fbatch);
	nr_pages = 0;
repeat:
	nr_folios = filemap_get_folios_contig(inode->i_mapping, &pgoff,
			ULONG_MAX, &fbatch);
	if (!nr_folios) {
		/* a hole in the file - the empty batch needs no release */
		ret = -ENOSYS;
		return ret;
	}

	/* first batch: record the base address and pfn of the run */
	if (ret == -ENOSYS) {
		ret = (unsigned long) folio_address(fbatch.folios[0]);
		pfn = folio_pfn(fbatch.folios[0]);
	}
	/* check the pages for physical adjacency */
	for (loop = 0; loop < nr_folios; loop++) {
		if (pfn + nr_pages != folio_pfn(fbatch.folios[loop])) {
			ret = -ENOSYS;
			goto out_free; /* leave if not physical adjacent */
		}
		nr_pages += folio_nr_pages(fbatch.folios[loop]);
		if (nr_pages >= lpages)
			goto out_free; /* successfully found desired pages*/
	}

	if (nr_pages < lpages) {
		folio_batch_release(&fbatch);
		goto repeat; /* loop if pages are missing */
	}
	/* okay - all conditions fulfilled */

out_free:
	folio_batch_release(&fbatch);
out:
	return ret;
}
260 | ||
261 | /*****************************************************************************/ | |
262 | /* | |
21ff8216 | 263 | * set up a mapping for shared memory segments |
642fb4d1 | 264 | */ |
0fa9aa20 | 265 | static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma) |
642fb4d1 | 266 | { |
fc4f4be9 | 267 | if (!is_nommu_shared_mapping(vma->vm_flags)) |
2e92a3ba DH |
268 | return -ENOSYS; |
269 | ||
270 | file_accessed(file); | |
271 | vma->vm_ops = &generic_file_vm_ops; | |
272 | return 0; | |
642fb4d1 | 273 | } |