// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/pagewalk.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>

#include <linux/uaccess.h>
#include "swap.h"

static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned char present;
	unsigned char *vec = walk->private;

	/*
	 * Hugepages under user process are always in RAM and never
	 * swapped out, but theoretically it needs to be checked.
	 */
	present = pte && !huge_pte_none_mostly(huge_ptep_get(pte));
	for (; addr != end; vec++, addr += PAGE_SIZE)
		*vec = present;
	walk->private = vec;
#else
	BUG();
#endif
	return 0;
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t index)
{
	unsigned char present = 0;
	struct folio *folio;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 */
	folio = filemap_get_incore_folio(mapping, index);
	if (!IS_ERR(folio)) {
		present = folio_test_uptodate(folio);
		folio_put(folio);
	}

	return present;
}
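
/*
 * For addresses with no usable pte (holes in the page tables, or
 * pte_none entries), residency cannot be read from the pte itself.
 * For file-backed vmas the helpers below consult the page cache
 * directly; anonymous memory that was never faulted in is by
 * definition not resident.
 */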
static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
				   struct vm_area_struct *vma, unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
	return nr;
}

static int mincore_unmapped_range(unsigned long addr, unsigned long end,
				   __always_unused int depth,
				   struct mm_walk *walk)
{
	walk->private += __mincore_unmapped_range(addr, end,
						  walk->vma, walk->private);
	return 0;
}

static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			struct mm_walk *walk)
{
	spinlock_t *ptl;
	struct vm_area_struct *vma = walk->vma;
	pte_t *ptep;
	unsigned char *vec = walk->private;
	int nr = (end - addr) >> PAGE_SHIFT;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		memset(vec, 1, nr);
		spin_unlock(ptl);
		goto out;
	}

	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!ptep) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (; addr != end; ptep++, addr += PAGE_SIZE) {
		pte_t pte = ptep_get(ptep);

		/* We need to do cache lookup too for pte markers */
		if (pte_none_mostly(pte))
			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
						 vma, vec);
		else if (pte_present(pte))
			*vec = 1;
		else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (non_swap_entry(entry)) {
				/*
				 * migration or hwpoison entries are always
				 * uptodate
				 */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				*vec = mincore_page(swap_address_space(entry),
						    swp_offset(entry));
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec++;
	}
	pte_unmap_unlock(ptep - 1, ptl);
out:
	walk->private += nr;
	cond_resched();
	return 0;
}

static inline bool can_do_mincore(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma))
		return true;
	if (!vma->vm_file)
		return false;
	/*
	 * Reveal pagecache information only for non-anonymous mappings that
	 * correspond to the files the calling process could (if tried) open
	 * for writing; otherwise we'd be including shared non-exclusive
	 * mappings, which opens a side channel.
	 */
	return inode_owner_or_capable(&nop_mnt_idmap,
				      file_inode(vma->vm_file)) ||
		file_permission(vma->vm_file, MAY_WRITE) == 0;
}

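/*
 * Walk callbacks: the pagewalk core calls .pmd_entry for each pmd in the
 * range (mincore_pte_range then handles the pte table under it, or the
 * whole huge pmd), .pte_hole for spans with no page tables at the given
 * depth, and .hugetlb_entry on hugetlb vmas.
 */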
static const struct mm_walk_ops mincore_walk_ops = {
	.pmd_entry = mincore_pte_range,
	.pte_hole = mincore_unmapped_range,
	.hugetlb_entry = mincore_hugetlb,
};

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap semaphore: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int err;

	vma = vma_lookup(current->mm, addr);
	if (!vma)
		return -ENOMEM;
	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
	if (!can_do_mincore(vma)) {
		unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);
		memset(vec, 1, pages);
		return pages;
	}
	err = walk_page_range(vma->vm_mm, addr, end, &mincore_walk_ops, vec);
	if (err < 0)
		return err;
	return (end - addr) >> PAGE_SHIFT;
}
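
/*
 * Note that do_mincore() never crosses a vma boundary: "end" is clamped
 * to vma->vm_end, so a single call may report on fewer than "pages"
 * pages, and the caller loops to pick up the next vma (vma_lookup()
 * then yields -ENOMEM if the next address is unmapped).
 */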

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes. The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information. Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	start = untagged_addr(start);

	/* Check the start address: needs to be page-aligned.. */
	if (start & ~PAGE_MASK)
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok((void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_ALIGN */
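	/* (i.e. pages = DIV_ROUND_UP(len, PAGE_SIZE), computed in two
	 * steps so that len near SIZE_MAX cannot wrap around) */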
	pages = len >> PAGE_SHIFT;
	pages += (offset_in_page(len)) != 0;

	if (!access_ok(vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
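		/* (one result byte per page: with 4 KiB pages, one pass
		 * covers up to 16 MiB of address space) */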
		mmap_read_lock(current->mm);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		mmap_read_unlock(current->mm);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}
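
/*
 * Illustrative userspace sketch (not part of this file) of how the
 * mincore(2) syscall above is typically called.  The mapping size and
 * the touched page are arbitrary assumptions for the example; with
 * glibc, mincore() is declared in <sys/mman.h> and needs
 * _DEFAULT_SOURCE.
 *
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		long page = sysconf(_SC_PAGESIZE);
 *		size_t len = 8 * page;		// 8 pages, arbitrary
 *		unsigned char vec[8];		// one status byte per page
 *		void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (buf == MAP_FAILED)
 *			return 1;
 *		((char *)buf)[0] = 1;		// fault in the first page only
 *		if (mincore(buf, len, vec) == 0) {
 *			for (int i = 0; i < 8; i++)
 *				printf("page %d: %s\n", i,
 *				       (vec[i] & 1) ? "resident" : "not resident");
 *		}
 *		munmap(buf, len);
 *		return 0;
 *	}
 *
 * Touching only the first page should report it resident, while the
 * remaining anonymous pages, never faulted in, report 0 (see
 * __mincore_unmapped_range()).
 */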