// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/vmacache.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/uaccess.h>
#include <linux/pkeys.h>

#include <asm/elf.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include "internal.h"

#define SEQ_PUT_DEC(str, val) \
		seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
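/*
 * Editorial note: SEQ_PUT_DEC takes a page count; shifting left by
 * (PAGE_SHIFT - 10) converts pages to KiB (e.g. with 4K pages, pages * 4).
 */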
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long text, lib, swap, anon, file, shmem;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	anon = get_mm_counter(mm, MM_ANONPAGES);
	file = get_mm_counter(mm, MM_FILEPAGES);
	shmem = get_mm_counter(mm, MM_SHMEMPAGES);

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = anon + file + shmem;
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	/* split executable areas between text and lib */
	text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
	text = min(text, mm->exec_vm << PAGE_SHIFT);
	lib = (mm->exec_vm << PAGE_SHIFT) - text;

	swap = get_mm_counter(mm, MM_SWAPENTS);
	SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
	SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
	SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
	SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
	SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
	SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
	SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
	SEQ_PUT_DEC(" kB\nRssFile:\t", file);
	SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
	SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
	SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
	seq_put_decimal_ull_width(m,
		    " kB\nVmExe:\t", text >> 10, 8);
	seq_put_decimal_ull_width(m,
		    " kB\nVmLib:\t", lib >> 10, 8);
	seq_put_decimal_ull_width(m,
		    " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
	SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
	seq_puts(m, " kB\n");
	hugetlb_report_usage(m, mm);
}
#undef SEQ_PUT_DEC

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES) +
			get_mm_counter(mm, MM_SHMEMPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->data_vm + mm->stack_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

#ifdef CONFIG_NUMA
/*
 * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = get_task_policy(task);
	mpol_get(priv->task_mempolicy);
	task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
	mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

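/*
 * Editorial note: in this seq_file iterator, *ppos carries the start
 * address of the next VMA to show (-1UL once the walk, including the
 * gate VMA, is finished; see m_next() below), so a read can resume
 * after a short copy without keeping the mm pinned between reads.
 */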
static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = *ppos;
	struct mm_struct *mm;
	struct vm_area_struct *vma;

	/* See m_next(). Zero at the start or after lseek. */
	if (last_addr == -1UL)
		return NULL;

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !mmget_not_zero(mm)) {
		put_task_struct(priv->task);
		priv->task = NULL;
		return NULL;
	}

	if (mmap_read_lock_killable(mm)) {
		mmput(mm);
		put_task_struct(priv->task);
		priv->task = NULL;
		return ERR_PTR(-EINTR);
	}

	hold_task_mempolicy(priv);
	priv->tail_vma = get_gate_vma(mm);

	vma = find_vma(mm, last_addr);
	if (vma)
		return vma;

	return priv->tail_vma;
}

static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *next, *vma = v;

	if (vma == priv->tail_vma)
		next = NULL;
	else if (vma->vm_next)
		next = vma->vm_next;
	else
		next = priv->tail_vma;

	*ppos = next ? next->vm_start : -1UL;

	return next;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm = priv->mm;

	if (!priv->task)
		return;

	release_task_mempolicy(priv);
	mmap_read_unlock(mm);
	mmput(mm);
	put_task_struct(priv->task);
	priv->task = NULL;
}

static int proc_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops, int psize)
{
	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);

	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

static int proc_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct proc_maps_private));
}

/*
 * Indicate if the VMA is a stack for the given task; for
 * /proc/PID/maps that is the stack of the main task.
 */
static int is_stack(struct vm_area_struct *vma)
{
	/*
	 * We make no effort to guess what a given thread considers to be
	 * its "stack".  It's not even well-defined for programs written
	 * in languages like Go.
	 */
	return vma->vm_start <= vma->vm_mm->start_stack &&
		vma->vm_end >= vma->vm_mm->start_stack;
}

static void show_vma_header_prefix(struct seq_file *m,
				   unsigned long start, unsigned long end,
				   vm_flags_t flags, unsigned long long pgoff,
				   dev_t dev, unsigned long ino)
{
	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_put_hex_ll(m, NULL, start, 8);
	seq_put_hex_ll(m, "-", end, 8);
	seq_putc(m, ' ');
	seq_putc(m, flags & VM_READ ? 'r' : '-');
	seq_putc(m, flags & VM_WRITE ? 'w' : '-');
	seq_putc(m, flags & VM_EXEC ? 'x' : '-');
	seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p');
	seq_put_hex_ll(m, " ", pgoff, 8);
	seq_put_hex_ll(m, " ", MAJOR(dev), 2);
	seq_put_hex_ll(m, ":", MINOR(dev), 2);
	seq_put_decimal_ull(m, " ", ino);
	seq_putc(m, ' ');
}

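/*
 * Editorial note: the prefix above yields the familiar maps columns,
 * e.g. (illustrative values only):
 *   00400000-00452000 r-xp 00000000 08:02 173521
 */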
static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	const char *name = NULL;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	start = vma->vm_start;
	end = vma->vm_end;
	show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		seq_pad(m, ' ');
		seq_file_path(m, file, "\n");
		goto done;
	}

	if (vma->vm_ops && vma->vm_ops->name) {
		name = vma->vm_ops->name(vma);
		if (name)
			goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		const char *anon_name;

		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		if (is_stack(vma)) {
			name = "[stack]";
			goto done;
		}

		anon_name = vma_anon_name(vma);
		if (anon_name) {
			seq_pad(m, ' ');
			seq_printf(m, "[anon:%s]", anon_name);
		}
	}

done:
	if (name) {
		seq_pad(m, ' ');
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v)
{
	show_map_vma(m, v);
	return 0;
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors. So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 *	- 1M 3-user-pages add up to 8KB errors;
 *	- supports mapcount up to 2^24, or 16M;
 *	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12

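/*
 * Editorial note, worked example (4K pages): a page mapped by three
 * processes adds (4096 << PSS_SHIFT) / 3 = 5592405 to pss, which reads
 * back as 5592405 >> PSS_SHIFT = 1365 bytes, i.e. ~one third of the page.
 */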
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long lazyfree;
	unsigned long anonymous_thp;
	unsigned long shmem_thp;
	unsigned long file_thp;
	unsigned long swap;
	unsigned long shared_hugetlb;
	unsigned long private_hugetlb;
	u64 pss;
	u64 pss_anon;
	u64 pss_file;
	u64 pss_shmem;
	u64 pss_locked;
	u64 swap_pss;
};

static void smaps_page_accumulate(struct mem_size_stats *mss,
		struct page *page, unsigned long size, unsigned long pss,
		bool dirty, bool locked, bool private)
{
	mss->pss += pss;

	if (PageAnon(page))
		mss->pss_anon += pss;
	else if (PageSwapBacked(page))
		mss->pss_shmem += pss;
	else
		mss->pss_file += pss;

	if (locked)
		mss->pss_locked += pss;

	if (dirty || PageDirty(page)) {
		if (private)
			mss->private_dirty += size;
		else
			mss->shared_dirty += size;
	} else {
		if (private)
			mss->private_clean += size;
		else
			mss->shared_clean += size;
	}
}

static void smaps_account(struct mem_size_stats *mss, struct page *page,
		bool compound, bool young, bool dirty, bool locked)
{
	int i, nr = compound ? compound_nr(page) : 1;
	unsigned long size = nr * PAGE_SIZE;

	/*
	 * First accumulate quantities that depend only on |size| and the type
	 * of the compound page.
	 */
	if (PageAnon(page)) {
		mss->anonymous += size;
		if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
			mss->lazyfree += size;
	}

	mss->resident += size;
	/* Accumulate the size in pages that have been accessed. */
	if (young || page_is_young(page) || PageReferenced(page))
		mss->referenced += size;

	/*
	 * Then accumulate quantities that may depend on sharing, or that may
	 * differ page-by-page.
	 *
	 * page_count(page) == 1 guarantees the page is mapped exactly once.
	 * If any subpage of the compound page is mapped with a PTE, it would
	 * elevate page_count().
	 */
	if (page_count(page) == 1) {
		smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
			locked, true);
		return;
	}
	for (i = 0; i < nr; i++, page++) {
		int mapcount = page_mapcount(page);
		unsigned long pss = PAGE_SIZE << PSS_SHIFT;
		if (mapcount >= 2)
			pss /= mapcount;
		smaps_page_accumulate(mss, page, PAGE_SIZE, pss, dirty, locked,
			mapcount < 2);
	}
}

#ifdef CONFIG_SHMEM
static int smaps_pte_hole(unsigned long addr, unsigned long end,
			  __always_unused int depth, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;

	mss->swap += shmem_partial_swap_usage(walk->vma->vm_file->f_mapping,
					      linear_page_index(vma, addr),
					      linear_page_index(vma, end));

	return 0;
}
#else
#define smaps_pte_hole		NULL
#endif /* CONFIG_SHMEM */

static void smaps_pte_hole_lookup(unsigned long addr, struct mm_walk *walk)
{
#ifdef CONFIG_SHMEM
	if (walk->ops->pte_hole) {
		/* depth is not used */
		smaps_pte_hole(addr, addr + PAGE_SIZE, 0, walk);
	}
#endif
}

static void smaps_pte_entry(pte_t *pte, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	bool locked = !!(vma->vm_flags & VM_LOCKED);
	struct page *page = NULL;

	if (pte_present(*pte)) {
		page = vm_normal_page(vma, addr, *pte);
	} else if (is_swap_pte(*pte)) {
		swp_entry_t swpent = pte_to_swp_entry(*pte);

		if (!non_swap_entry(swpent)) {
			int mapcount;

			mss->swap += PAGE_SIZE;
			mapcount = swp_swapcount(swpent);
			if (mapcount >= 2) {
				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;

				do_div(pss_delta, mapcount);
				mss->swap_pss += pss_delta;
			} else {
				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
			}
		} else if (is_pfn_swap_entry(swpent))
			page = pfn_swap_entry_to_page(swpent);
	} else {
		smaps_pte_hole_lookup(addr, walk);
		return;
	}

	if (!page)
		return;

	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	bool locked = !!(vma->vm_flags & VM_LOCKED);
	struct page *page = NULL;

	if (pmd_present(*pmd)) {
		/* FOLL_DUMP will return -EFAULT on huge zero page */
		page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
		swp_entry_t entry = pmd_to_swp_entry(*pmd);

		if (is_migration_entry(entry))
			page = pfn_swap_entry_to_page(entry);
	}
	if (IS_ERR_OR_NULL(page))
		return;
	if (PageAnon(page))
		mss->anonymous_thp += HPAGE_PMD_SIZE;
	else if (PageSwapBacked(page))
		mss->shmem_thp += HPAGE_PMD_SIZE;
	else if (is_zone_device_page(page))
		/* pass */;
	else
		mss->file_thp += HPAGE_PMD_SIZE;
	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
}
#endif

2165009b | 589 | struct mm_walk *walk) |
e070ad49 | 590 | { |
14eb6fdd | 591 | struct vm_area_struct *vma = walk->vma; |
ae11c4d9 | 592 | pte_t *pte; |
705e87c0 | 593 | spinlock_t *ptl; |
e070ad49 | 594 | |
b6ec57f4 KS |
595 | ptl = pmd_trans_huge_lock(pmd, vma); |
596 | if (ptl) { | |
c94b6923 | 597 | smaps_pmd_entry(pmd, addr, walk); |
bf929152 | 598 | spin_unlock(ptl); |
14038302 | 599 | goto out; |
22e057c5 | 600 | } |
1a5a9906 AA |
601 | |
602 | if (pmd_trans_unstable(pmd)) | |
14038302 | 603 | goto out; |
22e057c5 | 604 | /* |
c1e8d7c6 | 605 | * The mmap_lock held all the way back in m_start() is what |
22e057c5 DH |
606 | * keeps khugepaged out of here and from collapsing things |
607 | * in here. | |
608 | */ | |
705e87c0 | 609 | pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); |
ae11c4d9 | 610 | for (; addr != end; pte++, addr += PAGE_SIZE) |
c164e038 | 611 | smaps_pte_entry(pte, addr, walk); |
705e87c0 | 612 | pte_unmap_unlock(pte - 1, ptl); |
14038302 | 613 | out: |
705e87c0 | 614 | cond_resched(); |
b3ae5acb | 615 | return 0; |
e070ad49 ML |
616 | } |
617 | ||
834f82e2 CG |
618 | static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma) |
619 | { | |
620 | /* | |
621 | * Don't forget to update Documentation/ on changes. | |
622 | */ | |
623 | static const char mnemonics[BITS_PER_LONG][2] = { | |
624 | /* | |
625 | * In case if we meet a flag we don't know about. | |
626 | */ | |
627 | [0 ... (BITS_PER_LONG-1)] = "??", | |
628 | ||
629 | [ilog2(VM_READ)] = "rd", | |
630 | [ilog2(VM_WRITE)] = "wr", | |
631 | [ilog2(VM_EXEC)] = "ex", | |
632 | [ilog2(VM_SHARED)] = "sh", | |
633 | [ilog2(VM_MAYREAD)] = "mr", | |
634 | [ilog2(VM_MAYWRITE)] = "mw", | |
635 | [ilog2(VM_MAYEXEC)] = "me", | |
636 | [ilog2(VM_MAYSHARE)] = "ms", | |
637 | [ilog2(VM_GROWSDOWN)] = "gd", | |
638 | [ilog2(VM_PFNMAP)] = "pf", | |
834f82e2 CG |
639 | [ilog2(VM_LOCKED)] = "lo", |
640 | [ilog2(VM_IO)] = "io", | |
641 | [ilog2(VM_SEQ_READ)] = "sr", | |
642 | [ilog2(VM_RAND_READ)] = "rr", | |
643 | [ilog2(VM_DONTCOPY)] = "dc", | |
644 | [ilog2(VM_DONTEXPAND)] = "de", | |
645 | [ilog2(VM_ACCOUNT)] = "ac", | |
646 | [ilog2(VM_NORESERVE)] = "nr", | |
647 | [ilog2(VM_HUGETLB)] = "ht", | |
b6fb293f | 648 | [ilog2(VM_SYNC)] = "sf", |
834f82e2 | 649 | [ilog2(VM_ARCH_1)] = "ar", |
d2cd9ede | 650 | [ilog2(VM_WIPEONFORK)] = "wf", |
834f82e2 | 651 | [ilog2(VM_DONTDUMP)] = "dd", |
424037b7 DK |
652 | #ifdef CONFIG_ARM64_BTI |
653 | [ilog2(VM_ARM64_BTI)] = "bt", | |
654 | #endif | |
ec8e41ae NH |
655 | #ifdef CONFIG_MEM_SOFT_DIRTY |
656 | [ilog2(VM_SOFTDIRTY)] = "sd", | |
657 | #endif | |
834f82e2 CG |
658 | [ilog2(VM_MIXEDMAP)] = "mm", |
659 | [ilog2(VM_HUGEPAGE)] = "hg", | |
660 | [ilog2(VM_NOHUGEPAGE)] = "nh", | |
661 | [ilog2(VM_MERGEABLE)] = "mg", | |
16ba6f81 AA |
662 | [ilog2(VM_UFFD_MISSING)]= "um", |
663 | [ilog2(VM_UFFD_WP)] = "uw", | |
9f341931 CM |
664 | #ifdef CONFIG_ARM64_MTE |
665 | [ilog2(VM_MTE)] = "mt", | |
666 | [ilog2(VM_MTE_ALLOWED)] = "", | |
667 | #endif | |
5212213a | 668 | #ifdef CONFIG_ARCH_HAS_PKEYS |
c1192f84 DH |
669 | /* These come out via ProtectionKey: */ |
670 | [ilog2(VM_PKEY_BIT0)] = "", | |
671 | [ilog2(VM_PKEY_BIT1)] = "", | |
672 | [ilog2(VM_PKEY_BIT2)] = "", | |
673 | [ilog2(VM_PKEY_BIT3)] = "", | |
2c9e0a6f RP |
674 | #if VM_PKEY_BIT4 |
675 | [ilog2(VM_PKEY_BIT4)] = "", | |
c1192f84 | 676 | #endif |
5212213a | 677 | #endif /* CONFIG_ARCH_HAS_PKEYS */ |
7677f7fd AR |
678 | #ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR |
679 | [ilog2(VM_UFFD_MINOR)] = "ui", | |
680 | #endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */ | |
834f82e2 CG |
681 | }; |
682 | size_t i; | |
683 | ||
684 | seq_puts(m, "VmFlags: "); | |
685 | for (i = 0; i < BITS_PER_LONG; i++) { | |
c1192f84 DH |
686 | if (!mnemonics[i][0]) |
687 | continue; | |
834f82e2 | 688 | if (vma->vm_flags & (1UL << i)) { |
f6640663 AV |
689 | seq_putc(m, mnemonics[i][0]); |
690 | seq_putc(m, mnemonics[i][1]); | |
691 | seq_putc(m, ' '); | |
834f82e2 CG |
692 | } |
693 | } | |
694 | seq_putc(m, '\n'); | |
695 | } | |
696 | ||
#ifdef CONFIG_HUGETLB_PAGE
static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page = NULL;

	if (pte_present(*pte)) {
		page = vm_normal_page(vma, addr, *pte);
	} else if (is_swap_pte(*pte)) {
		swp_entry_t swpent = pte_to_swp_entry(*pte);

		if (is_pfn_swap_entry(swpent))
			page = pfn_swap_entry_to_page(swpent);
	}
	if (page) {
		int mapcount = page_mapcount(page);

		if (mapcount >= 2)
			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
		else
			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
	}
	return 0;
}
#else
#define smaps_hugetlb_range	NULL
#endif /* HUGETLB_PAGE */

static const struct mm_walk_ops smaps_walk_ops = {
	.pmd_entry		= smaps_pte_range,
	.hugetlb_entry		= smaps_hugetlb_range,
};

static const struct mm_walk_ops smaps_shmem_walk_ops = {
	.pmd_entry		= smaps_pte_range,
	.hugetlb_entry		= smaps_hugetlb_range,
	.pte_hole		= smaps_pte_hole,
};

/*
 * Gather mem stats from @vma with the indicated beginning
 * address @start, and keep them in @mss.
 *
 * Use vm_start of @vma as the beginning address if @start is 0.
 */
static void smap_gather_stats(struct vm_area_struct *vma,
		struct mem_size_stats *mss, unsigned long start)
{
	const struct mm_walk_ops *ops = &smaps_walk_ops;

	/* Invalid start */
	if (start >= vma->vm_end)
		return;

#ifdef CONFIG_SHMEM
	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
		/*
		 * For shared or readonly shmem mappings we know that all
		 * swapped out pages belong to the shmem object, and we can
		 * obtain the swap value much more efficiently. For private
		 * writable mappings, we might have COW pages that are
		 * not affected by the parent swapped out pages of the shmem
		 * object, so we have to distinguish them during the page walk.
		 * Unless we know that the shmem object (or the part mapped by
		 * our VMA) has no swapped out pages at all.
		 */
		unsigned long shmem_swapped = shmem_swap_usage(vma);

		if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
					!(vma->vm_flags & VM_WRITE))) {
			mss->swap += shmem_swapped;
		} else {
			ops = &smaps_shmem_walk_ops;
		}
	}
#endif
	/* mmap_lock is held in m_start */
	if (!start)
		walk_page_vma(vma, ops, mss);
	else
		walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss);
}

#define SEQ_PUT_DEC(str, val) \
		seq_put_decimal_ull_width(m, str, (val) >> 10, 8)

/* Show the contents common for smaps and smaps_rollup */
static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
	bool rollup_mode)
{
	SEQ_PUT_DEC("Rss:            ", mss->resident);
	SEQ_PUT_DEC(" kB\nPss:            ", mss->pss >> PSS_SHIFT);
	if (rollup_mode) {
		/*
		 * These are meaningful only for smaps_rollup, otherwise two of
		 * them are zero, and the other one is the same as Pss.
		 */
		SEQ_PUT_DEC(" kB\nPss_Anon:       ",
			mss->pss_anon >> PSS_SHIFT);
		SEQ_PUT_DEC(" kB\nPss_File:       ",
			mss->pss_file >> PSS_SHIFT);
		SEQ_PUT_DEC(" kB\nPss_Shmem:      ",
			mss->pss_shmem >> PSS_SHIFT);
	}
	SEQ_PUT_DEC(" kB\nShared_Clean:   ", mss->shared_clean);
	SEQ_PUT_DEC(" kB\nShared_Dirty:   ", mss->shared_dirty);
	SEQ_PUT_DEC(" kB\nPrivate_Clean:  ", mss->private_clean);
	SEQ_PUT_DEC(" kB\nPrivate_Dirty:  ", mss->private_dirty);
	SEQ_PUT_DEC(" kB\nReferenced:     ", mss->referenced);
	SEQ_PUT_DEC(" kB\nAnonymous:      ", mss->anonymous);
	SEQ_PUT_DEC(" kB\nLazyFree:       ", mss->lazyfree);
	SEQ_PUT_DEC(" kB\nAnonHugePages:  ", mss->anonymous_thp);
	SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
	SEQ_PUT_DEC(" kB\nFilePmdMapped:  ", mss->file_thp);
	SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
	seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
				  mss->private_hugetlb >> 10, 7);
	SEQ_PUT_DEC(" kB\nSwap:           ", mss->swap);
	SEQ_PUT_DEC(" kB\nSwapPss:        ",
					mss->swap_pss >> PSS_SHIFT);
	SEQ_PUT_DEC(" kB\nLocked:         ",
					mss->pss_locked >> PSS_SHIFT);
	seq_puts(m, " kB\n");
}

static int show_smap(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;

	memset(&mss, 0, sizeof(mss));

	smap_gather_stats(vma, &mss, 0);

	show_map_vma(m, vma);

	SEQ_PUT_DEC("Size:           ", vma->vm_end - vma->vm_start);
	SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
	SEQ_PUT_DEC(" kB\nMMUPageSize:    ", vma_mmu_pagesize(vma));
	seq_puts(m, " kB\n");

	__show_smap(m, &mss, false);

	seq_printf(m, "THPeligible:    %d\n",
		   transparent_hugepage_active(vma));

	if (arch_pkeys_enabled())
		seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
	show_smap_vma_flags(m, vma);

	return 0;
}

static int show_smaps_rollup(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct mem_size_stats mss;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long last_vma_end = 0;
	int ret = 0;

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return -ESRCH;

	mm = priv->mm;
	if (!mm || !mmget_not_zero(mm)) {
		ret = -ESRCH;
		goto out_put_task;
	}

	memset(&mss, 0, sizeof(mss));

	ret = mmap_read_lock_killable(mm);
	if (ret)
		goto out_put_mm;

	hold_task_mempolicy(priv);

	for (vma = priv->mm->mmap; vma;) {
		smap_gather_stats(vma, &mss, 0);
		last_vma_end = vma->vm_end;

		/*
		 * Release mmap_lock temporarily if someone wants to
		 * access it for a write request.
		 */
		if (mmap_lock_is_contended(mm)) {
			mmap_read_unlock(mm);
			ret = mmap_read_lock_killable(mm);
			if (ret) {
				release_task_mempolicy(priv);
				goto out_put_mm;
			}

			/*
			 * After dropping the lock, there are four cases to
			 * consider. See the following example for explanation.
			 *
			 *   +------+------+-----------+
			 *   | VMA1 | VMA2 |    VMA3   |
			 *   +------+------+-----------+
			 *   |      |      |           |
			 *  4k     8k     16k         400k
			 *
			 * Suppose we drop the lock after reading VMA2 due to
			 * contention, then we get:
			 *
			 *	last_vma_end = 16k
			 *
			 * 1) VMA2 is freed, but VMA3 exists:
			 *
			 *    find_vma(mm, 16k - 1) will return VMA3.
			 *    In this case, just continue from VMA3.
			 *
			 * 2) VMA2 still exists:
			 *
			 *    find_vma(mm, 16k - 1) will return VMA2.
			 *    Iterate the loop like the original one.
			 *
			 * 3) No more VMAs can be found:
			 *
			 *    find_vma(mm, 16k - 1) will return NULL.
			 *    No more things to do, just break.
			 *
			 * 4) (last_vma_end - 1) is the middle of a vma (VMA'):
			 *
			 *    find_vma(mm, 16k - 1) will return VMA' whose range
			 *    contains last_vma_end.
			 *    Iterate VMA' from last_vma_end.
			 */
			vma = find_vma(mm, last_vma_end - 1);
			/* Case 3 above */
			if (!vma)
				break;

			/* Case 1 above */
			if (vma->vm_start >= last_vma_end)
				continue;

			/* Case 4 above */
			if (vma->vm_end > last_vma_end)
				smap_gather_stats(vma, &mss, last_vma_end);
		}
		/* Case 2 above */
		vma = vma->vm_next;
	}

	show_vma_header_prefix(m, priv->mm->mmap->vm_start,
			       last_vma_end, 0, 0, 0, 0);
	seq_pad(m, ' ');
	seq_puts(m, "[rollup]\n");

	__show_smap(m, &mss, true);

	release_task_mempolicy(priv);
	mmap_read_unlock(mm);

out_put_mm:
	mmput(mm);
out_put_task:
	put_task_struct(priv->task);
	priv->task = NULL;

	return ret;
}
#undef SEQ_PUT_DEC

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int smaps_rollup_open(struct inode *inode, struct file *file)
{
	int ret;
	struct proc_maps_private *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
	if (!priv)
		return -ENOMEM;

	ret = single_open(file, show_smaps_rollup, priv);
	if (ret)
		goto out_free;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		ret = PTR_ERR(priv->mm);

		single_release(inode, file);
		goto out_free;
	}

	return 0;

out_free:
	kfree(priv);
	return ret;
}

static int smaps_rollup_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	kfree(priv);
	return single_release(inode, file);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_pid_smaps_rollup_operations = {
	.open		= smaps_rollup_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= smaps_rollup_release,
};

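/*
 * Editorial note: these correspond to the decimal values userspace writes
 * to /proc/PID/clear_refs (1 = all pages, 2 = anonymous only, 3 = file
 * mapped only, 4 = clear soft-dirty bits, 5 = reset peak RSS); see
 * clear_refs_test_walk() and clear_refs_write() below.
 */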
enum clear_refs_types {
	CLEAR_REFS_ALL = 1,
	CLEAR_REFS_ANON,
	CLEAR_REFS_MAPPED,
	CLEAR_REFS_SOFT_DIRTY,
	CLEAR_REFS_MM_HIWATER_RSS,
	CLEAR_REFS_LAST,
};

struct clear_refs_private {
	enum clear_refs_types type;
};

#ifdef CONFIG_MEM_SOFT_DIRTY

static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	struct page *page;

	if (!pte_write(pte))
		return false;
	if (!is_cow_mapping(vma->vm_flags))
		return false;
	if (likely(!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)))
		return false;
	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return false;
	return page_maybe_dma_pinned(page);
}

static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
	/*
	 * The soft-dirty tracker uses #PF-s to catch writes
	 * to pages, so write-protect the pte as well. See the
	 * Documentation/admin-guide/mm/soft-dirty.rst for full description
	 * of how soft-dirty works.
	 */
	pte_t ptent = *pte;

	if (pte_present(ptent)) {
		pte_t old_pte;

		if (pte_is_pinned(vma, addr, ptent))
			return;
		old_pte = ptep_modify_prot_start(vma, addr, pte);
		ptent = pte_wrprotect(old_pte);
		ptent = pte_clear_soft_dirty(ptent);
		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
	} else if (is_swap_pte(ptent)) {
		ptent = pte_swp_clear_soft_dirty(ptent);
		set_pte_at(vma->vm_mm, addr, pte, ptent);
	}
}
#else
static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
}
#endif

#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
	pmd_t old, pmd = *pmdp;

	if (pmd_present(pmd)) {
		/* See comment in change_huge_pmd() */
		old = pmdp_invalidate(vma, addr, pmdp);
		if (pmd_dirty(old))
			pmd = pmd_mkdirty(pmd);
		if (pmd_young(old))
			pmd = pmd_mkyoung(pmd);

		pmd = pmd_wrprotect(pmd);
		pmd = pmd_clear_soft_dirty(pmd);

		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
		pmd = pmd_swp_clear_soft_dirty(pmd);
		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
	}
}
#else
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
}
#endif

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty_pmd(vma, addr, pmd);
			goto out;
		}

		if (!pmd_present(*pmd))
			goto out;

		page = pmd_page(*pmd);

		/* Clear accessed and referenced bits. */
		pmdp_test_and_clear_young(vma, addr, pmd);
		test_and_clear_page_young(page);
		ClearPageReferenced(page);
out:
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty(vma, addr, pte);
			continue;
		}

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		test_and_clear_page_young(page);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int clear_refs_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;

	if (vma->vm_flags & VM_PFNMAP)
		return 1;

	/*
	 * Writing 1 to /proc/pid/clear_refs affects all pages.
	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
	 * Writing 4 to /proc/pid/clear_refs affects all pages.
	 */
	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
		return 1;
	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
		return 1;
	return 0;
}

static const struct mm_walk_ops clear_refs_walk_ops = {
	.pmd_entry		= clear_refs_pte_range,
	.test_walk		= clear_refs_test_walk,
};

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum clear_refs_types type;
	int itype;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &itype);
	if (rv < 0)
		return rv;
	type = (enum clear_refs_types)itype;
	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
		return -EINVAL;

	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mmu_notifier_range range;
		struct clear_refs_private cp = {
			.type = type,
		};

		if (mmap_write_lock_killable(mm)) {
			count = -EINTR;
			goto out_mm;
		}
		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
			/*
			 * Writing 5 to /proc/pid/clear_refs resets the peak
			 * resident set size to this mm's current rss value.
			 */
			reset_mm_hiwater_rss(mm);
			goto out_unlock;
		}

		if (type == CLEAR_REFS_SOFT_DIRTY) {
			for (vma = mm->mmap; vma; vma = vma->vm_next) {
				if (!(vma->vm_flags & VM_SOFTDIRTY))
					continue;
				vma->vm_flags &= ~VM_SOFTDIRTY;
				vma_set_page_prot(vma);
			}

			inc_tlb_flush_pending(mm);
			mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
						0, NULL, mm, 0, -1UL);
			mmu_notifier_invalidate_range_start(&range);
		}
		walk_page_range(mm, 0, mm->highest_vm_end, &clear_refs_walk_ops,
				&cp);
		if (type == CLEAR_REFS_SOFT_DIRTY) {
			mmu_notifier_invalidate_range_end(&range);
			flush_tlb_mm(mm);
			dec_tlb_flush_pending(mm);
		}
out_unlock:
		mmap_write_unlock(mm);
out_mm:
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};

1296 | typedef struct { |
1297 | u64 pme; | |
1298 | } pagemap_entry_t; | |
1299 | ||
85863e47 | 1300 | struct pagemapread { |
8c829622 | 1301 | int pos, len; /* units: PM_ENTRY_BYTES, not bytes */ |
092b50ba | 1302 | pagemap_entry_t *buffer; |
1c90308e | 1303 | bool show_pfn; |
85863e47 MM |
1304 | }; |
1305 | ||
5aaabe83 NH |
1306 | #define PAGEMAP_WALK_SIZE (PMD_SIZE) |
1307 | #define PAGEMAP_WALK_MASK (PMD_MASK) | |
1308 | ||
deb94544 KK |
1309 | #define PM_ENTRY_BYTES sizeof(pagemap_entry_t) |
1310 | #define PM_PFRAME_BITS 55 | |
1311 | #define PM_PFRAME_MASK GENMASK_ULL(PM_PFRAME_BITS - 1, 0) | |
1312 | #define PM_SOFT_DIRTY BIT_ULL(55) | |
77bb499b | 1313 | #define PM_MMAP_EXCLUSIVE BIT_ULL(56) |
fb8e37f3 | 1314 | #define PM_UFFD_WP BIT_ULL(57) |
deb94544 KK |
1315 | #define PM_FILE BIT_ULL(61) |
1316 | #define PM_SWAP BIT_ULL(62) | |
1317 | #define PM_PRESENT BIT_ULL(63) | |
1318 | ||
85863e47 MM |
1319 | #define PM_END_OF_BUFFER 1 |
1320 | ||
deb94544 | 1321 | static inline pagemap_entry_t make_pme(u64 frame, u64 flags) |
092b50ba | 1322 | { |
deb94544 | 1323 | return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags }; |
092b50ba NH |
1324 | } |
1325 | ||
1326 | static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme, | |
85863e47 MM |
1327 | struct pagemapread *pm) |
1328 | { | |
092b50ba | 1329 | pm->buffer[pm->pos++] = *pme; |
d82ef020 | 1330 | if (pm->pos >= pm->len) |
aae8679b | 1331 | return PM_END_OF_BUFFER; |
85863e47 MM |
1332 | return 0; |
1333 | } | |
1334 | ||
1335 | static int pagemap_pte_hole(unsigned long start, unsigned long end, | |
b7a16c7a | 1336 | __always_unused int depth, struct mm_walk *walk) |
85863e47 | 1337 | { |
2165009b | 1338 | struct pagemapread *pm = walk->private; |
68b5a652 | 1339 | unsigned long addr = start; |
85863e47 | 1340 | int err = 0; |
092b50ba | 1341 | |
68b5a652 PF |
1342 | while (addr < end) { |
1343 | struct vm_area_struct *vma = find_vma(walk->mm, addr); | |
deb94544 | 1344 | pagemap_entry_t pme = make_pme(0, 0); |
87e6d49a PF |
1345 | /* End of address space hole, which we mark as non-present. */ |
1346 | unsigned long hole_end; | |
68b5a652 | 1347 | |
87e6d49a PF |
1348 | if (vma) |
1349 | hole_end = min(end, vma->vm_start); | |
1350 | else | |
1351 | hole_end = end; | |
1352 | ||
1353 | for (; addr < hole_end; addr += PAGE_SIZE) { | |
1354 | err = add_to_pagemap(addr, &pme, pm); | |
1355 | if (err) | |
1356 | goto out; | |
68b5a652 PF |
1357 | } |
1358 | ||
87e6d49a PF |
1359 | if (!vma) |
1360 | break; | |
1361 | ||
1362 | /* Addresses in the VMA. */ | |
1363 | if (vma->vm_flags & VM_SOFTDIRTY) | |
deb94544 | 1364 | pme = make_pme(0, PM_SOFT_DIRTY); |
87e6d49a | 1365 | for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) { |
68b5a652 PF |
1366 | err = add_to_pagemap(addr, &pme, pm); |
1367 | if (err) | |
1368 | goto out; | |
1369 | } | |
85863e47 | 1370 | } |
68b5a652 | 1371 | out: |
85863e47 MM |
1372 | return err; |
1373 | } | |
1374 | ||
static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	u64 frame = 0, flags = 0;
	struct page *page = NULL;

	if (pte_present(pte)) {
		if (pm->show_pfn)
			frame = pte_pfn(pte);
		flags |= PM_PRESENT;
		page = vm_normal_page(vma, addr, pte);
		if (pte_soft_dirty(pte))
			flags |= PM_SOFT_DIRTY;
		if (pte_uffd_wp(pte))
			flags |= PM_UFFD_WP;
	} else if (is_swap_pte(pte)) {
		swp_entry_t entry;
		if (pte_swp_soft_dirty(pte))
			flags |= PM_SOFT_DIRTY;
		if (pte_swp_uffd_wp(pte))
			flags |= PM_UFFD_WP;
		entry = pte_to_swp_entry(pte);
		if (pm->show_pfn)
			frame = swp_type(entry) |
				(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
		flags |= PM_SWAP;
		if (is_pfn_swap_entry(entry))
			page = pfn_swap_entry_to_page(entry);
	}

	if (page && !PageAnon(page))
		flags |= PM_FILE;
	if (page && page_mapcount(page) == 1)
		flags |= PM_MMAP_EXCLUSIVE;
	if (vma->vm_flags & VM_SOFTDIRTY)
		flags |= PM_SOFT_DIRTY;

	return make_pme(frame, flags);
}

356515e7 | 1415 | static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, |
2165009b | 1416 | struct mm_walk *walk) |
85863e47 | 1417 | { |
f995ece2 | 1418 | struct vm_area_struct *vma = walk->vma; |
2165009b | 1419 | struct pagemapread *pm = walk->private; |
bf929152 | 1420 | spinlock_t *ptl; |
05fbf357 | 1421 | pte_t *pte, *orig_pte; |
85863e47 MM |
1422 | int err = 0; |
1423 | ||
356515e7 | 1424 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
b6ec57f4 KS |
1425 | ptl = pmd_trans_huge_lock(pmdp, vma); |
1426 | if (ptl) { | |
356515e7 KK |
1427 | u64 flags = 0, frame = 0; |
1428 | pmd_t pmd = *pmdp; | |
84c3fc4e | 1429 | struct page *page = NULL; |
0f8975ec | 1430 | |
b83d7e43 | 1431 | if (vma->vm_flags & VM_SOFTDIRTY) |
deb94544 | 1432 | flags |= PM_SOFT_DIRTY; |
d9104d1c | 1433 | |
356515e7 | 1434 | if (pmd_present(pmd)) { |
84c3fc4e | 1435 | page = pmd_page(pmd); |
77bb499b | 1436 | |
356515e7 | 1437 | flags |= PM_PRESENT; |
b83d7e43 HY |
1438 | if (pmd_soft_dirty(pmd)) |
1439 | flags |= PM_SOFT_DIRTY; | |
fb8e37f3 PX |
1440 | if (pmd_uffd_wp(pmd)) |
1441 | flags |= PM_UFFD_WP; | |
1c90308e KK |
1442 | if (pm->show_pfn) |
1443 | frame = pmd_pfn(pmd) + | |
1444 | ((addr & ~PMD_MASK) >> PAGE_SHIFT); | |
356515e7 | 1445 | } |
84c3fc4e ZY |
1446 | #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION |
1447 | else if (is_swap_pmd(pmd)) { | |
1448 | swp_entry_t entry = pmd_to_swp_entry(pmd); | |
ab6ecf24 | 1449 | unsigned long offset; |
84c3fc4e | 1450 | |
ab6ecf24 HY |
1451 | if (pm->show_pfn) { |
1452 | offset = swp_offset(entry) + | |
1453 | ((addr & ~PMD_MASK) >> PAGE_SHIFT); | |
1454 | frame = swp_type(entry) | | |
1455 | (offset << MAX_SWAPFILES_SHIFT); | |
1456 | } | |
84c3fc4e | 1457 | flags |= PM_SWAP; |
b83d7e43 HY |
1458 | if (pmd_swp_soft_dirty(pmd)) |
1459 | flags |= PM_SOFT_DIRTY; | |
fb8e37f3 PX |
1460 | if (pmd_swp_uffd_wp(pmd)) |
1461 | flags |= PM_UFFD_WP; | |
84c3fc4e | 1462 | VM_BUG_ON(!is_pmd_migration_entry(pmd)); |
af5cdaf8 | 1463 | page = pfn_swap_entry_to_page(entry); |
84c3fc4e ZY |
1464 | } |
1465 | #endif | |
1466 | ||
1467 | if (page && page_mapcount(page) == 1) | |
1468 | flags |= PM_MMAP_EXCLUSIVE; | |
356515e7 | 1469 | |
025c5b24 | 1470 | for (; addr != end; addr += PAGE_SIZE) { |
356515e7 | 1471 | pagemap_entry_t pme = make_pme(frame, flags); |
025c5b24 | 1472 | |
092b50ba | 1473 | err = add_to_pagemap(addr, &pme, pm); |
025c5b24 NH |
1474 | if (err) |
1475 | break; | |
ab6ecf24 HY |
1476 | if (pm->show_pfn) { |
1477 | if (flags & PM_PRESENT) | |
1478 | frame++; | |
1479 | else if (flags & PM_SWAP) | |
1480 | frame += (1 << MAX_SWAPFILES_SHIFT); | |
1481 | } | |
5aaabe83 | 1482 | } |
bf929152 | 1483 | spin_unlock(ptl); |
025c5b24 | 1484 | return err; |
5aaabe83 NH |
1485 | } |
1486 | ||
356515e7 | 1487 | if (pmd_trans_unstable(pmdp)) |
45f83cef | 1488 | return 0; |
356515e7 | 1489 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
81d0fa62 | 1490 | |
f995ece2 NH |
1491 | /* |
1492 | * We can assume that @vma always points to a valid one and @end never | |
1493 | * goes beyond vma->vm_end. | |
1494 | */ | |
356515e7 | 1495 | orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl); |
f995ece2 NH |
1496 | for (; addr < end; pte++, addr += PAGE_SIZE) { |
1497 | pagemap_entry_t pme; | |
05fbf357 | 1498 | |
deb94544 | 1499 | pme = pte_to_pagemap_entry(pm, vma, addr, *pte); |
f995ece2 | 1500 | err = add_to_pagemap(addr, &pme, pm); |
05fbf357 | 1501 | if (err) |
81d0fa62 | 1502 | break; |
85863e47 | 1503 | } |
f995ece2 | 1504 | pte_unmap_unlock(orig_pte, ptl); |
85863e47 MM |
1505 | |
1506 | cond_resched(); | |
1507 | ||
1508 | return err; | |
1509 | } | |
1510 | ||
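/*
 * Editor's note: a hedged sketch (these helper names are hypothetical,
 * not kernel API) of the swap-entry "frame" packing built above: the
 * swap type lives in the low MAX_SWAPFILES_SHIFT (5) bits and the swap
 * offset sits above them.  This is also why the per-page loop above
 * adds 1 << MAX_SWAPFILES_SHIFT per page for swapped entries: it
 * advances the offset while leaving the type bits intact.
 */
static inline unsigned int pm_swap_type(u64 frame)
{
	return frame & ((1 << MAX_SWAPFILES_SHIFT) - 1);	/* bits 0-4 */
}

static inline u64 pm_swap_offset(u64 frame)
{
	return frame >> MAX_SWAPFILES_SHIFT;			/* bits 5-54 */
}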
1a5cb814 | 1511 | #ifdef CONFIG_HUGETLB_PAGE |
116354d1 | 1512 | /* This function walks within one hugetlb entry in a single call */
356515e7 | 1513 | static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask, |
116354d1 NH |
1514 | unsigned long addr, unsigned long end, |
1515 | struct mm_walk *walk) | |
5dc37642 | 1516 | { |
5dc37642 | 1517 | struct pagemapread *pm = walk->private; |
f995ece2 | 1518 | struct vm_area_struct *vma = walk->vma; |
356515e7 | 1519 | u64 flags = 0, frame = 0; |
5dc37642 | 1520 | int err = 0; |
356515e7 | 1521 | pte_t pte; |
5dc37642 | 1522 | |
f995ece2 | 1523 | if (vma->vm_flags & VM_SOFTDIRTY) |
deb94544 | 1524 | flags |= PM_SOFT_DIRTY; |
d9104d1c | 1525 | |
356515e7 KK |
1526 | pte = huge_ptep_get(ptep); |
1527 | if (pte_present(pte)) { | |
1528 | struct page *page = pte_page(pte); | |
1529 | ||
1530 | if (!PageAnon(page)) | |
1531 | flags |= PM_FILE; | |
1532 | ||
77bb499b KK |
1533 | if (page_mapcount(page) == 1) |
1534 | flags |= PM_MMAP_EXCLUSIVE; | |
1535 | ||
356515e7 | 1536 | flags |= PM_PRESENT; |
1c90308e KK |
1537 | if (pm->show_pfn) |
1538 | frame = pte_pfn(pte) + | |
1539 | ((addr & ~hmask) >> PAGE_SHIFT); | |
356515e7 KK |
1540 | } |
1541 | ||
5dc37642 | 1542 | for (; addr != end; addr += PAGE_SIZE) { |
356515e7 KK |
1543 | pagemap_entry_t pme = make_pme(frame, flags); |
1544 | ||
092b50ba | 1545 | err = add_to_pagemap(addr, &pme, pm); |
5dc37642 NH |
1546 | if (err) |
1547 | return err; | |
1c90308e | 1548 | if (pm->show_pfn && (flags & PM_PRESENT)) |
356515e7 | 1549 | frame++; |
5dc37642 NH |
1550 | } |
1551 | ||
1552 | cond_resched(); | |
1553 | ||
1554 | return err; | |
1555 | } | |
7b86ac33 CH |
1556 | #else |
1557 | #define pagemap_hugetlb_range NULL | |
1a5cb814 | 1558 | #endif /* CONFIG_HUGETLB_PAGE */
5dc37642 | 1559 | |
7b86ac33 CH |
1560 | static const struct mm_walk_ops pagemap_ops = { |
1561 | .pmd_entry = pagemap_pmd_range, | |
1562 | .pte_hole = pagemap_pte_hole, | |
1563 | .hugetlb_entry = pagemap_hugetlb_range, | |
1564 | }; | |
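/*
 * Editor's note: with these ops, walk_page_range() invokes
 * pagemap_pmd_range() for each populated pmd, pagemap_pte_hole() for
 * unmapped gaps, and pagemap_hugetlb_range() for hugetlb VMAs, so
 * every virtual page in the walked range yields exactly one 64-bit
 * pagemap entry.
 */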
1565 | ||
85863e47 MM |
1566 | /* |
1567 | * /proc/pid/pagemap - an array mapping virtual pages to pfns | |
1568 | * | |
f16278c6 HR |
1569 | * For each page in the address space, this file contains one 64-bit entry |
1570 | * consisting of the following: | |
1571 | * | |
052fb0d6 | 1572 | * Bits 0-54 page frame number (PFN) if present |
f16278c6 | 1573 | * Bits 0-4 swap type if swapped |
052fb0d6 | 1574 | * Bits 5-54 swap offset if swapped |
1ad1335d | 1575 | * Bit 55 pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst) |
77bb499b KK |
1576 | * Bit 56 page exclusively mapped |
1577 | * Bit 57 pte is uffd-wp write-protected; bits 58-60 zero
052fb0d6 | 1578 | * Bit 61 page is file-page or shared-anon |
f16278c6 HR |
1579 | * Bit 62 page swapped |
1580 | * Bit 63 page present | |
1581 | * | |
1582 | * If the page is not present but in swap, then the PFN contains an | |
1583 | * encoding of the swap file number and the page's offset into the | |
1584 | * swap area. Unmapped pages return a null PFN. This allows determining
85863e47 MM |
1585 | * precisely which pages are mapped (or in swap) and comparing mapped |
1586 | * pages between processes. | |
1587 | * | |
1588 | * Efficient users of this interface will use /proc/pid/maps to | |
1589 | * determine which areas of memory are actually mapped and llseek to | |
1590 | * skip over unmapped regions. | |
1591 | */ | |
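/*
 * Editor's note: a minimal userspace sketch (not kernel code) of
 * consuming the ABI documented above.  Only the bit layout from the
 * comment is assumed; dump_entry() and its error handling are
 * illustrative.  Without CAP_SYS_ADMIN the PFN field reads back as
 * zero (see the pm.show_pfn check in pagemap_read() below), and an
 * efficient reader would first parse /proc/pid/maps and lseek() past
 * unmapped gaps rather than scan the whole address space.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int dump_entry(int pid, unsigned long vaddr)
{
	char path[64];
	uint64_t pme;
	long psize = sysconf(_SC_PAGESIZE);
	int fd;

	snprintf(path, sizeof(path), "/proc/%d/pagemap", pid);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	/* one 64-bit entry per virtual page */
	if (pread(fd, &pme, sizeof(pme),
		  (off_t)(vaddr / psize) * sizeof(pme)) != sizeof(pme)) {
		close(fd);
		return -1;
	}
	close(fd);

	if (pme & (1ULL << 63))			/* bit 63: present */
		printf("present pfn=%llu%s\n",
		       (unsigned long long)(pme & ((1ULL << 55) - 1)),
		       (pme & (1ULL << 56)) ? " exclusive" : "");
	else if (pme & (1ULL << 62))		/* bit 62: swapped */
		printf("swap type=%u offset=%llu\n",
		       (unsigned)(pme & 0x1f),
		       (unsigned long long)((pme & ((1ULL << 55) - 1)) >> 5));
	else
		printf("not present\n");
	return 0;
}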
1592 | static ssize_t pagemap_read(struct file *file, char __user *buf, | |
1593 | size_t count, loff_t *ppos) | |
1594 | { | |
a06db751 | 1595 | struct mm_struct *mm = file->private_data; |
85863e47 | 1596 | struct pagemapread pm; |
5d7e0d2b AM |
1597 | unsigned long src; |
1598 | unsigned long svpfn; | |
1599 | unsigned long start_vaddr; | |
1600 | unsigned long end_vaddr; | |
a06db751 | 1601 | int ret = 0, copied = 0; |
85863e47 | 1602 | |
388f7934 | 1603 | if (!mm || !mmget_not_zero(mm)) |
85863e47 MM |
1604 | goto out; |
1605 | ||
85863e47 MM |
1606 | ret = -EINVAL; |
1607 | /* file position must be aligned */ | |
aae8679b | 1608 | if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES)) |
a06db751 | 1609 | goto out_mm; |
85863e47 MM |
1610 | |
1611 | ret = 0; | |
08161786 | 1612 | if (!count) |
a06db751 | 1613 | goto out_mm; |
08161786 | 1614 | |
1c90308e KK |
1615 | /* do not disclose physical addresses: attack vector */ |
1616 | pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN); | |
1617 | ||
8c829622 | 1618 | pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); |
6da2ec56 | 1619 | pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL); |
5d7e0d2b | 1620 | ret = -ENOMEM; |
d82ef020 | 1621 | if (!pm.buffer) |
a06db751 | 1622 | goto out_mm; |
85863e47 | 1623 | |
5d7e0d2b AM |
1624 | src = *ppos; |
1625 | svpfn = src / PM_ENTRY_BYTES; | |
a06db751 | 1626 | end_vaddr = mm->task_size; |
5d7e0d2b AM |
1627 | |
1628 | /* watch out for wraparound */ | |
40d6366e MC |
1629 | start_vaddr = end_vaddr; |
1630 | if (svpfn <= (ULONG_MAX >> PAGE_SHIFT)) | |
1631 | start_vaddr = untagged_addr(svpfn << PAGE_SHIFT); | |
1632 | ||
1633 | /* Ensure the address is inside the task's address space */
1634 | if (start_vaddr > mm->task_size) | |
5d7e0d2b AM |
1635 | start_vaddr = end_vaddr; |
1636 | ||
1637 | /* | |
1638 | * The odds are that this will stop walking well
1639 | * before end_vaddr: the length of the user buffer
1640 | * is tracked in "pm", and the walk stops as soon
1641 | * as that buffer is full.
1642 | */ | |
d82ef020 KH |
1643 | ret = 0; |
1644 | while (count && (start_vaddr < end_vaddr)) { | |
1645 | int len; | |
1646 | unsigned long end; | |
1647 | ||
1648 | pm.pos = 0; | |
ea251c1d | 1649 | end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK; |
d82ef020 KH |
1650 | /* overflow? */
1651 | if (end < start_vaddr || end > end_vaddr) | |
1652 | end = end_vaddr; | |
d8ed45c5 | 1653 | ret = mmap_read_lock_killable(mm); |
ad80b932 KK |
1654 | if (ret) |
1655 | goto out_free; | |
7b86ac33 | 1656 | ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm); |
d8ed45c5 | 1657 | mmap_read_unlock(mm); |
d82ef020 KH |
1658 | start_vaddr = end; |
1659 | ||
1660 | len = min(count, PM_ENTRY_BYTES * pm.pos); | |
309361e0 | 1661 | if (copy_to_user(buf, pm.buffer, len)) { |
d82ef020 | 1662 | ret = -EFAULT; |
a06db751 | 1663 | goto out_free; |
d82ef020 KH |
1664 | } |
1665 | copied += len; | |
1666 | buf += len; | |
1667 | count -= len; | |
85863e47 | 1668 | } |
d82ef020 KH |
1669 | *ppos += copied; |
1670 | if (!ret || ret == PM_END_OF_BUFFER) | |
1671 | ret = copied; | |
1672 | ||
98bc93e5 KM |
1673 | out_free: |
1674 | kfree(pm.buffer); | |
a06db751 KK |
1675 | out_mm: |
1676 | mmput(mm); | |
85863e47 MM |
1677 | out: |
1678 | return ret; | |
1679 | } | |
1680 | ||
541c237c PE |
1681 | static int pagemap_open(struct inode *inode, struct file *file) |
1682 | { | |
a06db751 KK |
1683 | struct mm_struct *mm; |
1684 | ||
a06db751 KK |
1685 | mm = proc_mem_open(inode, PTRACE_MODE_READ); |
1686 | if (IS_ERR(mm)) | |
1687 | return PTR_ERR(mm); | |
1688 | file->private_data = mm; | |
1689 | return 0; | |
1690 | } | |
1691 | ||
1692 | static int pagemap_release(struct inode *inode, struct file *file) | |
1693 | { | |
1694 | struct mm_struct *mm = file->private_data; | |
1695 | ||
1696 | if (mm) | |
1697 | mmdrop(mm); | |
541c237c PE |
1698 | return 0; |
1699 | } | |
1700 | ||
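/*
 * Editor's note on mm lifetime: pagemap_open() pins the mm_struct
 * itself via proc_mem_open() (mmgrab, paired with the mmdrop() in
 * pagemap_release() above), while each pagemap_read() call also needs
 * the address space and so takes mmget_not_zero(), paired with
 * mmput().  A read against an exited task therefore fails gracefully
 * instead of touching a freed mm.
 */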
85863e47 MM |
1701 | const struct file_operations proc_pagemap_operations = { |
1702 | .llseek = mem_lseek, /* borrow this */ | |
1703 | .read = pagemap_read, | |
541c237c | 1704 | .open = pagemap_open, |
a06db751 | 1705 | .release = pagemap_release, |
85863e47 | 1706 | }; |
1e883281 | 1707 | #endif /* CONFIG_PROC_PAGE_MONITOR */ |
85863e47 | 1708 | |
6e21c8f1 | 1709 | #ifdef CONFIG_NUMA |
6e21c8f1 | 1710 | |
f69ff943 | 1711 | struct numa_maps { |
f69ff943 SW |
1712 | unsigned long pages; |
1713 | unsigned long anon; | |
1714 | unsigned long active; | |
1715 | unsigned long writeback; | |
1716 | unsigned long mapcount_max; | |
1717 | unsigned long dirty; | |
1718 | unsigned long swapcache; | |
1719 | unsigned long node[MAX_NUMNODES]; | |
1720 | }; | |
1721 | ||
5b52fc89 SW |
1722 | struct numa_maps_private { |
1723 | struct proc_maps_private proc_maps; | |
1724 | struct numa_maps md; | |
1725 | }; | |
1726 | ||
eb4866d0 DH |
1727 | static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty, |
1728 | unsigned long nr_pages) | |
f69ff943 SW |
1729 | { |
1730 | int count = page_mapcount(page); | |
1731 | ||
eb4866d0 | 1732 | md->pages += nr_pages; |
f69ff943 | 1733 | if (pte_dirty || PageDirty(page)) |
eb4866d0 | 1734 | md->dirty += nr_pages; |
f69ff943 SW |
1735 | |
1736 | if (PageSwapCache(page)) | |
eb4866d0 | 1737 | md->swapcache += nr_pages; |
f69ff943 SW |
1738 | |
1739 | if (PageActive(page) || PageUnevictable(page)) | |
eb4866d0 | 1740 | md->active += nr_pages; |
f69ff943 SW |
1741 | |
1742 | if (PageWriteback(page)) | |
eb4866d0 | 1743 | md->writeback += nr_pages; |
f69ff943 SW |
1744 | |
1745 | if (PageAnon(page)) | |
eb4866d0 | 1746 | md->anon += nr_pages; |
f69ff943 SW |
1747 | |
1748 | if (count > md->mapcount_max) | |
1749 | md->mapcount_max = count; | |
1750 | ||
eb4866d0 | 1751 | md->node[page_to_nid(page)] += nr_pages; |
f69ff943 SW |
1752 | } |
1753 | ||
3200a8aa DH |
1754 | static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, |
1755 | unsigned long addr) | |
1756 | { | |
1757 | struct page *page; | |
1758 | int nid; | |
1759 | ||
1760 | if (!pte_present(pte)) | |
1761 | return NULL; | |
1762 | ||
1763 | page = vm_normal_page(vma, addr, pte); | |
1764 | if (!page) | |
1765 | return NULL; | |
1766 | ||
1767 | if (PageReserved(page)) | |
1768 | return NULL; | |
1769 | ||
1770 | nid = page_to_nid(page); | |
4ff1b2c2 | 1771 | if (!node_isset(nid, node_states[N_MEMORY])) |
3200a8aa DH |
1772 | return NULL; |
1773 | ||
1774 | return page; | |
1775 | } | |
1776 | ||
28093f9f GS |
1777 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
1778 | static struct page *can_gather_numa_stats_pmd(pmd_t pmd, | |
1779 | struct vm_area_struct *vma, | |
1780 | unsigned long addr) | |
1781 | { | |
1782 | struct page *page; | |
1783 | int nid; | |
1784 | ||
1785 | if (!pmd_present(pmd)) | |
1786 | return NULL; | |
1787 | ||
1788 | page = vm_normal_page_pmd(vma, addr, pmd); | |
1789 | if (!page) | |
1790 | return NULL; | |
1791 | ||
1792 | if (PageReserved(page)) | |
1793 | return NULL; | |
1794 | ||
1795 | nid = page_to_nid(page); | |
1796 | if (!node_isset(nid, node_states[N_MEMORY])) | |
1797 | return NULL; | |
1798 | ||
1799 | return page; | |
1800 | } | |
1801 | #endif | |
1802 | ||
f69ff943 SW |
1803 | static int gather_pte_stats(pmd_t *pmd, unsigned long addr, |
1804 | unsigned long end, struct mm_walk *walk) | |
1805 | { | |
d85f4d6d NH |
1806 | struct numa_maps *md = walk->private; |
1807 | struct vm_area_struct *vma = walk->vma; | |
f69ff943 SW |
1808 | spinlock_t *ptl; |
1809 | pte_t *orig_pte; | |
1810 | pte_t *pte; | |
1811 | ||
28093f9f | 1812 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
b6ec57f4 KS |
1813 | ptl = pmd_trans_huge_lock(pmd, vma); |
1814 | if (ptl) { | |
025c5b24 NH |
1815 | struct page *page; |
1816 | ||
28093f9f | 1817 | page = can_gather_numa_stats_pmd(*pmd, vma, addr); |
025c5b24 | 1818 | if (page) |
28093f9f | 1819 | gather_stats(page, md, pmd_dirty(*pmd), |
025c5b24 | 1820 | HPAGE_PMD_SIZE/PAGE_SIZE); |
bf929152 | 1821 | spin_unlock(ptl); |
025c5b24 | 1822 | return 0; |
32ef4384 DH |
1823 | } |
1824 | ||
1a5a9906 AA |
1825 | if (pmd_trans_unstable(pmd)) |
1826 | return 0; | |
28093f9f | 1827 | #endif |
f69ff943 SW |
1828 | orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); |
1829 | do { | |
d85f4d6d | 1830 | struct page *page = can_gather_numa_stats(*pte, vma, addr); |
f69ff943 SW |
1831 | if (!page) |
1832 | continue; | |
eb4866d0 | 1833 | gather_stats(page, md, pte_dirty(*pte), 1); |
f69ff943 SW |
1834 | |
1835 | } while (pte++, addr += PAGE_SIZE, addr != end); | |
1836 | pte_unmap_unlock(orig_pte, ptl); | |
a66c0410 | 1837 | cond_resched(); |
f69ff943 SW |
1838 | return 0; |
1839 | } | |
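/*
 * Editor's note: when the pmd maps a THP, the branch above accounts
 * the whole huge page in a single gather_stats() call with nr_pages =
 * HPAGE_PMD_SIZE / PAGE_SIZE (512 base pages for 2 MiB PMDs and
 * 4 KiB pages on x86-64), instead of iterating over 512 ptes.
 */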
1840 | #ifdef CONFIG_HUGETLB_PAGE | |
632fd60f | 1841 | static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask, |
f69ff943 SW |
1842 | unsigned long addr, unsigned long end, struct mm_walk *walk) |
1843 | { | |
5c2ff95e | 1844 | pte_t huge_pte = huge_ptep_get(pte); |
f69ff943 SW |
1845 | struct numa_maps *md; |
1846 | struct page *page; | |
1847 | ||
5c2ff95e | 1848 | if (!pte_present(huge_pte)) |
f69ff943 SW |
1849 | return 0; |
1850 | ||
5c2ff95e | 1851 | page = pte_page(huge_pte); |
f69ff943 SW |
1852 | if (!page) |
1853 | return 0; | |
1854 | ||
1855 | md = walk->private; | |
5c2ff95e | 1856 | gather_stats(page, md, pte_dirty(huge_pte), 1); |
f69ff943 SW |
1857 | return 0; |
1858 | } | |
1859 | ||
1860 | #else | |
632fd60f | 1861 | static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask, |
f69ff943 SW |
1862 | unsigned long addr, unsigned long end, struct mm_walk *walk) |
1863 | { | |
1864 | return 0; | |
1865 | } | |
1866 | #endif | |
1867 | ||
7b86ac33 CH |
1868 | static const struct mm_walk_ops show_numa_ops = { |
1869 | .hugetlb_entry = gather_hugetlb_stats, | |
1870 | .pmd_entry = gather_pte_stats, | |
1871 | }; | |
1872 | ||
f69ff943 SW |
1873 | /* |
1874 | * Display pages allocated per node and memory policy via /proc. | |
1875 | */ | |
871305bb | 1876 | static int show_numa_map(struct seq_file *m, void *v) |
f69ff943 | 1877 | { |
5b52fc89 SW |
1878 | struct numa_maps_private *numa_priv = m->private; |
1879 | struct proc_maps_private *proc_priv = &numa_priv->proc_maps; | |
f69ff943 | 1880 | struct vm_area_struct *vma = v; |
5b52fc89 | 1881 | struct numa_maps *md = &numa_priv->md; |
f69ff943 SW |
1882 | struct file *file = vma->vm_file; |
1883 | struct mm_struct *mm = vma->vm_mm; | |
f69ff943 | 1884 | struct mempolicy *pol; |
948927ee DR |
1885 | char buffer[64]; |
1886 | int nid; | |
f69ff943 SW |
1887 | |
1888 | if (!mm) | |
1889 | return 0; | |
1890 | ||
5b52fc89 SW |
1891 | /* Ensure we start with an empty set of numa_maps statistics. */ |
1892 | memset(md, 0, sizeof(*md)); | |
f69ff943 | 1893 | |
498f2371 ON |
1894 | pol = __get_vma_policy(vma, vma->vm_start); |
1895 | if (pol) { | |
1896 | mpol_to_str(buffer, sizeof(buffer), pol); | |
1897 | mpol_cond_put(pol); | |
1898 | } else { | |
1899 | mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy); | |
1900 | } | |
f69ff943 SW |
1901 | |
1902 | seq_printf(m, "%08lx %s", vma->vm_start, buffer); | |
1903 | ||
1904 | if (file) { | |
17c2b4ee | 1905 | seq_puts(m, " file="); |
2726d566 | 1906 | seq_file_path(m, file, "\n\t= "); |
f69ff943 | 1907 | } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { |
17c2b4ee | 1908 | seq_puts(m, " heap"); |
1240ea0d | 1909 | } else if (is_stack(vma)) { |
65376df5 | 1910 | seq_puts(m, " stack"); |
f69ff943 SW |
1911 | } |
1912 | ||
fc360bd9 | 1913 | if (is_vm_hugetlb_page(vma)) |
17c2b4ee | 1914 | seq_puts(m, " huge"); |
fc360bd9 | 1915 | |
c1e8d7c6 | 1916 | /* mmap_lock is held by m_start */ |
7b86ac33 | 1917 | walk_page_vma(vma, &show_numa_ops, md); |
f69ff943 SW |
1918 | |
1919 | if (!md->pages) | |
1920 | goto out; | |
1921 | ||
1922 | if (md->anon) | |
1923 | seq_printf(m, " anon=%lu", md->anon); | |
1924 | ||
1925 | if (md->dirty) | |
1926 | seq_printf(m, " dirty=%lu", md->dirty); | |
1927 | ||
1928 | if (md->pages != md->anon && md->pages != md->dirty) | |
1929 | seq_printf(m, " mapped=%lu", md->pages); | |
1930 | ||
1931 | if (md->mapcount_max > 1) | |
1932 | seq_printf(m, " mapmax=%lu", md->mapcount_max); | |
1933 | ||
1934 | if (md->swapcache) | |
1935 | seq_printf(m, " swapcache=%lu", md->swapcache); | |
1936 | ||
1937 | if (md->active < md->pages && !is_vm_hugetlb_page(vma)) | |
1938 | seq_printf(m, " active=%lu", md->active); | |
1939 | ||
1940 | if (md->writeback) | |
1941 | seq_printf(m, " writeback=%lu", md->writeback); | |
1942 | ||
948927ee DR |
1943 | for_each_node_state(nid, N_MEMORY) |
1944 | if (md->node[nid]) | |
1945 | seq_printf(m, " N%d=%lu", nid, md->node[nid]); | |
198d1597 RA |
1946 | |
1947 | seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10); | |
f69ff943 SW |
1948 | out: |
1949 | seq_putc(m, '\n'); | |
f69ff943 SW |
1950 | return 0; |
1951 | } | |
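/*
 * Editor's note: an illustrative /proc/<pid>/numa_maps line as built
 * by show_numa_map() above (all values made up):
 *
 *   7f2b4c200000 default file=/usr/lib/libc.so.6 mapped=96 mapmax=42
 *       N0=64 N1=32 kernelpagesize_kB=4
 *
 * (a real entry is a single line; wrapped here for width)
 */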
5b52fc89 | 1952 | |
03a44825 | 1953 | static const struct seq_operations proc_pid_numa_maps_op = { |
b7643757 SP |
1954 | .start = m_start, |
1955 | .next = m_next, | |
1956 | .stop = m_stop, | |
871305bb | 1957 | .show = show_numa_map, |
6e21c8f1 | 1958 | }; |
662795de | 1959 | |
b7643757 SP |
1960 | static int pid_numa_maps_open(struct inode *inode, struct file *file) |
1961 | { | |
871305bb VB |
1962 | return proc_maps_open(inode, file, &proc_pid_numa_maps_op, |
1963 | sizeof(struct numa_maps_private)); | |
b7643757 SP |
1964 | } |
1965 | ||
1966 | const struct file_operations proc_pid_numa_maps_operations = { | |
1967 | .open = pid_numa_maps_open, | |
1968 | .read = seq_read, | |
1969 | .llseek = seq_lseek, | |
29a40ace | 1970 | .release = proc_map_release, |
b7643757 SP |
1971 | }; |
1972 | ||
f69ff943 | 1973 | #endif /* CONFIG_NUMA */ |