#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"
char *task_mem(struct mm_struct *mm, char *buffer)
{
	unsigned long data, text, lib;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	buffer += sprintf(buffer,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
	return buffer;
}
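/*
 * For illustration only: the block built above appears in
 * /proc/<pid>/status roughly as follows (sample values, not from
 * any real task):
 *
 *	VmPeak:	    8040 kB
 *	VmSize:	    8040 kB
 *	VmLck:	       0 kB
 *	VmHWM:	     620 kB
 *	VmRSS:	     620 kB
 *	VmData:	     156 kB
 *	VmStk:	      88 kB
 *	VmExe:	     564 kB
 *	VmLib:	    1568 kB
 *	VmPTE:	      20 kB
 */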
unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}
int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	*shared = get_mm_counter(mm, file_rss);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, anon_rss);
	return mm->total_vm;
}
int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
	struct vm_area_struct *vma;
	int result = -ENOENT;
	struct task_struct *task = get_proc_task(inode);
	struct mm_struct *mm = NULL;

	if (task) {
		mm = get_task_mm(task);
		put_task_struct(task);
	}
	if (!mm)
		goto out;
	down_read(&mm->mmap_sem);

	/* The first executable, file-backed vma is the task's binary. */
	vma = mm->mmap;
	while (vma) {
		if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
			break;
		vma = vma->vm_next;
	}

	if (vma) {
		*mnt = mntget(vma->vm_file->f_path.mnt);
		*dentry = dget(vma->vm_file->f_path.dentry);
		result = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
out:
	return result;
}
static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}
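/*
 * Illustrative arithmetic: with 8-byte pointers the target column is
 * 25 + 8 * 6 = 73, so a 40-character address/perms/offset/inode prefix
 * is padded with 33 spaces before the mapping name is printed.
 */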
static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */
	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	mm = mm_for_maps(priv->task);
	if (!mm)
		return NULL;

	tail_vma = get_gate_vma(priv->task);
	priv->tail_vma = tail_vma;

	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}
static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}
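/*
 * Note on the seq_file contract (summary, not from the original source):
 * for each read the core calls m_start() once, then the ->show callback
 * and m_next() per vma, then m_stop().  m->version persists between
 * reads, which is what makes the last_addr resume hint above work.
 */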
static int do_maps_open(struct inode *inode, struct file *file,
			struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}
static int show_map(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	int flags = vma->vm_flags;
	unsigned long ino = 0;
	dev_t dev = 0;
	int len;

	if (maps_protect && !ptrace_may_attach(task))
		return -EACCES;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
	}

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
			vma->vm_start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			vma->vm_pgoff << PAGE_SHIFT,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n");
	} else {
		const char *name = arch_vma_name(vma);
		if (!name) {
			if (mm) {
				if (vma->vm_start <= mm->start_brk &&
						vma->vm_end >= mm->brk) {
					name = "[heap]";
				} else if (vma->vm_start <= mm->start_stack &&
					   vma->vm_end >= mm->start_stack) {
					name = "[stack]";
				}
			} else {
				name = "[vdso]";
			}
		}
		if (name) {
			pad_len_spaces(m, len);
			seq_puts(m, name);
		}
	}
	seq_putc(m, '\n');

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
	return 0;
}
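/*
 * Example (made-up values) of a resulting /proc/<pid>/maps line:
 *
 *	08048000-0804c000 r-xp 00000000 03:02 51306      /bin/cat
 *
 * i.e. address range, permissions (p = private, s = shared), file
 * offset, major:minor device, inode, then the pathname or a
 * [heap]/[stack]/arch-specific marker.
 */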
static struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static int maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
/*
 * Proportional Set Size(PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors. So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 *	- 1M 3-user-pages add up to 8KB errors;
 *	- supports mapcount up to 2^24, or 16M;
 *	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
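/*
 * Worked example of the fixed-point scheme (assuming PAGE_SIZE 4096):
 * a page mapped by three processes adds
 *	(4096 << PSS_SHIFT) / 3 = 16777216 / 3 = 5592405
 * to pss; converted back with pss >> PSS_SHIFT that is 1365 bytes,
 * against an exact share of 4096 / 3 = 1365.33 bytes, so each page
 * loses well under one byte to truncation.
 */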
struct mem_size_stats
{
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	u64 pss;
};
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   void *private)
{
	struct mem_size_stats *mss = private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;
	int mapcount;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		mss->resident += PAGE_SIZE;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Accumulate the size in pages that have been accessed. */
		if (pte_young(ptent) || PageReferenced(page))
			mss->referenced += PAGE_SIZE;
		mapcount = page_mapcount(page);
		if (mapcount >= 2) {
			if (pte_dirty(ptent))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
		} else {
			if (pte_dirty(ptent))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT);
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static struct mm_walk smaps_walk = { .pmd_entry = smaps_pte_range };
static int show_smap(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	int ret;

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_mm, vma->vm_start, vma->vm_end,
				&smaps_walk, &mss);

	ret = show_map(m, v);
	if (ret)
		return ret;

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10);
	return ret;
}
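/*
 * Sample /proc/<pid>/smaps entry (values invented for illustration):
 * show_map() emits the header line, the seq_printf() above the rest.
 * With three resident pages each shared by three tasks, Rss is 12 kB
 * and Pss is 12 / 3 = 4 kB:
 *
 *	08048000-0804c000 r-xp 00000000 03:02 51306      /bin/cat
 *	Size:                16 kB
 *	Rss:                 12 kB
 *	Pss:                  4 kB
 *	Shared_Clean:        12 kB
 *	Shared_Dirty:         0 kB
 *	Private_Clean:        0 kB
 *	Private_Dirty:        0 kB
 *	Referenced:          12 kB
 */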
static struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

static int smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

const struct file_operations proc_smaps_operations = {
	.open		= smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, void *private)
{
	struct vm_area_struct *vma = private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static struct mm_walk clear_refs_walk = { .pmd_entry = clear_refs_pte_range };
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF], *end;
	struct mm_struct *mm;
	struct vm_area_struct *vma;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	if (!simple_strtol(buffer, &end, 0))
		return -EINVAL;
	if (*end == '\n')
		end++;
	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			if (!is_vm_hugetlb_page(vma))
				walk_page_range(mm, vma->vm_start, vma->vm_end,
						&clear_refs_walk, vma);
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);
	if (end - buffer == 0)
		return -EIO;
	return end - buffer;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
};
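/*
 * Usage sketch: writing any non-zero decimal value resets the
 * referenced/accessed bits across the whole address space, e.g.
 *
 *	echo 1 > /proc/<pid>/clear_refs
 *
 * after which the Referenced: values in /proc/<pid>/smaps reflect only
 * pages touched since the write.
 */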
#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

static int show_numa_map_checked(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	if (maps_protect && !ptrace_may_attach(task))
		return -EACCES;

	return show_numa_map(m, v);
}

static struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map_checked
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

const struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif