// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fs_struct.h>
#include <linux/mount.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/sched/mm.h>

#include "internal.h"
/*
 * Logic: we've got two memory sums for each process, "shared", and
 * "non-shared". Shared memory may get counted more than once, for
 * each process that owns it. Non-shared memory is counted
 * accurately.
 */
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
        VMA_ITERATOR(vmi, mm, 0);
        struct vm_area_struct *vma;
        struct vm_region *region;
        unsigned long bytes = 0, sbytes = 0, slack = 0, size;

        mmap_read_lock(mm);
        for_each_vma(vmi, vma) {
                bytes += kobjsize(vma);

                region = vma->vm_region;
                if (region) {
                        size = kobjsize(region);
                        size += region->vm_end - region->vm_start;
                } else {
                        size = vma->vm_end - vma->vm_start;
                }

                if (atomic_read(&mm->mm_count) > 1 ||
                    is_nommu_shared_mapping(vma->vm_flags)) {
                        sbytes += size;
                } else {
                        bytes += size;
                        /* slack is region space past the end of the VMA */
                        if (region)
                                slack = region->vm_end - vma->vm_end;
                }
        }

        if (atomic_read(&mm->mm_count) > 1)
                sbytes += kobjsize(mm);
        else
                bytes += kobjsize(mm);

        if (current->fs && current->fs->users > 1)
                sbytes += kobjsize(current->fs);
        else
                bytes += kobjsize(current->fs);

        if (current->files && atomic_read(&current->files->count) > 1)
                sbytes += kobjsize(current->files);
        else
                bytes += kobjsize(current->files);

        if (current->sighand && refcount_read(&current->sighand->count) > 1)
                sbytes += kobjsize(current->sighand);
        else
                bytes += kobjsize(current->sighand);

        bytes += kobjsize(current); /* includes kernel stack */

        mmap_read_unlock(mm);

        seq_printf(m,
                "Mem:\t%8lu bytes\n"
                "Slack:\t%8lu bytes\n"
                "Shared:\t%8lu bytes\n",
                bytes, slack, sbytes);
}
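
/*
 * Illustrative example (byte values assumed, not measured): on a
 * !CONFIG_MMU kernel the counters computed above typically surface in
 * the memory section of /proc/<pid>/status, along the lines of:
 *
 *	Mem:	  487424 bytes
 *	Slack:	    4096 bytes
 *	Shared:	  131072 bytes
 *
 * Only the field layout follows the seq_printf() format string in
 * task_mem(); the numbers are placeholders.
 */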

unsigned long task_vsize(struct mm_struct *mm)
{
        VMA_ITERATOR(vmi, mm, 0);
        struct vm_area_struct *vma;
        unsigned long vsize = 0;

        mmap_read_lock(mm);
        for_each_vma(vmi, vma)
                vsize += vma->vm_end - vma->vm_start;
        mmap_read_unlock(mm);
        return vsize;
}

unsigned long task_statm(struct mm_struct *mm,
                         unsigned long *shared, unsigned long *text,
                         unsigned long *data, unsigned long *resident)
{
        VMA_ITERATOR(vmi, mm, 0);
        struct vm_area_struct *vma;
        struct vm_region *region;
        unsigned long size = kobjsize(mm);

        mmap_read_lock(mm);
        for_each_vma(vmi, vma) {
                size += kobjsize(vma);
                region = vma->vm_region;
                if (region) {
                        size += kobjsize(region);
                        size += region->vm_end - region->vm_start;
                }
        }

        *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
                >> PAGE_SHIFT;
        *data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
                >> PAGE_SHIFT;
        mmap_read_unlock(mm);
        /* convert accumulated object sizes from bytes to pages */
        size >>= PAGE_SHIFT;
        size += *text + *data;
        *resident = size;
        return size;
}
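
/*
 * Worked example for the *text calculation above (illustrative values,
 * assuming 4 KiB pages): with mm->start_code = 0x10000 and
 * mm->end_code = 0x15800,
 *
 *	(PAGE_ALIGN(0x15800) - (0x10000 & PAGE_MASK)) >> PAGE_SHIFT
 *	= (0x16000 - 0x10000) >> 12
 *	= 6 pages
 *
 * i.e. the text segment is rounded out to whole pages before being
 * converted to a page count; *data is computed the same way over the
 * data-to-stack span.
 */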

static int is_stack(struct vm_area_struct *vma)
{
        struct mm_struct *mm = vma->vm_mm;

        /*
         * We make no effort to guess what a given thread considers to be
         * its "stack". It's not even well-defined for programs written
         * in languages like Go.
         */
        return vma->vm_start <= mm->start_stack &&
                vma->vm_end >= mm->start_stack;
}

/*
 * display a single VMA to a sequenced file
 */
static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long ino = 0;
        struct file *file;
        dev_t dev = 0;
        unsigned long flags;
        unsigned long long pgoff = 0;

        flags = vma->vm_flags;
        file = vma->vm_file;

        if (file) {
                struct inode *inode = file_inode(vma->vm_file);
                dev = inode->i_sb->s_dev;
                ino = inode->i_ino;
                pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
        }

        seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
        seq_printf(m,
                   "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
                   vma->vm_start,
                   vma->vm_end,
                   flags & VM_READ ? 'r' : '-',
                   flags & VM_WRITE ? 'w' : '-',
                   flags & VM_EXEC ? 'x' : '-',
                   flags & VM_MAYSHARE ? flags & VM_SHARED ? 'S' : 's' : 'p',
                   pgoff,
                   MAJOR(dev), MINOR(dev), ino);

        if (file) {
                seq_pad(m, ' ');
                seq_file_path(m, file, "");
        } else if (mm && is_stack(vma)) {
                seq_pad(m, ' ');
                seq_puts(m, "[stack]");
        }

        seq_putc(m, '\n');
        return 0;
}
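
/*
 * Illustrative output (addresses, device and inode assumed): one line
 * emitted by the format string above looks like
 *
 *	00010000-00016000 r-xs 00000000 00:0b 1295       /bin/busybox
 *
 * i.e. start-end, permission bits (with 's'/'S' rather than 'p' for
 * shareable mappings), file offset, device major:minor, inode number,
 * then the backing file path or "[stack]" where one can be named.
 */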

/*
 * display mapping lines for a particular process's /proc/pid/maps
 */
static int show_map(struct seq_file *m, void *_p)
{
        return nommu_vma_show(m, _p);
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
        struct proc_maps_private *priv = m->private;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long addr = *pos;

        /* See m_next(). Zero at the start or after lseek. */
        if (addr == -1UL)
                return NULL;

        /* pin the task and mm whilst we play with them */
        priv->task = get_proc_task(priv->inode);
        if (!priv->task)
                return ERR_PTR(-ESRCH);

        mm = priv->mm;
        if (!mm || !mmget_not_zero(mm))
                return NULL;

        if (mmap_read_lock_killable(mm)) {
                mmput(mm);
                return ERR_PTR(-EINTR);
        }

        /* start the next element from addr */
        vma = find_vma(mm, addr);
        if (vma)
                return vma;

        mmap_read_unlock(mm);
        mmput(mm);
        return NULL;
}

static void m_stop(struct seq_file *m, void *_vml)
{
        struct proc_maps_private *priv = m->private;

        if (!IS_ERR_OR_NULL(_vml)) {
                mmap_read_unlock(priv->mm);
                mmput(priv->mm);
        }
        if (priv->task) {
                put_task_struct(priv->task);
                priv->task = NULL;
        }
}

static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
{
        struct vm_area_struct *vma = _p;

        *pos = vma->vm_end;
        return find_vma(vma->vm_mm, vma->vm_end);
}

static const struct seq_operations proc_pid_maps_ops = {
        .start  = m_start,
        .next   = m_next,
        .stop   = m_stop,
        .show   = show_map
};
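
/*
 * Sketch of how the seq_file core drives these operations for a read
 * of /proc/<pid>/maps (generic seq_file contract, summarised here for
 * reference):
 *
 *	m_start(pos)  - pin task/mm, take the mmap read lock,
 *	                return find_vma(mm, *pos)
 *	show_map(vma) - emit one line via nommu_vma_show()
 *	m_next(vma)   - advance *pos and return the following VMA
 *	  ...show/next repeat until m_next() returns NULL...
 *	m_stop()      - drop the lock and the mm/task references
 */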

static int maps_open(struct inode *inode, struct file *file,
                     const struct seq_operations *ops)
{
        struct proc_maps_private *priv;

        priv = __seq_open_private(file, ops, sizeof(*priv));
        if (!priv)
                return -ENOMEM;

        priv->inode = inode;
        priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
        if (IS_ERR(priv->mm)) {
                int err = PTR_ERR(priv->mm);

                seq_release_private(inode, file);
                return err;
        }

        return 0;
}

static int map_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;
        struct proc_maps_private *priv = seq->private;

        if (priv->mm)
                mmput(priv->mm);

        return seq_release_private(inode, file);
}

static int pid_maps_open(struct inode *inode, struct file *file)
{
        return maps_open(inode, file, &proc_pid_maps_ops);
}

const struct file_operations proc_pid_maps_operations = {
        .open           = pid_maps_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = map_release,
};
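
/*
 * Usage sketch from userspace (illustrative; the path and buffer size
 * are assumptions, and this program is not part of the kernel build):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		ssize_t n;
 *		int fd = open("/proc/self/maps", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		while ((n = read(fd, buf, sizeof(buf))) > 0)
 *			write(1, buf, n);	// one VMA mapping per line
 *		close(fd);
 *		return 0;
 *	}
 */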