#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

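/*
 * Note on units: the mm counters below are in pages; shifting by
 * (PAGE_SHIFT - 10) converts a page count to kilobytes, since
 * 1 kB = 2^10 bytes.  text is a byte length, so it is shifted down
 * by 10 directly, and lib is exec_vm in kB minus text.
 */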
char *task_mem(struct mm_struct *mm, char *buffer)
{
        unsigned long data, text, lib;

        data = mm->total_vm - mm->shared_vm - mm->stack_vm;
        text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
        lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
        buffer += sprintf(buffer,
                "VmSize:\t%8lu kB\n"
                "VmLck:\t%8lu kB\n"
                "VmRSS:\t%8lu kB\n"
                "VmData:\t%8lu kB\n"
                "VmStk:\t%8lu kB\n"
                "VmExe:\t%8lu kB\n"
                "VmLib:\t%8lu kB\n"
                "VmPTE:\t%8lu kB\n",
                (mm->total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
                mm->locked_vm << (PAGE_SHIFT-10),
                get_mm_rss(mm) << (PAGE_SHIFT-10),
                data << (PAGE_SHIFT-10),
                mm->stack_vm << (PAGE_SHIFT-10), text, lib,
                (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
        return buffer;
}

unsigned long task_vsize(struct mm_struct *mm)
{
        return PAGE_SIZE * mm->total_vm;
}

int task_statm(struct mm_struct *mm, int *shared, int *text,
               int *data, int *resident)
{
        *shared = get_mm_counter(mm, file_rss);
        *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
                                                                >> PAGE_SHIFT;
        *data = mm->total_vm - mm->shared_vm;
        *resident = *shared + get_mm_counter(mm, anon_rss);
        return mm->total_vm;
}

int proc_exe_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
{
        struct vm_area_struct *vma;
        int result = -ENOENT;
        struct task_struct *task = proc_task(inode);
        struct mm_struct *mm = get_task_mm(task);

        if (!mm)
                goto out;
        down_read(&mm->mmap_sem);

        vma = mm->mmap;
        while (vma) {
                if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
                        break;
                vma = vma->vm_next;
        }

        if (vma) {
                *mnt = mntget(vma->vm_file->f_vfsmnt);
                *dentry = dget(vma->vm_file->f_dentry);
                result = 0;
        }

        up_read(&mm->mmap_sem);
        mmput(mm);
out:
        return result;
}

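/*
 * Pad the current seq_file line with spaces up to a fixed column, so
 * that the file name or [heap]/[stack] marker printed after the map
 * fields starts at the same offset on every line.
 */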
static void pad_len_spaces(struct seq_file *m, int len)
{
        len = 25 + sizeof(void*) * 6 - len;
        if (len < 1)
                len = 1;
        seq_printf(m, "%*c", len, ' ');
}

struct mem_size_stats
{
        unsigned long resident;
        unsigned long shared_clean;
        unsigned long shared_dirty;
        unsigned long private_clean;
        unsigned long private_dirty;
};

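/*
 * Print one /proc/<pid>/maps line for @v; when @mss is non-NULL (the
 * smaps case) the accumulated size statistics are appended.  m->version
 * caches the start address of the last vma emitted so that the next
 * read can resume cheaply via find_vma() in m_start().
 */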
static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
{
        struct task_struct *task = m->private;
        struct vm_area_struct *vma = v;
        struct mm_struct *mm = vma->vm_mm;
        struct file *file = vma->vm_file;
        int flags = vma->vm_flags;
        unsigned long ino = 0;
        dev_t dev = 0;
        int len;

        if (file) {
                struct inode *inode = vma->vm_file->f_dentry->d_inode;
                dev = inode->i_sb->s_dev;
                ino = inode->i_ino;
        }

        seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
                        vma->vm_start,
                        vma->vm_end,
                        flags & VM_READ ? 'r' : '-',
                        flags & VM_WRITE ? 'w' : '-',
                        flags & VM_EXEC ? 'x' : '-',
                        flags & VM_MAYSHARE ? 's' : 'p',
                        vma->vm_pgoff << PAGE_SHIFT,
                        MAJOR(dev), MINOR(dev), ino, &len);

        /*
         * Print the dentry name for named mappings, and a
         * special [heap] marker for the heap:
         */
        if (file) {
                pad_len_spaces(m, len);
                seq_path(m, file->f_vfsmnt, file->f_dentry, "\n");
        } else {
                if (mm) {
                        if (vma->vm_start <= mm->start_brk &&
                                                vma->vm_end >= mm->brk) {
                                pad_len_spaces(m, len);
                                seq_puts(m, "[heap]");
                        } else {
                                if (vma->vm_start <= mm->start_stack &&
                                        vma->vm_end >= mm->start_stack) {

                                        pad_len_spaces(m, len);
                                        seq_puts(m, "[stack]");
                                }
                        }
                } else {
                        pad_len_spaces(m, len);
                        seq_puts(m, "[vdso]");
                }
        }
        seq_putc(m, '\n');

        if (mss)
                seq_printf(m,
                           "Size:          %8lu kB\n"
                           "Rss:           %8lu kB\n"
                           "Shared_Clean:  %8lu kB\n"
                           "Shared_Dirty:  %8lu kB\n"
                           "Private_Clean: %8lu kB\n"
                           "Private_Dirty: %8lu kB\n",
                           (vma->vm_end - vma->vm_start) >> 10,
                           mss->resident >> 10,
                           mss->shared_clean >> 10,
                           mss->shared_dirty >> 10,
                           mss->private_clean >> 10,
                           mss->private_dirty >> 10);

        if (m->count < m->size) /* vma is copied successfully */
                m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
        return 0;
}

static int show_map(struct seq_file *m, void *v)
{
        return show_map_internal(m, v, NULL);
}

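/*
 * Accumulate smaps statistics for one pte range.  A page mapped here
 * with a reference count of two or more is counted as shared, anything
 * else as private; the pte dirty bit then splits each class into its
 * clean and dirty totals.
 */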
static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                                unsigned long addr, unsigned long end,
                                struct mem_size_stats *mss)
{
        pte_t *pte, ptent;
        unsigned long pfn;
        struct page *page;

        pte = pte_offset_map(pmd, addr);
        do {
                ptent = *pte;
                if (pte_none(ptent) || !pte_present(ptent))
                        continue;

                mss->resident += PAGE_SIZE;
                pfn = pte_pfn(ptent);
                if (!pfn_valid(pfn))
                        continue;

                page = pfn_to_page(pfn);
                if (page_count(page) >= 2) {
                        if (pte_dirty(ptent))
                                mss->shared_dirty += PAGE_SIZE;
                        else
                                mss->shared_clean += PAGE_SIZE;
                } else {
                        if (pte_dirty(ptent))
                                mss->private_dirty += PAGE_SIZE;
                        else
                                mss->private_clean += PAGE_SIZE;
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap(pte - 1);
        cond_resched_lock(&vma->vm_mm->page_table_lock);
}

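/*
 * The helpers below walk the four-level page tables top-down
 * (pgd -> pud -> pmd -> pte), skipping empty or bad entries at each
 * level, in the usual mm/ range-walk pattern.
 */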
static inline void smaps_pmd_range(struct vm_area_struct *vma, pud_t *pud,
                                unsigned long addr, unsigned long end,
                                struct mem_size_stats *mss)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                smaps_pte_range(vma, pmd, addr, next, mss);
        } while (pmd++, addr = next, addr != end);
}

static inline void smaps_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
                                unsigned long addr, unsigned long end,
                                struct mem_size_stats *mss)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                smaps_pmd_range(vma, pud, addr, next, mss);
        } while (pud++, addr = next, addr != end);
}

static inline void smaps_pgd_range(struct vm_area_struct *vma,
                                unsigned long addr, unsigned long end,
                                struct mem_size_stats *mss)
{
        pgd_t *pgd;
        unsigned long next;

        pgd = pgd_offset(vma->vm_mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                smaps_pud_range(vma, pgd, addr, next, mss);
        } while (pgd++, addr = next, addr != end);
}

static int show_smap(struct seq_file *m, void *v)
{
        struct vm_area_struct *vma = v;
        struct mm_struct *mm = vma->vm_mm;
        struct mem_size_stats mss;

        memset(&mss, 0, sizeof mss);

        if (mm) {
                spin_lock(&mm->page_table_lock);
                smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
                spin_unlock(&mm->page_table_lock);
        }

        return show_map_internal(m, v, &mss);
}

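/*
 * seq_file iterator: m_start() pins the mm and takes mmap_sem, then
 * returns the vma at *pos, with the gate vma served as a final,
 * artificial entry; m_next() advances, and m_stop() drops the lock
 * and the mm reference.
 */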
static void *m_start(struct seq_file *m, loff_t *pos)
{
        struct task_struct *task = m->private;
        unsigned long last_addr = m->version;
        struct mm_struct *mm;
        struct vm_area_struct *vma, *tail_vma;
        loff_t l = *pos;

        /*
         * We remember last_addr rather than next_addr to hit with
         * mmap_cache most of the time.  We have zero last_addr at
         * the beginning and also after lseek.  We will have -1 last_addr
         * after the end of the vmas.
         */

        if (last_addr == -1UL)
                return NULL;

        mm = get_task_mm(task);
        if (!mm)
                return NULL;

        tail_vma = get_gate_vma(task);
        down_read(&mm->mmap_sem);

        /* Start with last addr hint */
        if (last_addr && (vma = find_vma(mm, last_addr))) {
                vma = vma->vm_next;
                goto out;
        }

        /*
         * Check that the vma index is within the range and do a
         * sequential scan until m_index.
         */
        vma = NULL;
        if ((unsigned long)l < mm->map_count) {
                vma = mm->mmap;
                while (l-- && vma)
                        vma = vma->vm_next;
                goto out;
        }

        if (l != mm->map_count)
                tail_vma = NULL; /* After gate vma */

out:
        if (vma)
                return vma;

        /* End of vmas has been reached */
        m->version = (tail_vma != NULL) ? 0 : -1UL;
        up_read(&mm->mmap_sem);
        mmput(mm);
        return tail_vma;
}

static void m_stop(struct seq_file *m, void *v)
{
        struct task_struct *task = m->private;
        struct vm_area_struct *vma = v;
        if (vma && vma != get_gate_vma(task)) {
                struct mm_struct *mm = vma->vm_mm;
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct task_struct *task = m->private;
        struct vm_area_struct *vma = v;
        struct vm_area_struct *tail_vma = get_gate_vma(task);

        (*pos)++;
        if (vma && (vma != tail_vma) && vma->vm_next)
                return vma->vm_next;
        m_stop(m, v);
        return (vma != tail_vma) ? tail_vma : NULL;
}

struct seq_operations proc_pid_maps_op = {
        .start = m_start,
        .next  = m_next,
        .stop  = m_stop,
        .show  = show_map
};

struct seq_operations proc_pid_smaps_op = {
        .start = m_start,
        .next  = m_next,
        .stop  = m_stop,
        .show  = show_smap
};

#ifdef CONFIG_NUMA

struct numa_maps {
        unsigned long pages;
        unsigned long anon;
        unsigned long mapped;
        unsigned long mapcount_max;
        unsigned long node[MAX_NUMNODES];
};

/*
 * Calculate numa node maps for a vma
 */
static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma)
{
        struct page *page;
        unsigned long vaddr;
        struct mm_struct *mm = vma->vm_mm;
        int i;
        struct numa_maps *md = kmalloc(sizeof(struct numa_maps), GFP_KERNEL);

        if (!md)
                return NULL;
        md->pages = 0;
        md->anon = 0;
        md->mapped = 0;
        md->mapcount_max = 0;
        for_each_node(i)
                md->node[i] = 0;

        spin_lock(&mm->page_table_lock);
        for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
                page = follow_page(mm, vaddr, 0);
                if (page) {
                        int count = page_mapcount(page);

                        if (count)
                                md->mapped++;
                        if (count > md->mapcount_max)
                                md->mapcount_max = count;
                        md->pages++;
                        if (PageAnon(page))
                                md->anon++;
                        md->node[page_to_nid(page)]++;
                }
        }
        spin_unlock(&mm->page_table_lock);
        return md;
}

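/*
 * Emit one /proc/<pid>/numa_maps line: the vma start address, the
 * memory policy in effect there, the aggregate counters gathered by
 * get_numa_maps(), and a per-node page count for every node that has
 * at least one page mapped.
 */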
static int show_numa_map(struct seq_file *m, void *v)
{
        struct task_struct *task = m->private;
        struct vm_area_struct *vma = v;
        struct mempolicy *pol;
        struct numa_maps *md;
        struct zone **z;
        int n;
        int first;

        if (!vma->vm_mm)
                return 0;

        md = get_numa_maps(vma);
        if (!md)
                return 0;

        seq_printf(m, "%08lx", vma->vm_start);
        pol = get_vma_policy(task, vma, vma->vm_start);
        /* Print policy */
        switch (pol->policy) {
        case MPOL_PREFERRED:
                seq_printf(m, " prefer=%d", pol->v.preferred_node);
                break;
        case MPOL_BIND:
                seq_printf(m, " bind={");
                first = 1;
                for (z = pol->v.zonelist->zones; *z; z++) {

                        if (!first)
                                seq_putc(m, ',');
                        else
                                first = 0;
                        seq_printf(m, "%d/%s", (*z)->zone_pgdat->node_id,
                                        (*z)->name);
                }
                seq_putc(m, '}');
                break;
        case MPOL_INTERLEAVE:
                seq_printf(m, " interleave={");
                first = 1;
                for_each_node(n) {
                        if (node_isset(n, pol->v.nodes)) {
                                if (!first)
                                        seq_putc(m, ',');
                                else
                                        first = 0;
                                seq_printf(m, "%d", n);
                        }
                }
                seq_putc(m, '}');
                break;
        default:
                seq_printf(m, " default");
                break;
        }
        seq_printf(m, " MaxRef=%lu Pages=%lu Mapped=%lu",
                        md->mapcount_max, md->pages, md->mapped);
        if (md->anon)
                seq_printf(m, " Anon=%lu", md->anon);

        for_each_online_node(n) {
                if (md->node[n])
                        seq_printf(m, " N%d=%lu", n, md->node[n]);
        }
        seq_putc(m, '\n');
        kfree(md);
        if (m->count < m->size) /* vma is copied successfully */
                m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
        return 0;
}

struct seq_operations proc_pid_numa_maps_op = {
        .start = m_start,
        .next  = m_next,
        .stop  = m_stop,
        .show  = show_numa_map
};
#endif
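
For reference, the text these seq_file handlers produce is read back as an ordinary procfs file. Below is a minimal userspace sketch (illustrative only, not part of task_mmu.c) that dumps the calling process's own map lines, i.e. the output generated by show_map() above:

/*
 * Userspace sketch: read back the lines show_map() generates for the
 * current process.  Illustrative only; not part of this kernel file.
 */
#include <stdio.h>

int main(void)
{
        char line[512];
        FILE *f = fopen("/proc/self/maps", "r");

        if (!f)
                return 1;
        /* Each line follows the seq_printf format in show_map_internal():
         * start-end perms offset major:minor inode [name or marker] */
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}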