Merge tag 'block-6.1-2022-10-20' of git://git.kernel.dk/linux
[linux-block.git] / arch / arm64 / kernel / elfcore.c
CommitLineData
6dd8b1a0
CM
1// SPDX-License-Identifier: GPL-2.0-only
2
3#include <linux/coredump.h>
4#include <linux/elfcore.h>
5#include <linux/kernel.h>
6#include <linux/mm.h>
7
8#include <asm/cpufeature.h>
9#include <asm/mte.h>
10
/*
 * Iterate over the VMAs of the current mm, visiting only those mapped
 * with PROT_MTE (VM_MTE).  The entire walk is skipped when the CPU does
 * not implement MTE.  Built from two unbraced 'if's around for_each_vma(),
 * so use it exactly like the other for_each_* iterators: a single
 * statement or a braced block as the body.
 */
#define for_each_mte_vma(vmi, vma)					\
	if (system_supports_mte())					\
		for_each_vma(vmi, vma)					\
			if (vma->vm_flags & VM_MTE)
15
16static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
17{
18 if (vma->vm_flags & VM_DONTDUMP)
19 return 0;
20
21 return vma_pages(vma) * MTE_PAGE_TAG_STORAGE;
22}
23
24/* Derived from dump_user_range(); start/end must be page-aligned */
25static int mte_dump_tag_range(struct coredump_params *cprm,
26 unsigned long start, unsigned long end)
27{
16decce2 28 int ret = 1;
6dd8b1a0 29 unsigned long addr;
16decce2 30 void *tags = NULL;
6dd8b1a0
CM
31
32 for (addr = start; addr < end; addr += PAGE_SIZE) {
6dd8b1a0
CM
33 struct page *page = get_dump_page(addr);
34
35 /*
36 * get_dump_page() returns NULL when encountering an empty
37 * page table entry that would otherwise have been filled with
38 * the zero page. Skip the equivalent tag dump which would
39 * have been all zeros.
40 */
41 if (!page) {
42 dump_skip(cprm, MTE_PAGE_TAG_STORAGE);
43 continue;
44 }
45
46 /*
47 * Pages mapped in user space as !pte_access_permitted() (e.g.
48 * PROT_EXEC only) may not have the PG_mte_tagged flag set.
49 */
50 if (!test_bit(PG_mte_tagged, &page->flags)) {
51 put_page(page);
52 dump_skip(cprm, MTE_PAGE_TAG_STORAGE);
53 continue;
54 }
55
16decce2
CM
56 if (!tags) {
57 tags = mte_allocate_tag_storage();
58 if (!tags) {
59 put_page(page);
60 ret = 0;
61 break;
62 }
63 }
64
6dd8b1a0
CM
65 mte_save_page_tags(page_address(page), tags);
66 put_page(page);
16decce2
CM
67 if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE)) {
68 mte_free_tag_storage(tags);
69 ret = 0;
70 break;
71 }
6dd8b1a0
CM
72 }
73
16decce2
CM
74 if (tags)
75 mte_free_tag_storage(tags);
76
77 return ret;
6dd8b1a0
CM
78}
79
80Elf_Half elf_core_extra_phdrs(void)
81{
82 struct vm_area_struct *vma;
83 int vma_count = 0;
ef770d18 84 VMA_ITERATOR(vmi, current->mm, 0);
6dd8b1a0 85
ef770d18 86 for_each_mte_vma(vmi, vma)
6dd8b1a0
CM
87 vma_count++;
88
89 return vma_count;
90}
91
92int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
93{
94 struct vm_area_struct *vma;
ef770d18 95 VMA_ITERATOR(vmi, current->mm, 0);
6dd8b1a0 96
ef770d18 97 for_each_mte_vma(vmi, vma) {
6dd8b1a0
CM
98 struct elf_phdr phdr;
99
c35fe2a6 100 phdr.p_type = PT_AARCH64_MEMTAG_MTE;
6dd8b1a0
CM
101 phdr.p_offset = offset;
102 phdr.p_vaddr = vma->vm_start;
103 phdr.p_paddr = 0;
104 phdr.p_filesz = mte_vma_tag_dump_size(vma);
105 phdr.p_memsz = vma->vm_end - vma->vm_start;
106 offset += phdr.p_filesz;
107 phdr.p_flags = 0;
108 phdr.p_align = 0;
109
110 if (!dump_emit(cprm, &phdr, sizeof(phdr)))
111 return 0;
112 }
113
114 return 1;
115}
116
117size_t elf_core_extra_data_size(void)
118{
119 struct vm_area_struct *vma;
120 size_t data_size = 0;
ef770d18 121 VMA_ITERATOR(vmi, current->mm, 0);
6dd8b1a0 122
ef770d18 123 for_each_mte_vma(vmi, vma)
6dd8b1a0
CM
124 data_size += mte_vma_tag_dump_size(vma);
125
126 return data_size;
127}
128
129int elf_core_write_extra_data(struct coredump_params *cprm)
130{
131 struct vm_area_struct *vma;
ef770d18 132 VMA_ITERATOR(vmi, current->mm, 0);
6dd8b1a0 133
ef770d18 134 for_each_mte_vma(vmi, vma) {
6dd8b1a0
CM
135 if (vma->vm_flags & VM_DONTDUMP)
136 continue;
137
138 if (!mte_dump_tag_range(cprm, vma->vm_start, vma->vm_end))
139 return 0;
140 }
141
142 return 1;
143}