vmcore: convert __read_vmcore to use an iov_iter
[linux-2.6-block.git] / fs/proc/vmcore.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/proc/vmcore.c Interface for accessing the crash
 *                  dump from the system's previous life.
 * Heavily borrowed from fs/proc/kcore.c
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <linux/cc_platform.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes */
static size_t elfnotes_orig_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device Dump list and mutex to synchronize access to list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);

static bool vmcoredd_disabled;
core_param(novmcoredd, vmcoredd_disabled, bool, 0);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Device Dump Size */
static size_t vmcoredd_orig_sz;

static DEFINE_SPINLOCK(vmcore_cb_lock);
DEFINE_STATIC_SRCU(vmcore_cb_srcu);
/* List of registered vmcore callbacks. */
static LIST_HEAD(vmcore_cb_list);
/* Whether the vmcore has been opened once. */
static bool vmcore_opened;

void register_vmcore_cb(struct vmcore_cb *cb)
{
	INIT_LIST_HEAD(&cb->next);
	spin_lock(&vmcore_cb_lock);
	list_add_tail(&cb->next, &vmcore_cb_list);
	/*
	 * Registering a vmcore callback after the vmcore was opened is
	 * very unusual (e.g., manual driver loading).
	 */
	if (vmcore_opened)
		pr_warn_once("Unexpected vmcore callback registration\n");
	spin_unlock(&vmcore_cb_lock);
}
EXPORT_SYMBOL_GPL(register_vmcore_cb);

void unregister_vmcore_cb(struct vmcore_cb *cb)
{
	spin_lock(&vmcore_cb_lock);
	list_del_rcu(&cb->next);
	/*
	 * Unregistering a vmcore callback after the vmcore was opened is
	 * very unusual (e.g., forced driver removal), but we cannot stop
	 * unregistering.
	 */
	if (vmcore_opened)
		pr_warn_once("Unexpected vmcore callback unregistration\n");
	spin_unlock(&vmcore_cb_lock);

	synchronize_srcu(&vmcore_cb_srcu);
}
EXPORT_SYMBOL_GPL(unregister_vmcore_cb);

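/*
 * Returns true if the given pfn should be read/mapped as RAM from the old
 * kernel's memory. Registered vmcore callbacks may report a pfn as not
 * being RAM, in which case it is skipped (read as zeros or mapped to the
 * zero page).
 */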
static bool pfn_is_ram(unsigned long pfn)
{
	struct vmcore_cb *cb;
	bool ret = true;

	list_for_each_entry_srcu(cb, &vmcore_cb_list, next,
				 srcu_read_lock_held(&vmcore_cb_srcu)) {
		if (unlikely(!cb->pfn_is_ram))
			continue;
		ret = cb->pfn_is_ram(cb, pfn);
		if (!ret)
			break;
	}

	return ret;
}

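/*
 * Note that the vmcore has been opened; callback (un)registration after
 * this point is flagged as unexpected.
 */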
static int open_vmcore(struct inode *inode, struct file *file)
{
	spin_lock(&vmcore_cb_lock);
	vmcore_opened = true;
	spin_unlock(&vmcore_cb_lock);

	return 0;
}

/* Reads a page from the oldmem device from given offset. */
static ssize_t read_from_oldmem_iter(struct iov_iter *iter, size_t count,
				     u64 *ppos, bool encrypted)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;
	int idx;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	idx = srcu_read_lock(&vmcore_cb_srcu);
	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (!pfn_is_ram(pfn)) {
			tmp = iov_iter_zero(nr_bytes, iter);
		} else {
			if (encrypted)
				tmp = copy_oldmem_page_encrypted(iter, pfn,
								 nr_bytes,
								 offset);
			else
				tmp = copy_oldmem_page(iter, pfn, nr_bytes,
						       offset);
		}
		if (tmp < nr_bytes) {
			srcu_read_unlock(&vmcore_cb_srcu, idx);
			return -EFAULT;
		}

		*ppos += nr_bytes;
		count -= nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);
	srcu_read_unlock(&vmcore_cb_srcu, idx);

	return read;
}

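/*
 * Legacy wrapper around read_from_oldmem_iter(): builds an iov_iter over a
 * kernel or user buffer depending on @userbuf.
 */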
ssize_t read_from_oldmem(char *buf, size_t count,
			 u64 *ppos, int userbuf,
			 bool encrypted)
{
	struct iov_iter iter;
	struct iovec iov;
	struct kvec kvec;

	if (userbuf) {
		iov.iov_base = (__force void __user *)buf;
		iov.iov_len = count;
		iov_iter_init(&iter, READ, &iov, 1, count);
	} else {
		kvec.iov_base = buf;
		kvec.iov_len = count;
		iov_iter_kvec(&iter, READ, &kvec, 1, count);
	}

	return read_from_oldmem_iter(&iter, count, ppos, encrypted);
}

/*
 * Architectures may override this function to allocate ELF header in 2nd kernel
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free header
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from ELF header
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, false);
}

/*
 * Architectures may override this function to read from notes sections
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, cc_platform_has(CC_ATTR_MEM_ENCRYPT));
}

/*
 * Architectures may override this function to map oldmem
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	prot = pgprot_encrypted(prot);
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Architectures which support memory encryption override this.
 */
ssize_t __weak copy_oldmem_page_encrypted(struct iov_iter *iter,
		unsigned long pfn, size_t csize, unsigned long offset)
{
	return copy_oldmem_page(iter, pfn, csize, offset);
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
static int vmcoredd_copy_dumps(struct iov_iter *iter, u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (copy_to_iter(buf, tsz, iter) < tsz) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}

#ifdef CONFIG_MMU
static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
			       u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
							tsz)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise the number of bytes read is returned.
 */
static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (!iov_iter_count(iter) || *fpos >= vmcore_size)
		return 0;

	iov_iter_truncate(iter, vmcore_size - *fpos);

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, iov_iter_count(iter));
		if (copy_to_iter(elfcorebuf + *fpos, tsz, iter) < tsz)
			return -EFAULT;
		*fpos += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (!iov_iter_count(iter))
			return acc;
	}

	/* Read Elf note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)*fpos, iov_iter_count(iter));
			start = *fpos - elfcorebuf_sz;
			if (vmcoredd_copy_dumps(iter, start, tsz))
				return -EFAULT;

			*fpos += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!iov_iter_count(iter))
				return acc;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos,
			  iov_iter_count(iter));
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
		if (copy_to_iter(kaddr, tsz, iter) < tsz)
			return -EFAULT;

		*fpos += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (!iov_iter_count(iter))
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - *fpos,
					    iov_iter_count(iter));
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem_iter(iter, tsz, &start,
					cc_platform_has(CC_ATTR_MEM_ENCRYPT));
			if (tmp < 0)
				return tmp;
			*fpos += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!iov_iter_count(iter))
				return acc;
		}
	}

	return acc;
}

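/* The ->proc_read_iter() handler for /proc/vmcore; all reads go through
 * __read_vmcore().
 */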
static ssize_t read_vmcore(struct kiocb *iocb, struct iov_iter *iter)
{
	return __read_vmcore(iter, &iocb->ki_pos);
}

/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct iov_iter iter;
	struct kvec kvec;
	struct page *page;
	loff_t offset;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_SHIFT;
		kvec.iov_base = page_address(page);
		kvec.iov_len = PAGE_SIZE;
		iov_iter_kvec(&iter, READ, &kvec, 1, PAGE_SIZE);

		rc = __read_vmcore(&iter, &offset);
		if (rc < 0) {
			unlock_page(page);
			put_page(page);
			return vmf_error(rc);
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};

/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
	return vmalloc_user(size);
#else
	return vzalloc(size);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the continuous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap continuous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len, NULL);
	return -EAGAIN;
}

static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
				   unsigned long from, unsigned long pfn,
				   unsigned long size, pgprot_t prot)
{
	int ret, idx;

	/*
	 * Check if a callback was registered to avoid looping over all
	 * pages without a reason.
	 */
	idx = srcu_read_lock(&vmcore_cb_srcu);
	if (!list_empty(&vmcore_cb_list))
		ret = remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		ret = remap_oldmem_pfn_range(vma, from, pfn, size, prot);
	srcu_read_unlock(&vmcore_cb_srcu, idx);
	return ret;
}

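/*
 * mmap handler for /proc/vmcore: maps the ELF headers, the note segment and
 * the old kernel's memory regions into one virtually contiguous user mapping
 * in ELF layout.
 */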
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided. This also ensures that the device dumps and
		 * other elf notes can be properly mmaped at page aligned
		 * address.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
			u64 start_off;

			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)start, size);
			start_off = start - elfcorebuf_sz;
			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
						start_off, tsz))
				goto fail;

			size -= tsz;
			start += tsz;
			len += tsz;

			/* leave now if filled buffer already */
			if (!size)
				return 0;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, 0, tsz))
			goto fail;

		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif

static const struct proc_ops vmcore_proc_ops = {
	.proc_open	= open_vmcore,
	.proc_read_iter	= read_vmcore,
	.proc_lseek	= default_llseek,
	.proc_mmap	= mmap_vmcore,
};

static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
			   struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc=0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}

/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_mem.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf64
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * size of the buffer @notes_buf is equal to or larger than sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf64
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc=0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote=0, rc=0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header*/
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes. We need this to update the note
	 * header when the device dumps will be added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}

/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc=0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_mem.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf32
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * size of the buffer @notes_buf is equal to or larger than sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr has already been updated by update_note_header_size_elf32
 * and each of PT_NOTE program headers has actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc=0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote=0, rc=0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header*/
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes. We need this to update the note
	 * header when the device dumps will be added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}

/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
				    struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
	int rc=0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic Verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc=0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic Verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf32_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

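/*
 * Determine the ELF class of the crash dump header and parse it with the
 * matching (Elf64 or Elf32) helper.
 */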
static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc=0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/**
 * vmcoredd_write_header - Write vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills beginning of the dump's buffer with vmcore device dump header.
 */
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
				  u32 size)
{
	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
	vdd_hdr->n_type = NT_VMCOREDD;

	strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
		sizeof(vdd_hdr->name));
	memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
}

/**
 * vmcoredd_update_program_headers - Update all Elf program headers
 * @elfptr: Pointer to elf header
 * @elfnotesz: Size of elf notes aligned to page size
 * @vmcoreddsz: Size of device dumps to be added to elf note header
 *
 * Determine type of Elf header (Elf64 or Elf32) and update the elf note size.
 * Also update the offsets of all the program headers after the elf note header.
 */
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
					    size_t vmcoreddsz)
{
	unsigned char *e_ident = (unsigned char *)elfptr;
	u64 start, end, size;
	loff_t vmcore_off;
	u32 i;

	vmcore_off = elfcorebuf_sz + elfnotesz;

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	} else {
		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	}
}

/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * Elf header
 * @dump_size: Size of the current device dump to be added to total size
 *
 * Update the total size of all the device dumps and update the Elf program
 * headers. Calculate the new offsets for the vmcore list and update the
 * total vmcore size.
 */
static void vmcoredd_update_size(size_t dump_size)
{
	vmcoredd_orig_sz += dump_size;
	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
					vmcoredd_orig_sz);

	/* Update vmcore list offsets */
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);
	proc_vmcore->size = vmcore_size;
}

/**
 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
 * @data: dump info.
 *
 * Allocate a buffer and invoke the calling driver's dump collect routine.
 * Write Elf note at the beginning of the buffer to indicate vmcore device
 * dump and add the dump to global list.
 */
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
	struct vmcoredd_node *dump;
	void *buf = NULL;
	size_t data_size;
	int ret;

	if (vmcoredd_disabled) {
		pr_err_once("Device dump is disabled\n");
		return -EINVAL;
	}

	if (!data || !strlen(data->dump_name) ||
	    !data->vmcoredd_callback || !data->size)
		return -EINVAL;

	dump = vzalloc(sizeof(*dump));
	if (!dump) {
		ret = -ENOMEM;
		goto out_err;
	}

	/* Keep size of the buffer page aligned so that it can be mmaped */
	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
			    PAGE_SIZE);

	/* Allocate buffer for drivers to write their dumps */
	buf = vmcore_alloc_buf(data_size);
	if (!buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	vmcoredd_write_header(buf, data, data_size -
			      sizeof(struct vmcoredd_header));

	/* Invoke the driver's dump collection routine */
	ret = data->vmcoredd_callback(data, buf +
				      sizeof(struct vmcoredd_header));
	if (ret)
		goto out_err;

	dump->buf = buf;
	dump->size = data_size;

	/* Add the dump to driver sysfs list */
	mutex_lock(&vmcoredd_mutex);
	list_add_tail(&dump->list, &vmcoredd_list);
	mutex_unlock(&vmcoredd_mutex);

	vmcoredd_update_size(data_size);
	return 0;

out_err:
	vfree(buf);
	vfree(dump);

	return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Free all dumps in vmcore device dump list */
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
	mutex_lock(&vmcoredd_mutex);
	while (!list_empty(&vmcoredd_list)) {
		struct vmcoredd_node *dump;

		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
					list);
		list_del(&dump->list);
		vfree(dump->buf);
		vfree(dump);
	}
	mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate ELF header in 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
	 * then capture the dump.
	 */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	while (!list_empty(&vmcore_list)) {
		struct vmcore *m;

		m = list_first_entry(&vmcore_list, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();

	/* clear vmcore device dump list */
	vmcore_free_device_dumps();
}