// SPDX-License-Identifier: GPL-2.0
/*
 *	Memory preserving reboot related code.
 *
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 */

#include <linux/errno.h>
#include <linux/crash_dump.h>
#include <linux/uaccess.h>
#include <linux/io.h>

14 | static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, |
15 | unsigned long offset, int userbuf, | |
16 | bool encrypted) | |
60e64d46 | 17 | { |
4ae362be | 18 | void *vaddr; |
60e64d46 VG |
19 | |
20 | if (!csize) | |
21 | return 0; | |
22 | ||
992b649a LJ |
23 | if (encrypted) |
24 | vaddr = (__force void *)ioremap_encrypted(pfn << PAGE_SHIFT, PAGE_SIZE); | |
25 | else | |
26 | vaddr = (__force void *)ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE); | |
27 | ||
af2d237b AM |
28 | if (!vaddr) |
29 | return -ENOMEM; | |
60e64d46 VG |
30 | |
31 | if (userbuf) { | |
992b649a LJ |
32 | if (copy_to_user((void __user *)buf, vaddr + offset, csize)) { |
33 | iounmap((void __iomem *)vaddr); | |
60e64d46 VG |
34 | return -EFAULT; |
35 | } | |
4ae362be | 36 | } else |
af2d237b | 37 | memcpy(buf, vaddr + offset, csize); |
60e64d46 | 38 | |
3ee48b6a | 39 | set_iounmap_nonlazy(); |
992b649a | 40 | iounmap((void __iomem *)vaddr); |
60e64d46 VG |
41 | return csize; |
42 | } | |
/**
 * copy_oldmem_page - copy one page of memory
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from the old kernel's memory. For this page, there is no pte
 * mapped in the current kernel. We stitch up a pte, similar to kmap_atomic.
 *
 * Return: number of bytes copied on success, 0 if @csize is zero, or a
 * negative error code (-ENOMEM, -EFAULT) on failure.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
			 unsigned long offset, int userbuf)
{
	return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, false);
}
/**
 * copy_oldmem_page_encrypted - same as copy_oldmem_page() above but ioremap the
 * memory with the encryption mask set to accommodate kdump on SME-enabled
 * machines.
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Return: number of bytes copied on success, 0 if @csize is zero, or a
 * negative error code (-ENOMEM, -EFAULT) on failure.
 */
ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
				   unsigned long offset, int userbuf)
{
	return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, true);
}