s390/mm: allocate Real Memory Copy Area in decompressor
arch/s390/mm/maccess.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <linux/uio.h>
#include <asm/asm-extable.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>
#include <asm/abs_lowcore.h>
#include <asm/stacktrace.h>
#include <asm/maccess.h>

unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
static DEFINE_MUTEX(memcpy_real_mutex);

static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long aligned, offset, count;
	char tmp[8];

	aligned = (unsigned long) dst & ~7UL;
	offset = (unsigned long) dst & 7UL;
	size = min(8UL - offset, size);
	count = size - 1;
	asm volatile(
		"	bras	1,0f\n"		/* r1 = address of the mvc template below */
		"	mvc	0(1,%4),0(%5)\n"	/* template: copy bytes from src into tmp at offset */
		"0:	mvc	0(8,%3),0(%0)\n"	/* read the aligned doubleword at dst into tmp */
		"	ex	%1,0(1)\n"		/* run the template with length "count" (size - 1) */
		"	lg	%1,0(%3)\n"		/* load the modified doubleword from tmp */
		"	lra	%0,0(%0)\n"		/* translate the aligned dst address to a real address */
		"	sturg	%1,%0\n"	/* store using real address, bypassing DAT */
		: "+&a" (aligned), "+&a" (count), "=m" (tmp)
		: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
		: "cc", "memory", "1");
	return size;
}

/*
 * s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination using the sturg instruction.
 * Therefore we have a read-modify-write sequence: the function reads eight
 * bytes from the destination at an eight byte boundary, modifies the bytes
 * requested and writes the result back in a loop.
 */
static DEFINE_SPINLOCK(s390_kernel_write_lock);

notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
{
	void *tmp = dst;
	unsigned long flags;
	long copied;

	spin_lock_irqsave(&s390_kernel_write_lock, flags);
	while (size) {
		copied = s390_kernel_write_odd(tmp, src, size);
		tmp += copied;
		src += copied;
		size -= copied;
	}
	spin_unlock_irqrestore(&s390_kernel_write_lock, flags);

	return dst;
}
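
/*
 * Illustrative usage sketch, not part of the original file: on s390 the
 * arch code uses s390_kernel_write() to patch otherwise read-only kernel
 * memory (for example kernel text touched by ftrace or jump labels), since
 * the sturg-based write bypasses DAT and page protection.  The wrapper and
 * its parameter names below are hypothetical.
 */
static void example_patch_ro_kernel_bytes(void *dst, const void *new_bytes,
					  size_t len)
{
	/* Copies "len" bytes to "dst" regardless of write protection. */
	s390_kernel_write(dst, new_bytes, len);
}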

/*
 * Copy memory from a physical ("real") source address to an iov_iter by
 * temporarily mapping each source page read-only at the Real Memory Copy
 * Area (__memcpy_real_area).
 */
size_t memcpy_real_iter(struct iov_iter *iter, unsigned long src, size_t count)
{
	size_t len, copied, res = 0;
	unsigned long phys, offset;
	void *chunk;
	pte_t pte;

	while (count) {
		phys = src & PAGE_MASK;
		offset = src & ~PAGE_MASK;
		chunk = (void *)(__memcpy_real_area + offset);
		len = min(count, PAGE_SIZE - offset);
		pte = mk_pte_phys(phys, PAGE_KERNEL_RO);

		mutex_lock(&memcpy_real_mutex);
		if (pte_val(pte) != pte_val(*memcpy_real_ptep)) {
			__ptep_ipte(__memcpy_real_area, memcpy_real_ptep, 0, 0, IPTE_GLOBAL);
			set_pte(memcpy_real_ptep, pte);
		}
		copied = copy_to_iter(chunk, len, iter);
		mutex_unlock(&memcpy_real_mutex);

		count -= copied;
		src += copied;
		res += copied;
		if (copied < len)
			break;
	}
	return res;
}

int memcpy_real(void *dest, unsigned long src, size_t count)
{
	struct iov_iter iter;
	struct kvec kvec;

	kvec.iov_base = dest;
	kvec.iov_len = count;
	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
	if (memcpy_real_iter(&iter, src, count) < count)
		return -EFAULT;
	return 0;
}
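
/*
 * Illustrative usage sketch, not part of the original file: memcpy_real()
 * copies from a physical ("real") address into a kernel buffer and is the
 * kind of interface dump and debug code would use to read memory that may
 * not be covered by the kernel's normal mappings.  The helper name below is
 * hypothetical.
 */
static int example_read_phys(void *buf, unsigned long phys_addr, size_t len)
{
	/* Returns 0 on success, -EFAULT if fewer than "len" bytes were copied. */
	return memcpy_real(buf, phys_addr, len);
}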

/*
 * Find CPU that owns swapped prefix page
 */
static int get_swapped_owner(phys_addr_t addr)
{
	phys_addr_t lc;
	int cpu;

	for_each_online_cpu(cpu) {
		lc = virt_to_phys(lowcore_ptr[cpu]);
		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
			continue;
		return cpu;
	}
	return -1;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer is at most one page in size.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	void *ptr = phys_to_virt(addr);
	void *bounce = ptr;
	struct lowcore *abs_lc;
	unsigned long flags;
	unsigned long size;
	int this_cpu, cpu;

	cpus_read_lock();
	this_cpu = get_cpu();
	if (addr >= sizeof(struct lowcore)) {
		cpu = get_swapped_owner(addr);
		if (cpu < 0)
			goto out;
	}
	bounce = (void *)__get_free_page(GFP_ATOMIC);
	if (!bounce)
		goto out;
	size = PAGE_SIZE - (addr & ~PAGE_MASK);
	if (addr < sizeof(struct lowcore)) {
		abs_lc = get_abs_lowcore(&flags);
		ptr = (void *)abs_lc + addr;
		memcpy(bounce, ptr, size);
		put_abs_lowcore(abs_lc, flags);
	} else if (cpu == this_cpu) {
		ptr = (void *)(addr - virt_to_phys(lowcore_ptr[cpu]));
		memcpy(bounce, ptr, size);
	} else {
		memcpy(bounce, ptr, size);
	}
out:
	put_cpu();
	cpus_read_unlock();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *ptr)
{
	if (addr != virt_to_phys(ptr))
		free_page((unsigned long)ptr);
}
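
/*
 * Illustrative usage sketch, not part of the original file: the expected
 * pairing of the two helpers above, roughly as a /dev/mem-style read path
 * would use them.  When the requested page is a (swapped) prefix page,
 * xlate_dev_mem_ptr() returns a temporary bounce page that
 * unxlate_dev_mem_ptr() frees again; the function and variable names below
 * are hypothetical.
 */
static ssize_t example_dev_mem_read(phys_addr_t addr, void *buf, size_t len)
{
	void *ptr;

	ptr = xlate_dev_mem_ptr(addr);
	if (!ptr)
		return -EFAULT;
	/* Do not cross a page boundary; the bounce copy is one page at most. */
	len = min_t(size_t, len, PAGE_SIZE - offset_in_page(addr));
	memcpy(buf, ptr, len);
	unxlate_dev_mem_ptr(addr, ptr);
	return len;
}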