/*
 *  arch/s390/lib/uaccess_pt.c
 *
 *  User access functions based on page table walks for enhanced
 *  system layout without hardware support.
 *
 *  Copyright IBM Corp. 2006
 *  Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

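/*
 * Resolve a fault on a user address: find the VMA, check access
 * permissions and call handle_mm_fault(). Returns 0 on success and
 * -EFAULT if the address is not mapped, the access type is not
 * permitted, or we are in atomic context and cannot sleep.
 */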
static int __handle_fault(struct mm_struct *mm, unsigned long address,
			  int write_access)
{
	struct vm_area_struct *vma;
	int ret = -EFAULT;

	if (in_atomic())
		return ret;
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (unlikely(!vma))
		goto out;
	if (unlikely(vma->vm_start > address)) {
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
		if (expand_stack(vma, address))
			goto out;
	}

	if (!write_access) {
		/* page not present, check vm flags */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto out;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto out;
	}

survive:
	switch (handle_mm_fault(mm, vma, address, write_access)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto out_sigbus;
	case VM_FAULT_OOM:
		goto out_of_memory;
	default:
		BUG();
	}
	ret = 0;
out:
	up_read(&mm->mmap_sem);
	return ret;

out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_init(current)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", current->comm);
	return ret;

out_sigbus:
	up_read(&mm->mmap_sem);
	current->thread.prot_addr = address;
	current->thread.trap_no = 0x11;
	force_sig(SIGBUS, current);
	return ret;
}

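/*
 * Copy between kernel and user space by walking the user page table
 * under mm->page_table_lock, one page at a time. On a missing or
 * write-protected page the lock is dropped, the fault is resolved via
 * __handle_fault() and the walk is retried. Returns the number of
 * bytes that could not be copied.
 */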
static size_t __user_copy_pt(unsigned long uaddr, void *kptr,
			     size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, size;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	void *from, *to;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pgd = pgd_offset(mm, uaddr);
		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
			goto fault;

		pmd = pmd_offset(pgd, uaddr);
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
			goto fault;

		pte = pte_offset_map(pmd, uaddr);
		if (!pte || !pte_present(*pte) ||
		    (write_user && !pte_write(*pte)))
			goto fault;

		pfn = pte_pfn(*pte);
		if (!pfn_valid(pfn))
			goto out;

		offset = uaddr & (PAGE_SIZE - 1);
		size = min(n - done, PAGE_SIZE - offset);
		if (write_user) {
			to = (void *)((pfn << PAGE_SHIFT) + offset);
			from = kptr + done;
		} else {
			from = (void *)((pfn << PAGE_SHIFT) + offset);
			to = kptr + done;
		}
		memcpy(to, from, size);
		done += size;
		uaddr += size;
	} while (done < n);
out:
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(mm, uaddr, write_user))
		return n - done;
	goto retry;
}

/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
static unsigned long __dat_user_addr(unsigned long uaddr)
{
	struct mm_struct *mm = current->mm;
	unsigned long pfn, ret;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int rc;

	ret = 0;
retry:
	pgd = pgd_offset(mm, uaddr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto fault;

	pmd = pmd_offset(pgd, uaddr);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		goto fault;

	pte = pte_offset_map(pmd, uaddr);
	if (!pte || !pte_present(*pte))
		goto fault;

	pfn = pte_pfn(*pte);
	if (!pfn_valid(pfn))
		goto out;

	ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
out:
	return ret;
fault:
	spin_unlock(&mm->page_table_lock);
	rc = __handle_fault(mm, uaddr, 0);
	spin_lock(&mm->page_table_lock);
	if (rc)
		goto out;
	goto retry;
}

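/*
 * Copy n bytes from user space to kernel space. Uncopied bytes at the
 * tail of the destination are zeroed, as the uaccess interface requires.
 * Returns the number of bytes that could not be copied.
 */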
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, (void __kernel __force *) from, n);
		return 0;
	}
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}

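/*
 * Copy n bytes from kernel space to user space. Returns the number of
 * bytes that could not be copied.
 */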
size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __kernel __force *) to, from, n);
		return 0;
	}
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}

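/*
 * Clear n bytes of user space by copying from empty_zero_page, at most
 * one page per iteration. Returns the number of bytes not cleared.
 */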
static size_t clear_user_pt(size_t n, void __user *to)
{
	long done, size, ret;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memset((void __kernel __force *) to, 0, n);
		return 0;
	}
	done = 0;
	do {
		if (n - done > PAGE_SIZE)
			size = PAGE_SIZE;
		else
			size = n - done;
		ret = __user_copy_pt((unsigned long) to + done,
				     &empty_zero_page, size, 1);
		done += size;
		if (ret)
			return ret + n - done;
	} while (done < n);
	return 0;
}

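/*
 * Determine the length of a user space string, including the
 * terminating '\0', up to a maximum of count bytes. Returns 0 on an
 * unresolvable fault.
 */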
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
	char *addr;
	unsigned long uaddr = (unsigned long) src;
	struct mm_struct *mm = current->mm;
	unsigned long offset, pfn, done, len;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	size_t len_str;

	if (segment_eq(get_fs(), KERNEL_DS))
		return strnlen((const char __kernel __force *) src, count) + 1;
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pgd = pgd_offset(mm, uaddr);
		if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
			goto fault;

		pmd = pmd_offset(pgd, uaddr);
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
			goto fault;

		pte = pte_offset_map(pmd, uaddr);
		if (!pte || !pte_present(*pte))
			goto fault;

		pfn = pte_pfn(*pte);
		if (!pfn_valid(pfn)) {
			done = -1;
			goto out;
		}

		offset = uaddr & (PAGE_SIZE - 1);
		addr = (char *)(pfn << PAGE_SHIFT) + offset;
		len = min(count - done, PAGE_SIZE - offset);
		len_str = strnlen(addr, len);
		done += len_str;
		uaddr += len_str;
	} while ((len_str == len) && (done < count));
out:
	spin_unlock(&mm->page_table_lock);
	return done + 1;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(mm, uaddr, 0))
		return 0;
	goto retry;
}

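/*
 * Copy a string from user space. Returns the length of the copied
 * string excluding the terminating '\0', count if no terminator was
 * found within count bytes, or -EFAULT on fault.
 */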
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
				   char *dst)
{
	size_t n = strnlen_user_pt(count, src);

	if (!n)
		return -EFAULT;
	if (n > count)
		n = count;
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(dst, (const char __kernel __force *) src, n);
		if (dst[n-1] == '\0')
			return n-1;
		else
			return n;
	}
	if (__user_copy_pt((unsigned long) src, dst, n, 0))
		return -EFAULT;
	if (dst[n-1] == '\0')
		return n-1;
	else
		return n;
}

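/*
 * Copy n bytes from one user space address to another. Both pages are
 * looked up under mm->page_table_lock; the chunk size per iteration is
 * limited by whichever page boundary comes first.
 */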
static size_t copy_in_user_pt(size_t n, void __user *to,
			      const void __user *from)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
		      uaddr, done, size;
	unsigned long uaddr_from = (unsigned long) from;
	unsigned long uaddr_to = (unsigned long) to;
	pgd_t *pgd_from, *pgd_to;
	pmd_t *pmd_from, *pmd_to;
	pte_t *pte_from, *pte_to;
	int write_user;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		pgd_from = pgd_offset(mm, uaddr_from);
		if (pgd_none(*pgd_from) || unlikely(pgd_bad(*pgd_from))) {
			uaddr = uaddr_from;
			write_user = 0;
			goto fault;
		}
		pgd_to = pgd_offset(mm, uaddr_to);
		if (pgd_none(*pgd_to) || unlikely(pgd_bad(*pgd_to))) {
			uaddr = uaddr_to;
			write_user = 1;
			goto fault;
		}

		pmd_from = pmd_offset(pgd_from, uaddr_from);
		if (pmd_none(*pmd_from) || unlikely(pmd_bad(*pmd_from))) {
			uaddr = uaddr_from;
			write_user = 0;
			goto fault;
		}
		pmd_to = pmd_offset(pgd_to, uaddr_to);
		if (pmd_none(*pmd_to) || unlikely(pmd_bad(*pmd_to))) {
			uaddr = uaddr_to;
			write_user = 1;
			goto fault;
		}

		pte_from = pte_offset_map(pmd_from, uaddr_from);
		if (!pte_from || !pte_present(*pte_from)) {
			uaddr = uaddr_from;
			write_user = 0;
			goto fault;
		}
		pte_to = pte_offset_map(pmd_to, uaddr_to);
		if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) {
			uaddr = uaddr_to;
			write_user = 1;
			goto fault;
		}

		pfn_from = pte_pfn(*pte_from);
		if (!pfn_valid(pfn_from))
			goto out;
		pfn_to = pte_pfn(*pte_to);
		if (!pfn_valid(pfn_to))
			goto out;

		offset_from = uaddr_from & (PAGE_SIZE - 1);
		offset_to = uaddr_to & (PAGE_SIZE - 1);
		offset_max = max(offset_from, offset_to);
		size = min(n - done, PAGE_SIZE - offset_max);

		memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
		       (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
		done += size;
		uaddr_from += size;
		uaddr_to += size;
	} while (done < n);
out:
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(mm, uaddr, write_user))
		return n - done;
	goto retry;
}

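/*
 * Load the old futex value, apply the operation and store the result
 * with compare-and-swap, retrying until the CS succeeds. Faults are
 * caught via the exception table and reported as -EFAULT.
 */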
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
	asm volatile("0: l   %1,0(%6)\n"				\
		     "1: " insn						\
		     "2: cs  %1,%2,0(%6)\n"				\
		     "3: jl  1b\n"					\
		     "   lhi %0,0\n"					\
		     "4:\n"						\
		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
		       "=m" (*uaddr)					\
		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
		       "m" (*uaddr) : "cc" );

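/*
 * Perform a futex operation on a user address. The address is
 * translated via __dat_user_addr() and the page is pinned with
 * get_page() so the operation can run outside page_table_lock.
 */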
int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
{
	int oldval = 0, newval, ret;

	spin_lock(&current->mm->page_table_lock);
	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("lr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		/* FUTEX_OP_ANDN is defined as oldval & ~oparg */
		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
				  ret, oldval, newval, uaddr, ~oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}
	put_page(virt_to_page(uaddr));
	*old = oldval;
	return ret;
}

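/*
 * Atomically compare and exchange a futex value in user space, using
 * the same translate-and-pin scheme as futex_atomic_op_pt().
 */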
int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
{
	int ret;

	spin_lock(&current->mm->page_table_lock);
	uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	asm volatile("   cs   %1,%4,0(%5)\n"
		     "0: lr   %0,%1\n"
		     "1:\n"
		     EX_TABLE(0b,1b)
		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
		     : "cc", "memory" );
	put_page(virt_to_page(uaddr));
	return ret;
}

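/*
 * The page table walk based uaccess operations, used when the hardware
 * lacks support for accessing the user address space directly.
 */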
struct uaccess_ops uaccess_pt = {
	.copy_from_user		= copy_from_user_pt,
	.copy_from_user_small	= copy_from_user_pt,
	.copy_to_user		= copy_to_user_pt,
	.copy_to_user_small	= copy_to_user_pt,
	.copy_in_user		= copy_in_user_pt,
	.clear_user		= clear_user_pt,
	.strnlen_user		= strnlen_user_pt,
	.strncpy_from_user	= strncpy_from_user_pt,
	.futex_atomic_op	= futex_atomic_op_pt,
	.futex_atomic_cmpxchg	= futex_atomic_cmpxchg_pt,
};