/*
 *  User access functions based on page table walks for enhanced
 *  system layout without hardware support.
 *
 *    Copyright IBM Corp. 2006, 2012
 *    Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

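/*
 * Kernel-space strnlen: register 0 is loaded with the ending character
 * (the terminating \0) for the hardware string-search instruction (srst),
 * which scans at most count bytes; the returned length includes the
 * terminator.
 */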
static size_t strnlen_kernel(size_t count, const char __user *src)
{
	register unsigned long reg0 asm("0") = 0UL;	/* srst ending character: \0 */
	unsigned long tmp1, tmp2;

	asm volatile(
		"   la    %0,1(%3)\n"	/* strnlen_kernel result includes the \0 */
		: "+a" (count), "+a" (src), "=a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return count;
}

static size_t copy_in_kernel(size_t count, void __user *to,
			     const void __user *from)
{
	unsigned long tmp1;

	asm volatile(
		"1: mvc   0(1,%1),0(%2)\n"	/* one-byte copy, fault fallback */
		"2: mvc   0(256,%1),0(%2)\n"	/* 256-byte block copy */
		"4: ex    %0,1b-0b(%3)\n"	/* 0..255 byte tail via ex of 1b */
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (count), "+a" (to), "+a" (from), "=a" (tmp1)
		: : "cc", "memory");
	return count;
}

#ifdef CONFIG_64BIT

/*
 * Returns kernel address for user virtual address. If the returned address is
 * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the
 * address contains the (negative) exception code.
 */
static unsigned long follow_table(struct mm_struct *mm,
				  unsigned long address, int write)
{
	unsigned long *table = (unsigned long *)__pa(mm->pgd);

	if (unlikely(address > mm->context.asce_limit - 1))
		return -0x38UL;		/* ASCE-type exception */
	switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table = table + ((address >> 53) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID))
			return -0x39UL;	/* region-first-translation exception */
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION2:
		table = table + ((address >> 42) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID))
			return -0x3aUL;	/* region-second-translation exception */
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION3:
		table = table + ((address >> 31) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INVALID))
			return -0x3bUL;	/* region-third-translation exception */
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table = table + ((address >> 20) & 0x7ff);
		if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
			return -0x10UL;	/* segment-translation exception */
		if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
			if (write && (*table & _SEGMENT_ENTRY_PROTECT))
				return -0x04UL;	/* protection exception */
			return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
				(address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
		}
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	table = table + ((address >> 12) & 0xff);
	if (unlikely(*table & _PAGE_INVALID))
		return -0x11UL;		/* page-translation exception */
	if (write && (*table & _PAGE_PROTECT))
		return -0x04UL;		/* protection exception */
	return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
}

#else /* CONFIG_64BIT */

static unsigned long follow_table(struct mm_struct *mm,
				  unsigned long address, int write)
{
	unsigned long *table = (unsigned long *)__pa(mm->pgd);

	table = table + ((address >> 20) & 0x7ff);
	if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
		return -0x10UL;		/* segment-translation exception */
	table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	table = table + ((address >> 12) & 0xff);
	if (unlikely(*table & _PAGE_INVALID))
		return -0x11UL;		/* page-translation exception */
	if (write && (*table & _PAGE_PROTECT))
		return -0x04UL;		/* protection exception */
	return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
}

#endif /* CONFIG_64BIT */

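/*
 * Typical caller pattern for follow_table(); an illustrative sketch only
 * (kbuf and len are placeholders), the real callers below add the
 * page-crossing loop and the page_table_lock handling:
 *
 *	kaddr = follow_table(mm, uaddr, 0);
 *	if (IS_ERR_VALUE(kaddr))
 *		return __handle_fault(uaddr, -kaddr, 0);
 *	memcpy(kbuf, (void *) kaddr, len);
 */
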
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
					     size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, done, size, kaddr;
	void *from, *to;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		kaddr = follow_table(mm, uaddr, write_user);
		if (IS_ERR_VALUE(kaddr))
			goto fault;
		offset = uaddr & ~PAGE_MASK;
		size = min(n - done, PAGE_SIZE - offset);
		/* copy direction depends on write_user */
		to = write_user ? (void *) kaddr : kptr;
		from = write_user ? kptr : (void *) kaddr;
		memcpy(to, from, size);
		done += size;
		uaddr += size;
		kptr += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -kaddr, write_user))
		return n - done;
	goto retry;
}

/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
static __always_inline unsigned long __dat_user_addr(unsigned long uaddr,
						     int write)
{
	struct mm_struct *mm = current->mm;
	unsigned long kaddr;
	int rc;

retry:
	kaddr = follow_table(mm, uaddr, write);
	if (!IS_ERR_VALUE(kaddr))
		return kaddr;
	spin_unlock(&mm->page_table_lock);
	rc = __handle_fault(uaddr, -kaddr, write);
	spin_lock(&mm->page_table_lock);
	if (!rc)
		goto retry;
	return 0;
}

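/*
 * Illustrative use of __dat_user_addr(), a sketch mirroring the futex
 * wrappers below (do_op() is a placeholder for the actual access):
 *
 *	spin_lock(&current->mm->page_table_lock);
 *	kaddr = __dat_user_addr(uaddr, 1);
 *	if (!kaddr) {
 *		spin_unlock(&current->mm->page_table_lock);
 *		return -EFAULT;
 *	}
 *	get_page(virt_to_page(kaddr));
 *	spin_unlock(&current->mm->page_table_lock);
 *	do_op((void *) kaddr);
 *	put_page(virt_to_page(kaddr));
 */
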
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS))
		return copy_in_kernel(n, (void __user *) to, from);
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);	/* clear the uncopied tail */
	return rc;
}

size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS))
		return copy_in_kernel(n, to, (void __user *) from);
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}

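/*
 * Clear user memory by copying from empty_zero_page, at most one page
 * per iteration.
 */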
static size_t clear_user_pt(size_t n, void __user *to)
{
	void *zpage = (void *) empty_zero_page;
	long done, size, ret;

	done = 0;
	do {
		size = min_t(long, n - done, PAGE_SIZE);
		/* pass size, not n: copy at most one page per iteration */
		if (segment_eq(get_fs(), KERNEL_DS))
			ret = copy_in_kernel(size, to, (void __user *) zpage);
		else
			ret = __user_copy_pt((unsigned long) to, zpage, size, 1);
		done += size;
		to += size;
		if (ret)
			return ret + n - done;
	} while (done < n);
	return 0;
}

static size_t strnlen_user_pt(size_t count, const char __user *src)
{
	unsigned long uaddr = (unsigned long) src;
	struct mm_struct *mm = current->mm;
	unsigned long offset, done, len, kaddr;
	size_t len_str;

	if (unlikely(!count))
		return 0;
	if (segment_eq(get_fs(), KERNEL_DS))
		return strnlen_kernel(count, src);
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		kaddr = follow_table(mm, uaddr, 0);
		if (IS_ERR_VALUE(kaddr))
			goto fault;
		offset = uaddr & ~PAGE_MASK;
		len = min(count - done, PAGE_SIZE - offset);
		len_str = strnlen((char *) kaddr, len);
		done += len_str;
		uaddr += len_str;
	} while ((len_str == len) && (done < count));
	spin_unlock(&mm->page_table_lock);
	return done + 1;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -kaddr, 0))
		return 0;
	goto retry;
}

static size_t strncpy_from_user_pt(size_t count, const char __user *src,
				   char *dst)
{
	size_t done, len, offset, len_str;

	if (unlikely(!count))
		return 0;
	done = 0;
	do {
		offset = (size_t)src & ~PAGE_MASK;
		len = min(count - done, PAGE_SIZE - offset);
		if (segment_eq(get_fs(), KERNEL_DS)) {
			if (copy_in_kernel(len, (void __user *) dst, src))
				return -EFAULT;
		} else {
			if (__user_copy_pt((unsigned long) src, dst, len, 0))
				return -EFAULT;
		}
		len_str = strnlen(dst, len);
		done += len_str;
		src += len_str;
		dst += len_str;
	} while ((len_str == len) && (done < count));
	return done;
}

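/*
 * User-to-user copy: source and destination are translated separately in
 * each iteration, and the chunk size is limited by the larger of the two
 * page offsets so that neither address crosses a page boundary within a
 * single memcpy.
 */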
static size_t copy_in_user_pt(size_t n, void __user *to,
			      const void __user *from)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset_max, uaddr, done, size, error_code;
	unsigned long uaddr_from = (unsigned long) from;
	unsigned long uaddr_to = (unsigned long) to;
	unsigned long kaddr_to, kaddr_from;
	int write_user;

	if (segment_eq(get_fs(), KERNEL_DS))
		return copy_in_kernel(n, to, from);
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		write_user = 0;
		uaddr = uaddr_from;
		kaddr_from = follow_table(mm, uaddr_from, 0);
		error_code = kaddr_from;
		if (IS_ERR_VALUE(error_code))
			goto fault;

		write_user = 1;
		uaddr = uaddr_to;
		kaddr_to = follow_table(mm, uaddr_to, 1);
		error_code = (unsigned long) kaddr_to;
		if (IS_ERR_VALUE(error_code))
			goto fault;

		offset_max = max(uaddr_from & ~PAGE_MASK,
				 uaddr_to & ~PAGE_MASK);
		size = min(n - done, PAGE_SIZE - offset_max);

		memcpy((void *) kaddr_to, (void *) kaddr_from, size);
		done += size;
		uaddr_from += size;
		uaddr_to += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -error_code, write_user))
		return n - done;
	goto retry;
}

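/*
 * Futex read-modify-write primitive: load the old value, let "insn"
 * compute the new value, then compare-and-swap (cs) it back, retrying
 * the computation if another CPU modified the word in between; a
 * faulting access branches to label 4 via the exception table and
 * leaves ret at -EFAULT.
 */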
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
	asm volatile("0: l   %1,0(%6)\n"				\
		     "1: " insn						\
		     "2: cs  %1,%2,0(%6)\n"				\
		     "3: jl  1b\n"					\
		     "   lhi %0,0\n"					\
		     "4:\n"						\
		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
		       "=m" (*uaddr)					\
		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
		       "m" (*uaddr) : "cc" );

static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
	int oldval = 0, newval, ret;

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("lr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}
	if (ret == 0)
		*old = oldval;
	return ret;
}

int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_op_pt(op, uaddr, oparg, old);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __force __user *)
		__dat_user_addr((__force unsigned long) uaddr, 1);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
	put_page(virt_to_page(uaddr));
	return ret;
}

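/*
 * Futex cmpxchg: a single compare-and-swap (cs) against the already
 * translated user address; oldval is updated with the value actually
 * found, and a fault yields -EFAULT through the exception table.
 */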
static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
				     u32 oldval, u32 newval)
{
	int ret;

	asm volatile("0: cs   %1,%4,0(%5)\n"
		     "1: la   %0,0\n"
		     "2:\n"
		     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
		     : "cc", "memory" );
	*uval = oldval;
	return ret;
}

int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
			    u32 oldval, u32 newval)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __force __user *)
		__dat_user_addr((__force unsigned long) uaddr, 1);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
	put_page(virt_to_page(uaddr));
	return ret;
}

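/*
 * Page-table-walk based backend for the uaccess operations, used when
 * hardware support (e.g. the mvcos instruction) is not available.
 */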
struct uaccess_ops uaccess_pt = {
	.copy_from_user = copy_from_user_pt,
	.copy_to_user = copy_to_user_pt,
	.copy_in_user = copy_in_user_pt,
	.clear_user = clear_user_pt,
	.strnlen_user = strnlen_user_pt,
	.strncpy_from_user = strncpy_from_user_pt,
	.futex_atomic_op = futex_atomic_op_pt,
	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
};