parisc: optimizations in copy_thread() and friends
[linux-2.6-block.git] / arch / parisc / kernel / entry.S
CommitLineData
1da177e4
LT
1/*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 *
4 * kernel entry points (interruptions, system call wrappers)
5 * Copyright (C) 1999,2000 Philipp Rumpf
6 * Copyright (C) 1999 SuSE GmbH Nuernberg
7 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
8 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
0013a854 25#include <asm/asm-offsets.h>
1da177e4
LT
26
27/* we have the following possibilities to act on an interruption:
28 * - handle in assembly and use shadowed registers only
29 * - save registers to kernel stack and handle in assembly or C */
30
31
896a3756 32#include <asm/psw.h>
3d73cf5e 33#include <asm/cache.h> /* for L1_CACHE_SHIFT */
1da177e4
LT
34#include <asm/assembly.h> /* for LDREG/STREG defines */
35#include <asm/pgtable.h>
1da177e4
LT
36#include <asm/signal.h>
37#include <asm/unistd.h>
38#include <asm/thread_info.h>
39
c5e76552
HD
40#include <linux/linkage.h>
41
413059f2 42#ifdef CONFIG_64BIT
1da177e4
LT
43 .level 2.0w
44#else
1da177e4
LT
45 .level 2.0
46#endif
47
48 .import pa_dbit_lock,data
49
 50	 /* space_to_prot macro creates a prot id from a space id */
 51
 52#if (SPACEID_SHIFT) == 0
	/* No space-id shift configured: build the protection id by
	 * depositing the low 31 bits of \spc ending at bit 62 of \prot,
	 * zeroing all other bits (depd,z). */
 53	 .macro space_to_prot spc prot
 54	 depd,z \spc,62,31,\prot
 55	 .endm
 56#else
	/* Space ids are shifted: extract 32 bits of \spc ending at bit
	 * (64 - SPACEID_SHIFT) to undo the shift and form the prot id. */
 57	 .macro space_to_prot spc prot
 58	 extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
 59	 .endm
 60#endif
61
 62	 /* Switch to virtual mapping, trashing only %r1
	  *
	  * Zeroes sr4/sr5/sr6/sr7 (kernel space ids), preserving the old
	  * sr7 in sr3 only when it was non-zero (i.e. we came from user
	  * space), then loads KERNEL_PSW and uses the rfir instruction-
	  * queue reload (cr17 = IIASQ, cr18 = IIAOQ) to "return" to the
	  * local label 4: below with translation enabled. */
 63	 .macro virt_map
896a3756
GG
 64	 /* pcxt_ssm_bug */
 65	 rsm PSW_SM_I, %r0 /* barrier for "Relied upon Translation */
1da177e4
LT
 66	 mtsp %r0, %sr4
 67	 mtsp %r0, %sr5
896a3756
GG
 68	 mfsp %sr7, %r1
 69	 or,= %r0,%r1,%r0 /* Only save sr7 in sr3 if sr7 != 0 */
 70	 mtsp %r1, %sr3
 71	 tovirt_r1 %r29
 72	 load32 KERNEL_PSW, %r1
 73
 74	 rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */
1da177e4
LT
 75	 mtsp %r0, %sr6
 76	 mtsp %r0, %sr7
1da177e4
LT
 77	 mtctl %r0, %cr17 /* Clear IIASQ tail */
 78	 mtctl %r0, %cr17 /* Clear IIASQ head */
896a3756 79	 mtctl %r1, %ipsw
1da177e4
LT
	 /* Queue up 4: (and 4:+4) as the two IIAOQ entries so rfir
	  * resumes execution just past this macro, in virtual mode. */
 80	 load32 4f, %r1
 81	 mtctl %r1, %cr18 /* Set IIAOQ tail */
 82	 ldo 4(%r1), %r1
 83	 mtctl %r1, %cr18 /* Set IIAOQ head */
 84	 rfir
 85	 nop
864:
 87	 .endm
88
89 /*
90 * The "get_stack" macros are responsible for determining the
91 * kernel stack value.
92 *
1da177e4
LT
93 * If sr7 == 0
94 * Already using a kernel stack, so call the
95 * get_stack_use_r30 macro to push a pt_regs structure
96 * on the stack, and store registers there.
97 * else
98 * Need to set up a kernel stack, so call the
99 * get_stack_use_cr30 macro to set up a pointer
100 * to the pt_regs structure contained within the
101 * task pointer pointed to by cr30. Set the stack
102 * pointer to point to the end of the task structure.
103 *
1da177e4
LT
104 * Note that we use shadowed registers for temps until
105 * we can save %r26 and %r29. %r26 is used to preserve
106 * %r8 (a shadowed register) which temporarily contained
107 * either the fault type ("code") or the eirr. We need
108 * to use a non-shadowed register to carry the value over
109 * the rfir in virt_map. We use %r26 since this value winds
110 * up being passed as the argument to either do_cpu_irq_mask
111 * or handle_interruption. %r29 is used to hold a pointer
112 * the register save area, and once again, it needs to
113 * be a non-shadowed register so that it survives the rfir.
114 *
115 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
116 */
117
 118	 .macro get_stack_use_cr30
 119
 120	 /* we save the registers in the task struct */
 121
	 /* cr30 holds the thread_info pointer; convert to a physical
	  * address (translation may still be off) and chase it to the
	  * task's pt_regs save area. */
 122	 mfctl %cr30, %r1
 123	 tophys %r1,%r9
 124	 LDREG TI_TASK(%r9), %r1 /* thread_info -> task_struct */
 125	 tophys %r1,%r9
 126	 ldo TASK_REGS(%r9),%r9 /* %r9 = phys addr of task's pt_regs */
 127	 STREG %r30, PT_GR30(%r9)
 128	 STREG %r29,PT_GR29(%r9)
 129	 STREG %r26,PT_GR26(%r9)
 130	 copy %r9,%r29 /* %r29 = register save area (survives rfir) */
	 /* Kernel stack pointer = thread_info + aligned thread size
	  * (THREAD_SZ_ALGN includes room for a stack frame, see above). */
 131	 mfctl %cr30, %r1
 132	 ldo THREAD_SZ_ALGN(%r1), %r30
 133	 .endm
134
 135	 .macro get_stack_use_r30
 136
 137	 /* we put a struct pt_regs on the stack and save the registers there */
 138
	 /* Already on a kernel stack: push a pt_regs frame at the current
	  * %r30 (physical alias in %r9) and advance %r30 past it. */
 139	 tophys %r30,%r9
 140	 STREG %r30,PT_GR30(%r9)
 141	 ldo PT_SZ_ALGN(%r30),%r30
 142	 STREG %r29,PT_GR29(%r9)
 143	 STREG %r26,PT_GR26(%r9)
 144	 copy %r9,%r29 /* %r29 = register save area (survives rfir) */
 145	 .endm
146
	/* Undo get_stack_use_*: reload r1 and r30 from the pt_regs area
	 * pointed to by %r29, restoring %r29 itself last since it is the
	 * base register for the loads. */
 147	 .macro rest_stack
 148	 LDREG PT_GR1(%r29), %r1
 149	 LDREG PT_GR30(%r29),%r30
 150	 LDREG PT_GR29(%r29),%r29
 151	 .endm
152
 153	 /* default interruption handler
 154	 * (calls traps.c:handle_interruption) */
	 /* \code identifies the trap and is passed in shadowed %r8 in the
	  * branch delay slot; each vector slot is padded to 32 bytes. */
 155	 .macro def code
 156	 b intr_save
 157	 ldi \code, %r8
 158	 .align 32
 159	 .endm
160
 161	 /* Interrupt interruption handler
 162	 * (calls irq.c:do_cpu_irq_mask) */
	 /* Captures sr7 in %r16 in the delay slot so intr_extint can tell
	  * whether we interrupted user (sr7 != 0) or kernel context. */
 163	 .macro extint code
 164	 b intr_extint
 165	 mfsp %sr7,%r16
 166	 .align 32
 167	 .endm
168
169 .import os_hpmc, code
170
 171	 /* HPMC handler */
	 /* High Priority Machine Check vector slot. Firmware validates
	  * the checksum/address/length words below before dispatching to
	  * os_hpmc; they are filled in ("patched") at boot time. */
 172	 .macro hpmc code
 173	 nop /* must be a NOP, will be patched later */
 174	 load32 PA(os_hpmc), %r3
 175	 bv,n 0(%r3)
 176	 nop
 177	 .word 0 /* checksum (will be patched) */
 178	 .word PA(os_hpmc) /* address of handler */
 179	 .word 0 /* length of handler */
 180	 .endm
181
182 /*
183 * Performance Note: Instructions will be moved up into
184 * this part of the code later on, once we are sure
185 * that the tlb miss handlers are close to final form.
186 */
187
188 /* Register definitions for tlb miss handler macros */
189
25985edc
LDM
190 va = r8 /* virtual address for which the trap occurred */
191 spc = r24 /* space for which the trap occurred */
1da177e4 192
413059f2 193#ifndef CONFIG_64BIT
1da177e4
LT
194
195 /*
196 * itlb miss interruption handler (parisc 1.1 - 32 bit)
197 */
198
199 .macro itlb_11 code
200
201 mfctl %pcsq, spc
202 b itlb_miss_11
203 mfctl %pcoq, va
204
205 .align 32
206 .endm
207#endif
208
209 /*
210 * itlb miss interruption handler (parisc 2.0)
211 */
212
213 .macro itlb_20 code
214 mfctl %pcsq, spc
413059f2 215#ifdef CONFIG_64BIT
1da177e4
LT
216 b itlb_miss_20w
217#else
218 b itlb_miss_20
219#endif
220 mfctl %pcoq, va
221
222 .align 32
223 .endm
224
413059f2 225#ifndef CONFIG_64BIT
1da177e4
LT
226 /*
227 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
1da177e4
LT
228 */
229
230 .macro naitlb_11 code
231
232 mfctl %isr,spc
f311847c 233 b naitlb_miss_11
1da177e4 234 mfctl %ior,va
1da177e4
LT
235
236 .align 32
237 .endm
238#endif
239
240 /*
241 * naitlb miss interruption handler (parisc 2.0)
1da177e4
LT
242 */
243
244 .macro naitlb_20 code
245
246 mfctl %isr,spc
413059f2 247#ifdef CONFIG_64BIT
f311847c 248 b naitlb_miss_20w
1da177e4 249#else
f311847c 250 b naitlb_miss_20
1da177e4
LT
251#endif
252 mfctl %ior,va
1da177e4
LT
253
254 .align 32
255 .endm
256
413059f2 257#ifndef CONFIG_64BIT
1da177e4
LT
258 /*
259 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
260 */
261
262 .macro dtlb_11 code
263
264 mfctl %isr, spc
265 b dtlb_miss_11
266 mfctl %ior, va
267
268 .align 32
269 .endm
270#endif
271
272 /*
273 * dtlb miss interruption handler (parisc 2.0)
274 */
275
276 .macro dtlb_20 code
277
278 mfctl %isr, spc
413059f2 279#ifdef CONFIG_64BIT
1da177e4
LT
280 b dtlb_miss_20w
281#else
282 b dtlb_miss_20
283#endif
284 mfctl %ior, va
285
286 .align 32
287 .endm
288
413059f2 289#ifndef CONFIG_64BIT
1da177e4
LT
290 /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
291
292 .macro nadtlb_11 code
293
294 mfctl %isr,spc
295 b nadtlb_miss_11
296 mfctl %ior,va
297
298 .align 32
299 .endm
300#endif
301
302 /* nadtlb miss interruption handler (parisc 2.0) */
303
304 .macro nadtlb_20 code
305
306 mfctl %isr,spc
413059f2 307#ifdef CONFIG_64BIT
1da177e4
LT
308 b nadtlb_miss_20w
309#else
310 b nadtlb_miss_20
311#endif
312 mfctl %ior,va
313
314 .align 32
315 .endm
316
413059f2 317#ifndef CONFIG_64BIT
1da177e4
LT
318 /*
319 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
320 */
321
322 .macro dbit_11 code
323
324 mfctl %isr,spc
325 b dbit_trap_11
326 mfctl %ior,va
327
328 .align 32
329 .endm
330#endif
331
332 /*
333 * dirty bit trap interruption handler (parisc 2.0)
334 */
335
336 .macro dbit_20 code
337
338 mfctl %isr,spc
413059f2 339#ifdef CONFIG_64BIT
1da177e4
LT
340 b dbit_trap_20w
341#else
342 b dbit_trap_20
343#endif
344 mfctl %ior,va
345
346 .align 32
347 .endm
348
1da177e4
LT
 349	 /* In LP64, the space contains part of the upper 32 bits of the
 350	 * fault. We have to extract this and place it in the va,
 351	 * zeroing the corresponding bits in the space register */
	 /* 32-bit kernels need no adjustment, so the body is empty there. */
 352	 .macro space_adjust spc,va,tmp
413059f2 353#ifdef CONFIG_64BIT
1da177e4
LT
 354	 extrd,u \spc,63,SPACEID_SHIFT,\tmp /* grab low SPACEID_SHIFT bits of spc */
 355	 depd %r0,63,SPACEID_SHIFT,\spc /* ...clear them in spc... */
 356	 depd \tmp,31,SPACEID_SHIFT,\va /* ...and merge them into va's upper word */
 357#endif
 358	 .endm
359
360 .import swapper_pg_dir,code
361
 362	 /* Get the pgd. For faults on space zero (kernel space), this
 363	 * is simply swapper_pg_dir. For user space faults, the
 364	 * pgd is stored in %cr25 */
	 /* The or,COND(=) nullifies the following mfctl when \spc == 0,
	  * leaving the swapper_pg_dir physical address in \reg; otherwise
	  * \reg is overwritten with the user pgd from cr25. */
 365	 .macro get_pgd spc,reg
 366	 ldil L%PA(swapper_pg_dir),\reg
 367	 ldo R%PA(swapper_pg_dir)(\reg),\reg
 368	 or,COND(=) %r0,\spc,%r0
 369	 mfctl %cr25,\reg
 370	 .endm
371
 372	 /*
 373	 space_check(spc,tmp,fault)
 374
 375	 spc - The space we saw the fault with.
 376	 tmp - The place to store the current space.
 377	 fault - Function to call on failure.
 378
 379	 Only allow faults on different spaces from the
 380	 currently active one if we're the kernel
 381
 382	 */
	 /* Nullification trickery: if \spc != 0 the first or,COND(<>)
	  * skips "copy \spc,\tmp" so \tmp keeps the current sr7; if that
	  * sr7 is 0 (kernel) the second or,COND(=) skips the compare-and-
	  * branch, so kernel mode never faults out to \fault here. */
 383	 .macro space_check spc,tmp,fault
 384	 mfsp %sr7,\tmp
 385	 or,COND(<>) %r0,\spc,%r0 /* user may execute gateway page
 386	 * as kernel, so defeat the space
 387	 * check if it is */
 388	 copy \spc,\tmp
 389	 or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */
 390	 cmpb,COND(<>),n \tmp,\spc,\fault
 391	 .endm
392
393 /* Look up a PTE in a 2-Level scheme (faulting at each
394 * level if the entry isn't present
395 *
396 * NOTE: we use ldw even for LP64, since the short pointers
397 * can address up to 1TB
398 */
399 .macro L2_ptep pmd,pte,index,va,fault
400#if PT_NLEVELS == 3
9b437bca 401 extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
1da177e4 402#else
9b437bca 403 extru \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
1da177e4 404#endif
9b437bca 405 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
1da177e4
LT
406 copy %r0,\pte
407 ldw,s \index(\pmd),\pmd
408 bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
9b437bca 409 dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
1da177e4 410 copy \pmd,%r9
3d73cf5e 411 SHLREG %r9,PxD_VALUE_SHIFT,\pmd
9b437bca
JDA
412 extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
413 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
1da177e4
LT
414 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
415 LDREG %r0(\pmd),\pte /* pmd is now pte */
416 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
417 .endm
418
419 /* Look up PTE in a 3-Level scheme.
420 *
421 * Here we implement a Hybrid L2/L3 scheme: we allocate the
422 * first pmd adjacent to the pgd. This means that we can
423 * subtract a constant offset to get to it. The pmd and pgd
424 * sizes are arranged so that a single pmd covers 4GB (giving
425 * a full LP64 process access to 8TB) so our lookups are
426 * effectively L2 for the first 4GB of the kernel (i.e. for
427 * all ILP32 processes and all the kernel for machines with
428 * under 4GB of memory) */
429 .macro L3_ptep pgd,pte,index,va,fault
2fd83038 430#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
1da177e4
LT
431 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
432 copy %r0,\pte
2fd83038 433 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
1da177e4 434 ldw,s \index(\pgd),\pgd
2fd83038 435 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
1da177e4 436 bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault
2fd83038 437 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
1da177e4 438 shld \pgd,PxD_VALUE_SHIFT,\index
2fd83038 439 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
1da177e4 440 copy \index,\pgd
2fd83038 441 extrd,u,*<> \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
1da177e4 442 ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd
2fd83038 443#endif
1da177e4
LT
444 L2_ptep \pgd,\pte,\index,\va,\fault
445 .endm
446
 447	 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
 448	 * don't needlessly dirty the cache line if it was already set */
	 /* and,COND(<>) nullifies the STREG when (\pte & _PAGE_ACCESSED)
	  * is already non-zero, avoiding the write entirely. */
 449	 .macro update_ptep ptep,pte,tmp,tmp1
 450	 ldi _PAGE_ACCESSED,\tmp1
 451	 or \tmp1,\pte,\tmp
 452	 and,COND(<>) \tmp1,\pte,%r0
 453	 STREG \tmp,0(\ptep)
 454	 .endm
455
 456	 /* Set the dirty bit (and accessed bit). No need to be
 457	 * clever, this is only used from the dirty fault */
	 /* Unconditional read-modify-write of the PTE; \pte is updated
	  * in place and left holding the new value for the caller. */
 458	 .macro update_dirty ptep,pte,tmp
 459	 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
 460	 or \tmp,\pte,\pte
 461	 STREG \pte,0(\ptep)
 462	 .endm
463
afca2523
HD
464 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
465 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
466 #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
467
 468	 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	 /* Extract the PFN field from the software PTE (scaled by
	  * PAGE_ADD_SHIFT when the kernel page size exceeds the 4k CPU
	  * TLB page), then deposit the default page-size encoding in the
	  * low bits, yielding the format idtlbt/iitlbt expect. */
 469	 .macro convert_for_tlb_insert20 pte
 470	 extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
 471	 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
 472	 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
 473	 (63-58)+PAGE_ADD_SHIFT,\pte
 474	 .endm
475
1da177e4
LT
476 /* Convert the pte and prot to tlb insertion values. How
477 * this happens is quite subtle, read below */
478 .macro make_insert_tlb spc,pte,prot
479 space_to_prot \spc \prot /* create prot id from space */
480 /* The following is the real subtlety. This is depositing
481 * T <-> _PAGE_REFTRAP
482 * D <-> _PAGE_DIRTY
483 * B <-> _PAGE_DMB (memory break)
484 *
485 * Then incredible subtlety: The access rights are
486 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
487 * See 3-14 of the parisc 2.0 manual
488 *
489 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
490 * trigger an access rights trap in user space if the user
491 * tries to read an unreadable page */
492 depd \pte,8,7,\prot
493
494 /* PAGE_USER indicates the page can be read with user privileges,
495 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
496 * contains _PAGE_READ */
497 extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
498 depdi 7,11,3,\prot
499 /* If we're a gateway page, drop PL2 back to zero for promotion
500 * to kernel privilege (so we can execute the page as kernel).
501 * Any privilege promotion page always denys read and write */
502 extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0
503 depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */
504
2fd83038
HD
505 /* Enforce uncacheable pages.
506 * This should ONLY be use for MMIO on PA 2.0 machines.
507 * Memory/DMA is cache coherent on all PA2.0 machines we support
508 * (that means T-class is NOT supported) and the memory controllers
509 * on most of those machines only handles cache transactions.
510 */
511 extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
2678251b 512 depdi 1,12,1,\prot
1da177e4 513
2fd83038 514 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
afca2523 515 convert_for_tlb_insert20 \pte
1da177e4
LT
516 .endm
517
518 /* Identical macro to make_insert_tlb above, except it
519 * makes the tlb entry for the differently formatted pa11
520 * insertion instructions */
521 .macro make_insert_tlb_11 spc,pte,prot
522 zdep \spc,30,15,\prot
523 dep \pte,8,7,\prot
524 extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0
525 depi 1,12,1,\prot
526 extru,= \pte,_PAGE_USER_BIT,1,%r0
527 depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */
528 extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0
529 depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */
530
531 /* Get rid of prot bits and convert to page addr for iitlba */
532
1152a68c
HD
533 depi 0,31,ASM_PFN_PTE_SHIFT,\pte
534 SHRREG \pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
1da177e4
LT
535 .endm
536
 537	 /* This is for ILP32 PA2.0 only. The TLB insertion needs
 538	 * to extend into I/O space if the address is 0xfXXXXXXX
 539	 * so we extend the f's into the top word of the pte in
 540	 * this case */
	 /* extrd,s sign-extends a 4-bit field of \pte into \tmp; the
	  * addi,<> nullifies the final sign-extension unless that field
	  * was all ones (i.e. the address is in the 0xfXXXXXXX range). */
 541	 .macro f_extend pte,tmp
 542	 extrd,s \pte,42,4,\tmp
 543	 addi,<> 1,\tmp,%r0
 544	 extrd,s \pte,63,25,\pte
 545	 .endm
546
547 /* The alias region is an 8MB aligned 16MB to do clear and
548 * copy user pages at addresses congruent with the user
549 * virtual address.
550 *
551 * To use the alias page, you set %r26 up with the to TLB
552 * entry (identifying the physical page) and %r23 up with
553 * the from tlb entry (or nothing if only a to entry---for
554 * clear_user_page_asm) */
2f649c1f 555 .macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype
1da177e4
LT
556 cmpib,COND(<>),n 0,\spc,\fault
557 ldil L%(TMPALIAS_MAP_START),\tmp
413059f2 558#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
1da177e4
LT
559 /* on LP64, ldi will sign extend into the upper 32 bits,
560 * which is behaviour we don't want */
561 depdi 0,31,32,\tmp
562#endif
563 copy \va,\tmp1
9b437bca 564 depi 0,31,23,\tmp1
1da177e4 565 cmpb,COND(<>),n \tmp,\tmp1,\fault
f311847c
JB
566 mfctl %cr19,\tmp /* iir */
567 /* get the opcode (first six bits) into \tmp */
568 extrw,u \tmp,5,6,\tmp
569 /*
570 * Only setting the T bit prevents data cache movein
571 * Setting access rights to zero prevents instruction cache movein
572 *
573 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
574 * to type field and _PAGE_READ goes to top bit of PL1
575 */
576 ldi (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
577 /*
578 * so if the opcode is one (i.e. this is a memory management
579 * instruction) nullify the next load so \prot is only T.
580 * Otherwise this is a normal data operation
581 */
582 cmpiclr,= 0x01,\tmp,%r0
583 ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
2f649c1f 584.ifc \patype,20
1da177e4 585 depd,z \prot,8,7,\prot
2f649c1f
JB
586.else
587.ifc \patype,11
5e185581 588 depw,z \prot,8,7,\prot
2f649c1f
JB
589.else
590 .error "undefined PA type to do_alias"
591.endif
592.endif
1da177e4
LT
593 /*
594 * OK, it is in the temp alias region, check whether "from" or "to".
595 * Check "subtle" note in pacache.S re: r23/r26.
596 */
413059f2 597#ifdef CONFIG_64BIT
1da177e4
LT
598 extrd,u,*= \va,41,1,%r0
599#else
600 extrw,u,= \va,9,1,%r0
601#endif
602 or,COND(tr) %r23,%r0,\pte
603 or %r26,%r0,\pte
604 .endm
605
606
607 /*
608 * Align fault_vector_20 on 4K boundary so that both
609 * fault_vector_11 and fault_vector_20 are on the
610 * same page. This is only necessary as long as we
611 * write protect the kernel text, which we may stop
612 * doing once we use large page translations to cover
613 * the static part of the kernel address space.
614 */
615
dfcf753b 616 .text
1da177e4 617
873d50e2 618 .align PAGE_SIZE
1da177e4 619
c5e76552 620ENTRY(fault_vector_20)
1da177e4
LT
621 /* First vector is invalid (0) */
622 .ascii "cows can fly"
623 .byte 0
624 .align 32
625
626 hpmc 1
627 def 2
628 def 3
629 extint 4
630 def 5
631 itlb_20 6
632 def 7
633 def 8
634 def 9
635 def 10
636 def 11
637 def 12
638 def 13
639 def 14
640 dtlb_20 15
1da177e4 641 naitlb_20 16
1da177e4
LT
642 nadtlb_20 17
643 def 18
644 def 19
645 dbit_20 20
646 def 21
647 def 22
648 def 23
649 def 24
650 def 25
651 def 26
652 def 27
653 def 28
654 def 29
655 def 30
656 def 31
c5e76552 657END(fault_vector_20)
1da177e4 658
413059f2 659#ifndef CONFIG_64BIT
1da177e4 660
1da177e4
LT
661 .align 2048
662
c5e76552 663ENTRY(fault_vector_11)
1da177e4
LT
664 /* First vector is invalid (0) */
665 .ascii "cows can fly"
666 .byte 0
667 .align 32
668
669 hpmc 1
670 def 2
671 def 3
672 extint 4
673 def 5
674 itlb_11 6
675 def 7
676 def 8
677 def 9
678 def 10
679 def 11
680 def 12
681 def 13
682 def 14
683 dtlb_11 15
1da177e4 684 naitlb_11 16
1da177e4
LT
685 nadtlb_11 17
686 def 18
687 def 19
688 dbit_11 20
689 def 21
690 def 22
691 def 23
692 def 24
693 def 25
694 def 26
695 def 27
696 def 28
697 def 29
698 def 30
699 def 31
c5e76552 700END(fault_vector_11)
1da177e4
LT
701
702#endif
d7dd2ff1
JB
703 /* Fault vector is separately protected and *must* be on its own page */
704 .align PAGE_SIZE
705ENTRY(end_fault_vector)
1da177e4
LT
706
707 .import handle_interruption,code
708 .import do_cpu_irq_mask,code
709
1da177e4
LT
710 /*
711 * Child Returns here
712 *
a44e060f 713 * copy_thread moved args into task save area.
1da177e4
LT
714 */
715
c5e76552 716ENTRY(ret_from_kernel_thread)
1da177e4
LT
717
718 /* Call schedule_tail first though */
719 BL schedule_tail, %r2
720 nop
721
ff0ab8af 722 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1da177e4 723 LDREG TASK_PT_GR25(%r1), %r26
413059f2 724#ifdef CONFIG_64BIT
1da177e4 725 LDREG TASK_PT_GR27(%r1), %r27
1da177e4
LT
726#endif
727 LDREG TASK_PT_GR26(%r1), %r1
728 ble 0(%sr7, %r1)
729 copy %r31, %r2
730
413059f2 731#ifdef CONFIG_64BIT
1da177e4
LT
732 ldo -16(%r30),%r29 /* Reference param save area */
733 loadgp /* Thread could have been in a module */
734#endif
99ac7947 735#ifndef CONFIG_64BIT
1da177e4 736 b sys_exit
99ac7947
RC
737#else
738 load32 sys_exit, %r1
739 bv %r0(%r1)
740#endif
1da177e4 741 ldi 0, %r26
c5e76552 742ENDPROC(ret_from_kernel_thread)
1da177e4 743
4e5ed85a
AV
	/* Return path after a kernel-mode execve: point %r30 at the top
	 * of the new task's kernel stack (thread_info in cr30 plus the
	 * aligned thread size and frame) and join the common
	 * syscall_exit path. The ldo executes in the branch delay slot. */
 744ENTRY(ret_from_kernel_execve)
 745	 mfctl %cr30, %r1
ff0ab8af
AV
 746	 b syscall_exit /* forward */
 747	 ldo THREAD_SZ_ALGN+FRAME_SIZE(%r1), %r30
4e5ed85a 748ENDPROC(ret_from_kernel_execve)
1da177e4 749
1da177e4
LT
750
751 /*
752 * struct task_struct *_switch_to(struct task_struct *prev,
753 * struct task_struct *next)
754 *
755 * switch kernel stacks and return prev */
	/* Context switch: %r26 = prev task, %r25 = next task (see the C
	 * prototype above). Saves prev's callee-saved state and kernel
	 * stack/continuation PC into its task struct, then loads next's
	 * KPC/KSP and resumes it; prev is returned in %r28. */
c5e76552 756ENTRY(_switch_to)
1da177e4
LT
 757	 STREG %r2, -RP_OFFSET(%r30)
 758
618febd6 759	 callee_save_float
1da177e4
LT
 760	 callee_save
 761
	 /* prev will resume at _switch_to_ret when it is next switched in */
 762	 load32 _switch_to_ret, %r2
 763
 764	 STREG %r2, TASK_PT_KPC(%r26)
 765	 LDREG TASK_PT_KPC(%r25), %r2
 766
 767	 STREG %r30, TASK_PT_KSP(%r26)
 768	 LDREG TASK_PT_KSP(%r25), %r30
 769	 LDREG TASK_THREAD_INFO(%r25), %r25
	 /* branch to next's saved KPC; delay slot installs next's
	  * thread_info into cr30 */
 770	 bv %r0(%r2)
 771	 mtctl %r25,%cr30
 772
 773_switch_to_ret:
 774	 mtctl %r0, %cr0 /* Needed for single stepping */
 775	 callee_rest
618febd6 776	 callee_rest_float
1da177e4
LT
 777
	 /* return prev (%r26 on entry) in %r28, per the C prototype */
 778	 LDREG -RP_OFFSET(%r30), %r2
 779	 bv %r0(%r2)
 780	 copy %r26, %r28
c5e76552 781ENDPROC(_switch_to)
1da177e4
LT
782
783 /*
784 * Common rfi return path for interruptions, kernel execve, and
785 * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will
786 * return via this path if the signal was received when the process
787 * was running; if the process was blocked on a syscall then the
788 * normal syscall_exit path is used. All syscalls for traced
789 * proceses exit via intr_restore.
790 *
791 * XXX If any syscalls that change a processes space id ever exit
792 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
793 * adjust IASQ[0..1].
794 *
1da177e4
LT
795 */
796
873d50e2 797 .align PAGE_SIZE
1da177e4 798
c5e76552 799ENTRY(syscall_exit_rfi)
1da177e4
LT
800 mfctl %cr30,%r16
801 LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */
802 ldo TASK_REGS(%r16),%r16
803 /* Force iaoq to userspace, as the user has had access to our current
804 * context via sigcontext. Also Filter the PSW for the same reason.
805 */
806 LDREG PT_IAOQ0(%r16),%r19
807 depi 3,31,2,%r19
808 STREG %r19,PT_IAOQ0(%r16)
809 LDREG PT_IAOQ1(%r16),%r19
810 depi 3,31,2,%r19
811 STREG %r19,PT_IAOQ1(%r16)
812 LDREG PT_PSW(%r16),%r19
813 load32 USER_PSW_MASK,%r1
413059f2 814#ifdef CONFIG_64BIT
1da177e4
LT
815 load32 USER_PSW_HI_MASK,%r20
816 depd %r20,31,32,%r1
817#endif
818 and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
819 load32 USER_PSW,%r1
820 or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
821 STREG %r19,PT_PSW(%r16)
822
823 /*
824 * If we aren't being traced, we never saved space registers
825 * (we don't store them in the sigcontext), so set them
826 * to "proper" values now (otherwise we'll wind up restoring
827 * whatever was last stored in the task structure, which might
25985edc 828 * be inconsistent if an interrupt occurred while on the gateway
4b3f686d
ML
829 * page). Note that we may be "trashing" values the user put in
830 * them, but we don't support the user changing them.
1da177e4
LT
831 */
832
833 STREG %r0,PT_SR2(%r16)
834 mfsp %sr3,%r19
835 STREG %r19,PT_SR0(%r16)
836 STREG %r19,PT_SR1(%r16)
837 STREG %r19,PT_SR3(%r16)
838 STREG %r19,PT_SR4(%r16)
839 STREG %r19,PT_SR5(%r16)
840 STREG %r19,PT_SR6(%r16)
841 STREG %r19,PT_SR7(%r16)
842
843intr_return:
844 /* NOTE: Need to enable interrupts incase we schedule. */
845 ssm PSW_SM_I, %r0
846
1da177e4
LT
847intr_check_resched:
848
849 /* check for reschedule */
850 mfctl %cr30,%r1
851 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
852 bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
853
4650f0a5 854 .import do_notify_resume,code
1da177e4
LT
855intr_check_sig:
856 /* As above */
857 mfctl %cr30,%r1
4650f0a5 858 LDREG TI_FLAGS(%r1),%r19
6fd84c08 859 ldi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
4650f0a5
KM
860 and,COND(<>) %r19, %r20, %r0
861 b,n intr_restore /* skip past if we've nothing to do */
862
863 /* This check is critical to having LWS
864 * working. The IASQ is zero on the gateway
865 * page and we cannot deliver any signals until
866 * we get off the gateway page.
867 *
868 * Only do signals if we are returning to user space
869 */
870 LDREG PT_IASQ0(%r16), %r20
872f6deb 871 cmpib,COND(=),n 0,%r20,intr_restore /* backward */
4650f0a5 872 LDREG PT_IASQ1(%r16), %r20
872f6deb 873 cmpib,COND(=),n 0,%r20,intr_restore /* backward */
4650f0a5
KM
874
875 copy %r0, %r25 /* long in_syscall = 0 */
876#ifdef CONFIG_64BIT
877 ldo -16(%r30),%r29 /* Reference param save area */
878#endif
879
880 BL do_notify_resume,%r2
881 copy %r16, %r26 /* struct pt_regs *regs */
882
3fe4c55e 883 b,n intr_check_sig
1da177e4
LT
884
885intr_restore:
886 copy %r16,%r29
887 ldo PT_FR31(%r29),%r1
888 rest_fp %r1
889 rest_general %r29
890
896a3756
GG
891 /* inverse of virt_map */
892 pcxt_ssm_bug
893 rsm PSW_SM_QUIET,%r0 /* prepare for rfi */
1da177e4 894 tophys_r1 %r29
1da177e4
LT
895
896 /* Restore space id's and special cr's from PT_REGS
896a3756
GG
897 * structure pointed to by r29
898 */
1da177e4
LT
899 rest_specials %r29
900
896a3756
GG
901 /* IMPORTANT: rest_stack restores r29 last (we are using it)!
902 * It also restores r1 and r30.
903 */
1da177e4
LT
904 rest_stack
905
906 rfi
907 nop
1da177e4 908
50a34dbd
KM
909#ifndef CONFIG_PREEMPT
910# define intr_do_preempt intr_restore
911#endif /* !CONFIG_PREEMPT */
912
1da177e4
LT
913 .import schedule,code
914intr_do_resched:
50a34dbd
KM
915 /* Only call schedule on return to userspace. If we're returning
916 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
917 * we jump back to intr_restore.
918 */
1da177e4 919 LDREG PT_IASQ0(%r16), %r20
872f6deb 920 cmpib,COND(=) 0, %r20, intr_do_preempt
1da177e4
LT
921 nop
922 LDREG PT_IASQ1(%r16), %r20
872f6deb 923 cmpib,COND(=) 0, %r20, intr_do_preempt
1da177e4
LT
924 nop
925
413059f2 926#ifdef CONFIG_64BIT
1da177e4
LT
927 ldo -16(%r30),%r29 /* Reference param save area */
928#endif
929
930 ldil L%intr_check_sig, %r2
99ac7947 931#ifndef CONFIG_64BIT
1da177e4 932 b schedule
99ac7947
RC
933#else
934 load32 schedule, %r20
935 bv %r0(%r20)
936#endif
1da177e4
LT
937 ldo R%intr_check_sig(%r2), %r2
938
50a34dbd
KM
939 /* preempt the current task on returning to kernel
940 * mode from an interrupt, iff need_resched is set,
941 * and preempt_count is 0. otherwise, we continue on
942 * our merry way back to the current running task.
943 */
944#ifdef CONFIG_PREEMPT
945 .import preempt_schedule_irq,code
946intr_do_preempt:
947 rsm PSW_SM_I, %r0 /* disable interrupts */
948
949 /* current_thread_info()->preempt_count */
950 mfctl %cr30, %r1
951 LDREG TI_PRE_COUNT(%r1), %r19
872f6deb 952 cmpib,COND(<>) 0, %r19, intr_restore /* if preempt_count > 0 */
50a34dbd
KM
953 nop /* prev insn branched backwards */
954
955 /* check if we interrupted a critical path */
956 LDREG PT_PSW(%r16), %r20
957 bb,<,n %r20, 31 - PSW_SM_I, intr_restore
958 nop
959
960 BL preempt_schedule_irq, %r2
961 nop
962
9c2c5457 963 b,n intr_restore /* ssm PSW_SM_I done by intr_restore */
50a34dbd 964#endif /* CONFIG_PREEMPT */
1da177e4 965
1da177e4
LT
966 /*
967 * External interrupts.
968 */
969
970intr_extint:
872f6deb 971 cmpib,COND(=),n 0,%r16,1f
6cc4525d 972
1da177e4 973 get_stack_use_cr30
6cc4525d 974 b,n 2f
1da177e4
LT
975
9761:
1da177e4 977 get_stack_use_r30
6cc4525d 9782:
1da177e4
LT
979 save_specials %r29
980 virt_map
981 save_general %r29
982
983 ldo PT_FR0(%r29), %r24
984 save_fp %r24
985
986 loadgp
987
988 copy %r29, %r26 /* arg0 is pt_regs */
989 copy %r29, %r16 /* save pt_regs */
990
991 ldil L%intr_return, %r2
992
413059f2 993#ifdef CONFIG_64BIT
1da177e4
LT
994 ldo -16(%r30),%r29 /* Reference param save area */
995#endif
996
997 b do_cpu_irq_mask
998 ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
c5e76552 999ENDPROC(syscall_exit_rfi)
1da177e4
LT
1000
1001
1002 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */
1003
c5e76552 1004ENTRY(intr_save) /* for os_hpmc */
1da177e4 1005 mfsp %sr7,%r16
872f6deb 1006 cmpib,COND(=),n 0,%r16,1f
1da177e4
LT
1007 get_stack_use_cr30
1008 b 2f
1009 copy %r8,%r26
1010
10111:
1012 get_stack_use_r30
1013 copy %r8,%r26
1014
10152:
1016 save_specials %r29
1017
1018 /* If this trap is a itlb miss, skip saving/adjusting isr/ior */
1019
1020 /*
1021 * FIXME: 1) Use a #define for the hardwired "6" below (and in
1022 * traps.c.
1023 * 2) Once we start executing code above 4 Gb, we need
1024 * to adjust iasq/iaoq here in the same way we
1025 * adjust isr/ior below.
1026 */
1027
872f6deb 1028 cmpib,COND(=),n 6,%r26,skip_save_ior
1da177e4 1029
1da177e4
LT
1030
1031 mfctl %cr20, %r16 /* isr */
896a3756 1032 nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1da177e4
LT
1033 mfctl %cr21, %r17 /* ior */
1034
896a3756 1035
413059f2 1036#ifdef CONFIG_64BIT
1da177e4
LT
1037 /*
1038 * If the interrupted code was running with W bit off (32 bit),
1039 * clear the b bits (bits 0 & 1) in the ior.
896a3756 1040 * save_specials left ipsw value in r8 for us to test.
1da177e4
LT
1041 */
1042 extrd,u,*<> %r8,PSW_W_BIT,1,%r0
1043 depdi 0,1,2,%r17
1044
1045 /*
1046 * FIXME: This code has hardwired assumptions about the split
1047 * between space bits and offset bits. This will change
1048 * when we allow alternate page sizes.
1049 */
1050
1051 /* adjust isr/ior. */
2fd83038
HD
1052 extrd,u %r16,63,SPACEID_SHIFT,%r1 /* get high bits from isr for ior */
1053 depd %r1,31,SPACEID_SHIFT,%r17 /* deposit them into ior */
1054 depdi 0,63,SPACEID_SHIFT,%r16 /* clear them from isr */
1da177e4
LT
1055#endif
1056 STREG %r16, PT_ISR(%r29)
1057 STREG %r17, PT_IOR(%r29)
1058
1059
1060skip_save_ior:
1061 virt_map
1062 save_general %r29
1063
1064 ldo PT_FR0(%r29), %r25
1065 save_fp %r25
1066
1067 loadgp
1068
1069 copy %r29, %r25 /* arg1 is pt_regs */
413059f2 1070#ifdef CONFIG_64BIT
1da177e4
LT
1071 ldo -16(%r30),%r29 /* Reference param save area */
1072#endif
1073
1074 ldil L%intr_check_sig, %r2
1075 copy %r25, %r16 /* save pt_regs */
1076
1077 b handle_interruption
1078 ldo R%intr_check_sig(%r2), %r2
c5e76552 1079ENDPROC(intr_save)
1da177e4
LT
1080
1081
1082 /*
1083 * Note for all tlb miss handlers:
1084 *
1085 * cr24 contains a pointer to the kernel address space
1086 * page directory.
1087 *
1088 * cr25 contains a pointer to the current user address
1089 * space page directory.
1090 *
1091 * sr3 will contain the space id of the user address space
1092 * of the current running thread while that thread is
1093 * running in the kernel.
1094 */
1095
1096 /*
1097 * register number allocations. Note that these are all
1098 * in the shadowed registers
1099 */
1100
1101 t0 = r1 /* temporary register 0 */
25985edc 1102 va = r8 /* virtual address for which the trap occurred */
1da177e4
LT
1103 t1 = r9 /* temporary register 1 */
1104 pte = r16 /* pte/phys page # */
1105 prot = r17 /* prot bits */
25985edc 1106 spc = r24 /* space for which the trap occurred */
1da177e4
LT
1107 ptp = r25 /* page directory/page table pointer */
1108
413059f2 1109#ifdef CONFIG_64BIT
1da177e4
LT
1110
	/* 64-bit (wide, PA 2.0) data TLB miss handler: walk the 3-level page
	 * table for va/spc and insert the translation via idtlbt.  Runs on
	 * shadowed registers only (t0/t1/pte/prot/spc/ptp -- see the register
	 * allocation table above).  On a missing PTE, L3_ptep branches to the
	 * alias check; an invalid space goes to dtlb_fault. */
1111dtlb_miss_20w:
1112	space_adjust	spc,va,t0
1113	get_pgd		spc,ptp
1114	space_check	spc,t0,dtlb_fault

1116	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

1118	update_ptep	ptp,pte,t0,t1

1120	make_insert_tlb	spc,pte,prot
	
1122	idtlbt          pte,prot

1124	rfir
1125	nop

	/* No PTE: check whether this is a permitted access to a kernel
	 * alias mapping; otherwise do_alias falls through to dtlb_fault. */
1127dtlb_check_alias_20w:
2f649c1f 1128	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20
1da177e4
LT
1129
1130	idtlbt          pte,prot

1132	rfir
1133	nop

	/* Non-access data TLB miss (flush/probe class instructions), wide
	 * kernel.  A missing translation branches to nadtlb_check_alias_20w
	 * and ultimately to the nadtlb_emulate path instead of faulting. */
1135nadtlb_miss_20w:
1136	space_adjust	spc,va,t0
1137	get_pgd		spc,ptp
1138	space_check	spc,t0,nadtlb_fault

f311847c 1140	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w
1da177e4
LT
1141
1142	update_ptep	ptp,pte,t0,t1

1144	make_insert_tlb	spc,pte,prot

1146	idtlbt          pte,prot

1148	rfir
1149	nop

f311847c 1151nadtlb_check_alias_20w:
2f649c1f 1152	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1da177e4 1153
1da177e4
LT
1154	idtlbt          pte,prot

1156	rfir
1157	nop
1158
1159#else
1160
	/* PA 1.1 data TLB miss handler: 2-level page table walk, then a
	 * split idtlba/idtlbp insert.  PA 1.1 TLB inserts are addressed via
	 * a space register, so sr1 is saved/restored around the insert. */
1161dtlb_miss_11:
1162	get_pgd		spc,ptp

1164	space_check	spc,t0,dtlb_fault

1166	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

1168	update_ptep	ptp,pte,t0,t1

1170	make_insert_tlb_11	spc,pte,prot

1172	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
1173	mtsp		spc,%sr1

1175	idtlba		pte,(%sr1,va)
1176	idtlbp		prot,(%sr1,va)

1178	mtsp		t0, %sr1	/* Restore sr1 */

1180	rfir
1181	nop

	/* No PTE: kernel-alias check; do_alias exits to dtlb_fault when the
	 * address is not an alias mapping. */
1183dtlb_check_alias_11:
2f649c1f 1184	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11
1da177e4
LT
1185
1186	idtlba          pte,(va)
1187	idtlbp          prot,(va)

1189	rfir
1190	nop

	/* PA 1.1 non-access data TLB miss: as above, but a missing
	 * translation is routed to nadtlb_emulate rather than a fault. */
1192nadtlb_miss_11:
1193	get_pgd		spc,ptp

1195	space_check	spc,t0,nadtlb_fault

f311847c 1197	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11
1da177e4
LT
1198
1199	update_ptep	ptp,pte,t0,t1

1201	make_insert_tlb_11	spc,pte,prot


1204	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
1205	mtsp		spc,%sr1

1207	idtlba		pte,(%sr1,va)
1208	idtlbp		prot,(%sr1,va)

1210	mtsp		t0, %sr1	/* Restore sr1 */

1212	rfir
1213	nop

f311847c 1215nadtlb_check_alias_11:
2f649c1f 1216	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11
f311847c
JB
1217
1218	idtlba          pte,(va)
1219	idtlbp          prot,(va)

1221	rfir
1222	nop
1223
1da177e4
LT
	/* PA 2.0 narrow-kernel data TLB miss handler: 2-level walk, then
	 * f_extend widens the 32-bit PTE before the combined idtlbt insert. */
1224dtlb_miss_20:
1225	space_adjust	spc,va,t0
1226	get_pgd		spc,ptp
1227	space_check	spc,t0,dtlb_fault

1229	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

1231	update_ptep	ptp,pte,t0,t1

1233	make_insert_tlb	spc,pte,prot

1235	f_extend	pte,t0

1237	idtlbt          pte,prot

1239	rfir
1240	nop

	/* No PTE: kernel-alias check, falling through to dtlb_fault. */
1242dtlb_check_alias_20:
2f649c1f 1243	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20
1da177e4
LT
1244	
1245	idtlbt          pte,prot

1247	rfir
1248	nop

	/* PA 2.0 narrow non-access data TLB miss: missing translations go
	 * to nadtlb_emulate via the alias check instead of faulting. */
1250nadtlb_miss_20:
1251	get_pgd		spc,ptp

1253	space_check	spc,t0,nadtlb_fault

f311847c 1255	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20
1da177e4
LT
1256
1257	update_ptep	ptp,pte,t0,t1

1259	make_insert_tlb	spc,pte,prot

1261	f_extend	pte,t0
1262	
1263        idtlbt          pte,prot

1265	rfir
1266	nop

f311847c 1268nadtlb_check_alias_20:
2f649c1f 1269	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20
f311847c
JB
1270
1271	idtlbt          pte,prot

1273	rfir
1274	nop
1275
1da177e4
LT
1276#endif
1277
	/* Emulation path for non-access TLB misses with no valid
	 * translation: decode the faulting instruction from the IIR (cr19,
	 * loaded into r9) and emulate its side effects instead of inserting
	 * a TLB entry.  Uses get_register/set_register helpers, which
	 * return -1 in r1 for shadowed registers (forcing the slow path). */
1278nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */

1295	mfctl           %cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
1301	ldi             0x280,%r16
1302	and             %r9,%r16,%r17
1303	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
1304	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
1305	BL		get_register,%r25
1306	extrw,u         %r9,15,5,%r8           /* Get index register # */
872f6deb 1307	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
1da177e4
LT
1308	copy            %r1,%r24
1309	BL		get_register,%r25
1310	extrw,u         %r9,10,5,%r8           /* Get base register # */
872f6deb 1311	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
1da177e4
LT
1312	BL		set_register,%r25
1313	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

	/* Set the PSW N bit so the interrupted (faulting) instruction is
	 * nullified when execution resumes via rfir. */
1315nadtlb_nullify:
896a3756 1316	mfctl           %ipsw,%r8
1da177e4
LT
1317	ldil            L%PSW_N,%r9
1318	or              %r8,%r9,%r8            /* Set PSW_N */
896a3756 1319	mtctl           %r8,%ipsw
1da177e4
LT
1320
1321	rfir
1322	nop

	/*
	  When there is no translation for the probe address then we
	  must nullify the insn and return zero in the target register.
	  This will indicate to the calling code that it does not have
	  write/read privileges to this address.

	  This should technically work for prober and probew in PA 1.1,
	  and also probe,r and probe,w in PA 2.0

	  WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
	  THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

	 */
1337nadtlb_probe_check:
1338	ldi             0x80,%r16
1339	and             %r9,%r16,%r17
1340	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
1341	BL              get_register,%r25      /* Find the target register */
1342	extrw,u         %r9,31,5,%r8           /* Get target register */
872f6deb 1343	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
1da177e4
LT
1344	BL		set_register,%r25
1345	copy            %r0,%r1                /* Write zero to target register */
1346	b nadtlb_nullify                       /* Nullify return insn */
1347	nop
1348
1349
413059f2 1350#ifdef CONFIG_64BIT
1da177e4
LT
	/* 64-bit (wide) instruction TLB miss handler.  Same 3-level walk as
	 * the data side but inserts with iitlbt; no alias path for the
	 * access miss -- a missing PTE goes straight to itlb_fault. */
1351itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

1358	space_adjust	spc,va,t0
1359	get_pgd		spc,ptp
1360	space_check	spc,t0,itlb_fault

1362	L3_ptep		ptp,pte,t0,va,itlb_fault

1364	update_ptep	ptp,pte,t0,t1

1366	make_insert_tlb	spc,pte,prot
	
1368	iitlbt          pte,prot

1370	rfir
1371	nop

f311847c
JB
	/* Wide non-access instruction TLB miss; missing PTEs route through
	 * the alias check below. */
1373naitlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

1380	space_adjust	spc,va,t0
1381	get_pgd		spc,ptp
1382	space_check	spc,t0,naitlb_fault

1384	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

1386	update_ptep	ptp,pte,t0,t1

1388	make_insert_tlb	spc,pte,prot

1390	iitlbt          pte,prot

1392	rfir
1393	nop

1395naitlb_check_alias_20w:
2f649c1f 1396	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20
f311847c
JB
1397
1398	iitlbt		pte,prot

1400	rfir
1401	nop
1402
1da177e4
LT
1403#else
1404
	/* PA 1.1 instruction TLB miss handler: 2-level walk with split
	 * iitlba/iitlbp insert through sr1 (saved and restored around it). */
1405itlb_miss_11:
1406	get_pgd		spc,ptp

1408	space_check	spc,t0,itlb_fault

1410	L2_ptep		ptp,pte,t0,va,itlb_fault

1412	update_ptep	ptp,pte,t0,t1

1414	make_insert_tlb_11	spc,pte,prot

1416	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
1417	mtsp		spc,%sr1

1419	iitlba		pte,(%sr1,va)
1420	iitlbp		prot,(%sr1,va)

1422	mtsp		t0, %sr1	/* Restore sr1 */

1424	rfir
1425	nop

f311847c
JB
	/* PA 1.1 non-access instruction TLB miss. */
1427naitlb_miss_11:
1428	get_pgd		spc,ptp

1430	space_check	spc,t0,naitlb_fault

1432	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

1434	update_ptep	ptp,pte,t0,t1

1436	make_insert_tlb_11	spc,pte,prot

1438	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
1439	mtsp		spc,%sr1

1441	iitlba		pte,(%sr1,va)
1442	iitlbp		prot,(%sr1,va)

1444	mtsp		t0, %sr1	/* Restore sr1 */

1446	rfir
1447	nop

1449naitlb_check_alias_11:
2f649c1f 1450	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11
f311847c
JB
1451
1452	iitlba          pte,(%sr0, va)
1453	iitlbp          prot,(%sr0, va)

1455	rfir
1456	nop
1457
1458
1da177e4
LT
	/* PA 2.0 narrow-kernel instruction TLB miss handler: 2-level walk,
	 * f_extend widens the PTE, then a combined iitlbt insert. */
1459itlb_miss_20:
1460	get_pgd		spc,ptp

1462	space_check	spc,t0,itlb_fault

1464	L2_ptep		ptp,pte,t0,va,itlb_fault

1466	update_ptep	ptp,pte,t0,t1

1468	make_insert_tlb	spc,pte,prot

1470	f_extend	pte,t0	

1472	iitlbt          pte,prot

1474	rfir
1475	nop

f311847c
JB
	/* PA 2.0 narrow non-access instruction TLB miss. */
1477naitlb_miss_20:
1478	get_pgd		spc,ptp

1480	space_check	spc,t0,naitlb_fault

1482	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

1484	update_ptep	ptp,pte,t0,t1

1486	make_insert_tlb	spc,pte,prot

1488	f_extend	pte,t0

1490	iitlbt          pte,prot

1492	rfir
1493	nop

1495naitlb_check_alias_20:
2f649c1f 1496	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20
f311847c
JB
1497
1498	iitlbt          pte,prot

1500	rfir
1501	nop
1502
1da177e4
LT
1503#endif
1504
413059f2 1505#ifdef CONFIG_64BIT
1da177e4
LT
1506
	/* Wide-kernel dirty-bit trap: mark the PTE dirty and reinsert the
	 * translation.  On SMP the update is serialized with pa_dbit_lock
	 * via an LDCW spin; spc==0 (kernel space) skips the locking. */
1507dbit_trap_20w:
1508	space_adjust	spc,va,t0
1509	get_pgd		spc,ptp
1510	space_check	spc,t0,dbit_fault

1512	L3_ptep		ptp,pte,t0,va,dbit_fault

1514#ifdef CONFIG_SMP
872f6deb 1515	cmpib,COND(=),n        0,spc,dbit_nolock_20w
1da177e4
LT
1516	load32		PA(pa_dbit_lock),t0

1518dbit_spin_20w:
64f49532 1519	LDCW		0(t0),t1
872f6deb 1520	cmpib,COND(=)         0,t1,dbit_spin_20w
1da177e4
LT
1521	nop

1523dbit_nolock_20w:
1524#endif
1525	update_dirty	ptp,pte,t1

1527	make_insert_tlb	spc,pte,prot
		
1529	idtlbt          pte,prot
1530#ifdef CONFIG_SMP
872f6deb 1531	cmpib,COND(=),n        0,spc,dbit_nounlock_20w
1da177e4
LT
	/* release pa_dbit_lock: store nonzero so the next LDCW succeeds */
1532	ldi             1,t1
1533	stw             t1,0(t0)

1535dbit_nounlock_20w:
1536#endif

1538	rfir
1539	nop
1540#else
1541
	/* PA 1.1 dirty-bit trap: same pa_dbit_lock protocol as the wide
	 * version, with the split idtlba/idtlbp insert through sr1. */
1542dbit_trap_11:

1544	get_pgd		spc,ptp

1546	space_check	spc,t0,dbit_fault

1548	L2_ptep		ptp,pte,t0,va,dbit_fault

1550#ifdef CONFIG_SMP
872f6deb 1551	cmpib,COND(=),n        0,spc,dbit_nolock_11
1da177e4
LT
1552	load32		PA(pa_dbit_lock),t0

1554dbit_spin_11:
64f49532 1555	LDCW		0(t0),t1
1da177e4
LT
1556	cmpib,=         0,t1,dbit_spin_11
1557	nop

1559dbit_nolock_11:
1560#endif
1561	update_dirty	ptp,pte,t1

1563	make_insert_tlb_11	spc,pte,prot

1565	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
1566	mtsp		spc,%sr1

1568	idtlba		pte,(%sr1,va)
1569	idtlbp		prot,(%sr1,va)

1571	mtsp            t1, %sr1     /* Restore sr1 */
1572#ifdef CONFIG_SMP
872f6deb 1573	cmpib,COND(=),n        0,spc,dbit_nounlock_11
1da177e4
LT
	/* release pa_dbit_lock */
1574	ldi             1,t1
1575	stw             t1,0(t0)

1577dbit_nounlock_11:
1578#endif

1580	rfir
1581	nop
1582
	/* PA 2.0 narrow dirty-bit trap: f_extend before the combined insert;
	 * locking protocol identical to the other dbit handlers. */
1583dbit_trap_20:
1584	get_pgd		spc,ptp

1586	space_check	spc,t0,dbit_fault

1588	L2_ptep		ptp,pte,t0,va,dbit_fault

1590#ifdef CONFIG_SMP
872f6deb 1591	cmpib,COND(=),n        0,spc,dbit_nolock_20
1da177e4
LT
1592	load32		PA(pa_dbit_lock),t0

1594dbit_spin_20:
64f49532 1595	LDCW		0(t0),t1
1da177e4
LT
1596	cmpib,=         0,t1,dbit_spin_20
1597	nop

1599dbit_nolock_20:
1600#endif
1601	update_dirty	ptp,pte,t1

1603	make_insert_tlb	spc,pte,prot

1605	f_extend	pte,t1
	
1607        idtlbt          pte,prot

1609#ifdef CONFIG_SMP
872f6deb 1610	cmpib,COND(=),n        0,spc,dbit_nounlock_20
1da177e4
LT
	/* release pa_dbit_lock */
1611	ldi             1,t1
1612	stw             t1,0(t0)

1614dbit_nounlock_20:
1615#endif

1617	rfir
1618	nop
1619#endif
1620
	.import handle_interruption,code

	/* Slow-path fault stubs: each loads the interruption code into r8
	 * (which intr_save expects) and branches to intr_save for full
	 * register save and dispatch to C handle_interruption. */
1623kernel_bad_space:
1624	b               intr_save
1625	ldi             31,%r8  /* Use an unused code */

1627dbit_fault:
1628	b               intr_save
1629	ldi             20,%r8

1631itlb_fault:
1632	b               intr_save
1633	ldi             6,%r8	

1635nadtlb_fault:
1636	b               intr_save
1637	ldi             17,%r8

f311847c
JB
1639naitlb_fault:
1640	b               intr_save
1641	ldi             16,%r8

1da177e4
LT
1643dtlb_fault:
1644	b               intr_save
1645	ldi             15,%r8
1646
1647 /* Register saving semantics for system calls:
1648
1649 %r1 clobbered by system call macro in userspace
1650 %r2 saved in PT_REGS by gateway page
1651 %r3 - %r18 preserved by C code (saved by signal code)
1652 %r19 - %r20 saved in PT_REGS by gateway page
1653 %r21 - %r22 non-standard syscall args
1654 stored in kernel stack by gateway page
1655 %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
1656 %r27 - %r30 saved in PT_REGS by gateway page
1657 %r31 syscall return pointer
1658 */
1659
1660 /* Floating point registers (FIXME: what do we do with these?)
1661
1662 %fr0 - %fr3 status/exception, not preserved
1663 %fr4 - %fr7 arguments
1664 %fr8 - %fr11 not preserved by C code
1665 %fr12 - %fr21 preserved by C code
1666 %fr22 - %fr31 not preserved by C code
1667 */
1668
	/* Store the C callee-saved registers r3-r18 into the pt_regs
	 * structure pointed to by \regs (used before calls that may
	 * deliver a signal or switch context). */
	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG   %r10,PT_GR10(\regs)
	STREG   %r11,PT_GR11(\regs)
	STREG   %r12,PT_GR12(\regs)
	STREG   %r13,PT_GR13(\regs)
	STREG   %r14,PT_GR14(\regs)
	STREG   %r15,PT_GR15(\regs)
	STREG   %r16,PT_GR16(\regs)
	STREG   %r17,PT_GR17(\regs)
	STREG   %r18,PT_GR18(\regs)
	.endm
1687
	/* Reload r3-r18 from the pt_regs structure pointed to by \regs;
	 * inverse of reg_save. */
	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG   PT_GR10(\regs),%r10
	LDREG   PT_GR11(\regs),%r11
	LDREG   PT_GR12(\regs),%r12
	LDREG   PT_GR13(\regs),%r13
	LDREG   PT_GR14(\regs),%r14
	LDREG   PT_GR15(\regs),%r15
	LDREG   PT_GR16(\regs),%r16
	LDREG   PT_GR17(\regs),%r17
	LDREG   PT_GR18(\regs),%r18
	.endm
1706
	/* fork() entry: save callee-saved regs and cr27 (TLS pointer) into
	 * pt_regs, then tail-call sys_clone with SIGCHLD and the user sp
	 * (so fork is implemented as clone). */
c5e76552 1707ENTRY(sys_fork_wrapper)
1da177e4
LT
1708	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1709	ldo	TASK_REGS(%r1),%r1
1710	reg_save %r1
ff0ab8af
AV
1711	mfctl	%cr27, %r28
1712	STREG	%r28, PT_CR27(%r1)
1da177e4
LT
1713
	/* arg1 = user sp, arg2 = pt_regs, arg0 = SIGCHLD (in delay slot) */
1714	LDREG	PT_GR30(%r1),%r25
1715	copy	%r1,%r24
ff0ab8af 1716	b	sys_clone
1da177e4 1717	ldi	SIGCHLD,%r26
c5e76552 1718ENDPROC(sys_fork_wrapper)
1da177e4
LT
1719
1720 /* Set the return value for the child */
	/* First code run in a new child: finish the scheduler handoff,
	 * restore cr27 and callee-saved regs from pt_regs, and exit the
	 * syscall path with return value 0 (set in the delay slot). */
c5e76552 1721ENTRY(child_return)
1da177e4
LT
1722	BL	schedule_tail, %r2
1723	nop

ff0ab8af
AV
1725	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1726	ldo	TASK_REGS(%r1),%r1	/* get pt regs */

1728	LDREG	PT_CR27(%r1), %r3
1729	mtctl	%r3, %cr27
1730	reg_restore %r1
1731	b	syscall_exit
1da177e4 1732	copy	%r0,%r28
c5e76552 1733ENDPROC(child_return)
1da177e4 1734
c5e76552
HD
1735
	/* clone() entry: persist callee-saved regs and cr27 into pt_regs,
	 * then branch to sys_clone with pt_regs as arg2 (delay slot). */
1736ENTRY(sys_clone_wrapper)
1da177e4
LT
1737	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1738	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
1739	reg_save %r1
ff0ab8af
AV
1740	mfctl	%cr27, %r28
1741	STREG	%r28, PT_CR27(%r1)
1742	b	sys_clone
1da177e4 1743	copy	%r1,%r24
c5e76552 1744ENDPROC(sys_clone_wrapper)
1da177e4 1745
c5e76552
HD
1746
	/* vfork() entry: persist callee-saved regs and cr27, then branch
	 * to sys_vfork with pt_regs as arg0 (delay slot). */
1747ENTRY(sys_vfork_wrapper)
1da177e4
LT
1748	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1749	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
1750	reg_save %r1
ff0ab8af
AV
1751	mfctl	%cr27, %r28
1752	STREG	%r28, PT_CR27(%r1)
1da177e4 1753
ff0ab8af 1754	b	sys_vfork
1da177e4 1755	copy	%r1,%r26
c5e76552 1756ENDPROC(sys_vfork_wrapper)
1da177e4
LT
1757
1758
	/* rt_sigreturn() entry: call the C handler with pt_regs, then
	 * restore callee-saved regs from the (sigcontext-updated) pt_regs
	 * and jump through the saved r2 back into the syscall exit path. */
c5e76552 1759ENTRY(sys_rt_sigreturn_wrapper)
1da177e4
LT
1760	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
1761	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
1763	STREG	%r2, -RP_OFFSET(%r30)
413059f2 1764#ifdef CONFIG_64BIT
1da177e4
LT
1765	ldo	FRAME_SIZE(%r30), %r30
1766	BL	sys_rt_sigreturn,%r2
1767	ldo	-16(%r30),%r29		/* Reference param save area */
1768#else
1769	BL	sys_rt_sigreturn,%r2
1770	ldo	FRAME_SIZE(%r30), %r30
1771#endif

1773	ldo	-FRAME_SIZE(%r30), %r30
1774	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
1777	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1778	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
1779	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
1785	bv	%r0(%r2)
1786	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
c5e76552 1787ENDPROC(sys_rt_sigreturn_wrapper)
1da177e4 1788
	/* sigaltstack() entry: fetch the user stack pointer from pt_regs
	 * (arg2 to do_sigaltstack) and call the C implementation, saving
	 * and restoring rp around the call. */
c5e76552 1789ENTRY(sys_sigaltstack_wrapper)
1da177e4
LT
	/* Get the user stack pointer */
1791	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1792	ldo	TASK_REGS(%r1),%r24	/* get pt regs */
1793	LDREG	TASK_PT_GR30(%r24),%r24
1794	STREG	%r2, -RP_OFFSET(%r30)
413059f2 1795#ifdef CONFIG_64BIT
1da177e4 1796	ldo	FRAME_SIZE(%r30), %r30
df47b438 1797	BL	do_sigaltstack,%r2
1da177e4
LT
1798	ldo	-16(%r30),%r29		/* Reference param save area */
1799#else
df47b438 1800	BL	do_sigaltstack,%r2
1da177e4
LT
1801	ldo	FRAME_SIZE(%r30), %r30
1802#endif

1804	ldo	-FRAME_SIZE(%r30), %r30
1805	LDREG	-RP_OFFSET(%r30), %r2
1806	bv	%r0(%r2)
1807	nop
c5e76552 1808ENDPROC(sys_sigaltstack_wrapper)
1da177e4 1809
413059f2 1810#ifdef CONFIG_64BIT
	/* 32-bit-compat sigaltstack() entry (64-bit kernels only): same as
	 * above but calls do_sigaltstack32 for the compat structure layout. */
c5e76552 1811ENTRY(sys32_sigaltstack_wrapper)
1da177e4
LT
	/* Get the user stack pointer */
1813	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
1814	LDREG	TASK_PT_GR30(%r24),%r24
1815	STREG	%r2, -RP_OFFSET(%r30)
1816	ldo	FRAME_SIZE(%r30), %r30
df47b438 1817	BL	do_sigaltstack32,%r2
1da177e4
LT
1818	ldo	-16(%r30),%r29		/* Reference param save area */

1820	ldo	-FRAME_SIZE(%r30), %r30
1821	LDREG	-RP_OFFSET(%r30), %r2
1822	bv	%r0(%r2)
1823	nop
c5e76552 1824ENDPROC(sys32_sigaltstack_wrapper)
1da177e4
LT
1825#endif
1826
	/* Common syscall exit path: stores the return value, loops over
	 * reschedule/signal work (syscall_check_resched / syscall_check_sig),
	 * then restores user state.  Untraced tasks return via a fast
	 * branch-external; single/block-stepped tasks take the RFI path
	 * (syscall_restore_rfi) so the PSW R/T bits can be set. */
c5e76552 1827ENTRY(syscall_exit)
1da177e4
LT
	/* NOTE: HP-UX syscalls also come through here
	 * after hpux_syscall_exit fixes up return
	 * values. */

	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */

1839	mfctl	%cr30, %r1
1840	LDREG	TI_TASK(%r1),%r1
1841	STREG	%r28,TASK_PT_GR28(%r1)

1843#ifdef CONFIG_HPUX
1da177e4
LT
/* <linux/personality.h> cannot be easily included */
1845#define PER_HPUX 0x10
376e210b 1846	ldw	TASK_PERSONALITY(%r1),%r19
1da177e4
LT
1847
	/* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
1849	ldo	  -PER_HPUX(%r19), %r19
872f6deb 1850	cmpib,COND(<>),n 0,%r19,1f
1da177e4
LT
1851
	/* Save other hpux returns if personality is PER_HPUX */
1853	STREG     %r22,TASK_PT_GR22(%r1)
1854	STREG     %r29,TASK_PT_GR29(%r1)
18551:

1857#endif /* CONFIG_HPUX */

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
1862	loadgp

1da177e4
LT
1864syscall_check_resched:

	/* check for reschedule */

1868	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
1869	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

4650f0a5 1871	.import do_signal,code
1da177e4 1872syscall_check_sig:
4650f0a5 1873	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
6fd84c08 1874	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
4650f0a5
KM
1875	and,COND(<>)	%r19, %r26, %r0
1876	b,n	syscall_restore	/* skip past if we've nothing to do */

1878syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
1884	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1885	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
1886	reg_save %r26

1888#ifdef CONFIG_64BIT
1889	ldo	-16(%r30),%r29			/* Reference param save area */
1890#endif

1892	BL	do_notify_resume,%r2
1893	ldi	1, %r25				/* long in_syscall = 1 */

1895	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1896	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
1897	reg_restore %r20

	/* re-check: do_notify_resume may have raised more work */
1899	b,n     syscall_check_sig
1da177e4
LT
1900
1901syscall_restore:
1da177e4
LT
1902	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

ecd3d4bc
KM
	/* Are we being ptraced? */
1905	ldw	TASK_FLAGS(%r1),%r19
1906	ldi	(_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2
1907	and,COND(=)	%r19,%r2,%r0
1908	b,n	syscall_restore_rfi
1da177e4
LT
1909
1910	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
1911	rest_fp	%r19

1913	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
1914	mtsar	%r19

1916	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
1917	LDREG	TASK_PT_GR19(%r1),%r19
1918	LDREG   TASK_PT_GR20(%r1),%r20
1919	LDREG	TASK_PT_GR21(%r1),%r21
1920	LDREG	TASK_PT_GR22(%r1),%r22
1921	LDREG	TASK_PT_GR23(%r1),%r23
1922	LDREG	TASK_PT_GR24(%r1),%r24
1923	LDREG	TASK_PT_GR25(%r1),%r25
1924	LDREG	TASK_PT_GR26(%r1),%r26
1925	LDREG	TASK_PT_GR27(%r1),%r27	   /* restore user dp */
1926	LDREG	TASK_PT_GR28(%r1),%r28	   /* syscall return value */
1927	LDREG	TASK_PT_GR29(%r1),%r29
1928	LDREG	TASK_PT_GR31(%r1),%r31	   /* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
8f6c0c2b 1931	LDREG	TASK_PT_GR30(%r1),%r1		   /* Get user sp */
1da177e4 1932	rsm     PSW_SM_I, %r0
8f6c0c2b
JDA
1933	copy    %r1,%r30                           /* Restore user sp */
1934	mfsp	%sr3,%r1			   /* Get user space id */
1da177e4
LT
1935	mtsp	%r1,%sr7			   /* Restore sr7 */
1936	ssm     PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
1939	mtsp	%r0,%sr2 
1940	mtsp	%r1,%sr4			   /* Restore sr4 */
1941	mtsp	%r1,%sr5			   /* Restore sr5 */
1942	mtsp	%r1,%sr6			   /* Restore sr6 */

1944	depi	3,31,2,%r31			   /* ensure return to user mode. */

413059f2 1946#ifdef CONFIG_64BIT
1da177e4
LT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
1951	extrd,u,*<>	%r30,63,1,%r1
1952	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
1954	xor	%r30,%r1,%r30
1955#endif
1956	be,n    0(%sr3,%r31)                       /* return to user space */

	/* We have to return via an RFI, so that PSW T and R bits can be set
	 * appropriately.
	 * This sets up pt_regs so we can return via intr_restore, which is not
	 * the most efficient way of doing things, but it works.
	 */
1963syscall_restore_rfi:
1964	ldo	-1(%r0),%r2			   /* Set recovery cntr to -1 */
1965	mtctl	%r2,%cr0			   /*   for immediate trap */
1966	LDREG	TASK_PT_PSW(%r1),%r2		   /* Get old PSW */
1967	ldi	0x0b,%r20			   /* Create new PSW */
1968	depi	-1,13,1,%r20			   /* C, Q, D, and I bits */

ecd3d4bc
KM
	/* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
	 * set in thread_info.h and converted to PA bitmap
1da177e4
LT
	 * numbers in asm-offsets.c */

ecd3d4bc
KM
	/* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
1975	extru,=	%r19,TIF_SINGLESTEP_PA_BIT,1,%r0
1da177e4
LT
1976	depi	-1,27,1,%r20			   /* R bit */

ecd3d4bc
KM
	/* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
1979	extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
1da177e4
LT
1980	depi	-1,7,1,%r20			   /* T bit */

1982	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

1986	mfsp    %sr3,%r25
1987	STREG   %r25,TASK_PT_SR3(%r1)
1988	STREG   %r25,TASK_PT_SR4(%r1)
1989	STREG   %r25,TASK_PT_SR5(%r1)
1990	STREG   %r25,TASK_PT_SR6(%r1)
1991	STREG   %r25,TASK_PT_SR7(%r1)
1992	STREG   %r25,TASK_PT_IASQ0(%r1)
1993	STREG   %r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
2004	bb,<	%r2,30,pt_regs_ok		   /* Branch if D set */
2005	ldo	TASK_REGS(%r1),%r25
2006	reg_save %r25				   /* Save r3 to r18 */

	/* Save the current sr */
2009	mfsp	%sr0,%r2
2010	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
2013	mfsp	%sr1,%r2
2014	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
2017	STREG	%r0,TASK_PT_SR2(%r1)

2019pt_regs_ok:
2020	LDREG	TASK_PT_GR31(%r1),%r2
2021	depi	3,31,2,%r2			   /* ensure return to user mode. */
2022	STREG	%r2,TASK_PT_IAOQ0(%r1)
2023	ldo	4(%r2),%r2
2024	STREG	%r2,TASK_PT_IAOQ1(%r1)
2025	copy	%r25,%r16
2026	b	intr_restore
2027	nop

1da177e4
LT
2029	.import schedule,code
2030syscall_do_resched:
2031	BL	schedule,%r2
413059f2 2032#ifdef CONFIG_64BIT
1da177e4
LT
2033	ldo	-16(%r30),%r29		/* Reference param save area */
2034#else
2035	nop
2036#endif
72738a96 2037	b	syscall_check_resched	/* if resched, we start over again */
1da177e4 2038	nop
c5e76552 2039ENDPROC(syscall_exit)
1da177e4 2040
c5e76552 2041
d75f054a
HD
2042#ifdef CONFIG_FUNCTION_TRACER
	.import ftrace_function_trampoline,code
	/* Compiler-inserted profiling hook: forwards to the C trampoline
	 * with the caller's r3 as the third argument. */
2044ENTRY(_mcount)
2045	copy	%r3, %arg2
2046	b	ftrace_function_trampoline
2047	nop
2048ENDPROC(_mcount)

	/* Return path for the function-graph tracer: hand ret0/ret1 to
	 * ftrace_return_to_handler, which returns the real caller address;
	 * return_trampoline then restores the original return values. */
2050ENTRY(return_to_handler)
2051	load32	return_trampoline, %rp
2052	copy	%ret0, %arg0
2053	copy	%ret1, %arg1
2054	b	ftrace_return_to_handler
2055	nop
2056return_trampoline:
2057	copy	%ret0, %rp
2058	copy	%r23, %ret0
2059	copy	%r24, %ret1

.globl ftrace_stub
2062ftrace_stub:
2063	bv	%r0(%rp)
2064	nop
2065ENDPROC(return_to_handler)
2066#endif	/* CONFIG_FUNCTION_TRACER */
2067
2068
	/* Jump-table helper: blr indexes into the bv/copy pairs below by
	 * the register number in r8; each entry returns through r25. */
bcc0e04c 2069get_register:
1da177e4
LT
	/*
	 * get_register is used by the non access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 */
1da177e4
LT
2080	blr     %r8,%r0
2081	nop
2082	bv      %r0(%r25)    /* r0 */
2083	copy    %r0,%r1
2084	bv      %r0(%r25)    /* r1 - shadowed */
2085	ldi     -1,%r1
2086	bv      %r0(%r25)    /* r2 */
2087	copy    %r2,%r1
2088	bv      %r0(%r25)    /* r3 */
2089	copy    %r3,%r1
2090	bv      %r0(%r25)    /* r4 */
2091	copy    %r4,%r1
2092	bv      %r0(%r25)    /* r5 */
2093	copy    %r5,%r1
2094	bv      %r0(%r25)    /* r6 */
2095	copy    %r6,%r1
2096	bv      %r0(%r25)    /* r7 */
2097	copy    %r7,%r1
2098	bv      %r0(%r25)    /* r8 - shadowed */
2099	ldi     -1,%r1
2100	bv      %r0(%r25)    /* r9 - shadowed */
2101	ldi     -1,%r1
2102	bv      %r0(%r25)    /* r10 */
2103	copy    %r10,%r1
2104	bv      %r0(%r25)    /* r11 */
2105	copy    %r11,%r1
2106	bv      %r0(%r25)    /* r12 */
2107	copy    %r12,%r1
2108	bv      %r0(%r25)    /* r13 */
2109	copy    %r13,%r1
2110	bv      %r0(%r25)    /* r14 */
2111	copy    %r14,%r1
2112	bv      %r0(%r25)    /* r15 */
2113	copy    %r15,%r1
2114	bv      %r0(%r25)    /* r16 - shadowed */
2115	ldi     -1,%r1
2116	bv      %r0(%r25)    /* r17 - shadowed */
2117	ldi     -1,%r1
2118	bv      %r0(%r25)    /* r18 */
2119	copy    %r18,%r1
2120	bv      %r0(%r25)    /* r19 */
2121	copy    %r19,%r1
2122	bv      %r0(%r25)    /* r20 */
2123	copy    %r20,%r1
2124	bv      %r0(%r25)    /* r21 */
2125	copy    %r21,%r1
2126	bv      %r0(%r25)    /* r22 */
2127	copy    %r22,%r1
2128	bv      %r0(%r25)    /* r23 */
2129	copy    %r23,%r1
2130	bv      %r0(%r25)    /* r24 - shadowed */
2131	ldi     -1,%r1
2132	bv      %r0(%r25)    /* r25 - shadowed */
2133	ldi     -1,%r1
2134	bv      %r0(%r25)    /* r26 */
2135	copy    %r26,%r1
2136	bv      %r0(%r25)    /* r27 */
2137	copy    %r27,%r1
2138	bv      %r0(%r25)    /* r28 */
2139	copy    %r28,%r1
2140	bv      %r0(%r25)    /* r29 */
2141	copy    %r29,%r1
2142	bv      %r0(%r25)    /* r30 */
2143	copy    %r30,%r1
2144	bv      %r0(%r25)    /* r31 */
2145	copy    %r31,%r1
c5e76552 2147
	/* Jump-table helper (inverse of get_register): blr indexes by the
	 * register number in r8 and writes r1 into it; returns via r25. */
bcc0e04c 2148set_register:
1da177e4
LT
	/*
	 * set_register is used by the non access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */
1da177e4
LT
2154	blr     %r8,%r0
2155	nop
2156	bv      %r0(%r25)    /* r0 (silly, but it is a place holder) */
2157	copy    %r1,%r0
2158	bv      %r0(%r25)    /* r1 */
2159	copy    %r1,%r1
2160	bv      %r0(%r25)    /* r2 */
2161	copy    %r1,%r2
2162	bv      %r0(%r25)    /* r3 */
2163	copy    %r1,%r3
2164	bv      %r0(%r25)    /* r4 */
2165	copy    %r1,%r4
2166	bv      %r0(%r25)    /* r5 */
2167	copy    %r1,%r5
2168	bv      %r0(%r25)    /* r6 */
2169	copy    %r1,%r6
2170	bv      %r0(%r25)    /* r7 */
2171	copy    %r1,%r7
2172	bv      %r0(%r25)    /* r8 */
2173	copy    %r1,%r8
2174	bv      %r0(%r25)    /* r9 */
2175	copy    %r1,%r9
2176	bv      %r0(%r25)    /* r10 */
2177	copy    %r1,%r10
2178	bv      %r0(%r25)    /* r11 */
2179	copy    %r1,%r11
2180	bv      %r0(%r25)    /* r12 */
2181	copy    %r1,%r12
2182	bv      %r0(%r25)    /* r13 */
2183	copy    %r1,%r13
2184	bv      %r0(%r25)    /* r14 */
2185	copy    %r1,%r14
2186	bv      %r0(%r25)    /* r15 */
2187	copy    %r1,%r15
2188	bv      %r0(%r25)    /* r16 */
2189	copy    %r1,%r16
2190	bv      %r0(%r25)    /* r17 */
2191	copy    %r1,%r17
2192	bv      %r0(%r25)    /* r18 */
2193	copy    %r1,%r18
2194	bv      %r0(%r25)    /* r19 */
2195	copy    %r1,%r19
2196	bv      %r0(%r25)    /* r20 */
2197	copy    %r1,%r20
2198	bv      %r0(%r25)    /* r21 */
2199	copy    %r1,%r21
2200	bv      %r0(%r25)    /* r22 */
2201	copy    %r1,%r22
2202	bv      %r0(%r25)    /* r23 */
2203	copy    %r1,%r23
2204	bv      %r0(%r25)    /* r24 */
2205	copy    %r1,%r24
2206	bv      %r0(%r25)    /* r25 */
2207	copy    %r1,%r25
2208	bv      %r0(%r25)    /* r26 */
2209	copy    %r1,%r26
2210	bv      %r0(%r25)    /* r27 */
2211	copy    %r1,%r27
2212	bv      %r0(%r25)    /* r28 */
2213	copy    %r1,%r28
2214	bv      %r0(%r25)    /* r29 */
2215	copy    %r1,%r29
2216	bv      %r0(%r25)    /* r30 */
2217	copy    %r1,%r30
2218	bv      %r0(%r25)    /* r31 */
2219	copy    %r1,%r31
c5e76552 2220