Fix common misspellings
[linux-block.git] / arch / parisc / kernel / entry.S
CommitLineData
1da177e4
LT
1/*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 *
4 * kernel entry points (interruptions, system call wrappers)
5 * Copyright (C) 1999,2000 Philipp Rumpf
6 * Copyright (C) 1999 SuSE GmbH Nuernberg
7 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
8 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
0013a854 25#include <asm/asm-offsets.h>
1da177e4
LT
26
27/* we have the following possibilities to act on an interruption:
28 * - handle in assembly and use shadowed registers only
29 * - save registers to kernel stack and handle in assembly or C */
30
31
896a3756 32#include <asm/psw.h>
3d73cf5e 33#include <asm/cache.h> /* for L1_CACHE_SHIFT */
1da177e4
LT
34#include <asm/assembly.h> /* for LDREG/STREG defines */
35#include <asm/pgtable.h>
1da177e4
LT
36#include <asm/signal.h>
37#include <asm/unistd.h>
38#include <asm/thread_info.h>
39
c5e76552
HD
40#include <linux/linkage.h>
41
413059f2 42#ifdef CONFIG_64BIT
1da177e4
LT
43 .level 2.0w
44#else
1da177e4
LT
45 .level 2.0
46#endif
47
48 .import pa_dbit_lock,data
49
50	 /* space_to_prot macro creates a prot id from a space id */
51
52#if (SPACEID_SHIFT) == 0
	/* SPACEID_SHIFT == 0: deposit the space id directly into the
	 * protection-id position (depd,z zeroes all other bits of \prot). */
53	.macro  space_to_prot spc prot
54	depd,z  \spc,62,31,\prot
55	.endm
56#else
	/* Non-zero SPACEID_SHIFT: extract the space-id bits instead so the
	 * shifted space layout still yields a usable protection id. */
57	.macro  space_to_prot spc prot
58	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
59	.endm
60#endif
61
62	/* Switch to virtual mapping, trashing only %r1 */
	/* Clears sr4-sr7 (kernel space ids), saving the old user sr7 into
	 * sr3 only when it was non-zero, then builds KERNEL_PSW and rfir's
	 * to local label 4 below to resume with translation enabled. */
63	.macro  virt_map
896a3756
GG
64	/* pcxt_ssm_bug */
65	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation */
1da177e4
LT
66	mtsp	%r0, %sr4
67	mtsp	%r0, %sr5
896a3756
GG
68	mfsp	%sr7, %r1
69	or,=    %r0,%r1,%r0	/* Only save sr7 in sr3 if sr7 != 0 */
70	mtsp	%r1, %sr3
71	tovirt_r1 %r29
72	load32	KERNEL_PSW, %r1
73
74	rsm     PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
1da177e4
LT
75	mtsp	%r0, %sr6
76	mtsp	%r0, %sr7
1da177e4
LT
77	mtctl	%r0, %cr17	/* Clear IIASQ tail */
78	mtctl	%r0, %cr17	/* Clear IIASQ head */
896a3756 79	mtctl	%r1, %ipsw
1da177e4
LT
80	load32	4f, %r1
81	mtctl	%r1, %cr18	/* Set IIAOQ tail */
82	ldo	4(%r1), %r1
83	mtctl	%r1, %cr18	/* Set IIAOQ head */
84	rfir
85	nop
864:
87	.endm
88
89	/*
90	 * The "get_stack" macros are responsible for determining the
91	 * kernel stack value.
92	 *
1da177e4
LT
93	 * If sr7 == 0
94	 *      Already using a kernel stack, so call the
95	 *      get_stack_use_r30 macro to push a pt_regs structure
96	 *      on the stack, and store registers there.
97	 * else
98	 *      Need to set up a kernel stack, so call the
99	 *      get_stack_use_cr30 macro to set up a pointer
100	 *      to the pt_regs structure contained within the
101	 *      task pointer pointed to by cr30. Set the stack
102	 *      pointer to point to the end of the task structure.
103	 *
1da177e4
LT
104	 * Note that we use shadowed registers for temps until
105	 * we can save %r26 and %r29. %r26 is used to preserve
106	 * %r8 (a shadowed register) which temporarily contained
107	 * either the fault type ("code") or the eirr. We need
108	 * to use a non-shadowed register to carry the value over
109	 * the rfir in virt_map. We use %r26 since this value winds
110	 * up being passed as the argument to either do_cpu_irq_mask
111	 * or handle_interruption. %r29 is used to hold a pointer
112	 * to the register save area, and once again, it needs to
113	 * be a non-shadowed register so that it survives the rfir.
114	 *
115	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
116	 */
117
	/* Save r30/r29/r26 into the task's pt_regs (via cr30 thread_info),
	 * leave %r29 = pt_regs pointer, %r30 = new kernel stack top. */
118	.macro  get_stack_use_cr30
119
120	/* we save the registers in the task struct */
121
122	mfctl   %cr30, %r1
123	tophys  %r1,%r9
124	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
125	tophys  %r1,%r9
126	ldo     TASK_REGS(%r9),%r9
127	STREG   %r30, PT_GR30(%r9)
128	STREG   %r29,PT_GR29(%r9)
129	STREG   %r26,PT_GR26(%r9)
130	copy    %r9,%r29
131	mfctl   %cr30, %r1
132	ldo	THREAD_SZ_ALGN(%r1), %r30
133	.endm
134
	/* Already on a kernel stack: push a pt_regs frame at %r30 and save
	 * r30/r29/r26 there; leave %r29 = pt_regs pointer. */
135	.macro  get_stack_use_r30
136
137	/* we put a struct pt_regs on the stack and save the registers there */
138
139	tophys  %r30,%r9
140	STREG   %r30,PT_GR30(%r9)
141	ldo	PT_SZ_ALGN(%r30),%r30
142	STREG   %r29,PT_GR29(%r9)
143	STREG   %r26,PT_GR26(%r9)
144	copy    %r9,%r29
145	.endm
146
	/* Undo get_stack_use_*: restore r1, r30 and (last, since we are
	 * still addressing through it) r29 from the pt_regs at %r29. */
147	.macro  rest_stack
148	LDREG   PT_GR1(%r29), %r1
149	LDREG   PT_GR30(%r29),%r30
150	LDREG   PT_GR29(%r29),%r29
151	.endm
152
153	/* default interruption handler
154	 * (calls traps.c:handle_interruption) */
	/* The ldi executes in the branch delay slot, so %r8 carries the
	 * trap code into intr_save. Each vector entry is 32-byte aligned. */
155	.macro	def code
156	b	intr_save
157	ldi     \code, %r8
158	.align	32
159	.endm
160
161	/* Interrupt interruption handler
162	 * (calls irq.c:do_cpu_irq_mask) */
	/* Delay-slot mfsp captures the interrupted space id in %r16 for
	 * intr_extint's kernel/user stack decision. */
163	.macro	extint code
164	b	intr_extint
165	mfsp    %sr7,%r16
166	.align	32
167	.endm
168
169	.import	os_hpmc, code
170
171	/* HPMC handler */
	/* High Priority Machine Check vector: firmware patches the leading
	 * NOP and the checksum word below at boot (see comments inline). */
172	.macro	hpmc code
173	nop			/* must be a NOP, will be patched later */
174	load32	PA(os_hpmc), %r3
175	bv,n	0(%r3)
176	nop
177	.word	0		/* checksum (will be patched) */
178	.word	PA(os_hpmc)	/* address of handler */
179	.word	0		/* length of handler */
180	.endm
181
182	/*
183	 * Performance Note: Instructions will be moved up into
184	 * this part of the code later on, once we are sure
185	 * that the tlb miss handlers are close to final form.
186	 */
187
188	/* Register definitions for tlb miss handler macros */
189
	/* r8/r24 are among the shadowed registers (see the fuller register
	 * allocation table later in this file), so they survive the trap. */
25985edc
LDM
190	va  = r8	/* virtual address for which the trap occurred */
191	spc = r24	/* space for which the trap occurred */
1da177e4 192
413059f2 193#ifndef CONFIG_64BIT
1da177e4
LT
194
195	/*
196	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
197	 */
	/* Dispatch stub: spc/va are filled from pcsq/pcoq (instruction
	 * address queues); the second mfctl runs in the branch delay slot. */
198
199	.macro	itlb_11 code
200
201	mfctl	%pcsq, spc
202	b	itlb_miss_11
203	mfctl	%pcoq, va
204
205	.align		32
206	.endm
207#endif
208
209	/*
210	 * itlb miss interruption handler (parisc 2.0)
211	 */
	/* Same dispatch, but selects the wide (20w) handler on 64-bit. */
212
213	.macro	itlb_20 code
214	mfctl	%pcsq, spc
413059f2 215#ifdef CONFIG_64BIT
1da177e4
LT
216	b       itlb_miss_20w
217#else
218	b	itlb_miss_20
219#endif
220	mfctl	%pcoq, va
221
222	.align		32
223	.endm
224
413059f2 225#ifndef CONFIG_64BIT
1da177e4
LT
226	/*
227	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
1da177e4
LT
228	 */
	/* Non-access itlb miss: spc/va come from isr/ior; the delay-slot
	 * mfctl fills va before the target handler runs. */
229
230	.macro	naitlb_11 code
231
232	mfctl	%isr,spc
f311847c 233	b	naitlb_miss_11
1da177e4 234	mfctl 	%ior,va
1da177e4
LT
235
236	.align		32
237	.endm
238#endif
239
240	/*
241	 * naitlb miss interruption handler (parisc 2.0)
1da177e4
LT
242	 */
	/* Same dispatch; wide (20w) handler selected on 64-bit builds. */
243
244	.macro	naitlb_20 code
245
246	mfctl	%isr,spc
413059f2 247#ifdef CONFIG_64BIT
f311847c 248	b       naitlb_miss_20w
1da177e4 249#else
f311847c 250	b	naitlb_miss_20
1da177e4
LT
251#endif
252	mfctl 	%ior,va
1da177e4
LT
253
254	.align		32
255	.endm
256
413059f2 257#ifndef CONFIG_64BIT
1da177e4
LT
258	/*
259	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
260	 */
	/* Data tlb miss dispatch: spc/va from isr/ior, delay-slot mfctl. */
261
262	.macro	dtlb_11 code
263
264	mfctl	%isr, spc
265	b	dtlb_miss_11
266	mfctl	%ior, va
267
268	.align		32
269	.endm
270#endif
271
272	/*
273	 * dtlb miss interruption handler (parisc 2.0)
274	 */
	/* Same dispatch; wide (20w) handler selected on 64-bit builds. */
275
276	.macro	dtlb_20 code
277
278	mfctl	%isr, spc
413059f2 279#ifdef CONFIG_64BIT
1da177e4
LT
280	b       dtlb_miss_20w
281#else
282	b	dtlb_miss_20
283#endif
284	mfctl	%ior, va
285
286	.align		32
287	.endm
288
413059f2 289#ifndef CONFIG_64BIT
1da177e4
LT
290	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
	/* Non-access data tlb miss dispatch: spc/va from isr/ior. */
291
292	.macro	nadtlb_11 code
293
294	mfctl	%isr,spc
295	b       nadtlb_miss_11
296	mfctl	%ior,va
297
298	.align		32
299	.endm
300#endif
301
302	/* nadtlb miss interruption handler (parisc 2.0) */
	/* Same dispatch; wide (20w) handler selected on 64-bit builds. */
303
304	.macro	nadtlb_20 code
305
306	mfctl	%isr,spc
413059f2 307#ifdef CONFIG_64BIT
1da177e4
LT
308	b       nadtlb_miss_20w
309#else
310	b       nadtlb_miss_20
311#endif
312	mfctl	%ior,va
313
314	.align		32
315	.endm
316
413059f2 317#ifndef CONFIG_64BIT
1da177e4
LT
318	/*
319	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
320	 */
	/* Dirty-bit trap dispatch: spc/va from isr/ior, delay-slot mfctl. */
321
322	.macro	dbit_11 code
323
324	mfctl	%isr,spc
325	b	dbit_trap_11
326	mfctl	%ior,va
327
328	.align		32
329	.endm
330#endif
331
332	/*
333	 * dirty bit trap interruption handler (parisc 2.0)
334	 */
	/* Same dispatch; wide (20w) handler selected on 64-bit builds. */
335
336	.macro	dbit_20 code
337
338	mfctl	%isr,spc
413059f2 339#ifdef CONFIG_64BIT
1da177e4
LT
340	b       dbit_trap_20w
341#else
342	b	dbit_trap_20
343#endif
344	mfctl	%ior,va
345
346	.align		32
347	.endm
348
1da177e4
LT
349	/* In LP64, the space contains part of the upper 32 bits of the
350	 * fault.  We have to extract this and place it in the va,
351	 * zeroing the corresponding bits in the space register */
	/* No-op on 32-bit builds (body is compiled out entirely). */
352	.macro	space_adjust	spc,va,tmp
413059f2 353#ifdef CONFIG_64BIT
1da177e4
LT
354	extrd,u	\spc,63,SPACEID_SHIFT,\tmp
355	depd	%r0,63,SPACEID_SHIFT,\spc
356	depd	\tmp,31,SPACEID_SHIFT,\va
357#endif
358	.endm
359
360	.import swapper_pg_dir,code
361
362	/* Get the pgd.  For faults on space zero (kernel space), this
363	 * is simply swapper_pg_dir.  For user space faults, the
364	 * pgd is stored in %cr25 */
	/* The conditional or nullifies the mfctl when \spc == 0, so the
	 * swapper_pg_dir address loaded above is kept for kernel faults. */
365	.macro	get_pgd		spc,reg
366	ldil		L%PA(swapper_pg_dir),\reg
367	ldo		R%PA(swapper_pg_dir)(\reg),\reg
368	or,COND(=)	%r0,\spc,%r0
369	mfctl		%cr25,\reg
370	.endm
371
372	/* 
373		space_check(spc,tmp,fault)
374
375		spc - The space we saw the fault with.
376		tmp - The place to store the current space.
377		fault - Function to call on failure.
378
379		Only allow faults on different spaces from the
380		currently active one if we're the kernel 
381
382	*/
	/* Nullification chain: if sr7 (current space) is 0 we are the
	 * kernel, so \tmp is forced equal to \spc and the final compare
	 * cannot branch to \fault. */
383	.macro	space_check	spc,tmp,fault
384	mfsp	%sr7,\tmp
385	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
386					 * as kernel, so defeat the space
387					 * check if it is */
388	copy	\spc,\tmp
389	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
390	cmpb,COND(<>),n	\tmp,\spc,\fault
391	.endm
392
393	/* Look up a PTE in a 2-Level scheme (faulting at each
394	 * level if the entry isn't present 
395	 *
396	 * NOTE: we use ldw even for LP64, since the short pointers
397	 * can address up to 1TB
398	 */
	/* In: \pmd = pgd/pmd base, \va = faulting address.
	 * Out: \pte = PTE value; branches to \fault if any level is absent. */
399	.macro	L2_ptep	pmd,pte,index,va,fault
400#if PT_NLEVELS == 3
9b437bca 401	extru	\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
1da177e4 402#else
9b437bca 403	extru	\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
1da177e4 404#endif
9b437bca 405	dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
1da177e4
LT
406	copy		%r0,\pte
407	ldw,s		\index(\pmd),\pmd
408	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
9b437bca 409	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
1da177e4 410	copy		\pmd,%r9
3d73cf5e 411	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd
9b437bca
JDA
412	extru		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
413	dep		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
1da177e4
LT
414	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
415	LDREG		%r0(\pmd),\pte		/* pmd is now pte */
416	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
417	.endm
418
419	/* Look up PTE in a 3-Level scheme.
420	 *
421	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
422	 * first pmd adjacent to the pgd.  This means that we can
423	 * subtract a constant offset to get to it.  The pmd and pgd
424	 * sizes are arranged so that a single pmd covers 4GB (giving
425	 * a full LP64 process access to 8TB) so our lookups are
426	 * effectively L2 for the first 4GB of the kernel (i.e. for
427	 * all ILP32 processes and all the kernel for machines with
428	 * under 4GB of memory) */
	/* The repeated extrd,u,*= on the pgd-index bits nullifies the
	 * following instruction when va < 4GB, so the L3 walk collapses to
	 * the constant-offset pmd before falling through into L2_ptep. */
429	.macro	L3_ptep pgd,pte,index,va,fault
2fd83038 430#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
1da177e4
LT
431	extrd,u	\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
432	copy	%r0,\pte
2fd83038 433	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
1da177e4 434	ldw,s		\index(\pgd),\pgd
2fd83038 435	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
1da177e4 436	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
2fd83038 437	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
1da177e4 438	shld		\pgd,PxD_VALUE_SHIFT,\index
2fd83038 439	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
1da177e4 440	copy		\index,\pgd
2fd83038 441	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
1da177e4 442	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
2fd83038 443#endif
1da177e4
LT
444	L2_ptep		\pgd,\pte,\index,\va,\fault
445	.endm
446
447	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
448	 * don't needlessly dirty the cache line if it was already set */
	/* and,COND(<>) nullifies the STREG when the accessed bit was
	 * already present (tmp1 & pte == tmp1 would make the result non-new). */
449	.macro	update_ptep	ptep,pte,tmp,tmp1
450	ldi		_PAGE_ACCESSED,\tmp1
451	or		\tmp1,\pte,\tmp
452	and,COND(<>)	\tmp1,\pte,%r0
453	STREG		\tmp,0(\ptep)
454	.endm
455
456	/* Set the dirty bit (and accessed bit).  No need to be
457	 * clever, this is only used from the dirty fault */
	/* Unconditionally OR in ACCESSED|DIRTY and write the PTE back. */
458	.macro	update_dirty	ptep,pte,tmp
459	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
460	or		\tmp,\pte,\pte
461	STREG		\pte,0(\ptep)
462	.endm
463
afca2523
HD
464	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
465	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
466	#define PAGE_ADD_SHIFT  (PAGE_SHIFT-12)
467
468	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	/* Strips the software prot bits out of \pte and deposits the
	 * default page-size encoding expected by the 2.0 insert insns. */
469	.macro convert_for_tlb_insert20 pte
470	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
471				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
472	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
473				(63-58)+PAGE_ADD_SHIFT,\pte
474	.endm
475
1da177e4
LT
476	/* Convert the pte and prot to tlb insertion values.  How
477	 * this happens is quite subtle, read below */
	/* Produces \prot (protection id + access rights) and \pte (page
	 * address + size encoding) ready for iitlbt/idtlbt. */
478	.macro	make_insert_tlb	spc,pte,prot
479	space_to_prot   \spc \prot        /* create prot id from space */
480	/* The following is the real subtlety.  This is depositing
481	 * T <-> _PAGE_REFTRAP
482	 * D <-> _PAGE_DIRTY
483	 * B <-> _PAGE_DMB (memory break)
484	 *
485	 * Then incredible subtlety: The access rights are
486	 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
487	 * See 3-14 of the parisc 2.0 manual
488	 *
489	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
490	 * trigger an access rights trap in user space if the user
491	 * tries to read an unreadable page */
492	depd            \pte,8,7,\prot
493
494	/* PAGE_USER indicates the page can be read with user privileges,
495	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
496	 * contains _PAGE_READ */
497	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
498	depdi		7,11,3,\prot
499	/* If we're a gateway page, drop PL2 back to zero for promotion
500	 * to kernel privilege (so we can execute the page as kernel).
501	 * Any privilege promotion page always denies read and write */
502	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
503	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */
504
2fd83038
HD
505	/* Enforce uncacheable pages.
506	 * This should ONLY be used for MMIO on PA 2.0 machines.
507	 * Memory/DMA is cache coherent on all PA2.0 machines we support
508	 * (that means T-class is NOT supported) and the memory controllers
509	 * on most of those machines only handles cache transactions.
510	 */
511	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
2678251b 512	depdi		1,12,1,\prot
1da177e4 513
2fd83038 514	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
afca2523 515	convert_for_tlb_insert20 \pte
1da177e4
LT
516	.endm
517
518	/* Identical macro to make_insert_tlb above, except it
519	 * makes the tlb entry for the differently formatted pa11
520	 * insertion instructions */
	/* 32-bit (PA 1.1) variant using word-size extract/deposit; the
	 * output feeds iitlba/iitlbp-style inserts rather than iitlbt. */
521	.macro	make_insert_tlb_11	spc,pte,prot
522	zdep	\spc,30,15,\prot
523	dep	\pte,8,7,\prot
524	extru,=	\pte,_PAGE_NO_CACHE_BIT,1,%r0
525	depi	1,12,1,\prot
526	extru,=         \pte,_PAGE_USER_BIT,1,%r0
527	depi		7,11,3,\prot   /* Set for user space (1 rsvd for read) */
528	extru,= 	\pte,_PAGE_GATEWAY_BIT,1,%r0
529	depi		0,11,2,\prot	/* If Gateway, Set PL2 to 0 */
530
531	/* Get rid of prot bits and convert to page addr for iitlba */
532
1152a68c
HD
533	depi		0,31,ASM_PFN_PTE_SHIFT,\pte
534	SHRREG		\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
1da177e4
LT
535	.endm
536
537	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
538	 * to extend into I/O space if the address is 0xfXXXXXXX
539	 * so we extend the f's into the top word of the pte in
540	 * this case */
	/* addi,<> nullifies the sign-extending extrd,s unless the top
	 * nibble extracted into \tmp was 0xf (i.e. \tmp == -1). */
541	.macro f_extend	pte,tmp
542	extrd,s		\pte,42,4,\tmp
543	addi,<>		1,\tmp,%r0
544	extrd,s		\pte,63,25,\pte
545	.endm
546
547	/* The alias region is an 8MB aligned 16MB to do clear and
548	 * copy user pages at addresses congruent with the user
549	 * virtual address.
550	 *
551	 * To use the alias page, you set %r26 up with the to TLB
552	 * entry (identifying the physical page) and %r23 up with
553	 * the from tlb entry (or nothing if only a to entry---for
554	 * clear_user_page_asm) */
	/* Faults outside the alias window (or from non-zero space) go to
	 * \fault; otherwise \pte/\prot are synthesized from r26/r23. */
555	.macro	do_alias	spc,tmp,tmp1,va,pte,prot,fault
556	cmpib,COND(<>),n 0,\spc,\fault
557	ldil		L%(TMPALIAS_MAP_START),\tmp
413059f2 558#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
1da177e4
LT
559	/* on LP64, ldi will sign extend into the upper 32 bits,
560	 * which is behaviour we don't want */
561	depdi		0,31,32,\tmp
562#endif
563	copy		\va,\tmp1
9b437bca 564	depi		0,31,23,\tmp1
1da177e4 565	cmpb,COND(<>),n	\tmp,\tmp1,\fault
f311847c
JB
566	mfctl		%cr19,\tmp	/* iir */
567	/* get the opcode (first six bits) into \tmp */
568	extrw,u		\tmp,5,6,\tmp
569	/*
570	 * Only setting the T bit prevents data cache movein
571	 * Setting access rights to zero prevents instruction cache movein
572	 *
573	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
574	 * to type field and _PAGE_READ goes to top bit of PL1
575	 */
576	ldi		(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
577	/*
578	 * so if the opcode is one (i.e. this is a memory management
579	 * instruction) nullify the next load so \prot is only T.
580	 * Otherwise this is a normal data operation
581	 */
582	cmpiclr,=	0x01,\tmp,%r0
583	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
1da177e4
LT
584	depd,z		\prot,8,7,\prot
585	/*
586	 * OK, it is in the temp alias region, check whether "from" or "to".
587	 * Check "subtle" note in pacache.S re: r23/r26.
588	 */
413059f2 589#ifdef CONFIG_64BIT
1da177e4
LT
590	extrd,u,*=	\va,41,1,%r0
591#else
592	extrw,u,=	\va,9,1,%r0
593#endif
594	or,COND(tr)	%r23,%r0,\pte
595	or		%r26,%r0,\pte
596	.endm
597
598
599 /*
600 * Align fault_vector_20 on 4K boundary so that both
601 * fault_vector_11 and fault_vector_20 are on the
602 * same page. This is only necessary as long as we
603 * write protect the kernel text, which we may stop
604 * doing once we use large page translations to cover
605 * the static part of the kernel address space.
606 */
607
dfcf753b 608 .text
1da177e4 609
873d50e2 610 .align PAGE_SIZE
1da177e4 611
	/* PA 2.0 interruption vector table: 32 entries, each expanded from
	 * the dispatch macros above (def/extint/hpmc/*tlb_20/dbit_20). */
c5e76552 612ENTRY(fault_vector_20)
1da177e4
LT
613	/* First vector is invalid (0) */
614	.ascii	"cows can fly"
615	.byte 0
616	.align 32
617
618	hpmc		 1
619	def		 2
620	def		 3
621	extint		 4
622	def		 5
623	itlb_20		 6
624	def		 7
625	def		 8
626	def              9
627	def		10
628	def		11
629	def		12
630	def		13
631	def		14
632	dtlb_20		15
1da177e4 633	naitlb_20	16
1da177e4
LT
634	nadtlb_20	17
635	def		18
636	def		19
637	dbit_20		20
638	def		21
639	def		22
640	def		23
641	def		24
642	def		25
643	def		26
644	def		27
645	def		28
646	def		29
647	def		30
648	def		31
c5e76552 649END(fault_vector_20)
1da177e4 650
413059f2 651#ifndef CONFIG_64BIT
1da177e4 652
1da177e4
LT
653 .align 2048
654
	/* PA 1.1 (32-bit) interruption vector table; mirrors
	 * fault_vector_20 but uses the *_11 dispatch macros. */
c5e76552 655ENTRY(fault_vector_11)
1da177e4
LT
656	/* First vector is invalid (0) */
657	.ascii	"cows can fly"
658	.byte 0
659	.align 32
660
661	hpmc		 1
662	def		 2
663	def		 3
664	extint		 4
665	def		 5
666	itlb_11		 6
667	def		 7
668	def		 8
669	def              9
670	def		10
671	def		11
672	def		12
673	def		13
674	def		14
675	dtlb_11		15
1da177e4 676	naitlb_11	16
1da177e4
LT
677	nadtlb_11	17
678	def		18
679	def		19
680	dbit_11		20
681	def		21
682	def		22
683	def		23
684	def		24
685	def		25
686	def		26
687	def		27
688	def		28
689	def		29
690	def		30
691	def		31
c5e76552 692END(fault_vector_11)
1da177e4
LT
693
694#endif
695
696 .import handle_interruption,code
697 .import do_cpu_irq_mask,code
698
699	/*
700	 * r26 = function to be called
701	 * r25 = argument to pass in
702	 * r24 = flags for do_fork()
703	 *
704	 * Kernel threads don't ever return, so they don't need
705	 * a true register context. We just save away the arguments
706	 * for copy_thread/ret_ to properly set up the child.
707	 */
708
709#define CLONE_VM 0x100	/* Must agree with <linux/sched.h> */
710#define CLONE_UNTRACED 0x00800000
711
1da177e4 712	.import do_fork
	/* Build a pt_regs frame on the current stack, stash the thread
	 * function/arg where copy_thread expects them, and call do_fork
	 * with CLONE_VM|CLONE_UNTRACED forced in. Returns in the parent. */
c5e76552 713ENTRY(__kernel_thread)
1da177e4
LT
714	STREG	%r2, -RP_OFFSET(%r30)
715
716	copy	%r30, %r1
717	ldo	PT_SZ_ALGN(%r30),%r30
413059f2 718#ifdef CONFIG_64BIT
1da177e4
LT
719	/* Yo, function pointers in wide mode are little structs... -PB */
720	ldd	24(%r26), %r2
721	STREG	%r2, PT_GR27(%r1)	/* Store child's %dp */
722	ldd	16(%r26), %r26
723
724	STREG	%r22, PT_GR22(%r1)	/* save r22 (arg5) */
725	copy	%r0, %r22		/* user_tid */
726#endif
727	STREG	%r26, PT_GR26(%r1)  /* Store function & argument for child */
728	STREG	%r25, PT_GR25(%r1)
729	ldil	L%CLONE_UNTRACED, %r26
730	ldo	CLONE_VM(%r26), %r26   /* Force CLONE_VM since only init_mm */
731	or	%r26, %r24, %r26      /* will have kernel mappings.	 */
732	ldi	1, %r25			/* stack_start, signals kernel thread */
733	stw	%r0, -52(%r30)	     	/* user_tid */
413059f2 734#ifdef CONFIG_64BIT
1da177e4
LT
735	ldo	-16(%r30),%r29		/* Reference param save area */
736#endif
737	BL	do_fork, %r2
738	copy	%r1, %r24		/* pt_regs */
739
740	/* Parent Returns here */
741
742	LDREG	-PT_SZ_ALGN-RP_OFFSET(%r30), %r2
743	ldo	-PT_SZ_ALGN(%r30), %r30
744	bv	%r0(%r2)
745	nop
c5e76552 746ENDPROC(__kernel_thread)
1da177e4
LT
747
748	/*
749	 * Child Returns here
750	 *
751	 * copy_thread moved args from temp save area set up above
752	 * into task save area.
753	 */
754
	/* First execution point of a new kernel thread: run schedule_tail,
	 * reload the thread fn/arg saved by __kernel_thread, call the fn
	 * via ble (sr7), then exit through sys_exit with status 0. */
c5e76552 755ENTRY(ret_from_kernel_thread)
1da177e4
LT
756
757	/* Call schedule_tail first though */
758	BL	schedule_tail, %r2
759	nop
760
761	LDREG	TI_TASK-THREAD_SZ_ALGN(%r30), %r1
762	LDREG	TASK_PT_GR25(%r1), %r26
413059f2 763#ifdef CONFIG_64BIT
1da177e4
LT
764	LDREG	TASK_PT_GR27(%r1), %r27
765	LDREG	TASK_PT_GR22(%r1), %r22
766#endif
767	LDREG	TASK_PT_GR26(%r1), %r1
768	ble	0(%sr7, %r1)
769	copy	%r31, %r2
770
413059f2 771#ifdef CONFIG_64BIT
1da177e4
LT
772	ldo	-16(%r30),%r29		/* Reference param save area */
773	loadgp				/* Thread could have been in a module */
774#endif
99ac7947 775#ifndef CONFIG_64BIT
1da177e4 776	b	sys_exit
99ac7947
RC
777#else
778	load32	sys_exit, %r1
779	bv	%r0(%r1)
780#endif
1da177e4 781	ldi	0, %r26
c5e76552 782ENDPROC(ret_from_kernel_thread)
1da177e4
LT
783
784	.import sys_execve, code
	/* Kernel-internal execve: push a pt_regs frame, save args into it,
	 * and call sys_execve.  On success control leaves via intr_return;
	 * on failure we restore the frame and return to the caller. */
c5e76552 785ENTRY(__execve)
1da177e4
LT
786	copy	%r2, %r15
787	copy	%r30, %r16
788	ldo	PT_SZ_ALGN(%r30), %r30
789	STREG	%r26, PT_GR26(%r16)
790	STREG	%r25, PT_GR25(%r16)
791	STREG	%r24, PT_GR24(%r16)
413059f2 792#ifdef CONFIG_64BIT
1da177e4
LT
793	ldo	-16(%r30),%r29		/* Reference param save area */
794#endif
795	BL	sys_execve, %r2
796	copy	%r16, %r26
797
798	cmpib,=,n 0,%r28,intr_return    /* forward */
799
800	/* yes, this will trap and die. */
801	copy	%r15, %r2
802	copy	%r16, %r30
803	bv	%r0(%r2)
804	nop
c5e76552 805ENDPROC(__execve)
1da177e4 806
1da177e4
LT
807
808	/*
809	 * struct task_struct *_switch_to(struct task_struct *prev,
810	 *	struct task_struct *next)
811	 *
812	 * switch kernel stacks and return prev */
	/* Saves callee-saved + FP state, records the resume PC/SP in
	 * prev's thread struct, then loads next's KPC/KSP and thread_info
	 * (into cr30, set in the bv delay slot) before resuming. */
c5e76552 813ENTRY(_switch_to)
1da177e4
LT
814	STREG	 %r2, -RP_OFFSET(%r30)
815
618febd6 816	callee_save_float
1da177e4
LT
817	callee_save
818
819	load32	_switch_to_ret, %r2
820
821	STREG	%r2, TASK_PT_KPC(%r26)
822	LDREG	TASK_PT_KPC(%r25), %r2
823
824	STREG	%r30, TASK_PT_KSP(%r26)
825	LDREG	TASK_PT_KSP(%r25), %r30
826	LDREG	TASK_THREAD_INFO(%r25), %r25
827	bv	%r0(%r2)
828	mtctl   %r25,%cr30
829
830_switch_to_ret:
831	mtctl	%r0, %cr0		/* Needed for single stepping */
832	callee_rest
618febd6 833	callee_rest_float
1da177e4
LT
834
835	LDREG	-RP_OFFSET(%r30), %r2
836	bv	%r0(%r2)
837	copy	%r26, %r28
c5e76552 838ENDPROC(_switch_to)
1da177e4
LT
839
840	/*
841	 * Common rfi return path for interruptions, kernel execve, and
842	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
843	 * return via this path if the signal was received when the process
844	 * was running; if the process was blocked on a syscall then the
845	 * normal syscall_exit path is used.  All syscalls for traced
846	 * processes exit via intr_restore.
847	 *
848	 * XXX If any syscalls that change a process's space id ever exit
849	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
850	 * adjust IASQ[0..1].
851	 *
1da177e4
LT
852	 */
853
873d50e2 854	.align	PAGE_SIZE
855
	/* Sanitize the saved user context (%r16 = task pt_regs): force the
	 * IAOQ privilege bits to user level, filter PT_PSW through
	 * USER_PSW_MASK, and reset the saved space registers from sr3. */
c5e76552 856ENTRY(syscall_exit_rfi)
1da177e4
LT
857	mfctl   %cr30,%r16
858	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
859	ldo	TASK_REGS(%r16),%r16
860	/* Force iaoq to userspace, as the user has had access to our current
861	 * context via sigcontext. Also Filter the PSW for the same reason.
862	 */
863	LDREG	PT_IAOQ0(%r16),%r19
864	depi	3,31,2,%r19
865	STREG	%r19,PT_IAOQ0(%r16)
866	LDREG	PT_IAOQ1(%r16),%r19
867	depi	3,31,2,%r19
868	STREG	%r19,PT_IAOQ1(%r16)
869	LDREG   PT_PSW(%r16),%r19
870	load32	USER_PSW_MASK,%r1
413059f2 871#ifdef CONFIG_64BIT
1da177e4
LT
872	load32	USER_PSW_HI_MASK,%r20
873	depd    %r20,31,32,%r1
874#endif
875	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
876	load32	USER_PSW,%r1
877	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
878	STREG   %r19,PT_PSW(%r16)
879
880	/*
881	 * If we aren't being traced, we never saved space registers
882	 * (we don't store them in the sigcontext), so set them
883	 * to "proper" values now (otherwise we'll wind up restoring
884	 * whatever was last stored in the task structure, which might
25985edc 885	 * be inconsistent if an interrupt occurred while on the gateway
4b3f686d
ML
886	 * page). Note that we may be "trashing" values the user put in
887	 * them, but we don't support the user changing them.
1da177e4
LT
888	 */
889
890	STREG   %r0,PT_SR2(%r16)
891	mfsp    %sr3,%r19
892	STREG   %r19,PT_SR0(%r16)
893	STREG   %r19,PT_SR1(%r16)
894	STREG   %r19,PT_SR3(%r16)
895	STREG   %r19,PT_SR4(%r16)
896	STREG   %r19,PT_SR5(%r16)
897	STREG   %r19,PT_SR6(%r16)
898	STREG   %r19,PT_SR7(%r16)
899
900intr_return:
901	/* NOTE: Need to enable interrupts in case we schedule. */
902	ssm     PSW_SM_I, %r0
903
1da177e4
LT
904intr_check_resched:
905
906	/* check for reschedule */
907	mfctl   %cr30,%r1
908	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
909	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
910
4650f0a5 911	.import do_notify_resume,code
1da177e4
LT
912intr_check_sig:
913	/* As above */
914	mfctl   %cr30,%r1
4650f0a5 915	LDREG	TI_FLAGS(%r1),%r19
d0420c83 916	ldi	(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NOTIFY_RESUME), %r20
4650f0a5
KM
917	and,COND(<>)	%r19, %r20, %r0
918	b,n	intr_restore	/* skip past if we've nothing to do */
919
920	/* This check is critical to having LWS
921	 * working. The IASQ is zero on the gateway
922	 * page and we cannot deliver any signals until
923	 * we get off the gateway page.
924	 *
925	 * Only do signals if we are returning to user space
926	 */
927	LDREG	PT_IASQ0(%r16), %r20
872f6deb 928	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
4650f0a5 929	LDREG	PT_IASQ1(%r16), %r20
872f6deb 930	cmpib,COND(=),n 0,%r20,intr_restore /* backward */
4650f0a5
KM
931
	/* do_notify_resume(regs, in_syscall=0); loop back afterwards in
	 * case more work became pending while signals were delivered. */
932	copy	%r0, %r25	/* long in_syscall = 0 */
933#ifdef CONFIG_64BIT
934	ldo	-16(%r30),%r29	/* Reference param save area */
935#endif
936
937	BL	do_notify_resume,%r2
938	copy	%r16, %r26	/* struct pt_regs *regs */
939
3fe4c55e 940	b,n	intr_check_sig
1da177e4
LT
941
	/* Restore FP, general, special and stack state from the pt_regs in
	 * %r16/%r29, drop back to physical addressing, and rfi to the
	 * interrupted context. */
942intr_restore:
943	copy            %r16,%r29
944	ldo             PT_FR31(%r29),%r1
945	rest_fp         %r1
946	rest_general    %r29
947
896a3756
GG
948	/* inverse of virt_map */
949	pcxt_ssm_bug
950	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
1da177e4 951	tophys_r1       %r29
1da177e4
LT
952
953	/* Restore space id's and special cr's from PT_REGS
896a3756
GG
954	 * structure pointed to by r29
955	 */
1da177e4
LT
956	rest_specials	%r29
957
896a3756
GG
958	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
959	 * It also restores r1 and r30.
960	 */
1da177e4
LT
961	rest_stack
962
963	rfi
964	nop
1da177e4 965
50a34dbd
KM
	/* Without kernel preemption, "preempt" requests simply restore. */
966#ifndef CONFIG_PREEMPT
967# define intr_do_preempt	intr_restore
968#endif /* !CONFIG_PREEMPT */
969
969
1da177e4
LT
970	.import schedule,code
971intr_do_resched:
50a34dbd
KM
972	/* Only call schedule on return to userspace. If we're returning
973	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
974	 * we jump back to intr_restore.
975	 */
1da177e4 976	LDREG	PT_IASQ0(%r16), %r20
872f6deb 977	cmpib,COND(=)	0, %r20, intr_do_preempt
1da177e4
LT
978	nop
979	LDREG	PT_IASQ1(%r16), %r20
872f6deb 980	cmpib,COND(=)	0, %r20, intr_do_preempt
1da177e4
LT
981	nop
982
413059f2 983#ifdef CONFIG_64BIT
1da177e4
LT
984	ldo	-16(%r30),%r29		/* Reference param save area */
985#endif
986
	/* Call schedule() with the return address set to intr_check_sig
	 * so pending work is rechecked after the reschedule. */
987	ldil	L%intr_check_sig, %r2
99ac7947 988#ifndef CONFIG_64BIT
1da177e4 989	b	schedule
99ac7947
RC
990#else
991	load32	schedule, %r20
992	bv	%r0(%r20)
993#endif
1da177e4
LT
994	ldo	R%intr_check_sig(%r2), %r2
995
50a34dbd
KM
996	/* preempt the current task on returning to kernel
997	 * mode from an interrupt, iff need_resched is set,
998	 * and preempt_count is 0. otherwise, we continue on
999	 * our merry way back to the current running task.
1000	 */
1001#ifdef CONFIG_PREEMPT
1002	.import preempt_schedule_irq,code
1003intr_do_preempt:
1004	rsm	PSW_SM_I, %r0		/* disable interrupts */
1005
1006	/* current_thread_info()->preempt_count */
1007	mfctl	%cr30, %r1
1008	LDREG	TI_PRE_COUNT(%r1), %r19
872f6deb 1009	cmpib,COND(<>)	0, %r19, intr_restore	/* if preempt_count > 0 */
50a34dbd
KM
1010	nop				/* prev insn branched backwards */
1011
1012	/* check if we interrupted a critical path */
1013	LDREG	PT_PSW(%r16), %r20
1014	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
1015	nop
1016
1017	BL	preempt_schedule_irq, %r2
1018	nop
1019
9c2c5457 1020	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
50a34dbd 1021#endif /* CONFIG_PREEMPT */
1da177e4 1022
1da177e4
LT
1023	/*
1024	 * External interrupts.
1025	 */
	/* %r16 holds the interrupted sr7 (set by the extint macro's delay
	 * slot): zero means we were in the kernel, so stay on r30's stack;
	 * otherwise switch to the kernel stack via cr30.  Saves state,
	 * then tail-calls do_cpu_irq_mask returning to intr_return. */
1026
1027intr_extint:
872f6deb 1028	cmpib,COND(=),n 0,%r16,1f
6cc4525d 1029
1da177e4 1030	get_stack_use_cr30
6cc4525d 1031	b,n 2f
1da177e4
LT
1032
10331:
1da177e4 1034	get_stack_use_r30
6cc4525d 10352:
1da177e4
LT
1036	save_specials	%r29
1037	virt_map
1038	save_general	%r29
1039
1040	ldo	PT_FR0(%r29), %r24
1041	save_fp	%r24
1042	
1043	loadgp
1044
1045	copy	%r29, %r26	/* arg0 is pt_regs */
1046	copy	%r29, %r16	/* save pt_regs */
1047
1048	ldil	L%intr_return, %r2
1049
413059f2 1050#ifdef CONFIG_64BIT
1da177e4
LT
1051	ldo	-16(%r30),%r29	/* Reference param save area */
1052#endif
1053
1054	b	do_cpu_irq_mask
1055	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
c5e76552 1056ENDPROC(syscall_exit_rfi)
1da177e4
LT
1057
1058
1059	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
	/* Entry from the "def" vector macros: %r8 carries the trap code
	 * (copied to %r26), %sr7 selects kernel vs. user stack handling.
	 * Saves full state, captures/adjusts isr/ior, then tail-calls
	 * handle_interruption returning to intr_check_sig. */
1060
c5e76552 1061ENTRY(intr_save)		/* for os_hpmc */
1da177e4 1062	mfsp    %sr7,%r16
872f6deb 1063	cmpib,COND(=),n 0,%r16,1f
1da177e4
LT
1064	get_stack_use_cr30
1065	b	2f
1066	copy    %r8,%r26
1067
10681:
1069	get_stack_use_r30
1070	copy    %r8,%r26
1071
10722:
1073	save_specials	%r29
1074
1075	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */
1076
1077	/*
1078	 * FIXME: 1) Use a #define for the hardwired "6" below (and in
1079	 *        traps.c.
1080	 *        2) Once we start executing code above 4 Gb, we need
1081	 *           to adjust iasq/iaoq here in the same way we
1082	 *           adjust isr/ior below.
1083	 */
1084
872f6deb 1085	cmpib,COND(=),n        6,%r26,skip_save_ior
1da177e4 1086
1da177e4
LT
1087
1088	mfctl           %cr20, %r16 /* isr */
896a3756 1089	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1da177e4
LT
1090	mfctl           %cr21, %r17 /* ior */
1091
896a3756 1092
413059f2 1093#ifdef CONFIG_64BIT
1da177e4
LT
1094	/*
1095	 * If the interrupted code was running with W bit off (32 bit),
1096	 * clear the b bits (bits 0 & 1) in the ior.
896a3756 1097	 * save_specials left ipsw value in r8 for us to test.
1da177e4
LT
1098	 */
1099	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
1100	depdi           0,1,2,%r17
1101
1102	/*
1103	 * FIXME: This code has hardwired assumptions about the split
1104	 *        between space bits and offset bits. This will change
1105	 *        when we allow alternate page sizes.
1106	 */
1107
1108	/* adjust isr/ior. */
2fd83038
HD
1109	extrd,u         %r16,63,SPACEID_SHIFT,%r1	/* get high bits from isr for ior */
1110	depd            %r1,31,SPACEID_SHIFT,%r17	/* deposit them into ior */
1111	depdi           0,63,SPACEID_SHIFT,%r16		/* clear them from isr */
1da177e4
LT
1112#endif
1113	STREG           %r16, PT_ISR(%r29)
1114	STREG           %r17, PT_IOR(%r29)
1115
1116
1117skip_save_ior:
1118	virt_map
1119	save_general	%r29
1120
1121	ldo		PT_FR0(%r29), %r25
1122	save_fp		%r25
1123	
1124	loadgp
1125
1126	copy		%r29, %r25	/* arg1 is pt_regs */
413059f2 1127#ifdef CONFIG_64BIT
1da177e4
LT
1128	ldo		-16(%r30),%r29	/* Reference param save area */
1129#endif
1130
1131	ldil		L%intr_check_sig, %r2
1132	copy		%r25, %r16	/* save pt_regs */
1133
1134	b		handle_interruption
1135	ldo		R%intr_check_sig(%r2), %r2
c5e76552 1136ENDPROC(intr_save)
1da177e4
LT
1137
1138
1139 /*
1140 * Note for all tlb miss handlers:
1141 *
1142 * cr24 contains a pointer to the kernel address space
1143 * page directory.
1144 *
1145 * cr25 contains a pointer to the current user address
1146 * space page directory.
1147 *
1148 * sr3 will contain the space id of the user address space
1149 * of the current running thread while that thread is
1150 * running in the kernel.
1151 */
1152
1153 /*
1154 * register number allocations. Note that these are all
1155 * in the shadowed registers
1156 */
1157
1158 t0 = r1 /* temporary register 0 */
25985edc 1159 va = r8 /* virtual address for which the trap occurred */
1da177e4
LT
1160 t1 = r9 /* temporary register 1 */
1161 pte = r16 /* pte/phys page # */
1162 prot = r17 /* prot bits */
25985edc 1163 spc = r24 /* space for which the trap occurred */
1da177e4
LT
1164 ptp = r25 /* page directory/page table pointer */
1165
413059f2 1166#ifdef CONFIG_64BIT
1da177e4
LT
1167
1168dtlb_miss_20w:
1169 space_adjust spc,va,t0
1170 get_pgd spc,ptp
1171 space_check spc,t0,dtlb_fault
1172
1173 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
1174
1175 update_ptep ptp,pte,t0,t1
1176
1177 make_insert_tlb spc,pte,prot
1178
1179 idtlbt pte,prot
1180
1181 rfir
1182 nop
1183
1184dtlb_check_alias_20w:
1185 do_alias spc,t0,t1,va,pte,prot,dtlb_fault
1186
1187 idtlbt pte,prot
1188
1189 rfir
1190 nop
1191
1192nadtlb_miss_20w:
1193 space_adjust spc,va,t0
1194 get_pgd spc,ptp
1195 space_check spc,t0,nadtlb_fault
1196
f311847c 1197 L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
1da177e4
LT
1198
1199 update_ptep ptp,pte,t0,t1
1200
1201 make_insert_tlb spc,pte,prot
1202
1203 idtlbt pte,prot
1204
1205 rfir
1206 nop
1207
f311847c 1208nadtlb_check_alias_20w:
8b4ae334 1209 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
1da177e4 1210
1da177e4
LT
1211 idtlbt pte,prot
1212
1213 rfir
1214 nop
1215
1216#else
1217
1218dtlb_miss_11:
1219 get_pgd spc,ptp
1220
1221 space_check spc,t0,dtlb_fault
1222
1223 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
1224
1225 update_ptep ptp,pte,t0,t1
1226
1227 make_insert_tlb_11 spc,pte,prot
1228
1229 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1230 mtsp spc,%sr1
1231
1232 idtlba pte,(%sr1,va)
1233 idtlbp prot,(%sr1,va)
1234
1235 mtsp t0, %sr1 /* Restore sr1 */
1236
1237 rfir
1238 nop
1239
1240dtlb_check_alias_11:
f311847c 1241 do_alias spc,t0,t1,va,pte,prot,dtlb_fault
1da177e4
LT
1242
1243 idtlba pte,(va)
1244 idtlbp prot,(va)
1245
1246 rfir
1247 nop
1248
1249nadtlb_miss_11:
1250 get_pgd spc,ptp
1251
1252 space_check spc,t0,nadtlb_fault
1253
f311847c 1254 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
1da177e4
LT
1255
1256 update_ptep ptp,pte,t0,t1
1257
1258 make_insert_tlb_11 spc,pte,prot
1259
1260
1261 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1262 mtsp spc,%sr1
1263
1264 idtlba pte,(%sr1,va)
1265 idtlbp prot,(%sr1,va)
1266
1267 mtsp t0, %sr1 /* Restore sr1 */
1268
1269 rfir
1270 nop
1271
f311847c 1272nadtlb_check_alias_11:
8b4ae334 1273 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
f311847c
JB
1274
1275 idtlba pte,(va)
1276 idtlbp prot,(va)
1277
1278 rfir
1279 nop
1280
1da177e4
LT
1281dtlb_miss_20:
1282 space_adjust spc,va,t0
1283 get_pgd spc,ptp
1284 space_check spc,t0,dtlb_fault
1285
1286 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
1287
1288 update_ptep ptp,pte,t0,t1
1289
1290 make_insert_tlb spc,pte,prot
1291
1292 f_extend pte,t0
1293
1294 idtlbt pte,prot
1295
1296 rfir
1297 nop
1298
1299dtlb_check_alias_20:
1300 do_alias spc,t0,t1,va,pte,prot,dtlb_fault
1301
1302 idtlbt pte,prot
1303
1304 rfir
1305 nop
1306
1307nadtlb_miss_20:
1308 get_pgd spc,ptp
1309
1310 space_check spc,t0,nadtlb_fault
1311
f311847c 1312 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
1da177e4
LT
1313
1314 update_ptep ptp,pte,t0,t1
1315
1316 make_insert_tlb spc,pte,prot
1317
1318 f_extend pte,t0
1319
1320 idtlbt pte,prot
1321
1322 rfir
1323 nop
1324
f311847c 1325nadtlb_check_alias_20:
8b4ae334 1326 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
f311847c
JB
1327
1328 idtlbt pte,prot
1329
1330 rfir
1331 nop
1332
1da177e4
LT
1333#endif
1334
1335nadtlb_emulate:
1336
1337 /*
1338 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
1339 * probei instructions. We don't want to fault for these
1340 * instructions (not only does it not make sense, it can cause
1341 * deadlocks, since some flushes are done with the mmap
1342 * semaphore held). If the translation doesn't exist, we can't
1343 * insert a translation, so have to emulate the side effects
1344 * of the instruction. Since we don't insert a translation
1345 * we can get a lot of faults during a flush loop, so it makes
1346 * sense to try to do it here with minimum overhead. We only
1347 * emulate fdc,fic,pdc,probew,prober instructions whose base
1348 * and index registers are not shadowed. We defer everything
1349 * else to the "slow" path.
1350 */
1351
1352 mfctl %cr19,%r9 /* Get iir */
1353
1354 /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
1355 Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */
1356
1357 /* Checks for fdc,fdce,pdc,"fic,4f" only */
1358 ldi 0x280,%r16
1359 and %r9,%r16,%r17
1360 cmpb,<>,n %r16,%r17,nadtlb_probe_check
1361 bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
1362 BL get_register,%r25
1363 extrw,u %r9,15,5,%r8 /* Get index register # */
872f6deb 1364 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1da177e4
LT
1365 copy %r1,%r24
1366 BL get_register,%r25
1367 extrw,u %r9,10,5,%r8 /* Get base register # */
872f6deb 1368 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1da177e4
LT
1369 BL set_register,%r25
1370 add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
1371
1372nadtlb_nullify:
896a3756 1373 mfctl %ipsw,%r8
1da177e4
LT
1374 ldil L%PSW_N,%r9
1375 or %r8,%r9,%r8 /* Set PSW_N */
896a3756 1376 mtctl %r8,%ipsw
1da177e4
LT
1377
1378 rfir
1379 nop
1380
1381 /*
1382 When there is no translation for the probe address then we
 1383 must nullify the insn and return zero in the target register.
1384 This will indicate to the calling code that it does not have
1385 write/read privileges to this address.
1386
1387 This should technically work for prober and probew in PA 1.1,
1388 and also probe,r and probe,w in PA 2.0
1389
1390 WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
1391 THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
1392
1393 */
1394nadtlb_probe_check:
1395 ldi 0x80,%r16
1396 and %r9,%r16,%r17
1397 cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
1398 BL get_register,%r25 /* Find the target register */
1399 extrw,u %r9,31,5,%r8 /* Get target register */
872f6deb 1400 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1da177e4
LT
1401 BL set_register,%r25
1402 copy %r0,%r1 /* Write zero to target register */
1403 b nadtlb_nullify /* Nullify return insn */
1404 nop
1405
1406
413059f2 1407#ifdef CONFIG_64BIT
1da177e4
LT
1408itlb_miss_20w:
1409
1410 /*
1411 * I miss is a little different, since we allow users to fault
1412 * on the gateway page which is in the kernel address space.
1413 */
1414
1415 space_adjust spc,va,t0
1416 get_pgd spc,ptp
1417 space_check spc,t0,itlb_fault
1418
1419 L3_ptep ptp,pte,t0,va,itlb_fault
1420
1421 update_ptep ptp,pte,t0,t1
1422
1423 make_insert_tlb spc,pte,prot
1424
1425 iitlbt pte,prot
1426
1427 rfir
1428 nop
1429
f311847c
JB
1430naitlb_miss_20w:
1431
1432 /*
1433 * I miss is a little different, since we allow users to fault
1434 * on the gateway page which is in the kernel address space.
1435 */
1436
1437 space_adjust spc,va,t0
1438 get_pgd spc,ptp
1439 space_check spc,t0,naitlb_fault
1440
1441 L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
1442
1443 update_ptep ptp,pte,t0,t1
1444
1445 make_insert_tlb spc,pte,prot
1446
1447 iitlbt pte,prot
1448
1449 rfir
1450 nop
1451
1452naitlb_check_alias_20w:
1453 do_alias spc,t0,t1,va,pte,prot,naitlb_fault
1454
1455 iitlbt pte,prot
1456
1457 rfir
1458 nop
1459
1da177e4
LT
1460#else
1461
1462itlb_miss_11:
1463 get_pgd spc,ptp
1464
1465 space_check spc,t0,itlb_fault
1466
1467 L2_ptep ptp,pte,t0,va,itlb_fault
1468
1469 update_ptep ptp,pte,t0,t1
1470
1471 make_insert_tlb_11 spc,pte,prot
1472
1473 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1474 mtsp spc,%sr1
1475
1476 iitlba pte,(%sr1,va)
1477 iitlbp prot,(%sr1,va)
1478
1479 mtsp t0, %sr1 /* Restore sr1 */
1480
1481 rfir
1482 nop
1483
f311847c
JB
1484naitlb_miss_11:
1485 get_pgd spc,ptp
1486
1487 space_check spc,t0,naitlb_fault
1488
1489 L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
1490
1491 update_ptep ptp,pte,t0,t1
1492
1493 make_insert_tlb_11 spc,pte,prot
1494
1495 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1496 mtsp spc,%sr1
1497
1498 iitlba pte,(%sr1,va)
1499 iitlbp prot,(%sr1,va)
1500
1501 mtsp t0, %sr1 /* Restore sr1 */
1502
1503 rfir
1504 nop
1505
1506naitlb_check_alias_11:
1507 do_alias spc,t0,t1,va,pte,prot,itlb_fault
1508
1509 iitlba pte,(%sr0, va)
1510 iitlbp prot,(%sr0, va)
1511
1512 rfir
1513 nop
1514
1515
1da177e4
LT
1516itlb_miss_20:
1517 get_pgd spc,ptp
1518
1519 space_check spc,t0,itlb_fault
1520
1521 L2_ptep ptp,pte,t0,va,itlb_fault
1522
1523 update_ptep ptp,pte,t0,t1
1524
1525 make_insert_tlb spc,pte,prot
1526
1527 f_extend pte,t0
1528
1529 iitlbt pte,prot
1530
1531 rfir
1532 nop
1533
f311847c
JB
1534naitlb_miss_20:
1535 get_pgd spc,ptp
1536
1537 space_check spc,t0,naitlb_fault
1538
1539 L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
1540
1541 update_ptep ptp,pte,t0,t1
1542
1543 make_insert_tlb spc,pte,prot
1544
1545 f_extend pte,t0
1546
1547 iitlbt pte,prot
1548
1549 rfir
1550 nop
1551
1552naitlb_check_alias_20:
1553 do_alias spc,t0,t1,va,pte,prot,naitlb_fault
1554
1555 iitlbt pte,prot
1556
1557 rfir
1558 nop
1559
1da177e4
LT
1560#endif
1561
413059f2 1562#ifdef CONFIG_64BIT
1da177e4
LT
1563
1564dbit_trap_20w:
1565 space_adjust spc,va,t0
1566 get_pgd spc,ptp
1567 space_check spc,t0,dbit_fault
1568
1569 L3_ptep ptp,pte,t0,va,dbit_fault
1570
1571#ifdef CONFIG_SMP
872f6deb 1572 cmpib,COND(=),n 0,spc,dbit_nolock_20w
1da177e4
LT
1573 load32 PA(pa_dbit_lock),t0
1574
1575dbit_spin_20w:
64f49532 1576 LDCW 0(t0),t1
872f6deb 1577 cmpib,COND(=) 0,t1,dbit_spin_20w
1da177e4
LT
1578 nop
1579
1580dbit_nolock_20w:
1581#endif
1582 update_dirty ptp,pte,t1
1583
1584 make_insert_tlb spc,pte,prot
1585
1586 idtlbt pte,prot
1587#ifdef CONFIG_SMP
872f6deb 1588 cmpib,COND(=),n 0,spc,dbit_nounlock_20w
1da177e4
LT
1589 ldi 1,t1
1590 stw t1,0(t0)
1591
1592dbit_nounlock_20w:
1593#endif
1594
1595 rfir
1596 nop
1597#else
1598
1599dbit_trap_11:
1600
1601 get_pgd spc,ptp
1602
1603 space_check spc,t0,dbit_fault
1604
1605 L2_ptep ptp,pte,t0,va,dbit_fault
1606
1607#ifdef CONFIG_SMP
872f6deb 1608 cmpib,COND(=),n 0,spc,dbit_nolock_11
1da177e4
LT
1609 load32 PA(pa_dbit_lock),t0
1610
1611dbit_spin_11:
64f49532 1612 LDCW 0(t0),t1
1da177e4
LT
1613 cmpib,= 0,t1,dbit_spin_11
1614 nop
1615
1616dbit_nolock_11:
1617#endif
1618 update_dirty ptp,pte,t1
1619
1620 make_insert_tlb_11 spc,pte,prot
1621
1622 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1623 mtsp spc,%sr1
1624
1625 idtlba pte,(%sr1,va)
1626 idtlbp prot,(%sr1,va)
1627
1628 mtsp t1, %sr1 /* Restore sr1 */
1629#ifdef CONFIG_SMP
872f6deb 1630 cmpib,COND(=),n 0,spc,dbit_nounlock_11
1da177e4
LT
1631 ldi 1,t1
1632 stw t1,0(t0)
1633
1634dbit_nounlock_11:
1635#endif
1636
1637 rfir
1638 nop
1639
1640dbit_trap_20:
1641 get_pgd spc,ptp
1642
1643 space_check spc,t0,dbit_fault
1644
1645 L2_ptep ptp,pte,t0,va,dbit_fault
1646
1647#ifdef CONFIG_SMP
872f6deb 1648 cmpib,COND(=),n 0,spc,dbit_nolock_20
1da177e4
LT
1649 load32 PA(pa_dbit_lock),t0
1650
1651dbit_spin_20:
64f49532 1652 LDCW 0(t0),t1
1da177e4
LT
1653 cmpib,= 0,t1,dbit_spin_20
1654 nop
1655
1656dbit_nolock_20:
1657#endif
1658 update_dirty ptp,pte,t1
1659
1660 make_insert_tlb spc,pte,prot
1661
1662 f_extend pte,t1
1663
1664 idtlbt pte,prot
1665
1666#ifdef CONFIG_SMP
872f6deb 1667 cmpib,COND(=),n 0,spc,dbit_nounlock_20
1da177e4
LT
1668 ldi 1,t1
1669 stw t1,0(t0)
1670
1671dbit_nounlock_20:
1672#endif
1673
1674 rfir
1675 nop
1676#endif
1677
1678 .import handle_interruption,code
1679
1680kernel_bad_space:
1681 b intr_save
1682 ldi 31,%r8 /* Use an unused code */
1683
1684dbit_fault:
1685 b intr_save
1686 ldi 20,%r8
1687
1688itlb_fault:
1689 b intr_save
1690 ldi 6,%r8
1691
1692nadtlb_fault:
1693 b intr_save
1694 ldi 17,%r8
1695
f311847c
JB
1696naitlb_fault:
1697 b intr_save
1698 ldi 16,%r8
1699
1da177e4
LT
1700dtlb_fault:
1701 b intr_save
1702 ldi 15,%r8
1703
1704 /* Register saving semantics for system calls:
1705
1706 %r1 clobbered by system call macro in userspace
1707 %r2 saved in PT_REGS by gateway page
1708 %r3 - %r18 preserved by C code (saved by signal code)
1709 %r19 - %r20 saved in PT_REGS by gateway page
1710 %r21 - %r22 non-standard syscall args
1711 stored in kernel stack by gateway page
1712 %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
1713 %r27 - %r30 saved in PT_REGS by gateway page
1714 %r31 syscall return pointer
1715 */
1716
1717 /* Floating point registers (FIXME: what do we do with these?)
1718
1719 %fr0 - %fr3 status/exception, not preserved
1720 %fr4 - %fr7 arguments
1721 %fr8 - %fr11 not preserved by C code
1722 %fr12 - %fr21 preserved by C code
1723 %fr22 - %fr31 not preserved by C code
1724 */
1725
1726 .macro reg_save regs
1727 STREG %r3, PT_GR3(\regs)
1728 STREG %r4, PT_GR4(\regs)
1729 STREG %r5, PT_GR5(\regs)
1730 STREG %r6, PT_GR6(\regs)
1731 STREG %r7, PT_GR7(\regs)
1732 STREG %r8, PT_GR8(\regs)
1733 STREG %r9, PT_GR9(\regs)
1734 STREG %r10,PT_GR10(\regs)
1735 STREG %r11,PT_GR11(\regs)
1736 STREG %r12,PT_GR12(\regs)
1737 STREG %r13,PT_GR13(\regs)
1738 STREG %r14,PT_GR14(\regs)
1739 STREG %r15,PT_GR15(\regs)
1740 STREG %r16,PT_GR16(\regs)
1741 STREG %r17,PT_GR17(\regs)
1742 STREG %r18,PT_GR18(\regs)
1743 .endm
1744
1745 .macro reg_restore regs
1746 LDREG PT_GR3(\regs), %r3
1747 LDREG PT_GR4(\regs), %r4
1748 LDREG PT_GR5(\regs), %r5
1749 LDREG PT_GR6(\regs), %r6
1750 LDREG PT_GR7(\regs), %r7
1751 LDREG PT_GR8(\regs), %r8
1752 LDREG PT_GR9(\regs), %r9
1753 LDREG PT_GR10(\regs),%r10
1754 LDREG PT_GR11(\regs),%r11
1755 LDREG PT_GR12(\regs),%r12
1756 LDREG PT_GR13(\regs),%r13
1757 LDREG PT_GR14(\regs),%r14
1758 LDREG PT_GR15(\regs),%r15
1759 LDREG PT_GR16(\regs),%r16
1760 LDREG PT_GR17(\regs),%r17
1761 LDREG PT_GR18(\regs),%r18
1762 .endm
1763
c5e76552 1764ENTRY(sys_fork_wrapper)
1da177e4
LT
1765 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1766 ldo TASK_REGS(%r1),%r1
1767 reg_save %r1
1768 mfctl %cr27, %r3
1769 STREG %r3, PT_CR27(%r1)
1770
1771 STREG %r2,-RP_OFFSET(%r30)
1772 ldo FRAME_SIZE(%r30),%r30
413059f2 1773#ifdef CONFIG_64BIT
1da177e4
LT
1774 ldo -16(%r30),%r29 /* Reference param save area */
1775#endif
1776
1777 /* These are call-clobbered registers and therefore
1778 also syscall-clobbered (we hope). */
1779 STREG %r2,PT_GR19(%r1) /* save for child */
1780 STREG %r30,PT_GR21(%r1)
1781
1782 LDREG PT_GR30(%r1),%r25
1783 copy %r1,%r24
1784 BL sys_clone,%r2
1785 ldi SIGCHLD,%r26
1786
1787 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
1788wrapper_exit:
1789 ldo -FRAME_SIZE(%r30),%r30 /* get the stackframe */
1790 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1791 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1792
1793 LDREG PT_CR27(%r1), %r3
1794 mtctl %r3, %cr27
1795 reg_restore %r1
1796
1797 /* strace expects syscall # to be preserved in r20 */
1798 ldi __NR_fork,%r20
1799 bv %r0(%r2)
1800 STREG %r20,PT_GR20(%r1)
c5e76552 1801ENDPROC(sys_fork_wrapper)
1da177e4
LT
1802
1803 /* Set the return value for the child */
c5e76552 1804ENTRY(child_return)
1da177e4
LT
1805 BL schedule_tail, %r2
1806 nop
1807
1808 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
1809 LDREG TASK_PT_GR19(%r1),%r2
1810 b wrapper_exit
1811 copy %r0,%r28
c5e76552 1812ENDPROC(child_return)
1da177e4 1813
c5e76552
HD
1814
1815ENTRY(sys_clone_wrapper)
1da177e4
LT
1816 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1817 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1818 reg_save %r1
1819 mfctl %cr27, %r3
1820 STREG %r3, PT_CR27(%r1)
1821
1822 STREG %r2,-RP_OFFSET(%r30)
1823 ldo FRAME_SIZE(%r30),%r30
413059f2 1824#ifdef CONFIG_64BIT
1da177e4
LT
1825 ldo -16(%r30),%r29 /* Reference param save area */
1826#endif
1827
aa0eecb0 1828 /* WARNING - Clobbers r19 and r21, userspace must save these! */
1da177e4
LT
1829 STREG %r2,PT_GR19(%r1) /* save for child */
1830 STREG %r30,PT_GR21(%r1)
1831 BL sys_clone,%r2
1832 copy %r1,%r24
1833
1834 b wrapper_exit
1835 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
c5e76552 1836ENDPROC(sys_clone_wrapper)
1da177e4 1837
c5e76552
HD
1838
1839ENTRY(sys_vfork_wrapper)
1da177e4
LT
1840 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1841 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1842 reg_save %r1
1843 mfctl %cr27, %r3
1844 STREG %r3, PT_CR27(%r1)
1845
1846 STREG %r2,-RP_OFFSET(%r30)
1847 ldo FRAME_SIZE(%r30),%r30
413059f2 1848#ifdef CONFIG_64BIT
1da177e4
LT
1849 ldo -16(%r30),%r29 /* Reference param save area */
1850#endif
1851
1852 STREG %r2,PT_GR19(%r1) /* save for child */
1853 STREG %r30,PT_GR21(%r1)
1854
1855 BL sys_vfork,%r2
1856 copy %r1,%r26
1857
1858 b wrapper_exit
1859 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
c5e76552 1860ENDPROC(sys_vfork_wrapper)
1da177e4
LT
1861
1862
1863 .macro execve_wrapper execve
1864 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1865 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1866
1867 /*
1868 * Do we need to save/restore r3-r18 here?
1869 * I don't think so. why would new thread need old
1870 * threads registers?
1871 */
1872
1873 /* %arg0 - %arg3 are already saved for us. */
1874
1875 STREG %r2,-RP_OFFSET(%r30)
1876 ldo FRAME_SIZE(%r30),%r30
413059f2 1877#ifdef CONFIG_64BIT
1da177e4
LT
1878 ldo -16(%r30),%r29 /* Reference param save area */
1879#endif
99ac7947 1880 BL \execve,%r2
1da177e4
LT
1881 copy %r1,%arg0
1882
1883 ldo -FRAME_SIZE(%r30),%r30
1884 LDREG -RP_OFFSET(%r30),%r2
1885
1886 /* If exec succeeded we need to load the args */
1887
1888 ldo -1024(%r0),%r1
1889 cmpb,>>= %r28,%r1,error_\execve
1890 copy %r2,%r19
1891
1892error_\execve:
1893 bv %r0(%r19)
1894 nop
1895 .endm
1896
1da177e4 1897 .import sys_execve
c5e76552 1898ENTRY(sys_execve_wrapper)
1da177e4 1899 execve_wrapper sys_execve
c5e76552 1900ENDPROC(sys_execve_wrapper)
1da177e4 1901
413059f2 1902#ifdef CONFIG_64BIT
1da177e4 1903 .import sys32_execve
c5e76552 1904ENTRY(sys32_execve_wrapper)
1da177e4 1905 execve_wrapper sys32_execve
c5e76552 1906ENDPROC(sys32_execve_wrapper)
1da177e4
LT
1907#endif
1908
c5e76552 1909ENTRY(sys_rt_sigreturn_wrapper)
1da177e4
LT
1910 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
1911 ldo TASK_REGS(%r26),%r26 /* get pt regs */
1912 /* Don't save regs, we are going to restore them from sigcontext. */
1913 STREG %r2, -RP_OFFSET(%r30)
413059f2 1914#ifdef CONFIG_64BIT
1da177e4
LT
1915 ldo FRAME_SIZE(%r30), %r30
1916 BL sys_rt_sigreturn,%r2
1917 ldo -16(%r30),%r29 /* Reference param save area */
1918#else
1919 BL sys_rt_sigreturn,%r2
1920 ldo FRAME_SIZE(%r30), %r30
1921#endif
1922
1923 ldo -FRAME_SIZE(%r30), %r30
1924 LDREG -RP_OFFSET(%r30), %r2
1925
1926 /* FIXME: I think we need to restore a few more things here. */
1927 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1928 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1929 reg_restore %r1
1930
1931 /* If the signal was received while the process was blocked on a
1932 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1933 * take us to syscall_exit_rfi and on to intr_return.
1934 */
1935 bv %r0(%r2)
1936 LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
c5e76552 1937ENDPROC(sys_rt_sigreturn_wrapper)
1da177e4 1938
c5e76552 1939ENTRY(sys_sigaltstack_wrapper)
1da177e4
LT
1940 /* Get the user stack pointer */
1941 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1942 ldo TASK_REGS(%r1),%r24 /* get pt regs */
1943 LDREG TASK_PT_GR30(%r24),%r24
1944 STREG %r2, -RP_OFFSET(%r30)
413059f2 1945#ifdef CONFIG_64BIT
1da177e4 1946 ldo FRAME_SIZE(%r30), %r30
df47b438 1947 BL do_sigaltstack,%r2
1da177e4
LT
1948 ldo -16(%r30),%r29 /* Reference param save area */
1949#else
df47b438 1950 BL do_sigaltstack,%r2
1da177e4
LT
1951 ldo FRAME_SIZE(%r30), %r30
1952#endif
1953
1954 ldo -FRAME_SIZE(%r30), %r30
1955 LDREG -RP_OFFSET(%r30), %r2
1956 bv %r0(%r2)
1957 nop
c5e76552 1958ENDPROC(sys_sigaltstack_wrapper)
1da177e4 1959
413059f2 1960#ifdef CONFIG_64BIT
c5e76552 1961ENTRY(sys32_sigaltstack_wrapper)
1da177e4
LT
1962 /* Get the user stack pointer */
1963 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
1964 LDREG TASK_PT_GR30(%r24),%r24
1965 STREG %r2, -RP_OFFSET(%r30)
1966 ldo FRAME_SIZE(%r30), %r30
df47b438 1967 BL do_sigaltstack32,%r2
1da177e4
LT
1968 ldo -16(%r30),%r29 /* Reference param save area */
1969
1970 ldo -FRAME_SIZE(%r30), %r30
1971 LDREG -RP_OFFSET(%r30), %r2
1972 bv %r0(%r2)
1973 nop
c5e76552 1974ENDPROC(sys32_sigaltstack_wrapper)
1da177e4
LT
1975#endif
1976
c5e76552 1977ENTRY(syscall_exit)
1da177e4
LT
1978 /* NOTE: HP-UX syscalls also come through here
1979 * after hpux_syscall_exit fixes up return
1980 * values. */
1981
1982 /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
1983 * via syscall_exit_rfi if the signal was received while the process
1984 * was running.
1985 */
1986
1987 /* save return value now */
1988
1989 mfctl %cr30, %r1
1990 LDREG TI_TASK(%r1),%r1
1991 STREG %r28,TASK_PT_GR28(%r1)
1992
1993#ifdef CONFIG_HPUX
1da177e4
LT
1994/* <linux/personality.h> cannot be easily included */
1995#define PER_HPUX 0x10
376e210b 1996 ldw TASK_PERSONALITY(%r1),%r19
1da177e4
LT
1997
1998 /* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
1999 ldo -PER_HPUX(%r19), %r19
872f6deb 2000 cmpib,COND(<>),n 0,%r19,1f
1da177e4
LT
2001
2002 /* Save other hpux returns if personality is PER_HPUX */
2003 STREG %r22,TASK_PT_GR22(%r1)
2004 STREG %r29,TASK_PT_GR29(%r1)
20051:
2006
2007#endif /* CONFIG_HPUX */
2008
2009 /* Seems to me that dp could be wrong here, if the syscall involved
2010 * calling a module, and nothing got round to restoring dp on return.
2011 */
2012 loadgp
2013
1da177e4
LT
2014syscall_check_resched:
2015
2016 /* check for reschedule */
2017
2018 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
2019 bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
2020
4650f0a5 2021 .import do_signal,code
1da177e4 2022syscall_check_sig:
4650f0a5 2023 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
3fe4c55e 2024 ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r26
4650f0a5
KM
2025 and,COND(<>) %r19, %r26, %r0
2026 b,n syscall_restore /* skip past if we've nothing to do */
2027
2028syscall_do_signal:
2029 /* Save callee-save registers (for sigcontext).
2030 * FIXME: After this point the process structure should be
2031 * consistent with all the relevant state of the process
2032 * before the syscall. We need to verify this.
2033 */
2034 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2035 ldo TASK_REGS(%r1), %r26 /* struct pt_regs *regs */
2036 reg_save %r26
2037
2038#ifdef CONFIG_64BIT
2039 ldo -16(%r30),%r29 /* Reference param save area */
2040#endif
2041
2042 BL do_notify_resume,%r2
2043 ldi 1, %r25 /* long in_syscall = 1 */
2044
2045 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2046 ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
2047 reg_restore %r20
2048
2049 b,n syscall_check_sig
1da177e4
LT
2050
2051syscall_restore:
1da177e4
LT
2052 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2053
ecd3d4bc
KM
2054 /* Are we being ptraced? */
2055 ldw TASK_FLAGS(%r1),%r19
2056 ldi (_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2
2057 and,COND(=) %r19,%r2,%r0
2058 b,n syscall_restore_rfi
1da177e4
LT
2059
2060 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
2061 rest_fp %r19
2062
2063 LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */
2064 mtsar %r19
2065
2066 LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */
2067 LDREG TASK_PT_GR19(%r1),%r19
2068 LDREG TASK_PT_GR20(%r1),%r20
2069 LDREG TASK_PT_GR21(%r1),%r21
2070 LDREG TASK_PT_GR22(%r1),%r22
2071 LDREG TASK_PT_GR23(%r1),%r23
2072 LDREG TASK_PT_GR24(%r1),%r24
2073 LDREG TASK_PT_GR25(%r1),%r25
2074 LDREG TASK_PT_GR26(%r1),%r26
2075 LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */
2076 LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */
2077 LDREG TASK_PT_GR29(%r1),%r29
2078 LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
2079
2080 /* NOTE: We use rsm/ssm pair to make this operation atomic */
8f6c0c2b 2081 LDREG TASK_PT_GR30(%r1),%r1 /* Get user sp */
1da177e4 2082 rsm PSW_SM_I, %r0
8f6c0c2b
JDA
2083 copy %r1,%r30 /* Restore user sp */
2084 mfsp %sr3,%r1 /* Get user space id */
1da177e4
LT
2085 mtsp %r1,%sr7 /* Restore sr7 */
2086 ssm PSW_SM_I, %r0
2087
2088 /* Set sr2 to zero for userspace syscalls to work. */
2089 mtsp %r0,%sr2
2090 mtsp %r1,%sr4 /* Restore sr4 */
2091 mtsp %r1,%sr5 /* Restore sr5 */
2092 mtsp %r1,%sr6 /* Restore sr6 */
2093
2094 depi 3,31,2,%r31 /* ensure return to user mode. */
2095
413059f2 2096#ifdef CONFIG_64BIT
1da177e4
LT
2097 /* decide whether to reset the wide mode bit
2098 *
2099 * For a syscall, the W bit is stored in the lowest bit
2100 * of sp. Extract it and reset W if it is zero */
2101 extrd,u,*<> %r30,63,1,%r1
2102 rsm PSW_SM_W, %r0
2103 /* now reset the lowest bit of sp if it was set */
2104 xor %r30,%r1,%r30
2105#endif
2106 be,n 0(%sr3,%r31) /* return to user space */
2107
2108 /* We have to return via an RFI, so that PSW T and R bits can be set
2109 * appropriately.
2110 * This sets up pt_regs so we can return via intr_restore, which is not
2111 * the most efficient way of doing things, but it works.
2112 */
2113syscall_restore_rfi:
2114 ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
2115 mtctl %r2,%cr0 /* for immediate trap */
2116 LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
2117 ldi 0x0b,%r20 /* Create new PSW */
2118 depi -1,13,1,%r20 /* C, Q, D, and I bits */
2119
ecd3d4bc
KM
2120 /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
2121 * set in thread_info.h and converted to PA bitmap
1da177e4
LT
2122 * numbers in asm-offsets.c */
2123
ecd3d4bc
KM
2124 /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
2125 extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
1da177e4
LT
2126 depi -1,27,1,%r20 /* R bit */
2127
ecd3d4bc
KM
2128 /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
2129 extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
1da177e4
LT
2130 depi -1,7,1,%r20 /* T bit */
2131
2132 STREG %r20,TASK_PT_PSW(%r1)
2133
2134 /* Always store space registers, since sr3 can be changed (e.g. fork) */
2135
2136 mfsp %sr3,%r25
2137 STREG %r25,TASK_PT_SR3(%r1)
2138 STREG %r25,TASK_PT_SR4(%r1)
2139 STREG %r25,TASK_PT_SR5(%r1)
2140 STREG %r25,TASK_PT_SR6(%r1)
2141 STREG %r25,TASK_PT_SR7(%r1)
2142 STREG %r25,TASK_PT_IASQ0(%r1)
2143 STREG %r25,TASK_PT_IASQ1(%r1)
2144
2145 /* XXX W bit??? */
2146 /* Now if old D bit is clear, it means we didn't save all registers
2147 * on syscall entry, so do that now. This only happens on TRACEME
2148 * calls, or if someone attached to us while we were on a syscall.
2149 * We could make this more efficient by not saving r3-r18, but
2150 * then we wouldn't be able to use the common intr_restore path.
2151 * It is only for traced processes anyway, so performance is not
2152 * an issue.
2153 */
2154 bb,< %r2,30,pt_regs_ok /* Branch if D set */
2155 ldo TASK_REGS(%r1),%r25
2156 reg_save %r25 /* Save r3 to r18 */
2157
2158 /* Save the current sr */
2159 mfsp %sr0,%r2
2160 STREG %r2,TASK_PT_SR0(%r1)
2161
2162 /* Save the scratch sr */
2163 mfsp %sr1,%r2
2164 STREG %r2,TASK_PT_SR1(%r1)
2165
2166 /* sr2 should be set to zero for userspace syscalls */
2167 STREG %r0,TASK_PT_SR2(%r1)
2168
2169pt_regs_ok:
2170 LDREG TASK_PT_GR31(%r1),%r2
2171 depi 3,31,2,%r2 /* ensure return to user mode. */
2172 STREG %r2,TASK_PT_IAOQ0(%r1)
2173 ldo 4(%r2),%r2
2174 STREG %r2,TASK_PT_IAOQ1(%r1)
2175 copy %r25,%r16
2176 b intr_restore
2177 nop
2178
1da177e4
LT
2179 .import schedule,code
2180syscall_do_resched:
2181 BL schedule,%r2
413059f2 2182#ifdef CONFIG_64BIT
1da177e4
LT
2183 ldo -16(%r30),%r29 /* Reference param save area */
2184#else
2185 nop
2186#endif
72738a96 2187 b syscall_check_resched /* if resched, we start over again */
1da177e4 2188 nop
c5e76552 2189ENDPROC(syscall_exit)
1da177e4 2190
c5e76552 2191
d75f054a
HD
2192#ifdef CONFIG_FUNCTION_TRACER
2193 .import ftrace_function_trampoline,code
2194ENTRY(_mcount)
2195 copy %r3, %arg2
2196 b ftrace_function_trampoline
2197 nop
2198ENDPROC(_mcount)
2199
2200ENTRY(return_to_handler)
2201 load32 return_trampoline, %rp
2202 copy %ret0, %arg0
2203 copy %ret1, %arg1
2204 b ftrace_return_to_handler
2205 nop
2206return_trampoline:
2207 copy %ret0, %rp
2208 copy %r23, %ret0
2209 copy %r24, %ret1
2210
2211.globl ftrace_stub
2212ftrace_stub:
2213 bv %r0(%rp)
2214 nop
2215ENDPROC(return_to_handler)
2216#endif /* CONFIG_FUNCTION_TRACER */
2217
2218
bcc0e04c 2219get_register:
1da177e4
LT
2220 /*
2221 * get_register is used by the non access tlb miss handlers to
2222 * copy the value of the general register specified in r8 into
2223 * r1. This routine can't be used for shadowed registers, since
2224 * the rfir will restore the original value. So, for the shadowed
2225 * registers we put a -1 into r1 to indicate that the register
2226 * should not be used (the register being copied could also have
2227 * a -1 in it, but that is OK, it just means that we will have
2228 * to use the slow path instead).
2229 */
1da177e4
LT
2230 blr %r8,%r0
2231 nop
2232 bv %r0(%r25) /* r0 */
2233 copy %r0,%r1
2234 bv %r0(%r25) /* r1 - shadowed */
2235 ldi -1,%r1
2236 bv %r0(%r25) /* r2 */
2237 copy %r2,%r1
2238 bv %r0(%r25) /* r3 */
2239 copy %r3,%r1
2240 bv %r0(%r25) /* r4 */
2241 copy %r4,%r1
2242 bv %r0(%r25) /* r5 */
2243 copy %r5,%r1
2244 bv %r0(%r25) /* r6 */
2245 copy %r6,%r1
2246 bv %r0(%r25) /* r7 */
2247 copy %r7,%r1
2248 bv %r0(%r25) /* r8 - shadowed */
2249 ldi -1,%r1
2250 bv %r0(%r25) /* r9 - shadowed */
2251 ldi -1,%r1
2252 bv %r0(%r25) /* r10 */
2253 copy %r10,%r1
2254 bv %r0(%r25) /* r11 */
2255 copy %r11,%r1
2256 bv %r0(%r25) /* r12 */
2257 copy %r12,%r1
2258 bv %r0(%r25) /* r13 */
2259 copy %r13,%r1
2260 bv %r0(%r25) /* r14 */
2261 copy %r14,%r1
2262 bv %r0(%r25) /* r15 */
2263 copy %r15,%r1
2264 bv %r0(%r25) /* r16 - shadowed */
2265 ldi -1,%r1
2266 bv %r0(%r25) /* r17 - shadowed */
2267 ldi -1,%r1
2268 bv %r0(%r25) /* r18 */
2269 copy %r18,%r1
2270 bv %r0(%r25) /* r19 */
2271 copy %r19,%r1
2272 bv %r0(%r25) /* r20 */
2273 copy %r20,%r1
2274 bv %r0(%r25) /* r21 */
2275 copy %r21,%r1
2276 bv %r0(%r25) /* r22 */
2277 copy %r22,%r1
2278 bv %r0(%r25) /* r23 */
2279 copy %r23,%r1
2280 bv %r0(%r25) /* r24 - shadowed */
2281 ldi -1,%r1
2282 bv %r0(%r25) /* r25 - shadowed */
2283 ldi -1,%r1
2284 bv %r0(%r25) /* r26 */
2285 copy %r26,%r1
2286 bv %r0(%r25) /* r27 */
2287 copy %r27,%r1
2288 bv %r0(%r25) /* r28 */
2289 copy %r28,%r1
2290 bv %r0(%r25) /* r29 */
2291 copy %r29,%r1
2292 bv %r0(%r25) /* r30 */
2293 copy %r30,%r1
2294 bv %r0(%r25) /* r31 */
2295 copy %r31,%r1
2296
c5e76552 2297
bcc0e04c 2298set_register:
1da177e4
LT
2299 /*
2300 * set_register is used by the non access tlb miss handlers to
2301 * copy the value of r1 into the general register specified in
2302 * r8.
2303 */
1da177e4
LT
2304 blr %r8,%r0
2305 nop
 2306	bv      %r0(%r25)		    /* r0 (silly, but it is a placeholder) */
2307 copy %r1,%r0
2308 bv %r0(%r25) /* r1 */
2309 copy %r1,%r1
2310 bv %r0(%r25) /* r2 */
2311 copy %r1,%r2
2312 bv %r0(%r25) /* r3 */
2313 copy %r1,%r3
2314 bv %r0(%r25) /* r4 */
2315 copy %r1,%r4
2316 bv %r0(%r25) /* r5 */
2317 copy %r1,%r5
2318 bv %r0(%r25) /* r6 */
2319 copy %r1,%r6
2320 bv %r0(%r25) /* r7 */
2321 copy %r1,%r7
2322 bv %r0(%r25) /* r8 */
2323 copy %r1,%r8
2324 bv %r0(%r25) /* r9 */
2325 copy %r1,%r9
2326 bv %r0(%r25) /* r10 */
2327 copy %r1,%r10
2328 bv %r0(%r25) /* r11 */
2329 copy %r1,%r11
2330 bv %r0(%r25) /* r12 */
2331 copy %r1,%r12
2332 bv %r0(%r25) /* r13 */
2333 copy %r1,%r13
2334 bv %r0(%r25) /* r14 */
2335 copy %r1,%r14
2336 bv %r0(%r25) /* r15 */
2337 copy %r1,%r15
2338 bv %r0(%r25) /* r16 */
2339 copy %r1,%r16
2340 bv %r0(%r25) /* r17 */
2341 copy %r1,%r17
2342 bv %r0(%r25) /* r18 */
2343 copy %r1,%r18
2344 bv %r0(%r25) /* r19 */
2345 copy %r1,%r19
2346 bv %r0(%r25) /* r20 */
2347 copy %r1,%r20
2348 bv %r0(%r25) /* r21 */
2349 copy %r1,%r21
2350 bv %r0(%r25) /* r22 */
2351 copy %r1,%r22
2352 bv %r0(%r25) /* r23 */
2353 copy %r1,%r23
2354 bv %r0(%r25) /* r24 */
2355 copy %r1,%r24
2356 bv %r0(%r25) /* r25 */
2357 copy %r1,%r25
2358 bv %r0(%r25) /* r26 */
2359 copy %r1,%r26
2360 bv %r0(%r25) /* r27 */
2361 copy %r1,%r27
2362 bv %r0(%r25) /* r28 */
2363 copy %r1,%r28
2364 bv %r0(%r25) /* r29 */
2365 copy %r1,%r29
2366 bv %r0(%r25) /* r30 */
2367 copy %r1,%r30
2368 bv %r0(%r25) /* r31 */
2369 copy %r1,%r31
c5e76552 2370