/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 * Copyright (C) 1999,2000 Philipp Rumpf
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 * - handle in assembly and use shadowed registers only
 * - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/ldcw.h>
#include <asm/traps.h>
#include <asm/thread_info.h>
#include <asm/alternative.h>

#include <linux/linkage.h>

#ifdef CONFIG_64BIT
	.level 2.0w
#else
	.level 2.0
#endif

	.import		pa_tlb_lock,data
	.macro  load_pa_tlb_lock reg
#if __PA_LDCW_ALIGNMENT > 4
	load32	PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
	depi	0,31,__PA_LDCW_ALIGN_ORDER, \reg
#else
	load32	PA(pa_tlb_lock), \reg
#endif
	.endm

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro  space_to_prot spc prot
	depd,z  \spc,62,31,\prot
	.endm
#else
	.macro  space_to_prot spc prot
	extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif

	/* Switch to virtual mapping, trashing only %r1 */
	.macro  virt_map
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation */
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mtsp	%r0, %sr6
	tovirt_r1 %r29
	load32	KERNEL_PSW, %r1

	rsm     PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
	mtctl	%r1, %ipsw
	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18	/* Set IIAOQ head */
	rfir
	nop
4:
	.endm

	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 * If sr7 == 0
	 *      Already using a kernel stack, so call the
	 *      get_stack_use_r30 macro to push a pt_regs structure
	 *      on the stack, and store registers there.
	 * else
	 *      Need to set up a kernel stack, so call the
	 *      get_stack_use_cr30 macro to set up a pointer
	 *      to the pt_regs structure contained within the
	 *      task pointer pointed to by cr30.  Set the stack
	 *      pointer to point to the end of the task structure.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29.  %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr.  We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map.  We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption.  %r29 is used to hold a pointer
	 * to the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 *
	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
	 */
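
	/*
	 * Illustrative only: a C sketch of the stack selection described
	 * above (mfsp7/mfctl30/task_regs are hypothetical helpers, not
	 * kernel API):
	 *
	 *	struct pt_regs *get_stack(void)
	 *	{
	 *		if (mfsp7() == 0) {
	 *			// already on a kernel stack: push a frame
	 *			struct pt_regs *regs = (struct pt_regs *)sp;
	 *			sp += PT_SZ_ALGN;
	 *			return regs;
	 *		}
	 *		// from user space: use the pt_regs in the task
	 *		// whose thread_info is pointed to by %cr30
	 *		sp = mfctl30() + THREAD_SZ_ALGN;
	 *		return task_regs(mfctl30());
	 *	}
	 */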

	.macro  get_stack_use_cr30

	/* we save the registers in the task struct */

	copy	%r30, %r17
	mfctl   %cr30, %r1
	ldo	THREAD_SZ_ALGN(%r1), %r30
	mtsp	%r0,%sr7
	mtsp	%r16,%sr3
	tophys  %r1,%r9
	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
	tophys  %r1,%r9
	ldo     TASK_REGS(%r9),%r9
	STREG   %r17,PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy    %r9,%r29
	.endm

	.macro  get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys  %r30,%r9
	copy	%r30,%r1
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG   %r1,PT_GR30(%r9)
	STREG   %r29,PT_GR29(%r9)
	STREG   %r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy    %r9,%r29
	.endm

	.macro  rest_stack
	LDREG   PT_GR1(%r29), %r1
	LDREG   PT_GR30(%r29),%r30
	LDREG   PT_GR29(%r29),%r29
	.endm

	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi     \code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp    %sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	0		/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align		32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b       itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	naitlb_miss_11
	mfctl 	%ior,va

	.align		32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	naitlb_miss_20w
#else
	b	naitlb_miss_20
#endif
	mfctl 	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align		32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b	dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b       nadtlb_miss_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       nadtlb_miss_20w
#else
	b       nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align		32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b       dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align		32
	.endm

	/* In LP64, the space contains part of the upper 32 bits of the
	 * fault.  We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register */
	.macro	space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u	\spc,63,SPACEID_SHIFT,\tmp
	depd	%r0,63,SPACEID_SHIFT,\spc
	depd	\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm
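
	/*
	 * Illustrative only: space_adjust as C (64-bit values, per the
	 * comment above; the low SPACEID_SHIFT bits of the space hold
	 * the high part of the fault address):
	 *
	 *	void space_adjust(u64 *spc, u64 *va)
	 *	{
	 *		u64 hi = *spc & ((1UL << SPACEID_SHIFT) - 1);
	 *		*spc &= ~((1UL << SPACEID_SHIFT) - 1);	// clear in spc
	 *		*va  |= hi << 32;			// deposit in va
	 *	}
	 */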

	.import swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro	get_pgd		spc,reg
	ldil		L%PA(swapper_pg_dir),\reg
	ldo		R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=)	%r0,\spc,%r0
	mfctl		%cr25,\reg
	.endm

	/*
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel

	*/
	.macro	space_check	spc,tmp,fault
	mfsp	%sr7,\tmp
	/* check against %r0 which is same value as LINUX_GATEWAY_SPACE */
	or,COND(<>)	%r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy	\spc,\tmp
	or,COND(=)	%r0,\tmp,%r0	/* nullify if executing as kernel */
	cmpb,COND(<>),n	\tmp,\spc,\fault
	.endm
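
	/*
	 * Illustrative only: the check above as C (hypothetical helper
	 * names; \tmp carries the current space from %sr7):
	 *
	 *	void space_check(u32 spc, u32 tmp, void (*fault)(void))
	 *	{
	 *		tmp = mfsp7();		// current space
	 *		if (spc == 0)		// fault in space 0: user may be
	 *			tmp = spc;	//  executing the gateway page
	 *		if (tmp == 0)		// running as kernel: any space ok
	 *			return;
	 *		if (tmp != spc)
	 *			fault();
	 *	}
	 */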

	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro	L2_ptep	pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	extru		\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
# if defined(CONFIG_64BIT)
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
  #else
  # if PAGE_SIZE > 4096
	extru		\va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
  # else
	extru		\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
  # endif
# endif
#endif
	dep             %r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	copy		%r0,\pte
	ldw,s		\index(\pmd),\pmd
	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
	dep		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
	SHLREG		\pmd,PxD_VALUE_SHIFT,\pmd
	extru		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	dep		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
	.endm
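
	/*
	 * Illustrative only: the 2-level walk above, roughly as C
	 * (field widths and flag masks elided; not the kernel's
	 * pgtable API):
	 *
	 *	pte_t *L2_ptep(unsigned long pmd_base, unsigned long va)
	 *	{
	 *		unsigned long idx = (va >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	 *		unsigned long pmd = *(u32 *)(pmd_base + idx * 4);
	 *		if (!(pmd & PxD_FLAG_PRESENT))
	 *			fault();
	 *		pmd = (pmd & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT;
	 *		idx = (va >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	 *		return (pte_t *)pmd + idx;
	 *	}
	 */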

	/* Look up PTE in a 3-Level scheme.
	 *
	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
	 * first pmd adjacent to the pgd.  This means that we can
	 * subtract a constant offset to get to it.  The pmd and pgd
	 * sizes are arranged so that a single pmd covers 4GB (giving
	 * a full LP64 process access to 8TB) so our lookups are
	 * effectively L2 for the first 4GB of the kernel (i.e. for
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory) */
	.macro	L3_ptep pgd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
	extrd,u		\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldw,s		\index(\pgd),\pgd
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	bb,>=,n		\pgd,_PxD_PRESENT_BIT,\fault
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	shld		\pgd,PxD_VALUE_SHIFT,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	copy		\index,\pgd
	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldo		ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
	L2_ptep		\pgd,\pte,\index,\va,\fault
	.endm
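
	/*
	 * Illustrative only: the hybrid dispatch above in C. The pgd
	 * level is consulted only when va has bits above PGDIR_SHIFT;
	 * otherwise the pre-allocated first pmd, a constant offset from
	 * the pgd, is used directly:
	 *
	 *	pte_t *L3_ptep(unsigned long pgd_base, unsigned long va)
	 *	{
	 *		unsigned long pmd;
	 *		if (va >> PGDIR_SHIFT) {	// above the first 4GB
	 *			unsigned long idx = va >> PGDIR_SHIFT;
	 *			pmd = *(u32 *)(pgd_base + idx * 4);
	 *			if (!(pmd & PxD_FLAG_PRESENT))
	 *				fault();
	 *			pmd <<= PxD_VALUE_SHIFT;
	 *		} else {			// first 4GB: adjacent pmd
	 *			pmd = pgd_base + ASM_PGD_PMD_OFFSET;
	 *		}
	 *		return L2_ptep(pmd, va);
	 *	}
	 */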

	/* Acquire pa_tlb_lock lock and check page is present. */
	.macro	tlb_lock	spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_SMP
98:	cmpib,COND(=),n	0,\spc,2f
	load_pa_tlb_lock \tmp
1:	LDCW		0(\tmp),\tmp1
	cmpib,COND(=)	0,\tmp1,1b
	nop
	LDREG		0(\ptp),\pte
	bb,<,n		\pte,_PAGE_PRESENT_BIT,3f
	b		\fault
	stw,ma		\spc,0(\tmp)
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
2:	LDREG		0(\ptp),\pte
	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
3:
	.endm

	/* Release pa_tlb_lock lock without reloading lock address. */
	.macro	tlb_unlock0	spc,tmp
#ifdef CONFIG_SMP
98:	or,COND(=)	%r0,\spc,%r0
	stw,ma		\spc,0(\tmp)
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
	.endm

	/* Release pa_tlb_lock lock. */
	.macro	tlb_unlock1	spc,tmp
#ifdef CONFIG_SMP
98:	load_pa_tlb_lock \tmp
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
	tlb_unlock0	\spc,\tmp
#endif
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro	update_accessed	ptp,pte,tmp,tmp1
	ldi	_PAGE_ACCESSED,\tmp1
	or	\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG	\tmp,0(\ptp)
	.endm
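
	/*
	 * Illustrative only: the conditional store above, as C. The
	 * STREG is nullified when the bit is already set, so a hot PTE
	 * never has its cache line dirtied:
	 *
	 *	void update_accessed(pte_t *ptp, pte_t pte)
	 *	{
	 *		if (!(pte & _PAGE_ACCESSED))
	 *			*ptp = pte | _PAGE_ACCESSED;
	 *	}
	 */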

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro	update_dirty	ptp,pte,tmp
	ldi	_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or	\tmp,\pte,\pte
	STREG	\pte,0(\ptp)
	.endm

	/* We have (depending on the page size):
	 * - 38 to 52-bit Physical Page Number
	 * - 12 to 26-bit page offset
	 */
	/* bitshift difference from a PFN (based on kernel's PAGE_SIZE)
	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	.macro	convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
	copy		\pte,\tmp
	extrd,u		\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte

	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
	extrd,u,*=	\tmp,_PAGE_HPAGE_BIT+32,1,%r0
	depdi		_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
#endif
	.endm
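
	/*
	 * Worked example (assuming a 16kB kernel PAGE_SIZE): PAGE_SHIFT
	 * is 14, so PAGE_ADD_SHIFT = 14 - 12 = 2, and a kernel PFN must
	 * be shifted left by 2 to become the 4k-granular PFN that the
	 * PA 2.0 TLB insert instructions expect. With 4kB pages the
	 * shift is 0 and the conversion is a no-op.
	 */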

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro	make_insert_tlb	spc,pte,prot,tmp
	space_to_prot   \spc \prot        /* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page */
	depd            \pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ) */
	extrd,u,*=      \pte,_PAGE_USER_BIT+32,1,%r0
	depdi		7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*= 	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depdi		1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	convert_for_tlb_insert20 \pte \tmp
	.endm
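
	/*
	 * Summary of the deposits above, read off the instructions
	 * themselves (PA bit numbering, bit 0 = MSB of the 64-bit word):
	 *
	 *	depd  \pte,8,7    -> bits 2..8:  T/D/B and access rights
	 *	depdi 7,11,3      -> bits 9..11: PL1|PL2 for user pages
	 *	depd  %r0,11,2    -> bits 10..11 cleared for gateway pages
	 *	depdi 1,12,1      -> bit 12:     U (uncacheable) for MMIO
	 *	space_to_prot     -> bits 32..62: protection id from space
	 */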

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro	make_insert_tlb_11	spc,pte,prot
	zdep	\spc,30,15,\prot
	dep	\pte,8,7,\prot
	extru,=	\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi	1,12,1,\prot
	extru,=	\pte,_PAGE_USER_BIT,1,%r0
	depi	7,11,3,\prot   /* Set for user space (1 rsvd for read) */
	extru,=	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi	0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi		0,31,ASM_PFN_PTE_SHIFT,\pte
	SHRREG		\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro	f_extend	pte,tmp
	extrd,s		\pte,42,4,\tmp
	addi,<>		1,\tmp,%r0
	extrd,s		\pte,63,25,\pte
	.endm
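
	/*
	 * Illustrative only: f_extend as C. The 4 bits tested sit at
	 * bits 21..24 counting from the LSB; if they are all ones the
	 * page lies in the 0xfXXXXXXX I/O range, so sign-extend the
	 * value from bit 24 upward:
	 *
	 *	u64 f_extend(u64 pte)
	 *	{
	 *		if (((pte >> 21) & 0xf) == 0xf)
	 *			pte = (u64)(((s64)pte << 39) >> 39);
	 *		return pte;
	 *	}
	 */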
617
618 /* The alias region is an 8MB aligned 16MB to do clear and
619 * copy user pages at addresses congruent with the user
620 * virtual address.
621 *
622 * To use the alias page, you set %r26 up with the to TLB
623 * entry (identifying the physical page) and %r23 up with
624 * the from tlb entry (or nothing if only a to entry---for
625 * clear_user_page_asm) */
2f649c1f 626 .macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype
1da177e4
LT
627 cmpib,COND(<>),n 0,\spc,\fault
628 ldil L%(TMPALIAS_MAP_START),\tmp
413059f2 629#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
1da177e4
LT
630 /* on LP64, ldi will sign extend into the upper 32 bits,
631 * which is behaviour we don't want */
632 depdi 0,31,32,\tmp
633#endif
634 copy \va,\tmp1
9b437bca 635 depi 0,31,23,\tmp1
1da177e4 636 cmpb,COND(<>),n \tmp,\tmp1,\fault
f311847c
JB
637 mfctl %cr19,\tmp /* iir */
638 /* get the opcode (first six bits) into \tmp */
639 extrw,u \tmp,5,6,\tmp
640 /*
641 * Only setting the T bit prevents data cache movein
642 * Setting access rights to zero prevents instruction cache movein
643 *
644 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
645 * to type field and _PAGE_READ goes to top bit of PL1
646 */
647 ldi (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
648 /*
649 * so if the opcode is one (i.e. this is a memory management
650 * instruction) nullify the next load so \prot is only T.
651 * Otherwise this is a normal data operation
652 */
653 cmpiclr,= 0x01,\tmp,%r0
654 ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
2f649c1f 655.ifc \patype,20
1da177e4 656 depd,z \prot,8,7,\prot
2f649c1f
JB
657.else
658.ifc \patype,11
5e185581 659 depw,z \prot,8,7,\prot
2f649c1f
JB
660.else
661 .error "undefined PA type to do_alias"
662.endif
663.endif
1da177e4
LT
664 /*
665 * OK, it is in the temp alias region, check whether "from" or "to".
666 * Check "subtle" note in pacache.S re: r23/r26.
667 */
413059f2 668#ifdef CONFIG_64BIT
1da177e4
LT
669 extrd,u,*= \va,41,1,%r0
670#else
671 extrw,u,= \va,9,1,%r0
672#endif
673 or,COND(tr) %r23,%r0,\pte
674 or %r26,%r0,\pte
675 .endm
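
	/*
	 * Illustrative only: the alias-region test above in C. The va
	 * is masked down to its 8MB-aligned base and compared against
	 * TMPALIAS_MAP_START; one address bit then selects the "from"
	 * (%r23) or "to" (%r26) TLB entry:
	 *
	 *	if (spc != 0)
	 *		fault();			// kernel-only region
	 *	if ((va & ~0x7fffffUL) != TMPALIAS_MAP_START)
	 *		fault();			// not a tmpalias va
	 *	pte = (va & 0x400000) ? r23 : r26;	// "from" or "to" half
	 */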


	/*
	 * Fault_vectors are architecturally required to be aligned on a 2K
	 * boundary
	 */

	.section .text.hot
	.align 2048

ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
	naitlb_20	16
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)

#ifndef CONFIG_64BIT

	.align 2048

ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def              9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
	naitlb_11	16
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif
	/* Fault vector is separately protected and *must* be on its own page */
	.align		PAGE_SIZE

	.import		handle_interruption,code
	.import		do_cpu_irq_mask,code

	/*
	 * Child Returns here
	 *
	 * copy_thread moved args into task save area.
	 */

ENTRY(ret_from_kernel_thread)
	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2
	b	finish_child_return
	nop
END(ret_from_kernel_thread)


	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
ENTRY_CFI(_switch_to)
	STREG	 %r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	LDREG	TASK_THREAD_INFO(%r25), %r25
	bv	%r0(%r2)
	mtctl   %r25,%cr30

ENTRY(_switch_to_ret)
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
ENDPROC_CFI(_switch_to)

	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */

	.align	PAGE_SIZE

ENTRY_CFI(syscall_exit_rfi)
	mfctl   %cr30,%r16
	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also Filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG   PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd    %r20,31,32,%r1
#endif
	and     %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or      %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
	STREG   %r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page). Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG   %r0,PT_SR2(%r16)
	mfsp    %sr3,%r19
	STREG   %r19,PT_SR0(%r16)
	STREG   %r19,PT_SR1(%r16)
	STREG   %r19,PT_SR3(%r16)
	STREG   %r19,PT_SR4(%r16)
	STREG   %r19,PT_SR5(%r16)
	STREG   %r19,PT_SR6(%r16)
	STREG   %r19,PT_SR7(%r16)

ENTRY(intr_return)
	/* check for reschedule */
	mfctl   %cr30,%r1
	LDREG   TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl   %cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19
	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
	and,COND(<>)	%r19, %r20, %r0
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working. The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=),n	LINUX_GATEWAY_SPACE, %r20, intr_restore /* backward */
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=),n	LINUX_GATEWAY_SPACE, %r20, intr_restore /* backward */

	/* NOTE: We need to enable interrupts if we have to deliver
	 * signals. We used to do this earlier but it caused kernel
	 * stack overflows. */
	ssm	PSW_SM_I, %r0

	copy	%r0, %r25			/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	copy	%r16, %r26			/* struct pt_regs *regs */

	b,n	intr_check_sig

intr_restore:
	copy            %r16,%r29
	ldo             PT_FR31(%r29),%r1
	rest_fp         %r1
	rest_general    %r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm             PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1       %r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop

#ifndef CONFIG_PREEMPT
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPT */

	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop

	/* NOTE: We need to enable interrupts if we schedule.  We used
	 * to do this earlier but it caused kernel stack overflows. */
	ssm     PSW_SM_I, %r0

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2

	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0. otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPT
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	LDREG	TI_PRE_COUNT(%r1), %r19
	cmpib,COND(<>)	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	BL	preempt_schedule_irq, %r2
	nop

	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPT */

	/*
	 * External interrupts.
	 */

intr_extint:
	cmpib,COND(=),n 0,%r16,1f

	get_stack_use_cr30
	b,n 2f

1:
	get_stack_use_r30
2:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC_CFI(syscall_exit_rfi)


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY_CFI(intr_save)		/* for os_hpmc */
	mfsp    %sr7,%r16
	cmpib,COND(=),n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy    %r8,%r26

1:
	get_stack_use_r30
	copy    %r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */
	cmpib,COND(=),n        PARISC_ITLB_TRAP,%r26,skip_save_ior


	mfctl           %isr, %r16
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl           %ior, %r17


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>     %r8,PSW_W_BIT,1,%r0
	depdi           0,1,2,%r17

	/* adjust isr/ior: get high bits from isr and deposit in ior */
	space_adjust	%r16,%r17,%r1
#endif
	STREG           %r16, PT_ISR(%r29)
	STREG           %r17, PT_IOR(%r29)

#if 0 && defined(CONFIG_64BIT)
	/* Revisit when we have 64-bit code above 4Gb */
	b,n		intr_save2

skip_save_ior:
	/* We have an itlb miss, and when executing code above 4 Gb on ILP64, we
	 * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
	 * above.
	 */
	extrd,u,*	%r8,PSW_W_BIT,1,%r1
	cmpib,COND(=),n	1,%r1,intr_save2
	LDREG		PT_IASQ0(%r29), %r16
	LDREG		PT_IAOQ0(%r29), %r17
	/* adjust iasq/iaoq */
	space_adjust	%r16,%r17,%r1
	STREG           %r16, PT_IASQ0(%r29)
	STREG           %r17, PT_IAOQ0(%r29)
#else
skip_save_ior:
#endif

intr_save2:
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r25
	save_fp	%r25

	loadgp

	copy	%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
	copy	%r25, %r16	/* save pt_regs */

	b	handle_interruption
	ldo	R%intr_check_sig(%r2), %r2
ENDPROC_CFI(intr_save)


	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */

	/*
	 * register number allocations.  Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11

	idtlba          pte,(va)
	idtlbp          prot,(va)

	rfir
	nop

dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt          pte,prot

	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt          pte,prot

	rfir
	nop

#endif

nadtlb_emulate:

	/*
	 * Non-access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */

	mfctl           %cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi             0x280,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_probe_check
	bb,>=,n         %r9,26,nadtlb_nullify  /* m bit not set, just nullify */
	BL		get_register,%r25
	extrw,u         %r9,15,5,%r8           /* Get index register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	copy            %r1,%r24
	BL		get_register,%r25
	extrw,u         %r9,10,5,%r8           /* Get base register # */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	add,l           %r1,%r24,%r1           /* doesn't affect c/b bits */

nadtlb_nullify:
	mfctl           %ipsw,%r8
	ldil            L%PSW_N,%r9
	or              %r8,%r9,%r8            /* Set PSW_N */
	mtctl           %r8,%ipsw

	rfir
	nop

	/*
		When there is no translation for the probe address then we
		must nullify the insn and return zero in the target register.
		This will indicate to the calling code that it does not have
		write/read privileges to this address.

		This should technically work for prober and probew in PA 1.1,
		and also probe,r and probe,w in PA 2.0

		WARNING: USE ONLY NON-SHADOWED REGISTERS WITH PROBE INSN!
		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

	*/
nadtlb_probe_check:
	ldi             0x80,%r16
	and             %r9,%r16,%r17
	cmpb,<>,n       %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
	BL              get_register,%r25      /* Find the target register */
	extrw,u         %r9,31,5,%r8           /* Get target register */
	cmpib,COND(=),n        -1,%r1,nadtlb_fault    /* have to use slow path */
	BL		set_register,%r25
	copy            %r0,%r1                /* Write zero to target register */
	b nadtlb_nullify                       /* Nullify return insn */
	nop


#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20w:

	/*
	 * I miss is a little different, since we allow users to fault
	 * on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,naitlb_fault

	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop

#else

itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11

	iitlba          pte,(%sr0, va)
	iitlbp          prot,(%sr0, va)

	rfir
	nop


itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt          pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

naitlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt          pte,prot

	rfir
	nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt          pte,prot

	tlb_unlock0	spc,t0
	rfir
	nop
#else

dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp            %sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp            t1, %sr1     /* Restore sr1 */

	tlb_unlock0	spc,t0
	rfir
	nop

dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock0	spc,t0
	rfir
	nop
#endif

	.import handle_interruption,code

kernel_bad_space:
	b               intr_save
	ldi             31,%r8  /* Use an unused code */

dbit_fault:
	b               intr_save
	ldi             20,%r8

itlb_fault:
	b               intr_save
	ldi             PARISC_ITLB_TRAP,%r8

nadtlb_fault:
	b               intr_save
	ldi             17,%r8

naitlb_fault:
	b               intr_save
	ldi             16,%r8

dtlb_fault:
	b               intr_save
	ldi             15,%r8

	/* Register saving semantics for system calls:

	   %r1		   clobbered by system call macro in userspace
	   %r2		   saved in PT_REGS by gateway page
	   %r3  - %r18	   preserved by C code (saved by signal code)
	   %r19 - %r20	   saved in PT_REGS by gateway page
	   %r21 - %r22	   non-standard syscall args
			   stored in kernel stack by gateway page
	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
	   %r27 - %r30	   saved in PT_REGS by gateway page
	   %r31		   syscall return pointer
	 */
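
	/*
	 * For illustration only (not part of this file's code paths):
	 * a minimal userspace sketch of the convention above, calling
	 * write(2) through the gateway page. The ble into the gateway
	 * and the registers used follow the documented parisc-linux
	 * syscall ABI; the literal values are just an example:
	 *
	 *	ldi	1, %r26			; arg0: fd = stdout
	 *	ldil	L%msg, %r25
	 *	ldo	R%msg(%r25), %r25	; arg1: buffer
	 *	ldi	5, %r24			; arg2: count
	 *	ble	0x100(%sr2, %r0)	; enter gateway page
	 *	ldi	__NR_write, %r20	; syscall # (delay slot)
	 *	; on return, %r28 holds the result, %r31 the return point
	 */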

	/* Floating point registers (FIXME: what do we do with these?)

	   %fr0  - %fr3	   status/exception, not preserved
	   %fr4  - %fr7	   arguments
	   %fr8	 - %fr11   not preserved by C code
	   %fr12 - %fr21   preserved by C code
	   %fr22 - %fr31   not preserved by C code
	 */

	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG   %r10,PT_GR10(\regs)
	STREG   %r11,PT_GR11(\regs)
	STREG   %r12,PT_GR12(\regs)
	STREG   %r13,PT_GR13(\regs)
	STREG   %r14,PT_GR14(\regs)
	STREG   %r15,PT_GR15(\regs)
	STREG   %r16,PT_GR16(\regs)
	STREG   %r17,PT_GR17(\regs)
	STREG   %r18,PT_GR18(\regs)
	.endm

	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG   PT_GR10(\regs),%r10
	LDREG   PT_GR11(\regs),%r11
	LDREG   PT_GR12(\regs),%r12
	LDREG   PT_GR13(\regs),%r13
	LDREG   PT_GR14(\regs),%r14
	LDREG   PT_GR15(\regs),%r15
	LDREG   PT_GR16(\regs),%r16
	LDREG   PT_GR17(\regs),%r17
	LDREG   PT_GR18(\regs),%r18
	.endm

	.macro	fork_like name
ENTRY_CFI(sys_\name\()_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r28
	ldil	L%sys_\name, %r31
	be	R%sys_\name(%sr4,%r31)
	STREG	%r28, PT_CR27(%r1)
ENDPROC_CFI(sys_\name\()_wrapper)
	.endm

fork_like clone
fork_like fork
fork_like vfork

	/* Set the return value for the child */
ENTRY(child_return)
	BL	schedule_tail, %r2
	nop
finish_child_return:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1	 /* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1
	b	syscall_exit
	copy	%r0,%r28
END(child_return)
1da177e4 1766
f39cce65 1767ENTRY_CFI(sys_rt_sigreturn_wrapper)
1da177e4
LT
1768 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
1769 ldo TASK_REGS(%r26),%r26 /* get pt regs */
1770 /* Don't save regs, we are going to restore them from sigcontext. */
1771 STREG %r2, -RP_OFFSET(%r30)
413059f2 1772#ifdef CONFIG_64BIT
1da177e4
LT
1773 ldo FRAME_SIZE(%r30), %r30
1774 BL sys_rt_sigreturn,%r2
1775 ldo -16(%r30),%r29 /* Reference param save area */
1776#else
1777 BL sys_rt_sigreturn,%r2
1778 ldo FRAME_SIZE(%r30), %r30
1779#endif
1780
1781 ldo -FRAME_SIZE(%r30), %r30
1782 LDREG -RP_OFFSET(%r30), %r2
1783
1784 /* FIXME: I think we need to restore a few more things here. */
1785 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1786 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1787 reg_restore %r1
1788
1789 /* If the signal was received while the process was blocked on a
1790 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1791 * take us to syscall_exit_rfi and on to intr_return.
1792 */
1793 bv %r0(%r2)
1794 LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
f39cce65 1795ENDPROC_CFI(sys_rt_sigreturn_wrapper)
1da177e4 1796
8801ccb9 1797ENTRY(syscall_exit)
1da177e4
LT
1798 /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
1799 * via syscall_exit_rfi if the signal was received while the process
1800 * was running.
1801 */
1802
1803 /* save return value now */
1804
1805 mfctl %cr30, %r1
1806 LDREG TI_TASK(%r1),%r1
1807 STREG %r28,TASK_PT_GR28(%r1)
1808
1da177e4
LT
1809 /* Seems to me that dp could be wrong here, if the syscall involved
1810 * calling a module, and nothing got round to restoring dp on return.
1811 */
1812 loadgp
1813
1da177e4
LT
1814syscall_check_resched:
1815
1816 /* check for reschedule */
1817
1818 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
1819 bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
1820
4650f0a5 1821 .import do_signal,code
1da177e4 1822syscall_check_sig:
4650f0a5 1823 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
6fd84c08 1824 ldi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
4650f0a5
KM
1825 and,COND(<>) %r19, %r26, %r0
1826 b,n syscall_restore /* skip past if we've nothing to do */
1827
1828syscall_do_signal:
1829 /* Save callee-save registers (for sigcontext).
1830 * FIXME: After this point the process structure should be
1831 * consistent with all the relevant state of the process
1832 * before the syscall. We need to verify this.
1833 */
1834 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1835 ldo TASK_REGS(%r1), %r26 /* struct pt_regs *regs */
1836 reg_save %r26
1837
1838#ifdef CONFIG_64BIT
1839 ldo -16(%r30),%r29 /* Reference param save area */
1840#endif
1841
1842 BL do_notify_resume,%r2
1843 ldi 1, %r25 /* long in_syscall = 1 */
1844
1845 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1846 ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
1847 reg_restore %r20
1848
1849 b,n syscall_check_sig
1da177e4
LT
1850
1851syscall_restore:
1da177e4
LT
1852 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1853
ecd3d4bc
KM
1854 /* Are we being ptraced? */
1855 ldw TASK_FLAGS(%r1),%r19
34360f08 1856 ldi _TIF_SYSCALL_TRACE_MASK,%r2
ecd3d4bc
KM
1857 and,COND(=) %r19,%r2,%r0
1858 b,n syscall_restore_rfi
1da177e4
LT
1859
1860 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
1861 rest_fp %r19
1862
1863 LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */
1864 mtsar %r19
1865
1866 LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */
1867 LDREG TASK_PT_GR19(%r1),%r19
1868 LDREG TASK_PT_GR20(%r1),%r20
1869 LDREG TASK_PT_GR21(%r1),%r21
1870 LDREG TASK_PT_GR22(%r1),%r22
1871 LDREG TASK_PT_GR23(%r1),%r23
1872 LDREG TASK_PT_GR24(%r1),%r24
1873 LDREG TASK_PT_GR25(%r1),%r25
1874 LDREG TASK_PT_GR26(%r1),%r26
1875 LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */
1876 LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */
1877 LDREG TASK_PT_GR29(%r1),%r29
1878 LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
1879
1880 /* NOTE: We use rsm/ssm pair to make this operation atomic */
8f6c0c2b 1881 LDREG TASK_PT_GR30(%r1),%r1 /* Get user sp */
1da177e4 1882 rsm PSW_SM_I, %r0
8f6c0c2b
JDA
1883 copy %r1,%r30 /* Restore user sp */
1884 mfsp %sr3,%r1 /* Get user space id */
1da177e4
LT
1885 mtsp %r1,%sr7 /* Restore sr7 */
1886 ssm PSW_SM_I, %r0
1887
1888 /* Set sr2 to zero for userspace syscalls to work. */
1889 mtsp %r0,%sr2
1890 mtsp %r1,%sr4 /* Restore sr4 */
1891 mtsp %r1,%sr5 /* Restore sr5 */
1892 mtsp %r1,%sr6 /* Restore sr6 */
1893
1894 depi 3,31,2,%r31 /* ensure return to user mode. */
1895
413059f2 1896#ifdef CONFIG_64BIT
1da177e4
LT
1897 /* decide whether to reset the wide mode bit
1898 *
1899 * For a syscall, the W bit is stored in the lowest bit
1900 * of sp. Extract it and reset W if it is zero */
1901 extrd,u,*<> %r30,63,1,%r1
1902 rsm PSW_SM_W, %r0
1903 /* now reset the lowest bit of sp if it was set */
1904 xor %r30,%r1,%r30
1905#endif
1906 be,n 0(%sr3,%r31) /* return to user space */
1907
1908 /* We have to return via an RFI, so that PSW T and R bits can be set
1909 * appropriately.
1910 * This sets up pt_regs so we can return via intr_restore, which is not
1911 * the most efficient way of doing things, but it works.
1912 */
1913syscall_restore_rfi:
1914 ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
1915 mtctl %r2,%cr0 /* for immediate trap */
1916 LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
1917 ldi 0x0b,%r20 /* Create new PSW */
1918 depi -1,13,1,%r20 /* C, Q, D, and I bits */
1919
ecd3d4bc
KM
1920 /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
1921 * set in thread_info.h and converted to PA bitmap
1da177e4
LT
1922 * numbers in asm-offsets.c */
1923
ecd3d4bc
KM
1924 /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
1925 extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
1da177e4
LT
1926 depi -1,27,1,%r20 /* R bit */
1927
ecd3d4bc
KM
1928 /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
1929 extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
1da177e4
LT
1930 depi -1,7,1,%r20 /* T bit */

	STREG	%r20,TASK_PT_PSW(%r1)

	/* Always store space registers, since sr3 can be changed (e.g. fork) */

	mfsp    %sr3,%r25
	STREG   %r25,TASK_PT_SR3(%r1)
	STREG   %r25,TASK_PT_SR4(%r1)
	STREG   %r25,TASK_PT_SR5(%r1)
	STREG   %r25,TASK_PT_SR6(%r1)
	STREG   %r25,TASK_PT_SR7(%r1)
	STREG   %r25,TASK_PT_IASQ0(%r1)
	STREG   %r25,TASK_PT_IASQ1(%r1)

	/* XXX W bit??? */
	/* Now if old D bit is clear, it means we didn't save all registers
	 * on syscall entry, so do that now.  This only happens on TRACEME
	 * calls, or if someone attached to us while we were on a syscall.
	 * We could make this more efficient by not saving r3-r18, but
	 * then we wouldn't be able to use the common intr_restore path.
	 * It is only for traced processes anyway, so performance is not
	 * an issue.
	 */
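	/* A sketch of the fall-through path below:
	 *	if (!(old_psw & D bit)) {
	 *		save r3-r18 into pt_regs;
	 *		save sr0 and sr1;
	 *		saved sr2 = 0;
	 *	}
	 */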
	bb,<	%r2,30,pt_regs_ok	/* Branch if D set */
	ldo	TASK_REGS(%r1),%r25
	reg_save %r25			/* Save r3 to r18 */

	/* Save the current sr */
	mfsp	%sr0,%r2
	STREG	%r2,TASK_PT_SR0(%r1)

	/* Save the scratch sr */
	mfsp	%sr1,%r2
	STREG	%r2,TASK_PT_SR1(%r1)

	/* sr2 should be set to zero for userspace syscalls */
	STREG	%r0,TASK_PT_SR2(%r1)

	LDREG	TASK_PT_GR31(%r1),%r2
	depi	3,31,2,%r2		/* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	ldo	4(%r2),%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

pt_regs_ok:
	LDREG	TASK_PT_IAOQ0(%r1),%r2
	depi	3,31,2,%r2		/* ensure return to user mode. */
	STREG	%r2,TASK_PT_IAOQ0(%r1)
	LDREG	TASK_PT_IAOQ1(%r1),%r2
	depi	3,31,2,%r2
	STREG	%r2,TASK_PT_IAOQ1(%r1)
	b	intr_restore
	copy	%r25,%r16

syscall_do_resched:
	load32	syscall_check_resched,%r2 /* if resched, we start over again */
	load32	schedule,%r19
	bv	%r0(%r19)		/* jumps to schedule() */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	nop
#endif
END(syscall_exit)


#ifdef CONFIG_FUNCTION_TRACER

	.import ftrace_function_trampoline,code
	.align L1_CACHE_BYTES
ENTRY_CFI(mcount, caller)
_mcount:
	.export _mcount,data
	/*
	 * The 64bit mcount() function pointer needs 4 dwords, of which the
	 * first two are free.  We optimize it here and put 2 instructions for
	 * calling mcount(), and 2 instructions for ftrace_stub().  That way we
	 * have all on one L1 cacheline.
	 */
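	/* The resulting 64-bit layout is, roughly:
	 *	_mcount+ 0: b/copy pair calling the trampoline	(2 insns)
	 *	_mcount+ 8: ftrace_stub: bve/nop		(2 insns)
	 *	_mcount+16: .dword mcount	(function address slot)
	 *	_mcount+24: .dword gp		(patched by head.S)
	 */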
	b	ftrace_function_trampoline
	copy	%r3, %arg2	/* caller original %sp */
ftrace_stub:
	.globl ftrace_stub
	.type  ftrace_stub, @function
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	nop
#ifdef CONFIG_64BIT
	.dword mcount
	.dword 0 /* code in head.S puts value of global gp here */
#endif
ENDPROC_CFI(mcount)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.align 8
ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
	.export parisc_return_to_handler,data
parisc_return_to_handler:
	copy	%r3,%r1
	STREG	%r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
	copy	%sp,%r3
	STREGM	%r1,FRAME_SIZE(%sp)
	STREG	%ret0,8(%r3)
	STREG	%ret1,16(%r3)

#ifdef CONFIG_64BIT
	loadgp
#endif

	/* call ftrace_return_to_handler(0) */
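	/* %ret0/%ret1 were saved above because they still hold the traced
	 * function's return values; ftrace_return_to_handler() hands back
	 * the original caller's address, which becomes our %rp below. */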
	.import ftrace_return_to_handler,code
	load32	ftrace_return_to_handler,%ret0
	load32	.Lftrace_ret,%r2
#ifdef CONFIG_64BIT
	ldo	-16(%sp),%ret1		/* Reference param save area */
	bve	(%ret0)
#else
	bv	%r0(%ret0)
#endif
	ldi	0,%r26
.Lftrace_ret:
	copy	%ret0,%rp

	/* restore original return values */
	LDREG	8(%r3),%ret0
	LDREG	16(%r3),%ret1

	/* return from function */
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	LDREGM	-FRAME_SIZE(%sp),%r3
ENDPROC_CFI(return_to_handler)

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
	unsigned long new_stack) */
ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ENTRY(_call_on_stack)
	copy	%sp, %r1

	/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across call.  For
	   CONFIG_64BIT, the argument pointer is left to point at the
	   argument region allocated for the call to call_on_stack. */

	/* Switch to new stack.  We allocate two frames.  */
	ldo	2*FRAME_SIZE(%arg2), %sp
# ifdef CONFIG_64BIT
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls always use function descriptor */
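	/* On 64-bit parisc a function pointer refers to an ELF function
	 * descriptor; the actual entry address sits at offset 16 in it
	 * (cf. Elf64_Fdesc), hence the LDREG from 16(%arg1) below. */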
	LDREG	16(%arg1), %arg1
	bve,l	(%arg1), %rp
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bve	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# else
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)

	/* Calls use function descriptor if PLABEL bit is set */
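	/* Roughly, in C ("func" standing in for %arg1):
	 *	if (func & 2)				// PLABEL bit
	 *		func = *(unsigned long *)(func & ~3UL);
	 *	call func;
	 */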
	bb,>=,n	%arg1, 30, 1f
	depwi	0,31,2, %arg1
	LDREG	0(%arg1), %arg1
1:
	be,l	0(%sr4,%arg1), %sr0, %r31
	copy	%r31, %rp
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bv	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
#endif /* CONFIG_IRQSTACKS */

ENTRY_CFI(get_register)
	/*
	 * get_register is used by the non access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 */
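	/* Dispatch idiom: blr branches to .+8 plus 8*%r8, and every table
	 * entry below is exactly two instructions (8 bytes), so entry N
	 * handles general register N.  In effect:
	 *	r1 = shadowed(n) ? -1 : gr[n];	return via %r25;
	 */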
	blr	%r8,%r0
	nop
	bv	%r0(%r25)    /* r0 */
	copy	%r0,%r1
	bv	%r0(%r25)    /* r1 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r2 */
	copy	%r2,%r1
	bv	%r0(%r25)    /* r3 */
	copy	%r3,%r1
	bv	%r0(%r25)    /* r4 */
	copy	%r4,%r1
	bv	%r0(%r25)    /* r5 */
	copy	%r5,%r1
	bv	%r0(%r25)    /* r6 */
	copy	%r6,%r1
	bv	%r0(%r25)    /* r7 */
	copy	%r7,%r1
	bv	%r0(%r25)    /* r8 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r9 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r10 */
	copy	%r10,%r1
	bv	%r0(%r25)    /* r11 */
	copy	%r11,%r1
	bv	%r0(%r25)    /* r12 */
	copy	%r12,%r1
	bv	%r0(%r25)    /* r13 */
	copy	%r13,%r1
	bv	%r0(%r25)    /* r14 */
	copy	%r14,%r1
	bv	%r0(%r25)    /* r15 */
	copy	%r15,%r1
	bv	%r0(%r25)    /* r16 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r17 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r18 */
	copy	%r18,%r1
	bv	%r0(%r25)    /* r19 */
	copy	%r19,%r1
	bv	%r0(%r25)    /* r20 */
	copy	%r20,%r1
	bv	%r0(%r25)    /* r21 */
	copy	%r21,%r1
	bv	%r0(%r25)    /* r22 */
	copy	%r22,%r1
	bv	%r0(%r25)    /* r23 */
	copy	%r23,%r1
	bv	%r0(%r25)    /* r24 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r25 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)    /* r26 */
	copy	%r26,%r1
	bv	%r0(%r25)    /* r27 */
	copy	%r27,%r1
	bv	%r0(%r25)    /* r28 */
	copy	%r28,%r1
	bv	%r0(%r25)    /* r29 */
	copy	%r29,%r1
	bv	%r0(%r25)    /* r30 */
	copy	%r30,%r1
	bv	%r0(%r25)    /* r31 */
	copy	%r31,%r1
ENDPROC_CFI(get_register)


ENTRY_CFI(set_register)
	/*
	 * set_register is used by the non access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */
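	/* Same two-instructions-per-entry blr dispatch as get_register
	 * above, but in the opposite direction: entry N copies %r1 into
	 * general register N and returns through %r25. */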
	blr	%r8,%r0
	nop
	bv	%r0(%r25)    /* r0 (silly, but it is a place holder) */
	copy	%r1,%r0
	bv	%r0(%r25)    /* r1 */
	copy	%r1,%r1
	bv	%r0(%r25)    /* r2 */
	copy	%r1,%r2
	bv	%r0(%r25)    /* r3 */
	copy	%r1,%r3
	bv	%r0(%r25)    /* r4 */
	copy	%r1,%r4
	bv	%r0(%r25)    /* r5 */
	copy	%r1,%r5
	bv	%r0(%r25)    /* r6 */
	copy	%r1,%r6
	bv	%r0(%r25)    /* r7 */
	copy	%r1,%r7
	bv	%r0(%r25)    /* r8 */
	copy	%r1,%r8
	bv	%r0(%r25)    /* r9 */
	copy	%r1,%r9
	bv	%r0(%r25)    /* r10 */
	copy	%r1,%r10
	bv	%r0(%r25)    /* r11 */
	copy	%r1,%r11
	bv	%r0(%r25)    /* r12 */
	copy	%r1,%r12
	bv	%r0(%r25)    /* r13 */
	copy	%r1,%r13
	bv	%r0(%r25)    /* r14 */
	copy	%r1,%r14
	bv	%r0(%r25)    /* r15 */
	copy	%r1,%r15
	bv	%r0(%r25)    /* r16 */
	copy	%r1,%r16
	bv	%r0(%r25)    /* r17 */
	copy	%r1,%r17
	bv	%r0(%r25)    /* r18 */
	copy	%r1,%r18
	bv	%r0(%r25)    /* r19 */
	copy	%r1,%r19
	bv	%r0(%r25)    /* r20 */
	copy	%r1,%r20
	bv	%r0(%r25)    /* r21 */
	copy	%r1,%r21
	bv	%r0(%r25)    /* r22 */
	copy	%r1,%r22
	bv	%r0(%r25)    /* r23 */
	copy	%r1,%r23
	bv	%r0(%r25)    /* r24 */
	copy	%r1,%r24
	bv	%r0(%r25)    /* r25 */
	copy	%r1,%r25
	bv	%r0(%r25)    /* r26 */
	copy	%r1,%r26
	bv	%r0(%r25)    /* r27 */
	copy	%r1,%r27
	bv	%r0(%r25)    /* r28 */
	copy	%r1,%r28
	bv	%r0(%r25)    /* r29 */
	copy	%r1,%r29
	bv	%r0(%r25)    /* r30 */
	copy	%r1,%r30
	bv	%r0(%r25)    /* r31 */
	copy	%r1,%r31
ENDPROC_CFI(set_register)
