/*
 * ia64/kernel/entry.S
 *
 * Kernel entry points.
 *
 * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999, 2002-2003
 *	Asit Mallick <Asit.K.Mallick@intel.com>
 *	Don Dugger <Don.Dugger@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Fenghua Yu <fenghua.yu@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 */
/*
 * ia64_switch_to now places the correct virtual mapping in TR2 for the
 * kernel stack.  This allows us to handle interrupts without changing
 * to physical mode.
 *
 * Jonathan Nicklin <nicklin@missioncriticallinux.com>
 * Patrick O'Rourke <orourke@missioncriticallinux.com>
 * 11/07/2000
 */
/*
 * Global (preserved) predicate usage on syscall entry/exit path:
 *
 *	pKStk:   See entry.h.
 *	pUStk:   See entry.h.
 *	pSys:    See entry.h.
 *	pNonSys: !pSys
 */

#include <linux/config.h>

#include <asm/asmmacro.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/pgtable.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

#include "minstate.h"

        /*
         * execve() is special because in case of success, we need to
         * set up a null register window frame.
         */
ENTRY(ia64_execve)
        /*
         * Allocate 8 input registers since ptrace() may clobber them
         */
        .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
        alloc loc1=ar.pfs,8,2,4,0
        mov loc0=rp
        .body
        mov out0=in0                    // filename
        ;;                              // stop bit between alloc and call
        mov out1=in1                    // argv
        mov out2=in2                    // envp
        add out3=16,sp                  // regs
        br.call.sptk.many rp=sys_execve
.ret0:
#ifdef CONFIG_IA32_SUPPORT
        /*
         * Check if we're returning to ia32 mode. If so, we need to restore ia32 registers
         * from pt_regs.
         */
        adds r16=PT(CR_IPSR)+16,sp
        ;;
        ld8 r16=[r16]
#endif
        cmp4.ge p6,p7=r8,r0
        mov ar.pfs=loc1                 // restore ar.pfs
        sxt4 r8=r8                      // return 64-bit result
        ;;
        stf.spill [sp]=f0
(p6)    cmp.ne pKStk,pUStk=r0,r0        // a successful execve() lands us in user-mode...
        mov rp=loc0
(p6)    mov ar.pfs=r0                   // clear ar.pfs on success
(p7)    br.ret.sptk.many rp

        /*
         * In theory, we'd have to zap this state only to prevent leaking of
         * security-sensitive state (e.g., if current->mm->dumpable is zero).  However,
         * this executes in less than 20 cycles even on Itanium, so it's not worth
         * optimizing for it.
         */
        mov ar.unat=0; mov ar.lc=0
        mov r4=0; mov f2=f0; mov b1=r0
        mov r5=0; mov f3=f0; mov b2=r0
        mov r6=0; mov f4=f0; mov b3=r0
        mov r7=0; mov f5=f0; mov b4=r0
        ldf.fill f12=[sp]; mov f13=f0; mov b5=r0
        ldf.fill f14=[sp]; ldf.fill f15=[sp]; mov f16=f0
        ldf.fill f17=[sp]; ldf.fill f18=[sp]; mov f19=f0
        ldf.fill f20=[sp]; ldf.fill f21=[sp]; mov f22=f0
        ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0
        ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0
        ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0
#ifdef CONFIG_IA32_SUPPORT
        tbit.nz p6,p0=r16, IA64_PSR_IS_BIT
        movl loc0=ia64_ret_from_ia32_execve
        ;;
(p6)    mov rp=loc0
#endif
        br.ret.sptk.many rp
END(ia64_execve)
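
/*
 * Note on the return path above: cmp4.ge tests only the low 32 bits of r8,
 * and sxt4 then sign-extends those bits so the caller sees a proper 64-bit
 * result.  A minimal C sketch of that widening (illustrative only, not a
 * kernel interface):
 *
 *	#include <stdint.h>
 *
 *	static int64_t widen_syscall_retval(uint64_t r8)
 *	{
 *		int32_t lo = (int32_t)r8;	// the bits cmp4.ge/sxt4 look at
 *		return (int64_t)lo;		// sign-extended 64-bit value
 *	}
 */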

/*
 * sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 parent_tidptr, u64 child_tidptr,
 *	      u64 tls)
 */
GLOBAL_ENTRY(sys_clone2)
        /*
         * Allocate 8 input registers since ptrace() may clobber them
         */
        .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
        alloc r16=ar.pfs,8,2,6,0
        DO_SAVE_SWITCH_STACK
        adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
        mov loc0=rp
        mov loc1=r16                    // save ar.pfs across do_fork
        .body
        mov out1=in1
        mov out3=in2
        tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
        mov out4=in3                    // parent_tidptr: valid only w/CLONE_PARENT_SETTID
        ;;
(p6)    st8 [r2]=in5                    // store TLS in r16 for copy_thread()
        mov out5=in4                    // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
        adds out2=IA64_SWITCH_STACK_SIZE+16,sp  // out2 = &regs
        mov out0=in0                    // out0 = clone_flags
        br.call.sptk.many rp=do_fork
.ret1:  .restore sp
        adds sp=IA64_SWITCH_STACK_SIZE,sp       // pop the switch stack
        mov ar.pfs=loc1
        mov rp=loc0
        br.ret.sptk.many rp
END(sys_clone2)

/*
 * sys_clone(u64 flags, u64 ustack_base, u64 parent_tidptr, u64 child_tidptr, u64 tls)
 *	Deprecated.  Use sys_clone2() instead.
 */
GLOBAL_ENTRY(sys_clone)
        /*
         * Allocate 8 input registers since ptrace() may clobber them
         */
        .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
        alloc r16=ar.pfs,8,2,6,0
        DO_SAVE_SWITCH_STACK
        adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
        mov loc0=rp
        mov loc1=r16                    // save ar.pfs across do_fork
        .body
        mov out1=in1
        mov out3=16                     // stacksize (compensates for 16-byte scratch area)
        tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
        mov out4=in2                    // parent_tidptr: valid only w/CLONE_PARENT_SETTID
        ;;
(p6)    st8 [r2]=in4                    // store TLS in r16 for copy_thread()
        mov out5=in3                    // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
        adds out2=IA64_SWITCH_STACK_SIZE+16,sp  // out2 = &regs
        mov out0=in0                    // out0 = clone_flags
        br.call.sptk.many rp=do_fork
.ret2:  .restore sp
        adds sp=IA64_SWITCH_STACK_SIZE,sp       // pop the switch stack
        mov ar.pfs=loc1
        mov rp=loc0
        br.ret.sptk.many rp
END(sys_clone)
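
/*
 * Both clone stubs above test one bit of the flags word (tbit.nz ...,
 * CLONE_SETTLS_BIT) and only then store the TLS argument into the saved
 * r16 slot that copy_thread() reads.  A hedged C sketch of that conditional
 * (assuming CLONE_SETTLS_BIT is the bit number of CLONE_SETTLS):
 *
 *	if (clone_flags & (1UL << CLONE_SETTLS_BIT))	// CLONE_SETTLS requested?
 *		regs->r16 = tls;	// picked up later by copy_thread()
 */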

/*
 * prev_task <- ia64_switch_to(struct task_struct *next)
 *	With Ingo's new scheduler, interrupts are disabled when this routine gets
 *	called.  The code starting at .map relies on this.  The rest of the code
 *	doesn't care about the interrupt masking status.
 */
GLOBAL_ENTRY(ia64_switch_to)
        .prologue
        alloc r16=ar.pfs,1,0,0,0
        DO_SAVE_SWITCH_STACK
        .body

        adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
        movl r25=init_task
        mov r27=IA64_KR(CURRENT_STACK)
        adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
        dep r20=0,in0,61,3              // physical address of "next"
        ;;
        st8 [r22]=sp                    // save kernel stack pointer of old task
        shr.u r26=r20,IA64_GRANULE_SHIFT
        cmp.eq p7,p6=r25,in0
        ;;
        /*
         * If we've already mapped this task's page, we can skip doing it again.
         */
(p6)    cmp.eq p7,p6=r26,r27
(p6)    br.cond.dpnt .map
        ;;
.done:
(p6)    ssm psr.ic                      // if we had to map, reenable the psr.ic bit FIRST!!!
        ;;
(p6)    srlz.d
        ld8 sp=[r21]                    // load kernel stack pointer of new task
        mov IA64_KR(CURRENT)=in0        // update "current" application register
        mov r8=r13                      // return pointer to previously running task
        mov r13=in0                     // set "current" pointer
        ;;
        DO_LOAD_SWITCH_STACK

#ifdef CONFIG_SMP
        sync.i                          // ensure "fc"s done by this CPU are visible on other CPUs
#endif
        br.ret.sptk.many rp             // boogie on out in new context

.map:
        rsm psr.ic                      // interrupts (psr.i) are already disabled here
        movl r25=PAGE_KERNEL
        ;;
        srlz.d
        or r23=r25,r20                  // construct PA | page properties
        mov r25=IA64_GRANULE_SHIFT<<2
        ;;
        mov cr.itir=r25
        mov cr.ifa=in0                  // VA of next task...
        ;;
        mov r25=IA64_TR_CURRENT_STACK
        mov IA64_KR(CURRENT_STACK)=r26  // remember last page we mapped...
        ;;
        itr.d dtr[r25]=r23              // wire in new mapping...
        br.cond.sptk .done
END(ia64_switch_to)
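
/*
 * The fast path in ia64_switch_to only takes .map when the granule holding
 * "next" is not the one already wired into IA64_TR_CURRENT_STACK.  A rough C
 * model of that decision (helper names here are illustrative, not kernel
 * APIs):
 *
 *	unsigned long pa = (unsigned long)next & ~(0x7UL << 61);	// dep r20=0,in0,61,3
 *	unsigned long granule = pa >> IA64_GRANULE_SHIFT;		// shr.u r26
 *
 *	if (next != &init_task && granule != current_stack_granule)	// the two cmp.eq
 *		map_kernel_stack(next, granule);			// the .map path
 */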

/*
 * Note that interrupts are enabled during save_switch_stack and load_switch_stack.  This
 * means that we may get an interrupt with "sp" pointing to the new kernel stack while
 * ar.bspstore is still pointing to the old kernel backing store area.  Since ar.rsc,
 * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts, this is not a
 * problem.  Also, we don't need to specify unwind information for preserved registers
 * that are not modified in save_switch_stack as the right unwind information is already
 * specified at the call-site of save_switch_stack.
 */

/*
 * save_switch_stack:
 *	- r16 holds ar.pfs
 *	- b7 holds address to return to
 *	- rp (b0) holds return address to save
 */
GLOBAL_ENTRY(save_switch_stack)
        .prologue
        .altrp b7
        flushrs                 // flush dirty regs to backing store (must be first in insn group)
        .save @priunat,r17
        mov r17=ar.unat         // preserve caller's
        .body
#ifdef CONFIG_ITANIUM
        adds r2=16+128,sp
        adds r3=16+64,sp
        adds r14=SW(R4)+16,sp
        ;;
        st8.spill [r14]=r4,16   // spill r4
        lfetch.fault.excl.nt1 [r3],128
        ;;
        lfetch.fault.excl.nt1 [r2],128
        lfetch.fault.excl.nt1 [r3],128
        ;;
        lfetch.fault.excl [r2]
        lfetch.fault.excl [r3]
        adds r15=SW(R5)+16,sp
#else
        add r2=16+3*128,sp
        add r3=16,sp
        add r14=SW(R4)+16,sp
        ;;
        st8.spill [r14]=r4,SW(R6)-SW(R4)        // spill r4 and prefetch offset 0x1c0
        lfetch.fault.excl.nt1 [r3],128          // prefetch offset 0x010
        ;;
        lfetch.fault.excl.nt1 [r3],128          // prefetch offset 0x090
        lfetch.fault.excl.nt1 [r2],128          // prefetch offset 0x190
        ;;
        lfetch.fault.excl.nt1 [r3]              // prefetch offset 0x110
        lfetch.fault.excl.nt1 [r2]              // prefetch offset 0x210
        adds r15=SW(R5)+16,sp
#endif
        ;;
        st8.spill [r15]=r5,SW(R7)-SW(R5)        // spill r5
        mov.m ar.rsc=0          // put RSE in mode: enforced lazy, little endian, pl 0
        add r2=SW(F2)+16,sp     // r2 = &sw->f2
        ;;
        st8.spill [r14]=r6,SW(B0)-SW(R6)        // spill r6
        mov.m r18=ar.fpsr       // preserve fpsr
        add r3=SW(F3)+16,sp     // r3 = &sw->f3
        ;;
        stf.spill [r2]=f2,32
        mov.m r19=ar.rnat
        mov r21=b0

        stf.spill [r3]=f3,32
        st8.spill [r15]=r7,SW(B2)-SW(R7)        // spill r7
        mov r22=b1
        ;;
        // since we're done with the spills, read and save ar.unat:
        mov.m r29=ar.unat
        mov.m r20=ar.bspstore
        mov r23=b2
        stf.spill [r2]=f4,32
        stf.spill [r3]=f5,32
        mov r24=b3
        ;;
        st8 [r14]=r21,SW(B1)-SW(B0)             // save b0
        st8 [r15]=r23,SW(B3)-SW(B2)             // save b2
        mov r25=b4
        mov r26=b5
        ;;
        st8 [r14]=r22,SW(B4)-SW(B1)             // save b1
        st8 [r15]=r24,SW(AR_PFS)-SW(B3)         // save b3
        mov r21=ar.lc                           // I-unit
        stf.spill [r2]=f12,32
        stf.spill [r3]=f13,32
        ;;
        st8 [r14]=r25,SW(B5)-SW(B4)             // save b4
        st8 [r15]=r16,SW(AR_LC)-SW(AR_PFS)      // save ar.pfs
        stf.spill [r2]=f14,32
        stf.spill [r3]=f15,32
        ;;
        st8 [r14]=r26                           // save b5
        st8 [r15]=r21                           // save ar.lc
        stf.spill [r2]=f16,32
        stf.spill [r3]=f17,32
        ;;
        stf.spill [r2]=f18,32
        stf.spill [r3]=f19,32
        ;;
        stf.spill [r2]=f20,32
        stf.spill [r3]=f21,32
        ;;
        stf.spill [r2]=f22,32
        stf.spill [r3]=f23,32
        ;;
        stf.spill [r2]=f24,32
        stf.spill [r3]=f25,32
        ;;
        stf.spill [r2]=f26,32
        stf.spill [r3]=f27,32
        ;;
        stf.spill [r2]=f28,32
        stf.spill [r3]=f29,32
        ;;
        stf.spill [r2]=f30,SW(AR_UNAT)-SW(F30)
        stf.spill [r3]=f31,SW(PR)-SW(F31)
        add r14=SW(CALLER_UNAT)+16,sp
        ;;
        st8 [r2]=r29,SW(AR_RNAT)-SW(AR_UNAT)    // save ar.unat
        st8 [r14]=r17,SW(AR_FPSR)-SW(CALLER_UNAT) // save caller_unat
        mov r21=pr
        ;;
        st8 [r2]=r19,SW(AR_BSPSTORE)-SW(AR_RNAT) // save ar.rnat
        st8 [r3]=r21                            // save predicate registers
        ;;
        st8 [r2]=r20                            // save ar.bspstore
        st8 [r14]=r18                           // save fpsr
        mov ar.rsc=3                            // put RSE back into eager mode, pl 0
        br.cond.sptk.many b7
END(save_switch_stack)

/*
 * load_switch_stack:
 *	- "invala" MUST be done at call site (normally in DO_LOAD_SWITCH_STACK)
 *	- b7 holds address to return to
 *	- must not touch r8-r11
 */
ENTRY(load_switch_stack)
        .prologue
        .altrp b7

        .body
        lfetch.fault.nt1 [sp]
        adds r2=SW(AR_BSPSTORE)+16,sp
        adds r3=SW(AR_UNAT)+16,sp
        mov ar.rsc=0                            // put RSE into enforced lazy mode
        adds r14=SW(CALLER_UNAT)+16,sp
        adds r15=SW(AR_FPSR)+16,sp
        ;;
        ld8 r27=[r2],(SW(B0)-SW(AR_BSPSTORE))   // bspstore
        ld8 r29=[r3],(SW(B1)-SW(AR_UNAT))       // unat
        ;;
        ld8 r21=[r2],16                         // restore b0
        ld8 r22=[r3],16                         // restore b1
        ;;
        ld8 r23=[r2],16                         // restore b2
        ld8 r24=[r3],16                         // restore b3
        ;;
        ld8 r25=[r2],16                         // restore b4
        ld8 r26=[r3],16                         // restore b5
        ;;
        ld8 r16=[r2],(SW(PR)-SW(AR_PFS))        // ar.pfs
        ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC))    // ar.lc
        ;;
        ld8 r28=[r2]                            // restore pr
        ld8 r30=[r3]                            // restore rnat
        ;;
        ld8 r18=[r14],16                        // restore caller's unat
        ld8 r19=[r15],24                        // restore fpsr
        ;;
        ldf.fill f2=[r14],32
        ldf.fill f3=[r15],32
        ;;
        ldf.fill f4=[r14],32
        ldf.fill f5=[r15],32
        ;;
        ldf.fill f12=[r14],32
        ldf.fill f13=[r15],32
        ;;
        ldf.fill f14=[r14],32
        ldf.fill f15=[r15],32
        ;;
        ldf.fill f16=[r14],32
        ldf.fill f17=[r15],32
        ;;
        ldf.fill f18=[r14],32
        ldf.fill f19=[r15],32
        mov b0=r21
        ;;
        ldf.fill f20=[r14],32
        ldf.fill f21=[r15],32
        mov b1=r22
        ;;
        ldf.fill f22=[r14],32
        ldf.fill f23=[r15],32
        mov b2=r23
        ;;
        mov ar.bspstore=r27
        mov ar.unat=r29         // establish unat holding the NaT bits for r4-r7
        mov b3=r24
        ;;
        ldf.fill f24=[r14],32
        ldf.fill f25=[r15],32
        mov b4=r25
        ;;
        ldf.fill f26=[r14],32
        ldf.fill f27=[r15],32
        mov b5=r26
        ;;
        ldf.fill f28=[r14],32
        ldf.fill f29=[r15],32
        mov ar.pfs=r16
        ;;
        ldf.fill f30=[r14],32
        ldf.fill f31=[r15],24
        mov ar.lc=r17
        ;;
        ld8.fill r4=[r14],16
        ld8.fill r5=[r15],16
        mov pr=r28,-1
        ;;
        ld8.fill r6=[r14],16
        ld8.fill r7=[r15],16

        mov ar.unat=r18         // restore caller's unat
        mov ar.rnat=r30         // must restore after bspstore but before rsc!
        mov ar.fpsr=r19         // restore fpsr
        mov ar.rsc=3            // put RSE back into eager mode, pl 0
        br.cond.sptk.many b7
END(load_switch_stack)
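
/*
 * The ordering in the epilogue above is the delicate part: ar.rnat is only
 * stable while the RSE is stopped, and writing ar.bspstore redefines it, so
 * rnat must be restored after bspstore but before re-enabling eager mode.
 * A sketch using the kernel's ia64_setreg() intrinsic (the C sequence is
 * illustrative; the real work is done by the assembly above):
 *
 *	ia64_setreg(_IA64_REG_AR_RSC, 0);		// enforced lazy mode
 *	ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore);	// clobbers ar.rnat...
 *	ia64_setreg(_IA64_REG_AR_RNAT, rnat);		// ...so restore it afterwards
 *	ia64_setreg(_IA64_REG_AR_RSC, 3);		// back to eager mode, pl 0
 */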

GLOBAL_ENTRY(execve)
        mov r15=__NR_execve             // put syscall number in place
        break __BREAK_SYSCALL
        br.ret.sptk.many rp
END(execve)

GLOBAL_ENTRY(clone)
        mov r15=__NR_clone              // put syscall number in place
        break __BREAK_SYSCALL
        br.ret.sptk.many rp
END(clone)
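
/*
 * These kernel-internal stubs enter the normal syscall path: the number goes
 * in r15 and "break __BREAK_SYSCALL" raises the syscall break.  For flavour,
 * a zero-argument user-level wrapper might look like this in GCC inline asm
 * (a sketch assuming __BREAK_SYSCALL is 0x100000 as in asm/break.h; a real
 * wrapper would also have to declare the remaining scratch state clobbered):
 *
 *	static long ia64_break_syscall0(long nr)
 *	{
 *		register long r15 asm("r15") = nr;	// syscall number
 *		register long r8 asm("r8");		// return value
 *		asm volatile ("break 0x100000"
 *			      : "=r" (r8) : "r" (r15) : "memory");
 *		return r8;
 *	}
 */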

        /*
         * Invoke a system call, but do some tracing before and after the call.
         * We MUST preserve the current register frame throughout this routine
         * because some system calls (such as ia64_execve) directly
         * manipulate ar.pfs.
         */
GLOBAL_ENTRY(ia64_trace_syscall)
        PT_REGS_UNWIND_INFO(0)
        /*
         * We need to preserve the scratch registers f6-f11 in case the system
         * call is sigreturn.
         */
        adds r16=PT(F6)+16,sp
        adds r17=PT(F7)+16,sp
        ;;
        stf.spill [r16]=f6,32
        stf.spill [r17]=f7,32
        ;;
        stf.spill [r16]=f8,32
        stf.spill [r17]=f9,32
        ;;
        stf.spill [r16]=f10
        stf.spill [r17]=f11
        br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
        adds r16=PT(F6)+16,sp
        adds r17=PT(F7)+16,sp
        ;;
        ldf.fill f6=[r16],32
        ldf.fill f7=[r17],32
        ;;
        ldf.fill f8=[r16],32
        ldf.fill f9=[r17],32
        ;;
        ldf.fill f10=[r16]
        ldf.fill f11=[r17]
        // the syscall number may have changed, so re-load it and re-calculate the
        // syscall entry-point:
        adds r15=PT(R15)+16,sp          // r15 = &pt_regs.r15 (syscall #)
        ;;
        ld8 r15=[r15]
        mov r3=NR_syscalls - 1
        ;;
        adds r15=-1024,r15
        movl r16=sys_call_table
        ;;
        shladd r20=r15,3,r16            // r20 = sys_call_table + 8*(syscall-1024)
        cmp.leu p6,p7=r15,r3
        ;;
(p6)    ld8 r20=[r20]                   // load address of syscall entry point
(p7)    movl r20=sys_ni_syscall
        ;;
        mov b6=r20
        br.call.sptk.many rp=b6         // do the syscall
.strace_check_retval:
        cmp.lt p6,p0=r8,r0              // syscall failed?
        adds r2=PT(R8)+16,sp            // r2 = &pt_regs.r8
        adds r3=PT(R10)+16,sp           // r3 = &pt_regs.r10
        mov r10=0
(p6)    br.cond.sptk strace_error       // syscall failed ->
        ;;                              // avoid RAW on r10
.strace_save_retval:
.mem.offset 0,0; st8.spill [r2]=r8      // store return value in slot for r8
.mem.offset 8,0; st8.spill [r3]=r10     // clear error indication in slot for r10
        br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
.ret3:  br.cond.sptk .work_pending_syscall_end

strace_error:
        ld8 r3=[r2]                     // load pt_regs.r8
        sub r9=0,r8                     // negate return value to get errno value
        ;;
        cmp.ne p6,p0=r3,r0              // is pt_regs.r8!=0?
        adds r3=16,r2                   // r3=&pt_regs.r10
        ;;
(p6)    mov r10=-1
(p6)    mov r8=r9
        br.cond.sptk .strace_save_retval
END(ia64_trace_syscall)
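
/*
 * Since the tracer may rewrite r15 in syscall_trace_enter(), the number is
 * reloaded from pt_regs and revalidated before dispatch.  C sketch of the
 * bounds check above (1024 is the ia64 syscall base; the typedef is
 * illustrative):
 *
 *	typedef long (*syscall_fn_t)(long, long, long, long, long, long, long, long);
 *
 *	unsigned long nr = regs->r15 - 1024;		// adds r15=-1024,r15
 *	syscall_fn_t fn = (nr <= NR_syscalls - 1)	// cmp.leu p6,p7=r15,r3
 *		? (syscall_fn_t) sys_call_table[nr]	// (p6) ld8 r20=[r20]
 *		: (syscall_fn_t) sys_ni_syscall;	// (p7) movl r20=...
 *	// the br.call through b6 corresponds to invoking fn() here
 */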

        /*
         * When traced and returning from sigreturn, we invoke syscall_trace_leave but then
         * go straight to ia64_leave_kernel rather than ia64_leave_syscall.
         */
GLOBAL_ENTRY(ia64_strace_leave_kernel)
        PT_REGS_UNWIND_INFO(0)
{       /*
         * Some versions of gas generate bad unwind info if the first instruction of a
         * procedure doesn't go into the first slot of a bundle.  This is a workaround.
         */
        nop.m 0
        nop.i 0
        br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
}
.ret4:  br.cond.sptk ia64_leave_kernel
END(ia64_strace_leave_kernel)

GLOBAL_ENTRY(ia64_ret_from_clone)
        PT_REGS_UNWIND_INFO(0)
{       /*
         * Some versions of gas generate bad unwind info if the first instruction of a
         * procedure doesn't go into the first slot of a bundle.  This is a workaround.
         */
        nop.m 0
        nop.i 0
        /*
         * We need to call schedule_tail() to complete the scheduling process.
         * Called by ia64_switch_to() after do_fork()->copy_thread().  r8 contains the
         * address of the previously executing task.
         */
        br.call.sptk.many rp=ia64_invoke_schedule_tail
}
.ret8:
        adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
        ;;
        ld4 r2=[r2]
        ;;
        mov r8=0
        and r2=_TIF_SYSCALL_TRACEAUDIT,r2
        ;;
        cmp.ne p6,p0=r2,r0
(p6)    br.cond.spnt .strace_check_retval
        ;;                              // added stop bits to prevent r8 dependency
END(ia64_ret_from_clone)
        // fall through
GLOBAL_ENTRY(ia64_ret_from_syscall)
        PT_REGS_UNWIND_INFO(0)
        cmp.ge p6,p7=r8,r0                      // syscall executed successfully?
        adds r2=PT(R8)+16,sp                    // r2 = &pt_regs.r8
        mov r10=r0                              // clear error indication in r10
(p7)    br.cond.spnt handle_syscall_error       // handle potential syscall failure
END(ia64_ret_from_syscall)
        // fall through
/*
 * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
 *	need to switch to bank 0 and doesn't restore the scratch registers.
 *	To avoid leaking kernel bits, the scratch registers are set to
 *	the following known-to-be-safe values:
 *
 *	r1:          restored (global pointer)
 *	r2:          cleared
 *	r3:          1 (when returning to user-level)
 *	r8-r11:      restored (syscall return value(s))
 *	r12:         restored (user-level stack pointer)
 *	r13:         restored (user-level thread pointer)
 *	r14:         set to __kernel_syscall_via_epc
 *	r15:         restored (syscall #)
 *	r16-r17:     cleared
 *	r18:         user-level b6
 *	r19:         cleared
 *	r20:         user-level ar.fpsr
 *	r21:         user-level b0
 *	r22:         cleared
 *	r23:         user-level ar.bspstore
 *	r24:         user-level ar.rnat
 *	r25:         user-level ar.unat
 *	r26:         user-level ar.pfs
 *	r27:         user-level ar.rsc
 *	r28:         user-level ip
 *	r29:         user-level psr
 *	r30:         user-level cfm
 *	r31:         user-level pr
 *	f6-f11:      cleared
 *	pr:          restored (user-level pr)
 *	b0:          restored (user-level rp)
 *	b6:          restored
 *	b7:          set to __kernel_syscall_via_epc
 *	ar.unat:     restored (user-level ar.unat)
 *	ar.pfs:      restored (user-level ar.pfs)
 *	ar.rsc:      restored (user-level ar.rsc)
 *	ar.rnat:     restored (user-level ar.rnat)
 *	ar.bspstore: restored (user-level ar.bspstore)
 *	ar.fpsr:     restored (user-level ar.fpsr)
 *	ar.ccv:      cleared
 *	ar.csd:      cleared
 *	ar.ssd:      cleared
 */
ENTRY(ia64_leave_syscall)
        PT_REGS_UNWIND_INFO(0)
        /*
         * work.need_resched etc. mustn't get changed by this CPU before it returns to
         * user- or fsys-mode, hence we disable interrupts early on.
         *
         * p6 controls whether current_thread_info()->flags needs to be checked for
         * extra work.  We always check for extra work when returning to user-level.
         * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
         * is 0.  After extra work processing has been completed, execution
         * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
         * needs to be redone.
         */
#ifdef CONFIG_PREEMPT
        rsm psr.i                               // disable interrupts
        cmp.eq pLvSys,p0=r0,r0                  // pLvSys=1: leave from syscall
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
        ;;
        .pred.rel.mutex pUStk,pKStk
(pKStk) ld4 r21=[r20]                           // r21 <- preempt_count
(pUStk) mov r21=0                               // r21 <- 0
        ;;
        cmp.eq p6,p0=r21,r0                     // p6 <- pUStk || (preempt_count == 0)
#else /* !CONFIG_PREEMPT */
(pUStk) rsm psr.i
        cmp.eq pLvSys,p0=r0,r0                  // pLvSys=1: leave from syscall
(pUStk) cmp.eq.unc p6,p0=r0,r0                  // p6 <- pUStk
#endif
.work_processed_syscall:
        adds r2=PT(LOADRS)+16,r12
        adds r3=PT(AR_BSPSTORE)+16,r12
        adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
        ;;
(p6)    ld4 r31=[r18]                           // load current_thread_info()->flags
        ld8 r19=[r2],PT(B6)-PT(LOADRS)          // load ar.rsc value for "loadrs"
        nop.i 0
        ;;
        mov r16=ar.bsp                          // M2  get existing backing store pointer
        ld8 r18=[r2],PT(R9)-PT(B6)              // load b6
(p6)    and r15=TIF_WORK_MASK,r31               // any work other than TIF_SYSCALL_TRACE?
        ;;
        ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE)    // load ar.bspstore (may be garbage)
(p6)    cmp4.ne.unc p6,p0=r15, r0               // any special work pending?
(p6)    br.cond.spnt .work_pending_syscall
        ;;
        // start restoring the state saved on the kernel stack (struct pt_regs):
        ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
        ld8 r11=[r3],PT(CR_IIP)-PT(R11)
(pNonSys) break 0                               // bug check: we shouldn't be here if pNonSys is TRUE!
        ;;
        invala                                  // M0|1 invalidate ALAT
        rsm psr.i | psr.ic                      // M2   turn off interrupts and interruption collection
        cmp.eq p9,p0=r0,r0                      // A    set p9 to indicate that we should restore cr.ifs

        ld8 r29=[r2],16                         // M0|1 load cr.ipsr
        ld8 r28=[r3],16                         // M0|1 load cr.iip
        mov r22=r0                              // A    clear r22
        ;;
        ld8 r30=[r2],16                         // M0|1 load cr.ifs
        ld8 r25=[r3],16                         // M0|1 load ar.unat
(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
        ;;
        ld8 r26=[r2],PT(B0)-PT(AR_PFS)          // M0|1 load ar.pfs
(pKStk) mov r22=psr                             // M2   read PSR now that interrupts are disabled
        nop 0
        ;;
        ld8 r21=[r2],PT(AR_RNAT)-PT(B0)         // M0|1 load b0
        ld8 r27=[r3],PT(PR)-PT(AR_RSC)          // M0|1 load ar.rsc
        mov f6=f0                               // F    clear f6
        ;;
        ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT)    // M0|1 load ar.rnat (may be garbage)
        ld8 r31=[r3],PT(R1)-PT(PR)              // M0|1 load predicates
        mov f7=f0                               // F    clear f7
        ;;
        ld8 r20=[r2],PT(R12)-PT(AR_FPSR)        // M0|1 load ar.fpsr
        ld8.fill r1=[r3],16                     // M0|1 load r1
(pUStk) mov r17=1                               // A
        ;;
(pUStk) st1 [r14]=r17                           // M2|3
        ld8.fill r13=[r3],16                    // M0|1
        mov f8=f0                               // F    clear f8
        ;;
        ld8.fill r12=[r2]                       // M0|1 restore r12 (sp)
        ld8.fill r15=[r3]                       // M0|1 restore r15
        mov b6=r18                              // I0   restore b6

        addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0 // A
        mov f9=f0                               // F    clear f9
(pKStk) br.cond.dpnt.many skip_rbs_switch       // B

        srlz.d                                  // M0   ensure interruption collection is off (for cover)
        shr.u r18=r19,16                        // I0|1 get byte size of existing "dirty" partition
        cover                                   // B    add current frame into dirty partition & set cr.ifs
        ;;
(pUStk) ld4 r17=[r17]                           // M0|1 r17 = cpu_data->phys_stacked_size_p8
        mov r19=ar.bsp                          // M2   get new backing store pointer
        mov f10=f0                              // F    clear f10

        nop.m 0
        movl r14=__kernel_syscall_via_epc       // X
        ;;
        mov.m ar.csd=r0                         // M2   clear ar.csd
        mov.m ar.ccv=r0                         // M2   clear ar.ccv
        mov b7=r14                              // I0   clear b7 (hint with __kernel_syscall_via_epc)

        mov.m ar.ssd=r0                         // M2   clear ar.ssd
        mov f11=f0                              // F    clear f11
        br.cond.sptk.many rbs_switch            // B
END(ia64_leave_syscall)
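
/*
 * The p6/pLvSys setup at the top of ia64_leave_syscall reduces to a single
 * question: do the thread flags need to be looked at before leaving?  Hedged
 * C equivalent of the CONFIG_PREEMPT variant (names are descriptive only):
 *
 *	int check_flags = returning_to_user || preempt_count == 0;	// p6
 *
 *	if (check_flags && (ti_flags & TIF_WORK_MASK))	// and + cmp4.ne.unc
 *		goto work_pending_syscall;		// schedule()/signal work
 */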

#ifdef CONFIG_IA32_SUPPORT
GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
        PT_REGS_UNWIND_INFO(0)
        adds r2=PT(R8)+16,sp                    // r2 = &pt_regs.r8
        adds r3=PT(R10)+16,sp                   // r3 = &pt_regs.r10
        ;;
        .mem.offset 0,0
        st8.spill [r2]=r8       // store return value in slot for r8 and set unat bit
        .mem.offset 8,0
        st8.spill [r3]=r0       // clear error indication in slot for r10 and set unat bit
END(ia64_ret_from_ia32_execve)
        // fall through
#endif /* CONFIG_IA32_SUPPORT */
GLOBAL_ENTRY(ia64_leave_kernel)
        PT_REGS_UNWIND_INFO(0)
        /*
         * work.need_resched etc. mustn't get changed by this CPU before it returns to
         * user- or fsys-mode, hence we disable interrupts early on.
         *
         * p6 controls whether current_thread_info()->flags needs to be checked for
         * extra work.  We always check for extra work when returning to user-level.
         * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
         * is 0.  After extra work processing has been completed, execution
         * resumes at .work_processed_kernel with p6 set to 1 if the extra-work-check
         * needs to be redone.
         */
#ifdef CONFIG_PREEMPT
        rsm psr.i                               // disable interrupts
        cmp.eq p0,pLvSys=r0,r0                  // pLvSys=0: leave from kernel
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
        ;;
        .pred.rel.mutex pUStk,pKStk
(pKStk) ld4 r21=[r20]                           // r21 <- preempt_count
(pUStk) mov r21=0                               // r21 <- 0
        ;;
        cmp.eq p6,p0=r21,r0                     // p6 <- pUStk || (preempt_count == 0)
#else
(pUStk) rsm psr.i
        cmp.eq p0,pLvSys=r0,r0                  // pLvSys=0: leave from kernel
(pUStk) cmp.eq.unc p6,p0=r0,r0                  // p6 <- pUStk
#endif
.work_processed_kernel:
        adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
        ;;
(p6)    ld4 r31=[r17]                           // load current_thread_info()->flags
        adds r21=PT(PR)+16,r12
        ;;

        lfetch [r21],PT(CR_IPSR)-PT(PR)
        adds r2=PT(B6)+16,r12
        adds r3=PT(R16)+16,r12
        ;;
        lfetch [r21]
        ld8 r28=[r2],8                          // load b6
        adds r29=PT(R24)+16,r12

        ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
        adds r30=PT(AR_CCV)+16,r12
(p6)    and r19=TIF_WORK_MASK,r31               // any work other than TIF_SYSCALL_TRACE?
        ;;
        ld8.fill r24=[r29]
        ld8 r15=[r30]                           // load ar.ccv
(p6)    cmp4.ne.unc p6,p0=r19, r0               // any special work pending?
        ;;
        ld8 r29=[r2],16                         // load b7
        ld8 r30=[r3],16                         // load ar.csd
(p6)    br.cond.spnt .work_pending
        ;;
        ld8 r31=[r2],16                         // load ar.ssd
        ld8.fill r8=[r3],16
        ;;
        ld8.fill r9=[r2],16
        ld8.fill r10=[r3],PT(R17)-PT(R10)
        ;;
        ld8.fill r11=[r2],PT(R18)-PT(R11)
        ld8.fill r17=[r3],16
        ;;
        ld8.fill r18=[r2],16
        ld8.fill r19=[r3],16
        ;;
        ld8.fill r20=[r2],16
        ld8.fill r21=[r3],16
        mov ar.csd=r30
        mov ar.ssd=r31
        ;;
        rsm psr.i | psr.ic      // initiate turning off of interrupt and interruption collection
        invala                  // invalidate ALAT
        ;;
        ld8.fill r22=[r2],24
        ld8.fill r23=[r3],24
        mov b6=r28
        ;;
        ld8.fill r25=[r2],16
        ld8.fill r26=[r3],16
        mov b7=r29
        ;;
        ld8.fill r27=[r2],16
        ld8.fill r28=[r3],16
        ;;
        ld8.fill r29=[r2],16
        ld8.fill r30=[r3],24
        ;;
        ld8.fill r31=[r2],PT(F9)-PT(R31)
        adds r3=PT(F10)-PT(F6),r3
        ;;
        ldf.fill f9=[r2],PT(F6)-PT(F9)
        ldf.fill f10=[r3],PT(F8)-PT(F10)
        ;;
        ldf.fill f6=[r2],PT(F7)-PT(F6)
        ;;
        ldf.fill f7=[r2],PT(F11)-PT(F7)
        ldf.fill f8=[r3],32
        ;;
        srlz.d  // ensure that inter. collection is off (VHPT is don't care, since text is pinned)
        mov ar.ccv=r15
        ;;
        ldf.fill f11=[r2]
        bsw.0   // switch back to bank 0 (no stop bit required beforehand...)
        ;;
(pUStk) mov r18=IA64_KR(CURRENT)                // M2 (12 cycle read latency)
        adds r16=PT(CR_IPSR)+16,r12
        adds r17=PT(CR_IIP)+16,r12

(pKStk) mov r22=psr                             // M2 read PSR now that interrupts are disabled
        nop.i 0
        nop.i 0
        ;;
        ld8 r29=[r16],16                        // load cr.ipsr
        ld8 r28=[r17],16                        // load cr.iip
        ;;
        ld8 r30=[r16],16                        // load cr.ifs
        ld8 r25=[r17],16                        // load ar.unat
        ;;
        ld8 r26=[r16],16                        // load ar.pfs
        ld8 r27=[r17],16                        // load ar.rsc
        cmp.eq p9,p0=r0,r0                      // set p9 to indicate that we should restore cr.ifs
        ;;
        ld8 r24=[r16],16                        // load ar.rnat (may be garbage)
        ld8 r23=[r17],16                        // load ar.bspstore (may be garbage)
        ;;
        ld8 r31=[r16],16                        // load predicates
        ld8 r21=[r17],16                        // load b0
        ;;
        ld8 r19=[r16],16                        // load ar.rsc value for "loadrs"
        ld8.fill r1=[r17],16                    // load r1
        ;;
        ld8.fill r12=[r16],16
        ld8.fill r13=[r17],16
(pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
        ;;
        ld8 r20=[r16],16                        // ar.fpsr
        ld8.fill r15=[r17],16
        ;;
        ld8.fill r14=[r16],16
        ld8.fill r2=[r17]
(pUStk) mov r17=1
        ;;
        ld8.fill r3=[r16]
(pUStk) st1 [r18]=r17                           // restore current->thread.on_ustack
        shr.u r18=r19,16                        // get byte size of existing "dirty" partition
        ;;
        mov r16=ar.bsp                          // get existing backing store pointer
        addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
        ;;
        ld4 r17=[r17]                           // r17 = cpu_data->phys_stacked_size_p8
(pKStk) br.cond.dpnt skip_rbs_switch

        /*
         * Restore user backing store.
         *
         * NOTE: alloc, loadrs, and cover can't be predicated.
         */
(pNonSys) br.cond.dpnt dont_preserve_current_frame
        cover                           // add current frame into dirty partition and set cr.ifs
        ;;
        mov r19=ar.bsp                  // get new backing store pointer
rbs_switch:
        sub r16=r16,r18                 // krbs = old bsp - size of dirty partition
        cmp.ne p9,p0=r0,r0              // clear p9 to skip restore of cr.ifs
        ;;
        sub r19=r19,r16                 // calculate total byte size of dirty partition
        add r18=64,r18                  // don't force in0-in7 into memory...
        ;;
        shl r19=r19,16                  // shift size of dirty partition into loadrs position
        ;;
dont_preserve_current_frame:
        /*
         * To prevent leaking bits between the kernel and user-space,
         * we must clear the stacked registers in the "invalid" partition here.
         * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
         * 5 registers/cycle on McKinley).
         */
#       define pRecurse p6
#       define pReturn  p7
#ifdef CONFIG_ITANIUM
#       define Nregs    10
#else
#       define Nregs    14
#endif
        alloc loc0=ar.pfs,2,Nregs-2,2,0
        shr.u loc1=r18,9                // RNaTslots <= floor(dirtySize / (64*8))
        sub r17=r17,r18                 // r17 = (physStackedSize + 8) - dirtySize
        ;;
        mov ar.rsc=r19                  // load ar.rsc to be used for "loadrs"
        shladd in0=loc1,3,r17
        mov in1=0
        ;;
        TEXT_ALIGN(32)
rse_clear_invalid:
#ifdef CONFIG_ITANIUM
        // cycle 0
 { .mii
        alloc loc0=ar.pfs,2,Nregs-2,2,0
        cmp.lt pRecurse,p0=Nregs*8,in0  // if more than Nregs regs left to clear, (re)curse
        add out0=-Nregs*8,in0
}{ .mfb
        add out1=1,in1                  // increment recursion count
        nop.f 0
        nop.b 0                         // can't do br.call here because of alloc (WAW on CFM)
        ;;
}{ .mfi // cycle 1
        mov loc1=0
        nop.f 0
        mov loc2=0
}{ .mib
        mov loc3=0
        mov loc4=0
(pRecurse) br.call.sptk.many b0=rse_clear_invalid

}{ .mfi // cycle 2
        mov loc5=0
        nop.f 0
        cmp.ne pReturn,p0=r0,in1        // if recursion count != 0, we need to do a br.ret
}{ .mib
        mov loc6=0
        mov loc7=0
(pReturn) br.ret.sptk.many b0
}
#else /* !CONFIG_ITANIUM */
        alloc loc0=ar.pfs,2,Nregs-2,2,0
        cmp.lt pRecurse,p0=Nregs*8,in0  // if more than Nregs regs left to clear, (re)curse
        add out0=-Nregs*8,in0
        add out1=1,in1                  // increment recursion count
        mov loc1=0
        mov loc2=0
        ;;
        mov loc3=0
        mov loc4=0
        mov loc5=0
        mov loc6=0
        mov loc7=0
(pRecurse) br.call.dptk.few b0=rse_clear_invalid
        ;;
        mov loc8=0
        mov loc9=0
        cmp.ne pReturn,p0=r0,in1        // if recursion count != 0, we need to do a br.ret
        mov loc10=0
        mov loc11=0
(pReturn) br.ret.dptk.many b0
#endif /* !CONFIG_ITANIUM */
#       undef pRecurse
#       undef pReturn
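        /*
         * rse_clear_invalid zeroes up to Nregs stacked registers per step and
         * recurses while more remain; in1 counts the depth so the chain of
         * (pReturn) br.ret instructions can unwind it again.  Rough C model
         * of the recursion above (the zeroing itself happens via the freshly
         * alloc'd locals):
         *
         *	static void rse_clear_invalid(long bytes, long depth)
         *	{
         *		// loc1..loc(Nregs-3) = 0 clears one batch of registers
         *		if (Nregs * 8 < bytes)		// cmp.lt pRecurse
         *			rse_clear_invalid(bytes - Nregs * 8, depth + 1);
         *		// depth != 0 => return one level (cmp.ne pReturn)
         *	}
         */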
        ;;
        alloc r17=ar.pfs,0,0,0,0        // drop current register frame
        ;;
        loadrs
        ;;
skip_rbs_switch:
        mov ar.unat=r25                 // M2
(pKStk) extr.u r22=r22,21,1             // I0 extract current value of psr.pp from r22
(pLvSys)mov r19=r0                      // A  clear r19 for leave_syscall, no-op otherwise
        ;;
(pUStk) mov ar.bspstore=r23             // M2
(pKStk) dep r29=r22,r29,21,1            // I0 update ipsr.pp with psr.pp
(pLvSys)mov r16=r0                      // A  clear r16 for leave_syscall, no-op otherwise
        ;;
        mov cr.ipsr=r29                 // M2
        mov ar.pfs=r26                  // I0
(pLvSys)mov r17=r0                      // A  clear r17 for leave_syscall, no-op otherwise

(p9)    mov cr.ifs=r30                  // M2
        mov b0=r21                      // I0
(pLvSys)mov r18=r0                      // A  clear r18 for leave_syscall, no-op otherwise

        mov ar.fpsr=r20                 // M2
        mov cr.iip=r28                  // M2
        nop 0
        ;;
(pUStk) mov ar.rnat=r24                 // M2 must happen with RSE in lazy mode
        nop 0
(pLvSys)mov r2=r0

        mov ar.rsc=r27                  // M2
        mov pr=r31,-1                   // I0
        rfi                             // B

        /*
         * On entry:
         *	r20 = &current->thread_info->preempt_count (if CONFIG_PREEMPT)
         *	r31 = current->thread_info->flags
         * On exit:
         *	p6 = TRUE if work-pending-check needs to be redone
         */
.work_pending_syscall:
        add r2=-8,r2
        add r3=-8,r3
        ;;
        st8 [r2]=r8
        st8 [r3]=r10
.work_pending:
        tbit.nz p6,p0=r31,TIF_SIGDELAYED        // signal delayed from MCA/INIT/NMI/PMI context?
(p6)    br.cond.sptk.few .sigdelayed
        ;;
        tbit.z p6,p0=r31,TIF_NEED_RESCHED       // current_thread_info()->need_resched==0?
(p6)    br.cond.sptk.few .notify
#ifdef CONFIG_PREEMPT
(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
        ;;
(pKStk) st4 [r20]=r21
        ssm psr.i                               // enable interrupts
#endif
        br.call.spnt.many rp=schedule
.ret9:  cmp.eq p6,p0=r0,r0                      // p6 <- 1
        rsm psr.i                               // disable interrupts
        ;;
#ifdef CONFIG_PREEMPT
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
        ;;
(pKStk) st4 [r20]=r0                            // preempt_count() <- 0
#endif
(pLvSys)br.cond.sptk.few .work_pending_syscall_end
        br.cond.sptk.many .work_processed_kernel // re-check

.notify:
(pUStk) br.call.spnt.many rp=notify_resume_user
.ret10: cmp.ne p6,p0=r0,r0                      // p6 <- 0
(pLvSys)br.cond.sptk.few .work_pending_syscall_end
        br.cond.sptk.many .work_processed_kernel // don't re-check

// There is a delayed signal that was detected in MCA/INIT/NMI/PMI context where
// it could not be delivered.  Deliver it now.  The signal might be for us and
// may set TIF_SIGPENDING, so redrive ia64_leave_* after processing the delayed
// signal.

.sigdelayed:
        br.call.sptk.many rp=do_sigdelayed
        cmp.eq p6,p0=r0,r0                      // p6 <- 1, always re-check
(pLvSys)br.cond.sptk.few .work_pending_syscall_end
        br.cond.sptk.many .work_processed_kernel // re-check

.work_pending_syscall_end:
        adds r2=PT(R8)+16,r12
        adds r3=PT(R10)+16,r12
        ;;
        ld8 r8=[r2]
        ld8 r10=[r3]
        br.cond.sptk.many .work_processed_syscall // re-check

END(ia64_leave_kernel)

ENTRY(handle_syscall_error)
        /*
         * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could
         * lead us to mistake a negative return value as a failed syscall.  Those syscalls
         * must deposit a non-zero value in pt_regs.r8 to indicate an error.  If
         * pt_regs.r8 is zero, we assume that the call completed successfully.
         */
        PT_REGS_UNWIND_INFO(0)
        ld8 r3=[r2]                     // load pt_regs.r8
        ;;
        cmp.eq p6,p7=r3,r0              // is pt_regs.r8==0?
        ;;
(p7)    mov r10=-1
(p7)    sub r8=0,r8                     // negate return value to get errno
        br.cond.sptk ia64_leave_syscall
END(handle_syscall_error)
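
/*
 * Sketch of the convention handled above: a failing syscall records a
 * non-zero value in pt_regs.r8 at entry, and only then is the return state
 * rewritten to r10 = -1 with r8 holding the positive errno (illustrative C):
 *
 *	if (regs->r8 != 0) {		// cmp.eq p6,p7=r3,r0 failed
 *		r10 = -1;		// error indication for user-space
 *		r8  = 0 - retval;	// sub r8=0,r8: negate to get errno
 *	}
 */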

        /*
         * Invoke schedule_tail(task) while preserving in0-in7, which may be needed
         * in case a system call gets restarted.
         */
GLOBAL_ENTRY(ia64_invoke_schedule_tail)
        .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
        alloc loc1=ar.pfs,8,2,1,0
        mov loc0=rp
        mov out0=r8                     // Address of previous task
        ;;
        br.call.sptk.many rp=schedule_tail
.ret11: mov ar.pfs=loc1
        mov rp=loc0
        br.ret.sptk.many rp
END(ia64_invoke_schedule_tail)

        /*
         * Set up the stack and call do_notify_resume_user().  Note that pSys and pNonSys need to
         * be set up by the caller.  We declare 8 input registers so the system call
         * args get preserved, in case we need to restart a system call.
         */
ENTRY(notify_resume_user)
        .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
        alloc loc1=ar.pfs,8,2,3,0       // preserve all eight input regs in case of syscall restart!
        mov r9=ar.unat
        mov loc0=rp                     // save return address
        mov out0=0                      // there is no "oldset"
        adds out1=8,sp                  // out1=&sigscratch->ar_pfs
(pSys)  mov out2=1                      // out2==1 => we're in a syscall
        ;;
(pNonSys) mov out2=0                    // out2==0 => not a syscall
        .fframe 16
        .spillsp ar.unat, 16
        st8 [sp]=r9,-16                 // allocate space for ar.unat and save it
        st8 [out1]=loc1,-8              // save ar.pfs, out1=&sigscratch
        .body
        br.call.sptk.many rp=do_notify_resume_user
.ret15: .restore sp
        adds sp=16,sp                   // pop scratch stack space
        ;;
        ld8 r9=[sp]                     // load new unat from sigscratch->scratch_unat
        mov rp=loc0
        ;;
        mov ar.unat=r9
        mov ar.pfs=loc1
        br.ret.sptk.many rp
END(notify_resume_user)

GLOBAL_ENTRY(sys_rt_sigsuspend)
        .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
        alloc loc1=ar.pfs,8,2,3,0       // preserve all eight input regs in case of syscall restart!
        mov r9=ar.unat
        mov loc0=rp                     // save return address
        mov out0=in0                    // mask
        mov out1=in1                    // sigsetsize
        adds out2=8,sp                  // out2=&sigscratch->ar_pfs
        ;;
        .fframe 16
        .spillsp ar.unat, 16
        st8 [sp]=r9,-16                 // allocate space for ar.unat and save it
        st8 [out2]=loc1,-8              // save ar.pfs, out2=&sigscratch
        .body
        br.call.sptk.many rp=ia64_rt_sigsuspend
.ret17: .restore sp
        adds sp=16,sp                   // pop scratch stack space
        ;;
        ld8 r9=[sp]                     // load new unat from sw->caller_unat
        mov rp=loc0
        ;;
        mov ar.unat=r9
        mov ar.pfs=loc1
        br.ret.sptk.many rp
END(sys_rt_sigsuspend)

ENTRY(sys_rt_sigreturn)
        PT_REGS_UNWIND_INFO(0)
        /*
         * Allocate 8 input registers since ptrace() may clobber them
         */
        alloc r2=ar.pfs,8,0,1,0
        .prologue
        PT_REGS_SAVES(16)
        adds sp=-16,sp
        .body
        cmp.eq pNonSys,pSys=r0,r0       // sigreturn isn't a normal syscall...
        ;;
        /*
         * leave_kernel() restores f6-f11 from pt_regs, but since the streamlined
         * syscall-entry path does not save them we save them here instead.  Note: we
         * don't need to save any other registers that are not saved by the stream-lined
         * syscall path, because restore_sigcontext() restores them.
         */
        adds r16=PT(F6)+32,sp
        adds r17=PT(F7)+32,sp
        ;;
        stf.spill [r16]=f6,32
        stf.spill [r17]=f7,32
        ;;
        stf.spill [r16]=f8,32
        stf.spill [r17]=f9,32
        ;;
        stf.spill [r16]=f10
        stf.spill [r17]=f11
        adds out0=16,sp                 // out0 = &sigscratch
        br.call.sptk.many rp=ia64_rt_sigreturn
.ret19: .restore sp,0
        adds sp=16,sp
        ;;
        ld8 r9=[sp]                     // load new ar.unat
        mov.sptk b7=r8,ia64_leave_kernel
        ;;
        mov ar.unat=r9
        br.many b7
END(sys_rt_sigreturn)

GLOBAL_ENTRY(ia64_prepare_handle_unaligned)
        .prologue
        /*
         * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
         */
        mov r16=r0
        DO_SAVE_SWITCH_STACK
        br.call.sptk.many rp=ia64_handle_unaligned // stack frame setup in ivt
.ret21: .body
        DO_LOAD_SWITCH_STACK
        br.cond.sptk.many rp            // goes to ia64_leave_kernel
END(ia64_prepare_handle_unaligned)

        //
        // unw_init_running(void (*callback)(info, arg), void *arg)
        //
#       define EXTRA_FRAME_SIZE ((UNW_FRAME_INFO_SIZE+15)&~15)

GLOBAL_ENTRY(unw_init_running)
        .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
        alloc loc1=ar.pfs,2,3,3,0
        ;;
        ld8 loc2=[in0],8
        mov loc0=rp
        mov r16=loc1
        DO_SAVE_SWITCH_STACK
        .body

        .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
        .fframe IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE
        SWITCH_STACK_SAVES(EXTRA_FRAME_SIZE)
        adds sp=-EXTRA_FRAME_SIZE,sp
        .body
        ;;
        adds out0=16,sp                 // &info
        mov out1=r13                    // current
        adds out2=16+EXTRA_FRAME_SIZE,sp // &switch_stack
        br.call.sptk.many rp=unw_init_frame_info
1:      adds out0=16,sp                 // &info
        mov b6=loc2
        mov loc2=gp                     // save gp across indirect function call
        ;;
        ld8 gp=[in0]
        mov out1=in1                    // arg
        br.call.sptk.many rp=b6         // invoke the callback function
1:      mov gp=loc2                     // restore gp

        // For now, we don't allow changing registers from within
        // unw_init_running; if we ever want to allow that, we'd
        // have to do a load_switch_stack here:
        .restore sp
        adds sp=IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE,sp

        mov ar.pfs=loc1
        mov rp=loc0
        br.ret.sptk.many rp
END(unw_init_running)
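
/*
 * unw_init_running() hands the callback a freshly initialized unwind frame
 * for the current stack.  A typical (hypothetical) caller walks the frames
 * from there, e.g.:
 *
 *	static void dump_frames(struct unw_frame_info *info, void *arg)
 *	{
 *		unsigned long ip;
 *
 *		do {
 *			unw_get_ip(info, &ip);		// IP of current frame
 *		} while (ip && unw_unwind(info) >= 0);
 *	}
 *
 *	unw_init_running(dump_frames, NULL);
 */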

        .rodata
        .align 8
        .globl sys_call_table
sys_call_table:
        data8 sys_ni_syscall            //  This must be sys_ni_syscall!  See ivt.S.
        data8 sys_exit                          // 1025
        data8 sys_read
        data8 sys_write
        data8 sys_open
        data8 sys_close
        data8 sys_creat                         // 1030
        data8 sys_link
        data8 sys_unlink
        data8 ia64_execve
        data8 sys_chdir
        data8 sys_fchdir                        // 1035
        data8 sys_utimes
        data8 sys_mknod
        data8 sys_chmod
        data8 sys_chown
        data8 sys_lseek                         // 1040
        data8 sys_getpid
        data8 sys_getppid
        data8 sys_mount
        data8 sys_umount
        data8 sys_setuid                        // 1045
        data8 sys_getuid
        data8 sys_geteuid
        data8 sys_ptrace
        data8 sys_access
        data8 sys_sync                          // 1050
        data8 sys_fsync
        data8 sys_fdatasync
        data8 sys_kill
        data8 sys_rename
        data8 sys_mkdir                         // 1055
        data8 sys_rmdir
        data8 sys_dup
        data8 sys_pipe
        data8 sys_times
        data8 ia64_brk                          // 1060
        data8 sys_setgid
        data8 sys_getgid
        data8 sys_getegid
        data8 sys_acct
        data8 sys_ioctl                         // 1065
        data8 sys_fcntl
        data8 sys_umask
        data8 sys_chroot
        data8 sys_ustat
        data8 sys_dup2                          // 1070
        data8 sys_setreuid
        data8 sys_setregid
        data8 sys_getresuid
        data8 sys_setresuid
        data8 sys_getresgid                     // 1075
        data8 sys_setresgid
        data8 sys_getgroups
        data8 sys_setgroups
        data8 sys_getpgid
        data8 sys_setpgid                       // 1080
        data8 sys_setsid
        data8 sys_getsid
        data8 sys_sethostname
        data8 sys_setrlimit
        data8 sys_getrlimit                     // 1085
        data8 sys_getrusage
        data8 sys_gettimeofday
        data8 sys_settimeofday
        data8 sys_select
        data8 sys_poll                          // 1090
        data8 sys_symlink
        data8 sys_readlink
        data8 sys_uselib
        data8 sys_swapon
        data8 sys_swapoff                       // 1095
        data8 sys_reboot
        data8 sys_truncate
        data8 sys_ftruncate
        data8 sys_fchmod
        data8 sys_fchown                        // 1100
        data8 ia64_getpriority
        data8 sys_setpriority
        data8 sys_statfs
        data8 sys_fstatfs
        data8 sys_gettid                        // 1105
        data8 sys_semget
        data8 sys_semop
        data8 sys_semctl
        data8 sys_msgget
        data8 sys_msgsnd                        // 1110
        data8 sys_msgrcv
        data8 sys_msgctl
        data8 sys_shmget
        data8 sys_shmat
        data8 sys_shmdt                         // 1115
        data8 sys_shmctl
        data8 sys_syslog
        data8 sys_setitimer
        data8 sys_getitimer
        data8 sys_ni_syscall                    // 1120         /* was: ia64_oldstat */
        data8 sys_ni_syscall                    /* was: ia64_oldlstat */
        data8 sys_ni_syscall                    /* was: ia64_oldfstat */
        data8 sys_vhangup
        data8 sys_lchown
        data8 sys_remap_file_pages              // 1125
        data8 sys_wait4
        data8 sys_sysinfo
        data8 sys_clone
        data8 sys_setdomainname
        data8 sys_newuname                      // 1130
        data8 sys_adjtimex
        data8 sys_ni_syscall                    /* was: ia64_create_module */
        data8 sys_init_module
        data8 sys_delete_module
        data8 sys_ni_syscall                    // 1135         /* was: sys_get_kernel_syms */
        data8 sys_ni_syscall                    /* was: sys_query_module */
        data8 sys_quotactl
        data8 sys_bdflush
        data8 sys_sysfs
        data8 sys_personality                   // 1140
        data8 sys_ni_syscall            // sys_afs_syscall
        data8 sys_setfsuid
        data8 sys_setfsgid
        data8 sys_getdents
        data8 sys_flock                         // 1145
        data8 sys_readv
        data8 sys_writev
        data8 sys_pread64
        data8 sys_pwrite64
        data8 sys_sysctl                        // 1150
        data8 sys_mmap
        data8 sys_munmap
        data8 sys_mlock
        data8 sys_mlockall
        data8 sys_mprotect                      // 1155
        data8 ia64_mremap
        data8 sys_msync
        data8 sys_munlock
        data8 sys_munlockall
        data8 sys_sched_getparam                // 1160
        data8 sys_sched_setparam
        data8 sys_sched_getscheduler
        data8 sys_sched_setscheduler
        data8 sys_sched_yield
        data8 sys_sched_get_priority_max        // 1165
        data8 sys_sched_get_priority_min
        data8 sys_sched_rr_get_interval
        data8 sys_nanosleep
        data8 sys_nfsservctl
        data8 sys_prctl                         // 1170
        data8 sys_getpagesize
        data8 sys_mmap2
        data8 sys_pciconfig_read
        data8 sys_pciconfig_write
        data8 sys_perfmonctl                    // 1175
        data8 sys_sigaltstack
        data8 sys_rt_sigaction
        data8 sys_rt_sigpending
        data8 sys_rt_sigprocmask
        data8 sys_rt_sigqueueinfo               // 1180
        data8 sys_rt_sigreturn
        data8 sys_rt_sigsuspend
        data8 sys_rt_sigtimedwait
        data8 sys_getcwd
        data8 sys_capget                        // 1185
        data8 sys_capset
        data8 sys_sendfile64
        data8 sys_ni_syscall            // sys_getpmsg (STREAMS)
        data8 sys_ni_syscall            // sys_putpmsg (STREAMS)
        data8 sys_socket                        // 1190
        data8 sys_bind
        data8 sys_connect
        data8 sys_listen
        data8 sys_accept
        data8 sys_getsockname                   // 1195
        data8 sys_getpeername
        data8 sys_socketpair
        data8 sys_send
        data8 sys_sendto
        data8 sys_recv                          // 1200
        data8 sys_recvfrom
        data8 sys_shutdown
        data8 sys_setsockopt
        data8 sys_getsockopt
        data8 sys_sendmsg                       // 1205
        data8 sys_recvmsg
        data8 sys_pivot_root
        data8 sys_mincore
        data8 sys_madvise
        data8 sys_newstat                       // 1210
        data8 sys_newlstat
        data8 sys_newfstat
        data8 sys_clone2
        data8 sys_getdents64
        data8 sys_getunwind                     // 1215
        data8 sys_readahead
        data8 sys_setxattr
        data8 sys_lsetxattr
        data8 sys_fsetxattr
        data8 sys_getxattr                      // 1220
        data8 sys_lgetxattr
        data8 sys_fgetxattr
        data8 sys_listxattr
        data8 sys_llistxattr
        data8 sys_flistxattr                    // 1225
        data8 sys_removexattr
        data8 sys_lremovexattr
        data8 sys_fremovexattr
        data8 sys_tkill
        data8 sys_futex                         // 1230
        data8 sys_sched_setaffinity
        data8 sys_sched_getaffinity
        data8 sys_set_tid_address
        data8 sys_fadvise64_64
        data8 sys_tgkill                        // 1235
        data8 sys_exit_group
        data8 sys_lookup_dcookie
        data8 sys_io_setup
        data8 sys_io_destroy
        data8 sys_io_getevents                  // 1240
        data8 sys_io_submit
        data8 sys_io_cancel
        data8 sys_epoll_create
        data8 sys_epoll_ctl
        data8 sys_epoll_wait                    // 1245
        data8 sys_restart_syscall
        data8 sys_semtimedop
        data8 sys_timer_create
        data8 sys_timer_settime
        data8 sys_timer_gettime                 // 1250
        data8 sys_timer_getoverrun
        data8 sys_timer_delete
        data8 sys_clock_settime
        data8 sys_clock_gettime
        data8 sys_clock_getres                  // 1255
        data8 sys_clock_nanosleep
        data8 sys_fstatfs64
        data8 sys_statfs64
        data8 sys_mbind
        data8 sys_get_mempolicy                 // 1260
        data8 sys_set_mempolicy
        data8 sys_mq_open
        data8 sys_mq_unlink
        data8 sys_mq_timedsend
        data8 sys_mq_timedreceive               // 1265
        data8 sys_mq_notify
        data8 sys_mq_getsetattr
        data8 sys_ni_syscall                    // reserved for kexec_load
        data8 sys_ni_syscall                    // reserved for vserver
        data8 sys_waitid                        // 1270
        data8 sys_add_key
        data8 sys_request_key
        data8 sys_keyctl
        data8 sys_ioprio_set
        data8 sys_ioprio_get                    // 1275
        data8 sys_set_zone_reclaim
        data8 sys_ni_syscall
        data8 sys_ni_syscall
        data8 sys_ni_syscall

        .org sys_call_table + 8*NR_syscalls     // guard against failures to increase NR_syscalls
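
/*
 * The .org directive above is a build-time guard: if entries were added
 * without bumping NR_syscalls, "." would already lie past the target and
 * assembly fails.  The C-side equivalent would be a compile-time assertion
 * along these lines (hypothetical, since the table is defined here in
 * assembly):
 *
 *	_Static_assert(NUM_TABLE_ENTRIES == NR_syscalls,
 *		       "sys_call_table out of sync with NR_syscalls");
 */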