/*
 * arch/arc/include/asm/entry.h
 * (Linux kernel source; as of commit "ARC: pt_regs update #2: Remove
 *  unused gutter at start of pt_regs")
 */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
 *  Stack switching code can no longer reliably rely on the fact that
 *  if we are NOT in user mode, stack is switched to kernel mode.
 *  e.g. L2 IRQ interrupted a L1 ISR which had not yet completed
 *  its prologue, including stack switching from user mode
 *
 * Vineetg: Aug 28th 2008: Bug #94984
 *  -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
 *   Normally CPU does this automatically, however when doing FAKE rtie,
 *   we also need to explicitly do this. The problem in macros
 *   FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
 *   was being "CLEARED" rather than "SET". Actually "SET" clears ZOL context
 *
 * Vineetg: May 5th 2008
 *  -Modified CALLEE_REG save/restore macros to handle the fact that
 *   r25 contains the kernel current task ptr
 *  -Defined Stack Switching Macro to be reused in all intr/excp hdlrs
 *  -Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
 *   address write-back load "ld.ab" instead of separate ld/add instns
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */
#ifndef __ASM_ARC_ENTRY_H
#define __ASM_ARC_ENTRY_H

#ifdef __ASSEMBLY__
#include <asm/unistd.h>		/* For NR_syscalls definition */
#include <asm/asm-offsets.h>
#include <asm/arcregs.h>
#include <asm/ptrace.h>
#include <asm/processor.h>	/* For VMALLOC_START */
#include <asm/thread_info.h>	/* For THREAD_SIZE */

/* Note on the LD/ST addr modes with addr reg wback
 *
 * LD.a same as LD.aw
 *
 * LD.a reg1, [reg2, x]  => Pre Incr
 *      Eff Addr for load = [reg2 + x]
 *
 * LD.ab reg1, [reg2, x] => Post Incr
 *      Eff Addr for load = [reg2]
 */

/*--------------------------------------------------------------
 * Save caller saved registers (scratch registers) ( r0 - r12 )
 * Registers are pushed / popped in the order defined in struct ptregs
 * in asm/ptrace.h
 * (st.a = store with pre-decrement writeback, i.e. a push)
 *-------------------------------------------------------------*/
.macro SAVE_CALLER_SAVED
	st.a    r0, [sp, -4]
	st.a    r1, [sp, -4]
	st.a    r2, [sp, -4]
	st.a    r3, [sp, -4]
	st.a    r4, [sp, -4]
	st.a    r5, [sp, -4]
	st.a    r6, [sp, -4]
	st.a    r7, [sp, -4]
	st.a    r8, [sp, -4]
	st.a    r9, [sp, -4]
	st.a    r10, [sp, -4]
	st.a    r11, [sp, -4]
	st.a    r12, [sp, -4]
.endm

/*--------------------------------------------------------------
 * Restore caller saved registers (scratch registers)
 * Exact mirror of SAVE_CALLER_SAVED (ld.ab = load with post-increment
 * writeback, i.e. a pop), so pop order is r12 down to r0.
 *-------------------------------------------------------------*/
.macro RESTORE_CALLER_SAVED
	ld.ab   r12, [sp, 4]
	ld.ab   r11, [sp, 4]
	ld.ab   r10, [sp, 4]
	ld.ab   r9, [sp, 4]
	ld.ab   r8, [sp, 4]
	ld.ab   r7, [sp, 4]
	ld.ab   r6, [sp, 4]
	ld.ab   r5, [sp, 4]
	ld.ab   r4, [sp, 4]
	ld.ab   r3, [sp, 4]
	ld.ab   r2, [sp, 4]
	ld.ab   r1, [sp, 4]
	ld.ab   r0, [sp, 4]
.endm


/*--------------------------------------------------------------
 * Save callee saved registers (non scratch registers) ( r13 - r25 )
 * on kernel stack.
 * User mode callee regs need to be saved in case of
 *    -fork and friends for replicating from parent to child
 *    -before going into do_signal( ) for ptrace/core-dump
 * Special case handling is required for r25 in case it is used by kernel
 * for caching task ptr. Low level exception/ISR save user mode r25
 * into task->thread.user_r25. So it needs to be retrieved from there and
 * saved into kernel stack with rest of callee reg-file
 *-------------------------------------------------------------*/
.macro SAVE_CALLEE_SAVED_USER
	st.a    r13, [sp, -4]
	st.a    r14, [sp, -4]
	st.a    r15, [sp, -4]
	st.a    r16, [sp, -4]
	st.a    r17, [sp, -4]
	st.a    r18, [sp, -4]
	st.a    r19, [sp, -4]
	st.a    r20, [sp, -4]
	st.a    r21, [sp, -4]
	st.a    r22, [sp, -4]
	st.a    r23, [sp, -4]
	st.a    r24, [sp, -4]

#ifdef CONFIG_ARC_CURR_IN_REG
	; Retrieve orig r25 and save it on stack
	ld      r12, [r25, TASK_THREAD + THREAD_USER_R25]
	st.a    r12, [sp, -4]
#else
	st.a    r25, [sp, -4]
#endif

	/* move up by 1 word to "create" callee_regs->"stack_place_holder" */
	sub sp, sp, 4
.endm

/*--------------------------------------------------------------
 * Save callee saved registers (non scratch registers) ( r13 - r25 )
 * kernel mode callee regs needed to be saved in case of context switch
 * If r25 is used for caching task pointer then that need not be saved
 * as it can be re-created from current task global
 *-------------------------------------------------------------*/
.macro SAVE_CALLEE_SAVED_KERNEL
	st.a    r13, [sp, -4]
	st.a    r14, [sp, -4]
	st.a    r15, [sp, -4]
	st.a    r16, [sp, -4]
	st.a    r17, [sp, -4]
	st.a    r18, [sp, -4]
	st.a    r19, [sp, -4]
	st.a    r20, [sp, -4]
	st.a    r21, [sp, -4]
	st.a    r22, [sp, -4]
	st.a    r23, [sp, -4]
	st.a    r24, [sp, -4]
#ifdef CONFIG_ARC_CURR_IN_REG
	/* skip r25 slot and the stack_place_holder word in one go */
	sub     sp, sp, 8
#else
	st.a    r25, [sp, -4]
	sub     sp, sp, 4
#endif
.endm

/*--------------------------------------------------------------
 * RESTORE_CALLEE_SAVED_KERNEL:
 * Loads callee (non scratch) Reg File by popping from Kernel mode stack.
 * This is reverse of SAVE_CALLEE_SAVED,
 *
 * NOTE:
 * Ideally this shd only be called in switch_to for loading
 *  switched-IN task's CALLEE Reg File.
 * For all other cases RESTORE_CALLEE_SAVED_FAST must be used
 * which simply pops the stack w/o touching regs.
 *-------------------------------------------------------------*/
.macro RESTORE_CALLEE_SAVED_KERNEL

#ifdef CONFIG_ARC_CURR_IN_REG
	add     sp, sp, 8  /* skip callee_reg gutter and user r25 placeholder */
#else
	add     sp, sp, 4   /* skip "callee_regs->stack_place_holder" */
	ld.ab   r25, [sp, 4]
#endif

	ld.ab   r24, [sp, 4]
	ld.ab   r23, [sp, 4]
	ld.ab   r22, [sp, 4]
	ld.ab   r21, [sp, 4]
	ld.ab   r20, [sp, 4]
	ld.ab   r19, [sp, 4]
	ld.ab   r18, [sp, 4]
	ld.ab   r17, [sp, 4]
	ld.ab   r16, [sp, 4]
	ld.ab   r15, [sp, 4]
	ld.ab   r14, [sp, 4]
	ld.ab   r13, [sp, 4]

.endm

/*--------------------------------------------------------------
 * RESTORE_CALLEE_SAVED_USER:
 * This is called after do_signal where tracer might have changed callee regs
 * thus we need to restore the reg file.
 * Special case handling is required for r25 in case it is used by kernel
 * for caching task ptr. Ptrace would have modified on-kernel-stack value of
 * r25, which needs to be shoved back into task->thread.user_r25 where from
 * Low level exception/ISR return code will retrieve to populate with rest of
 * callee reg-file.
 *-------------------------------------------------------------*/
.macro RESTORE_CALLEE_SAVED_USER

	add     sp, sp, 4   /* skip "callee_regs->stack_place_holder" */

#ifdef CONFIG_ARC_CURR_IN_REG
	ld.ab   r12, [sp, 4]
	st      r12, [r25, TASK_THREAD + THREAD_USER_R25]
#else
	ld.ab   r25, [sp, 4]
#endif

	ld.ab   r24, [sp, 4]
	ld.ab   r23, [sp, 4]
	ld.ab   r22, [sp, 4]
	ld.ab   r21, [sp, 4]
	ld.ab   r20, [sp, 4]
	ld.ab   r19, [sp, 4]
	ld.ab   r18, [sp, 4]
	ld.ab   r17, [sp, 4]
	ld.ab   r16, [sp, 4]
	ld.ab   r15, [sp, 4]
	ld.ab   r14, [sp, 4]
	ld.ab   r13, [sp, 4]
.endm

/*--------------------------------------------------------------
 * Super FAST Restore callee saved regs by simply re-adjusting SP
 * (13 regs r13-r25 + the stack_place_holder word = 14 words)
 *-------------------------------------------------------------*/
.macro DISCARD_CALLEE_SAVED_USER
	add     sp, sp, 14 * 4
.endm

/*--------------------------------------------------------------
 * Restore User mode r25 saved in task_struct->thread.user_r25
 * (r25 currently holds "current" task ptr; overwrite it with the
 *  user value stashed there by the entry prologue)
 *-------------------------------------------------------------*/
.macro RESTORE_USER_R25
	ld  r25, [r25, TASK_THREAD + THREAD_USER_R25]
.endm

/*-------------------------------------------------------------
 * given a tsk struct, get to the base of it's kernel mode stack
 * tsk->thread_info is really a PAGE, whose bottom hoists stack
 * which grows upwards towards thread_info
 *------------------------------------------------------------*/

.macro GET_TSK_STACK_BASE tsk, out

	/* Get task->thread_info (this is essentially start of a PAGE) */
	ld  \out, [\tsk, TASK_THREAD_INFO]

	/* Go to end of page where stack begins (grows upwards) */
	add2 \out, \out, (THREAD_SIZE)/4	; add2 scales by 4

.endm

/*--------------------------------------------------------------
 * Switch to Kernel Mode stack if SP points to User Mode stack
 *
 * Entry : r9 contains pre-IRQ/exception/trap status32
 * Exit  : SP is set to kernel mode stack pointer
 *         If CURR_IN_REG, r25 set to "current" task pointer
 * Clobbers: r9
 *-------------------------------------------------------------*/

.macro SWITCH_TO_KERNEL_STK

	/* User Mode when this happened ? Yes: Proceed to switch stack */
	bbit1   r9, STATUS_U_BIT, 88f

	/* OK we were already in kernel mode when this event happened, thus can
	 * assume SP is kernel mode SP. _NO_ need to do any stack switching
	 */

#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
	/* However....
	 * If Level 2 Interrupts enabled, we may end up with a corner case:
	 * 1. User Task executing
	 * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
	 * 3. But before it could switch SP from USER to KERNEL stack
	 *    a L2 IRQ "Interrupts" L1
	 * That way although L2 IRQ happened in Kernel mode, stack is still
	 * not switched.
	 * To handle this, we may need to switch stack even if in kernel mode
	 * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
	 */
	brlo sp, VMALLOC_START, 88f

	/* TODO: vineetg:
	 * We need to be a bit more cautious here. What if a kernel bug in
	 * L1 ISR, caused SP to go whaco (some small value which looks like
	 * USER stk) and then we take L2 ISR.
	 * Above brlo alone would treat it as a valid L1-L2 scenario
	 * instead of shouting aloud
	 * The only feasible way is to make sure this L2 happened in
	 * L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in
	 * L1 ISR before it switches stack
	 */

#endif

	/* Save Pre Intr/Exception KERNEL MODE SP on kernel stack
	 * safe-keeping not really needed, but it keeps the epilogue code
	 * (SP restore) simpler/uniform.
	 */
	b.d	77f

	st.a	sp, [sp, -12]	; Make room for orig_r0 and orig_r8

88: /*------Intr/Ecxp happened in user mode, "switch" stack ------ */

	GET_CURR_TASK_ON_CPU   r9

#ifdef CONFIG_ARC_CURR_IN_REG

	/* If current task pointer cached in r25, time to
	 *  -safekeep USER r25 in task->thread_struct->user_r25
	 *  -load r25 with current task ptr
	 */
	st.as	r25, [r9, (TASK_THREAD + THREAD_USER_R25)/4]
	mov	r25, r9
#endif

	/* With current tsk in r9, get it's kernel mode stack base */
	GET_TSK_STACK_BASE  r9, r9

	/* Save Pre Intr/Exception User SP on kernel stack */
	st.a    sp, [r9, -12]	; Make room for orig_r0 and orig_r8

	/* CAUTION:
	 * SP should be set at the very end when we are done with everything
	 * In case of 2 levels of interrupt we depend on value of SP to assume
	 * that everything else is done (loading r25 etc)
	 */

	/* set SP to point to kernel mode stack */
	mov sp, r9

77: /* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */

.endm

/*------------------------------------------------------------
 * "FAKE" a rtie to return from CPU Exception context
 * This is to re-enable Exceptions within exception
 * Look at EV_ProtV to see how this is actually used
 * Clobbers: \reg (and exception state regs erstatus/eret)
 *-------------------------------------------------------------*/

.macro FAKE_RET_FROM_EXCPN  reg

	ld  \reg, [sp, PT_status32]
	bic  \reg, \reg, (STATUS_U_MASK|STATUS_DE_MASK)
	bset \reg, \reg, STATUS_L_BIT	; setting L clears any ZOL context
	sr  \reg, [erstatus]
	mov \reg, 55f
	sr  \reg, [eret]

	rtie
55:
.endm

/*
 * @reg [OUT] &thread_info of "current"
 * (kernel stacks are THREAD_SIZE aligned; masking SP yields thread_info)
 */
.macro GET_CURR_THR_INFO_FROM_SP  reg
	and \reg, sp, ~(THREAD_SIZE - 1)
.endm

/*
 * @reg [OUT] thread_info->flags of "current"
 */
.macro GET_CURR_THR_INFO_FLAGS  reg
	GET_CURR_THR_INFO_FROM_SP  \reg
	ld  \reg, [\reg, THREAD_INFO_FLAGS]
.endm

/*--------------------------------------------------------------
 * For early Exception Prologue, a core reg is temporarily needed to
 * code the rest of prolog (stack switching). This is done by stashing
 * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
 *
 * Before saving the full regfile - this reg is restored back, only
 * to be saved again on kernel mode stack, as part of ptregs.
 *-------------------------------------------------------------*/
.macro EXCPN_PROLOG_FREEUP_REG	reg
#ifdef CONFIG_SMP
	sr  \reg, [ARC_REG_SCRATCH_DATA0]
#else
	st  \reg, [@ex_saved_reg1]
#endif
.endm

/* Counterpart of EXCPN_PROLOG_FREEUP_REG: reload the stashed core reg */
.macro EXCPN_PROLOG_RESTORE_REG	reg
#ifdef CONFIG_SMP
	lr  \reg, [ARC_REG_SCRATCH_DATA0]
#else
	ld  \reg, [@ex_saved_reg1]
#endif
.endm

/*--------------------------------------------------------------
 * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
 * Requires SP to be already switched to kernel mode Stack
 * sp points to the next free element on the stack at exit of this macro.
 * Registers are pushed / popped in the order defined in struct ptregs
 * in asm/ptrace.h
 * Note that syscalls are implemented via TRAP which is also a exception
 * from CPU's point of view
 *-------------------------------------------------------------*/
.macro SAVE_ALL_EXCEPTION   marker

	st      \marker, [sp, 8]	/* orig_r8 */
	st      r0, [sp, 4]	/* orig_r0, needed only for sys calls */

	/* Restore r9 used to code the early prologue */
	EXCPN_PROLOG_RESTORE_REG  r9

	SAVE_CALLER_SAVED
	st.a    r26, [sp, -4]	/* gp */
	st.a    fp, [sp, -4]
	st.a    blink, [sp, -4]
	lr	r9, [eret]
	st.a    r9, [sp, -4]
	lr	r9, [erstatus]
	st.a    r9, [sp, -4]
	st.a    lp_count, [sp, -4]
	lr	r9, [lp_end]
	st.a    r9, [sp, -4]
	lr	r9, [lp_start]
	st.a    r9, [sp, -4]
	lr	r9, [erbta]
	st.a    r9, [sp, -4]
.endm

/*--------------------------------------------------------------
 * Save scratch regs for exceptions
 * (orig_r8 tagged with the "exception" event type)
 *-------------------------------------------------------------*/
.macro SAVE_ALL_SYS
	SAVE_ALL_EXCEPTION  orig_r8_IS_EXCPN
.endm

/*--------------------------------------------------------------
 * Save scratch regs for sys calls
 *-------------------------------------------------------------*/
.macro SAVE_ALL_TRAP
	/*
	 * Setup pt_regs->orig_r8.
	 * Encode syscall number (r8) in upper short word of event type (r9)
	 * N.B. #1: This is already endian safe (see ptrace.h)
	 *      #2: Only r9 can be used as scratch as it is already clobbered
	 *          and it's contents are no longer needed by the latter part
	 *          of exception prologue
	 */
	lsl  r9, r8, 16
	or   r9, r9, orig_r8_IS_SCALL

	SAVE_ALL_EXCEPTION  r9
.endm

/*--------------------------------------------------------------
 * Restore all registers used by system call or Exceptions
 * SP should always be pointing to the next free stack element
 * when entering this macro.
 *
 * NOTE:
 *
 * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
 * for memory load operations. If used in that way interrupts are deffered
 * by hardware and that is not good.
 *-------------------------------------------------------------*/
.macro RESTORE_ALL_SYS

	ld.ab   r9, [sp, 4]
	sr	r9, [erbta]
	ld.ab   r9, [sp, 4]
	sr	r9, [lp_start]
	ld.ab   r9, [sp, 4]
	sr	r9, [lp_end]
	ld.ab   r9, [sp, 4]
	mov	lp_count, r9	; load via r9, not directly (see NOTE above)
	ld.ab   r9, [sp, 4]
	sr	r9, [erstatus]
	ld.ab   r9, [sp, 4]
	sr	r9, [eret]
	ld.ab   blink, [sp, 4]
	ld.ab   fp, [sp, 4]
	ld.ab   r26, [sp, 4]	/* gp */
	RESTORE_CALLER_SAVED

	ld  sp, [sp]	/* restore original sp */
	/* orig_r0 and orig_r8 skipped automatically */
.endm


/*--------------------------------------------------------------
 * Save all registers used by interrupt handlers (Level 1 IRQ).
 *-------------------------------------------------------------*/
.macro SAVE_ALL_INT1

	/* restore original r9 , saved in int1_saved_reg
	 * It will be saved on stack in macro: SAVE_CALLER_SAVED
	 */
#ifdef CONFIG_SMP
	lr  r9, [ARC_REG_SCRATCH_DATA0]
#else
	ld  r9, [@int1_saved_reg]
#endif

	/* now we are ready to save the remaining context :) */
	st      orig_r8_IS_IRQ1, [sp, 8]	/* Event Type */
	st      0, [sp, 4]	/* orig_r0 , N/A for IRQ */
	SAVE_CALLER_SAVED
	st.a    r26, [sp, -4]	/* gp */
	st.a    fp, [sp, -4]
	st.a    blink, [sp, -4]
	st.a    ilink1, [sp, -4]
	lr	r9, [status32_l1]
	st.a    r9, [sp, -4]
	st.a    lp_count, [sp, -4]
	lr	r9, [lp_end]
	st.a    r9, [sp, -4]
	lr	r9, [lp_start]
	st.a    r9, [sp, -4]
	lr	r9, [bta_l1]
	st.a    r9, [sp, -4]
.endm

/*--------------------------------------------------------------
 * Save all registers used by Level 2 interrupt handlers.
 * Mirrors SAVE_ALL_INT1, but uses ilink2/status32_l2/bta_l2.
 *-------------------------------------------------------------*/
.macro SAVE_ALL_INT2

	/* TODO-vineetg: SMP we can't use global nor can we use
	 *	SCRATCH0 as we do for int1 because while int1 is using
	 *	it, int2 can come
	 */
	/* restore original r9 , saved in sys_saved_r9 */
	ld  r9, [@int2_saved_reg]

	/* now we are ready to save the remaining context :) */
	st      orig_r8_IS_IRQ2, [sp, 8]	/* Event Type */
	st      0, [sp, 4]	/* orig_r0 , N/A for IRQ */
	SAVE_CALLER_SAVED
	st.a    r26, [sp, -4]	/* gp */
	st.a    fp, [sp, -4]
	st.a    blink, [sp, -4]
	st.a    ilink2, [sp, -4]
	lr	r9, [status32_l2]
	st.a    r9, [sp, -4]
	st.a    lp_count, [sp, -4]
	lr	r9, [lp_end]
	st.a    r9, [sp, -4]
	lr	r9, [lp_start]
	st.a    r9, [sp, -4]
	lr	r9, [bta_l2]
	st.a    r9, [sp, -4]
.endm

/*--------------------------------------------------------------
 * Restore all registers used by interrupt handlers.
 *
 * NOTE:
 *
 * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
 * for memory load operations. If used in that way interrupts are deffered
 * by hardware and that is not good.
 *-------------------------------------------------------------*/

.macro RESTORE_ALL_INT1

	ld.ab   r9, [sp, 4]	/* Actual reg file */
	sr	r9, [bta_l1]
	ld.ab   r9, [sp, 4]
	sr	r9, [lp_start]
	ld.ab   r9, [sp, 4]
	sr	r9, [lp_end]
	ld.ab   r9, [sp, 4]
	mov	lp_count, r9	; load via r9, not directly (see NOTE above)
	ld.ab   r9, [sp, 4]
	sr	r9, [status32_l1]
	ld.ab   r9, [sp, 4]
	mov	ilink1, r9
	ld.ab   blink, [sp, 4]
	ld.ab   fp, [sp, 4]
	ld.ab   r26, [sp, 4]	/* gp */
	RESTORE_CALLER_SAVED

	ld  sp, [sp]	/* restore original sp */
	/* orig_r0 and orig_r8 skipped automatically */
.endm

/* Mirror of RESTORE_ALL_INT1 for Level 2 IRQ (l2 aux regs, ilink2) */
.macro RESTORE_ALL_INT2

	ld.ab   r9, [sp, 4]
	sr	r9, [bta_l2]
	ld.ab   r9, [sp, 4]
	sr	r9, [lp_start]
	ld.ab   r9, [sp, 4]
	sr	r9, [lp_end]
	ld.ab   r9, [sp, 4]
	mov	lp_count, r9	; load via r9, not directly (see file NOTE)
	ld.ab   r9, [sp, 4]
	sr	r9, [status32_l2]
	ld.ab   r9, [sp, 4]
	mov	ilink2, r9
	ld.ab   blink, [sp, 4]
	ld.ab   fp, [sp, 4]
	ld.ab   r26, [sp, 4]	/* gp */
	RESTORE_CALLER_SAVED

	ld  sp, [sp]	/* restore original sp */
	/* orig_r0 and orig_r8 skipped automatically */

.endm


/* Get CPU-ID of this core: bits [15:8] of the IDENTITY aux register */
.macro  GET_CPU_ID  reg
	lr  \reg, [identity]
	lsr \reg, \reg, 8
	bmsk \reg, \reg, 7
.endm

#ifdef CONFIG_SMP

/*-------------------------------------------------
 * Retrieve the current running task on this CPU
 * 1. Determine curr CPU id.
 * 2. Use it to index into _current_task[ ]
 */
.macro  GET_CURR_TASK_ON_CPU   reg
	GET_CPU_ID  \reg
	ld.as  \reg, [@_current_task, \reg]
.endm

/*-------------------------------------------------
 * Save a new task as the "current" task on this CPU
 * 1. Determine curr CPU id.
 * 2. Use it to index into _current_task[ ]
 *
 * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS)
 * because ST r0, [r1, offset] can ONLY have s9 @offset
 * while LD can take s9 (4 byte insn) or LIMM (8 byte insn)
 */

.macro  SET_CURR_TASK_ON_CPU    tsk, tmp
	GET_CPU_ID  \tmp
	add2 \tmp, @_current_task, \tmp
	st   \tsk, [\tmp]
#ifdef CONFIG_ARC_CURR_IN_REG
	mov r25, \tsk
#endif

.endm


#else   /* Uniprocessor implementation of macros */

.macro  GET_CURR_TASK_ON_CPU    reg
	ld  \reg, [@_current_task]
.endm

.macro  SET_CURR_TASK_ON_CPU    tsk, tmp
	st  \tsk, [@_current_task]
#ifdef CONFIG_ARC_CURR_IN_REG
	mov r25, \tsk
#endif
.endm

#endif /* SMP / UNI */

/* ------------------------------------------------------------------
 * Get the ptr to some field of Current Task at @off in task struct
 *  -Uses r25 for Current task ptr if that is enabled
 */

#ifdef CONFIG_ARC_CURR_IN_REG

.macro GET_CURR_TASK_FIELD_PTR  off,  reg
	add \reg, r25, \off
.endm

#else

.macro GET_CURR_TASK_FIELD_PTR  off,  reg
	GET_CURR_TASK_ON_CPU  \reg
	add \reg, \reg, \off
.endm

#endif	/* CONFIG_ARC_CURR_IN_REG */

#endif  /* __ASSEMBLY__ */

#endif  /* __ASM_ARC_ENTRY_H */