microblaze: Defconfig update
[linux-2.6-block.git] / arch / microblaze / kernel / entry-nommu.S
CommitLineData
6d5af1a3
MS
1/*
2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2007-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#include <linux/linkage.h>
12#include <asm/thread_info.h>
65504a47 13#include <linux/errno.h>
6d5af1a3
MS
14#include <asm/entry.h>
15#include <asm/asm-offsets.h>
16#include <asm/registers.h>
17#include <asm/unistd.h>
18#include <asm/percpu.h>
19#include <asm/signal.h>
20
/*
 * IRQ / break-in-progress helper macros.
 *
 * When the CPU is configured with the optional msrset/msrclr instructions
 * (CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR) an MSR bit can be changed in a
 * single instruction; the result (old MSR) is discarded into r0.
 * Otherwise fall back to a read-modify-write of rmsr, which clobbers r11.
 * Callers throughout this file treat r11 as scratch around these macros.
 */
21#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
22 .macro disable_irq
23 msrclr r0, MSR_IE
24 .endm
25
26 .macro enable_irq
27 msrset r0, MSR_IE
28 .endm
29
30 .macro clear_bip
31 msrclr r0, MSR_BIP
32 .endm
/* No msrset/msrclr: emulate via mfs/mts on rmsr (clobbers r11) */
33#else
34 .macro disable_irq
35 mfs r11, rmsr
36 andi r11, r11, ~MSR_IE
37 mts rmsr, r11
38 .endm
39
40 .macro enable_irq
41 mfs r11, rmsr
42 ori r11, r11, MSR_IE
43 mts rmsr, r11
44 .endm
45
46 .macro clear_bip
47 mfs r11, rmsr
48 andi r11, r11, ~MSR_BIP
49 mts rmsr, r11
50 .endm
51#endif
52
/*
 * _interrupt - hardware interrupt entry point (noMMU).
 *
 * On MicroBlaze, r14 holds the return address for interrupts, so it is
 * stored both as PT_R14 and as PT_PC (restore_context returns via
 * "rtid r14, 0" from PT_PC).  PER_CPU(KM) is the kernel-mode indicator:
 * 0 = interrupted user mode (switch to the task's kernel stack),
 * non-zero = already in kernel mode (just make room on the current stack).
 * Builds a full struct pt_regs frame, then calls
 * do_IRQ(regs, ret_address) with r5 = pt_regs, r6 = link target.
 */
53ENTRY(_interrupt)
54 swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */
55 swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
56 lwi r11, r0, PER_CPU(KM) /* load mode indicator */
57 beqid r11, 1f /* KM == 0: came from user mode */
58 nop
59 brid 2f /* jump over */
60 addik r1, r1, (-PT_SIZE) /* room for pt_regs (delay slot) */
611: /* switch to kernel stack */
62 lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */
63 lwi r1, r1, TS_THREAD_INFO /* get the thread info */
64 /* calculate kernel stack pointer */
65 addik r1, r1, THREAD_SIZE - PT_SIZE
662:
67 swi r11, r1, PT_MODE /* store the mode */
68 lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
/* save the general-purpose registers into pt_regs */
69 swi r2, r1, PT_R2
70 swi r3, r1, PT_R3
71 swi r4, r1, PT_R4
72 swi r5, r1, PT_R5
73 swi r6, r1, PT_R6
74 swi r7, r1, PT_R7
75 swi r8, r1, PT_R8
76 swi r9, r1, PT_R9
77 swi r10, r1, PT_R10
78 swi r11, r1, PT_R11
79 swi r12, r1, PT_R12
80 swi r13, r1, PT_R13
81 swi r14, r1, PT_R14
82 swi r14, r1, PT_PC /* r14 = interrupt return address */
83 swi r15, r1, PT_R15
84 swi r16, r1, PT_R16
85 swi r17, r1, PT_R17
86 swi r18, r1, PT_R18
87 swi r19, r1, PT_R19
88 swi r20, r1, PT_R20
89 swi r21, r1, PT_R21
90 swi r22, r1, PT_R22
91 swi r23, r1, PT_R23
92 swi r24, r1, PT_R24
93 swi r25, r1, PT_R25
94 swi r26, r1, PT_R26
95 swi r27, r1, PT_R27
96 swi r28, r1, PT_R28
97 swi r29, r1, PT_R29
98 swi r30, r1, PT_R30
99 swi r31, r1, PT_R31
100 /* special purpose registers */
101 mfs r11, rmsr
102 swi r11, r1, PT_MSR
103 mfs r11, rear
104 swi r11, r1, PT_EAR
105 mfs r11, resr
106 swi r11, r1, PT_ESR
107 mfs r11, rfsr
108 swi r11, r1, PT_FSR
109 /* reload original stack pointer and save it */
110 lwi r11, r0, PER_CPU(ENTRY_SP)
111 swi r11, r1, PT_R1
112 /* update mode indicator we are in kernel mode */
113 addik r11, r0, 1
114 swi r11, r0, PER_CPU(KM)
115 /* restore r31 */
116 lwi r31, r0, PER_CPU(CURRENT_SAVE) /* r31 = current task */
117 /* prepare the link register, the argument and jump */
/* "- 8" compensates for the +8 a normal rtsd-style return would add */
118 la r15, r0, ret_from_intr - 8
119 addk r6, r0, r15
120 braid do_IRQ
121 add r5, r0, r1 /* arg1 = pt_regs (delay slot) */
122
/*
 * ret_from_intr - interrupt epilogue.
 *
 * If PT_MODE is non-zero we interrupted kernel mode: skip the user-mode
 * work (reschedule / signal delivery) and go straight to 3f, which stores
 * the still-loaded mode back into PER_CPU(KM).  Otherwise check the
 * thread-info flags and call schedule()/do_signal() as needed before
 * falling through into restore_context.
 */
123ret_from_intr:
124 lwi r11, r1, PT_MODE
/* NOTE(review): the delay slot of this bneid is the lwi below, executed
 * on both paths; it is harmless on the kernel-mode path. */
125 bneid r11, 3f
126
127 lwi r6, r31, TS_THREAD_INFO /* get thread info */
128 lwi r19, r6, TI_FLAGS /* get flags in thread info */
129 /* do an extra work if any bits are set */
130
131 andi r11, r19, _TIF_NEED_RESCHED
132 beqi r11, 1f
133 bralid r15, schedule
134 nop
1351: andi r11, r19, _TIF_SIGPENDING
136 beqid r11, no_intr_reshed
137 addk r5, r1, r0 /* do_signal arg1 = pt_regs (delay slot) */
138 addk r7, r0, r0 /* in_syscall = 0 */
139 bralid r15, do_signal
140 addk r6, r0, r0 /* oldset = NULL (delay slot) */
141
142no_intr_reshed:
143 /* save mode indicator */
144 lwi r11, r1, PT_MODE
1453:
146 swi r11, r0, PER_CPU(KM)
147
148 /* save r31 */
149 swi r31, r0, PER_CPU(CURRENT_SAVE)
/*
 * restore_context - reload the full register state from the pt_regs
 * frame at r1 and return to the interrupted context.
 *
 * Registers are restored in descending order so that r1 (which addresses
 * the frame) is reloaded last.  The final return uses "rtid r14, 0":
 * r14 was loaded from PT_PC, and rtid re-enables interrupts (sets MSR_IE)
 * on return.
 */
150restore_context:
151 /* special purpose registers */
152 lwi r11, r1, PT_FSR
153 mts rfsr, r11
154 lwi r11, r1, PT_ESR
155 mts resr, r11
156 lwi r11, r1, PT_EAR
157 mts rear, r11
158 lwi r11, r1, PT_MSR
159 mts rmsr, r11
160
161 lwi r31, r1, PT_R31
162 lwi r30, r1, PT_R30
163 lwi r29, r1, PT_R29
164 lwi r28, r1, PT_R28
165 lwi r27, r1, PT_R27
166 lwi r26, r1, PT_R26
167 lwi r25, r1, PT_R25
168 lwi r24, r1, PT_R24
169 lwi r23, r1, PT_R23
170 lwi r22, r1, PT_R22
171 lwi r21, r1, PT_R21
172 lwi r20, r1, PT_R20
173 lwi r19, r1, PT_R19
174 lwi r18, r1, PT_R18
175 lwi r17, r1, PT_R17
176 lwi r16, r1, PT_R16
177 lwi r15, r1, PT_R15
178 lwi r14, r1, PT_PC /* return address for rtid */
179 lwi r13, r1, PT_R13
180 lwi r12, r1, PT_R12
181 lwi r11, r1, PT_R11
182 lwi r10, r1, PT_R10
183 lwi r9, r1, PT_R9
184 lwi r8, r1, PT_R8
185 lwi r7, r1, PT_R7
186 lwi r6, r1, PT_R6
187 lwi r5, r1, PT_R5
188 lwi r4, r1, PT_R4
189 lwi r3, r1, PT_R3
190 lwi r2, r1, PT_R2
191 lwi r1, r1, PT_R1 /* restore sp last; frame no longer addressable */
192 rtid r14, 0
193 nop
194
/* _reset - reset vector target: jump to address 0 (restart). */
195ENTRY(_reset)
196 brai 0;
197
/*
 * _user_exception - system call entry (brki r14, 0x8 from user space).
 *
 * Prologue: pick the stack for the pt_regs frame.  PER_CPU(KM) == 0
 * means we came from user mode and must switch to the task's kernel
 * stack (thread_info + THREAD_SIZE); otherwise just reserve PT_SIZE on
 * the current (kernel) stack.
 */
198ENTRY(_user_exception)
199 swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */
200 swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
201 lwi r11, r0, PER_CPU(KM) /* load mode indicator */
202 beqid r11, 1f /* Already in kernel mode? */
203 nop
204 brid 2f /* jump over */
205 addik r1, r1, (-PT_SIZE) /* Room for pt_regs (delay slot) */
2061: /* Switch to kernel stack */
207 lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */
208 lwi r1, r1, TS_THREAD_INFO /* get the thread info */
209 /* calculate kernel stack pointer */
210 addik r1, r1, THREAD_SIZE - PT_SIZE
6d5af1a3
MS
/*
 * _user_exception body: save a full pt_regs frame, then dispatch the
 * system call.  r12 carries the syscall number, r14 the return address
 * (stored as PT_PC + 4 so we return to the instruction after the trap).
 * Valid numbers are looked up in sys_call_table (num * 4, built with two
 * adds because the barrel shifter is optional); invalid numbers return
 * -ENOSYS in r3.  Both paths resume at ret_to_user.
 */
2112:
212 swi r11, r1, PT_MODE /* store the mode */
213 lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
214 /* save them on stack */
215 swi r2, r1, PT_R2
216 swi r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
217 swi r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
218 swi r5, r1, PT_R5
219 swi r6, r1, PT_R6
220 swi r7, r1, PT_R7
221 swi r8, r1, PT_R8
222 swi r9, r1, PT_R9
223 swi r10, r1, PT_R10
224 swi r11, r1, PT_R11
225 /* r12: _always_ in clobber list; see unistd.h */
226 swi r12, r1, PT_R12
227 swi r13, r1, PT_R13
228 /* r14: _always_ in clobber list; see unistd.h */
229 swi r14, r1, PT_R14
230 /* but we want to return to the next inst. */
231 addik r14, r14, 0x4
232 swi r14, r1, PT_PC /* increment by 4 and store in pc */
233 swi r15, r1, PT_R15
234 swi r16, r1, PT_R16
235 swi r17, r1, PT_R17
236 swi r18, r1, PT_R18
237 swi r19, r1, PT_R19
238 swi r20, r1, PT_R20
239 swi r21, r1, PT_R21
240 swi r22, r1, PT_R22
241 swi r23, r1, PT_R23
242 swi r24, r1, PT_R24
243 swi r25, r1, PT_R25
244 swi r26, r1, PT_R26
245 swi r27, r1, PT_R27
246 swi r28, r1, PT_R28
247 swi r29, r1, PT_R29
248 swi r30, r1, PT_R30
249 swi r31, r1, PT_R31
250
251 disable_irq
252 nop /* make sure IE bit is in effect */
253 clear_bip /* once IE is in effect it is safe to clear BIP */
254 nop
255
256 /* special purpose registers */
257 mfs r11, rmsr
258 swi r11, r1, PT_MSR
259 mfs r11, rear
260 swi r11, r1, PT_EAR
261 mfs r11, resr
262 swi r11, r1, PT_ESR
263 mfs r11, rfsr
264 swi r11, r1, PT_FSR
265 /* reload original stack pointer and save it */
266 lwi r11, r0, PER_CPU(ENTRY_SP)
267 swi r11, r1, PT_R1
268 /* update mode indicator we are in kernel mode */
269 addik r11, r0, 1
270 swi r11, r0, PER_CPU(KM)
271 /* restore r31 */
272 lwi r31, r0, PER_CPU(CURRENT_SAVE)
273 /* re-enable interrupts now we are in kernel mode */
274 enable_irq
275
276 /* See if the system call number is valid. */
277 addi r11, r12, -__NR_syscalls
278 bgei r11, 1f /* return to user if not valid */
279 /* Figure out which function to use for this system call. */
280 /* Note Microblaze barrel shift is optional, so don't rely on it */
281 add r12, r12, r12 /* convert num -> ptr */
282 add r12, r12, r12
283 lwi r12, r12, sys_call_table /* Get function pointer */
284 la r15, r0, ret_to_user-8 /* set return address */
285 bra r12 /* Make the system call. */
286 bri 0 /* won't reach here */
2871:
288 brid ret_to_user /* jump to syscall epilogue */
289 addi r3, r0, -ENOSYS /* set errno in delay slot */
290
291/*
292 * Debug traps are like a system call, but entered via brki r14, 0x60
293 * All we need to do is send the SIGTRAP signal to current, ptrace and do_signal
294 * will handle the rest
295 */
/*
 * _debug_exception - debug trap entry (brki r14, 0x60).
 *
 * Unconditionally builds the pt_regs frame on the task's kernel stack
 * (no user/kernel stack choice, unlike _interrupt/_user_exception),
 * saves full state, then delivers SIGTRAP to the current task via
 * send_sig(SIGTRAP, current, 0) and exits through ret_to_user.
 * PT_PC is r14 unmodified: we return to the interrupted instruction.
 */
296ENTRY(_debug_exception)
297 swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */
298 lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */
299 lwi r1, r1, TS_THREAD_INFO /* get the thread info */
300 addik r1, r1, THREAD_SIZE - PT_SIZE /* get the kernel stack */
301 swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
302 lwi r11, r0, PER_CPU(KM) /* load mode indicator */
303//save_context:
304 swi r11, r1, PT_MODE /* store the mode */
305 lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
306 /* save them on stack */
307 swi r2, r1, PT_R2
308 swi r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
309 swi r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
310 swi r5, r1, PT_R5
311 swi r6, r1, PT_R6
312 swi r7, r1, PT_R7
313 swi r8, r1, PT_R8
314 swi r9, r1, PT_R9
315 swi r10, r1, PT_R10
316 swi r11, r1, PT_R11
317 /* r12: _always_ in clobber list; see unistd.h */
318 swi r12, r1, PT_R12
319 swi r13, r1, PT_R13
320 /* r14: _always_ in clobber list; see unistd.h */
321 swi r14, r1, PT_R14
322 swi r14, r1, PT_PC /* Will return to interrupted instruction */
323 swi r15, r1, PT_R15
324 swi r16, r1, PT_R16
325 swi r17, r1, PT_R17
326 swi r18, r1, PT_R18
327 swi r19, r1, PT_R19
328 swi r20, r1, PT_R20
329 swi r21, r1, PT_R21
330 swi r22, r1, PT_R22
331 swi r23, r1, PT_R23
332 swi r24, r1, PT_R24
333 swi r25, r1, PT_R25
334 swi r26, r1, PT_R26
335 swi r27, r1, PT_R27
336 swi r28, r1, PT_R28
337 swi r29, r1, PT_R29
338 swi r30, r1, PT_R30
339 swi r31, r1, PT_R31
340
341 disable_irq
342 nop /* make sure IE bit is in effect */
343 clear_bip /* once IE is in effect it is safe to clear BIP */
344 nop
345
346 /* special purpose registers */
347 mfs r11, rmsr
348 swi r11, r1, PT_MSR
349 mfs r11, rear
350 swi r11, r1, PT_EAR
351 mfs r11, resr
352 swi r11, r1, PT_ESR
353 mfs r11, rfsr
354 swi r11, r1, PT_FSR
355 /* reload original stack pointer and save it */
356 lwi r11, r0, PER_CPU(ENTRY_SP)
357 swi r11, r1, PT_R1
358 /* update mode indicator we are in kernel mode */
359 addik r11, r0, 1
360 swi r11, r0, PER_CPU(KM)
361 /* restore r31 */
362 lwi r31, r0, PER_CPU(CURRENT_SAVE)
363 /* re-enable interrupts now we are in kernel mode */
364 enable_irq
365
366 addi r5, r0, SIGTRAP /* sending the trap signal */
367 add r6, r0, r31 /* to current */
368 bralid r15, send_sig
369 add r7, r0, r0 /* 3rd param zero */
370
371 /* Restore r3/r4 to work around how ret_to_user works */
372 lwi r3, r1, PT_R3
373 lwi r4, r1, PT_R4
374 bri ret_to_user
375
/* _break - hardware break vector target: spin here (not handled). */
376ENTRY(_break)
377 bri 0
378
379/* struct task_struct *_switch_to(struct thread_info *prev,
380 struct thread_info *next); */
/*
 * struct task_struct *_switch_to(struct thread_info *prev,
 *                                struct thread_info *next);
 *
 * Context switch.  r5 = prev thread_info, r6 = next thread_info.
 * Saves the callee-saved/dedicated registers and special registers of
 * prev into prev->cpu_context, makes next's task current (r31 and
 * PER_CPU(CURRENT_SAVE)), restores next->cpu_context, and returns via
 * r15 into next's saved call chain.  Volatile registers are not saved:
 * they were spilled by the caller.  Return value r3 = previous current
 * (the r31 value on entry).  r11/r12 are used as scratch.
 */
381ENTRY(_switch_to)
382 /* prepare return value */
383 addk r3, r0, r31 /* r3 = old current task */
384
385 /* save registers in cpu_context */
386 /* use r11 and r12, volatile registers, as temp register */
387 addik r11, r5, TI_CPU_CONTEXT /* r11 = &prev->cpu_context */
388 swi r1, r11, CC_R1
389 swi r2, r11, CC_R2
390 /* skip volatile registers.
391 * they are saved on stack when we jumped to _switch_to() */
392 /* dedicated registers */
393 swi r13, r11, CC_R13
394 swi r14, r11, CC_R14
395 swi r15, r11, CC_R15
396 swi r16, r11, CC_R16
397 swi r17, r11, CC_R17
398 swi r18, r11, CC_R18
399 /* save non-volatile registers */
400 swi r19, r11, CC_R19
401 swi r20, r11, CC_R20
402 swi r21, r11, CC_R21
403 swi r22, r11, CC_R22
404 swi r23, r11, CC_R23
405 swi r24, r11, CC_R24
406 swi r25, r11, CC_R25
407 swi r26, r11, CC_R26
408 swi r27, r11, CC_R27
409 swi r28, r11, CC_R28
410 swi r29, r11, CC_R29
411 swi r30, r11, CC_R30
412 /* special purpose registers */
413 mfs r12, rmsr
414 swi r12, r11, CC_MSR
415 mfs r12, rear
416 swi r12, r11, CC_EAR
417 mfs r12, resr
418 swi r12, r11, CC_ESR
419 mfs r12, rfsr
420 swi r12, r11, CC_FSR
421
422 /* update r31, the current */
423 lwi r31, r6, TI_TASK /* r31 = next->task */
424 swi r31, r0, PER_CPU(CURRENT_SAVE)
425
426 /* get new process' cpu context and restore */
427 addik r11, r6, TI_CPU_CONTEXT
428
429 /* special purpose registers */
430 lwi r12, r11, CC_FSR
431 mts rfsr, r12
432 lwi r12, r11, CC_ESR
433 mts resr, r12
434 lwi r12, r11, CC_EAR
435 mts rear, r12
436 lwi r12, r11, CC_MSR
437 mts rmsr, r12
438 /* non-volatile registers */
439 lwi r30, r11, CC_R30
440 lwi r29, r11, CC_R29
441 lwi r28, r11, CC_R28
442 lwi r27, r11, CC_R27
443 lwi r26, r11, CC_R26
444 lwi r25, r11, CC_R25
445 lwi r24, r11, CC_R24
446 lwi r23, r11, CC_R23
447 lwi r22, r11, CC_R22
448 lwi r21, r11, CC_R21
449 lwi r20, r11, CC_R20
450 lwi r19, r11, CC_R19
451 /* dedicated registers */
452 lwi r18, r11, CC_R18
453 lwi r17, r11, CC_R17
454 lwi r16, r11, CC_R16
455 lwi r15, r11, CC_R15
456 lwi r14, r11, CC_R14
457 lwi r13, r11, CC_R13
458 /* skip volatile registers */
459 lwi r2, r11, CC_R2
460 lwi r1, r11, CC_R1
461
462 rtsd r15, 8 /* return into next's saved link register */
463 nop
464
/*
 * ret_from_fork - first code run by a newly forked task.
 * Calls schedule_tail(prev, regs) with r5 = prev task (left in r3 by
 * _switch_to) and r6 = pt_regs, then returns 0 (child's syscall return
 * value, r3) through ret_to_user.
 */
465ENTRY(ret_from_fork)
466 addk r5, r0, r3
467 addk r6, r0, r1
468 brlid r15, schedule_tail
469 nop
470 swi r31, r1, PT_R31 /* save r31 in user context. */
471 /* will soon be restored to r31 in ret_to_user */
472 addk r3, r0, r0 /* child returns 0 */
473 brid ret_to_user
474 nop
475
/*
 * work_pending - slow path out of ret_to_user when thread-info flags
 * are set: reschedule if _TIF_NEED_RESCHED, then deliver signals via
 * do_signal(regs, NULL, in_syscall=1) if _TIF_SIGPENDING.
 * r19 holds TI_FLAGS, loaded by ret_to_user.
 */
work_pending:
477 andi r11, r19, _TIF_NEED_RESCHED
478 beqi r11, 1f
479 bralid r15, schedule
480 nop
4811: andi r11, r19, _TIF_SIGPENDING
482 beqi r11, no_work_pending
483 addk r5, r1, r0 /* arg1 = pt_regs */
484 addik r7, r0, 1 /* in_syscall = 1 */
485 bralid r15, do_signal
486 addk r6, r0, r0 /* oldset = NULL (delay slot) */
487 bri no_work_pending
488
/*
 * ret_to_user - common exit path back to the interrupted/calling
 * context (syscall and debug-trap epilogue).
 *
 * Stores the syscall return values (r3/r4) into the frame first so
 * work_pending's calls cannot lose them, handles pending work, then
 * restores the full register set from pt_regs and returns with
 * "rtid r14, 0" (re-enables interrupts).  PT_MODE is written back to
 * PER_CPU(KM) so the mode indicator matches the context we resume.
 * r18 is used as scratch here since it is a dedicated (non-volatile)
 * register already reloaded before use.
 */
489ENTRY(ret_to_user)
490 disable_irq
491
492 swi r4, r1, PT_R4 /* return val */
493 swi r3, r1, PT_R3 /* return val */
494
495 lwi r6, r31, TS_THREAD_INFO /* get thread info */
496 lwi r19, r6, TI_FLAGS /* get flags in thread info */
497 bnei r19, work_pending /* do an extra work if any bits are set */
no_work_pending:
499 disable_irq
500
501 /* save r31 */
502 swi r31, r0, PER_CPU(CURRENT_SAVE)
503 /* save mode indicator */
504 lwi r18, r1, PT_MODE
505 swi r18, r0, PER_CPU(KM)
506//restore_context:
507 /* special purpose registers */
508 lwi r18, r1, PT_FSR
509 mts rfsr, r18
510 lwi r18, r1, PT_ESR
511 mts resr, r18
512 lwi r18, r1, PT_EAR
513 mts rear, r18
514 lwi r18, r1, PT_MSR
515 mts rmsr, r18
516
517 lwi r31, r1, PT_R31
518 lwi r30, r1, PT_R30
519 lwi r29, r1, PT_R29
520 lwi r28, r1, PT_R28
521 lwi r27, r1, PT_R27
522 lwi r26, r1, PT_R26
523 lwi r25, r1, PT_R25
524 lwi r24, r1, PT_R24
525 lwi r23, r1, PT_R23
526 lwi r22, r1, PT_R22
527 lwi r21, r1, PT_R21
528 lwi r20, r1, PT_R20
529 lwi r19, r1, PT_R19
530 lwi r18, r1, PT_R18
531 lwi r17, r1, PT_R17
532 lwi r16, r1, PT_R16
533 lwi r15, r1, PT_R15
534 lwi r14, r1, PT_PC /* return address for rtid */
535 lwi r13, r1, PT_R13
536 lwi r12, r1, PT_R12
537 lwi r11, r1, PT_R11
538 lwi r10, r1, PT_R10
539 lwi r9, r1, PT_R9
540 lwi r8, r1, PT_R8
541 lwi r7, r1, PT_R7
542 lwi r6, r1, PT_R6
543 lwi r5, r1, PT_R5
544 lwi r4, r1, PT_R4 /* return val */
545 lwi r3, r1, PT_R3 /* return val */
546 lwi r2, r1, PT_R2
547 lwi r1, r1, PT_R1 /* restore sp last */
548
549 rtid r14, 0
550 nop
551
e513588f
AB
/*
 * Syscall wrappers that need the pt_regs pointer as an extra argument.
 * Each passes the current stack frame (r1 = pt_regs) in the delay slot
 * of the branch to the C implementation; the arg register differs per
 * call to match the C prototype's regs-parameter position.
 */
552sys_vfork:
553 brid microblaze_vfork
6d5af1a3
MS
554 addk r5, r1, r0 /* arg1 = pt_regs (delay slot) */
555
e513588f
AB
556sys_clone:
557 brid microblaze_clone
6d5af1a3
MS
558 addk r7, r1, r0 /* arg3 = pt_regs (delay slot) */
559
e513588f
AB
560sys_execve:
561 brid microblaze_execve
6d5af1a3
MS
562 addk r8, r1, r0 /* arg4 = pt_regs (delay slot) */
563
6d5af1a3
MS
564sys_rt_sigreturn_wrapper:
565 brid sys_rt_sigreturn
566 addk r5, r1, r0 /* arg1 = pt_regs (delay slot) */
567
6d5af1a3
MS
568sys_rt_sigsuspend_wrapper:
569 brid sys_rt_sigsuspend
570 addk r7, r1, r0 /* arg3 = pt_regs (delay slot) */
571
572 /* Interrupt vector table */
/* Vector slots are 2 instructions (8 bytes) apart starting at 0x0:
 * 0x00 reset, 0x08 user exception (syscall), 0x10 interrupt,
 * 0x18 break, 0x20 hardware exception, 0x60 debug trap. */
573 .section .init.ivt, "ax"
574 .org 0x0
575 brai _reset
576 brai _user_exception
577 brai _interrupt
578 brai _break
579 brai _hw_exception_handler
580 .org 0x60
581 brai _debug_exception
582
/* sys_call_table lives in .rodata; syscall_table.S defines the entries
 * and the sys_call_table label used by _user_exception. */
583.section .rodata,"a"
584#include "syscall_table.S"
585
586syscall_table_size=(.-sys_call_table)