#ifndef _H8300_SYSTEM_H
#define _H8300_SYSTEM_H

#include <linux/linkage.h>

/*
 * switch_to(n) should switch tasks to task ptr, first checking that
 * ptr isn't the current task, in which case it does nothing.  This
 * also clears the TS-flag if the task we switched to last used the
 * math co-processor.
 */
/*
 * switch_to() saves the extra registers that are not saved
 * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
 * a0-a1.  Some of these are used by schedule() and its predecessors,
 * so we might see unexpected behavior when a task returns with
 * unexpected register values.
 *
 * syscall stores these registers itself and none of them are used
 * by syscall after the function in the syscall has been called.
 *
 * Beware that resume now expects *next to be in d1 and the offset of
 * tss to be in a1.  This saves a few instructions as we no longer have
 * to push them onto the stack and read them back right after.
 *
 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
 *
 * Changed 96/09/19 by Andreas Schwab
 * pass prev in a0, next in a1, offset of tss in d1, and whether
 * the mm structures are shared in d2 (to avoid atc flushing).
 *
 * H8/300 Porting 2002/09/04 Yoshinori Sato
 */

asmlinkage void resume(void);
#define switch_to(prev,next,last) {			\
	void *_last;					\
	__asm__ __volatile__(				\
		"mov.l	%1, er0\n\t"			\
		"mov.l	%2, er1\n\t"			\
		"mov.l	%3, er2\n\t"			\
		"jsr	@_resume\n\t"			\
		"mov.l	er2,%0\n\t"			\
		: "=r" (_last)				\
		: "r" (&(prev->thread)),		\
		  "r" (&(next->thread)),		\
		  "g" (prev)				\
		: "cc", "er0", "er1", "er2", "er3");	\
	(last) = _last;					\
}
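
/*
 * Example (not from the original header): a minimal sketch of how
 * scheduler core code would invoke switch_to().  The function name is
 * made up; in the real kernel the call sits inside schedule() in
 * kernel/sched.c.
 */
#if 0	/* illustrative usage only, never compiled */
static void example_schedule_tail(struct task_struct *prev,
				  struct task_struct *next)
{
	struct task_struct *last;

	if (prev != next)
		switch_to(prev, next, last);
	/*
	 * When 'prev' is eventually scheduled back in, execution resumes
	 * here and 'last' identifies the task we switched away from.
	 */
}
#endif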

#define __sti() asm volatile ("andc #0x7f,ccr")
#define __cli() asm volatile ("orc  #0x80,ccr")

#define __save_flags(x) \
	asm volatile ("stc ccr,%w0" : "=r" (x))

#define __restore_flags(x) \
	asm volatile ("ldc %w0,ccr" : : "r" (x))

#define irqs_disabled()			\
({					\
	unsigned char flags;		\
	__save_flags(flags);		\
	((flags & 0x80) == 0x80);	\
})

#define iret() __asm__ __volatile__ ("rte" : : : "memory", "sp", "cc")

/* For spinlocks etc */
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()
#define local_irq_save(x)	({ __save_flags(x); local_irq_disable(); })
#define local_irq_restore(x)	__restore_flags(x)
#define local_save_flags(x)	__save_flags(x)
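
/*
 * Example (not from the original header): the usual save/disable ...
 * restore pattern these macros support.  Bit 7 of the H8/300 CCR is
 * the interrupt mask, so local_irq_save() records the old CCR and
 * local_irq_restore() writes it back, which makes the pattern safe to
 * nest.  'example_counter' is a made-up variable.
 */
#if 0	/* illustrative usage only, never compiled */
static unsigned long example_counter;

static void example_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* mask interrupts, remember old CCR */
	example_counter++;		/* cannot be interrupted here */
	local_irq_restore(flags);	/* restore the previous mask state */
}
#endif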

/*
 * Force strict CPU ordering.
 * Not really required on H8...
 */
#define nop()  asm volatile ("nop"::)
#define mb()   asm volatile ("" : : : "memory")
#define rmb()  asm volatile ("" : : : "memory")
#define wmb()  asm volatile ("" : : : "memory")
#define set_rmb(var, value)	do { xchg(&var, value); } while (0)
#define set_mb(var, value)	set_rmb(var, value)

#ifdef CONFIG_SMP
#define smp_mb()			mb()
#define smp_rmb()			rmb()
#define smp_wmb()			wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()			barrier()
#define smp_rmb()			barrier()
#define smp_wmb()			barrier()
#define smp_read_barrier_depends()	do { } while (0)
#endif
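
/*
 * Example (not from the original header): a made-up producer/consumer
 * sketch showing what set_mb() and smp_rmb() are for.  The store to
 * 'example_ready' must not become visible before the store to
 * 'example_data'; on this UP port the macros mostly compile down to
 * compiler barriers (or an xchg() in the case of set_mb()).
 */
#if 0	/* illustrative usage only, never compiled */
static int example_data;
static volatile int example_ready;

static void example_producer(void)
{
	example_data = 42;		/* publish the payload first */
	set_mb(example_ready, 1);	/* then set the flag, with ordering */
}

static int example_consumer(void)
{
	while (!example_ready)
		;			/* spin until the flag is observed */
	smp_rmb();			/* order flag read before data read */
	return example_data;
}
#endif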

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
#define tas(ptr)	(xchg((ptr), 1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
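
/*
 * Added note (not in the original): the H8/300 has no atomic exchange
 * instruction, so __xchg() masks interrupts around a plain load/store
 * pair.  That is sufficient on this uniprocessor port.
 */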
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long tmp, flags;

	local_irq_save(flags);

	switch (size) {
	case 1:
		__asm__ __volatile__
			("mov.b %2,%0\n\t"
			 "mov.b %1,%2"
			 : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("mov.w %2,%0\n\t"
			 "mov.w %1,%2"
			 : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("mov.l %2,%0\n\t"
			 "mov.l %1,%2"
			 : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
		break;
	default:
		tmp = 0;
	}
	local_irq_restore(flags);
	return tmp;
}
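
/*
 * Example (not from the original header): a minimal busy-wait lock
 * built on tas(), i.e. xchg(ptr, 1).  tas() returns the previous
 * value, so seeing 0 means we took the lock.  'example_lock' and the
 * helper names are made up for this sketch.
 */
#if 0	/* illustrative usage only, never compiled */
static volatile unsigned char example_lock;	/* 0 = free, 1 = held */

static void example_lock_acquire(void)
{
	while (tas(&example_lock))
		;	/* previous value was 1: already held, keep spinning */
}

static void example_lock_release(void)
{
	example_lock = 0;	/* mark the lock free again */
}
#endif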

#define HARD_RESET_NOW() ({	\
	local_irq_disable();	\
	asm("jmp @@0");		\
})

#define arch_align_stack(x) (x)

#endif /* _H8300_SYSTEM_H */