/*
 * Floating-point, VMX/Altivec and VSX loads and stores
 * for use in instruction emulation.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/ppc-opcode.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <linux/errno.h>

#ifdef CONFIG_PPC_FPU

0016a4cf PM |
22 | #define STKFRM (PPC_MIN_STKFRM + 16) |
23 | ||
24 | .macro extab instr,handler | |
25 | .section __ex_table,"a" | |
26 | PPC_LONG \instr,\handler | |
27 | .previous | |
28 | .endm | |
29 | ||
30 | .macro inst32 op | |
31 | reg = 0 | |
32 | .rept 32 | |
33 | 20: \op reg,0,r4 | |
34 | b 3f | |
35 | extab 20b,99f | |
36 | reg = reg + 1 | |
37 | .endr | |
38 | .endm | |
39 | ||
40 | /* Get the contents of frN into fr0; N is in r3. */ | |
41 | _GLOBAL(get_fpr) | |
42 | mflr r0 | |
43 | rlwinm r3,r3,3,0xf8 | |
44 | bcl 20,31,1f | |
45 | blr /* fr0 is already in fr0 */ | |
46 | nop | |
47 | reg = 1 | |
48 | .rept 31 | |
49 | fmr fr0,reg | |
50 | blr | |
51 | reg = reg + 1 | |
52 | .endr | |
53 | 1: mflr r5 | |
54 | add r5,r3,r5 | |
55 | mtctr r5 | |
56 | mtlr r0 | |
57 | bctr | |
58 | ||
59 | /* Put the contents of fr0 into frN; N is in r3. */ | |
60 | _GLOBAL(put_fpr) | |
61 | mflr r0 | |
62 | rlwinm r3,r3,3,0xf8 | |
63 | bcl 20,31,1f | |
64 | blr /* fr0 is already in fr0 */ | |
65 | nop | |
66 | reg = 1 | |
67 | .rept 31 | |
68 | fmr reg,fr0 | |
69 | blr | |
70 | reg = reg + 1 | |
71 | .endr | |
72 | 1: mflr r5 | |
73 | add r5,r3,r5 | |
74 | mtctr r5 | |
75 | mtlr r0 | |
76 | bctr | |
77 | ||
78 | /* Load FP reg N from float at *p. N is in r3, p in r4. */ | |
79 | _GLOBAL(do_lfs) | |
80 | PPC_STLU r1,-STKFRM(r1) | |
81 | mflr r0 | |
82 | PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1) | |
83 | mfmsr r6 | |
84 | ori r7,r6,MSR_FP | |
85 | cmpwi cr7,r3,0 | |
cd64d169 | 86 | MTMSRD(r7) |
0016a4cf PM |
87 | isync |
88 | beq cr7,1f | |
89 | stfd fr0,STKFRM-16(r1) | |
90 | 1: li r9,-EFAULT | |
91 | 2: lfs fr0,0(r4) | |
92 | li r9,0 | |
93 | 3: bl put_fpr | |
94 | beq cr7,4f | |
95 | lfd fr0,STKFRM-16(r1) | |
96 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) | |
97 | mtlr r0 | |
cd64d169 | 98 | MTMSRD(r6) |
0016a4cf PM |
99 | isync |
100 | mr r3,r9 | |
101 | addi r1,r1,STKFRM | |
102 | blr | |
103 | extab 2b,3b | |
104 | ||
105 | /* Load FP reg N from double at *p. N is in r3, p in r4. */ | |
106 | _GLOBAL(do_lfd) | |
107 | PPC_STLU r1,-STKFRM(r1) | |
108 | mflr r0 | |
109 | PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1) | |
110 | mfmsr r6 | |
111 | ori r7,r6,MSR_FP | |
112 | cmpwi cr7,r3,0 | |
cd64d169 | 113 | MTMSRD(r7) |
0016a4cf PM |
114 | isync |
115 | beq cr7,1f | |
116 | stfd fr0,STKFRM-16(r1) | |
117 | 1: li r9,-EFAULT | |
118 | 2: lfd fr0,0(r4) | |
119 | li r9,0 | |
120 | 3: beq cr7,4f | |
121 | bl put_fpr | |
122 | lfd fr0,STKFRM-16(r1) | |
123 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) | |
124 | mtlr r0 | |
cd64d169 | 125 | MTMSRD(r6) |
0016a4cf PM |
126 | isync |
127 | mr r3,r9 | |
128 | addi r1,r1,STKFRM | |
129 | blr | |
130 | extab 2b,3b | |
131 | ||
132 | /* Store FP reg N to float at *p. N is in r3, p in r4. */ | |
133 | _GLOBAL(do_stfs) | |
134 | PPC_STLU r1,-STKFRM(r1) | |
135 | mflr r0 | |
136 | PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1) | |
137 | mfmsr r6 | |
138 | ori r7,r6,MSR_FP | |
139 | cmpwi cr7,r3,0 | |
cd64d169 | 140 | MTMSRD(r7) |
0016a4cf PM |
141 | isync |
142 | beq cr7,1f | |
143 | stfd fr0,STKFRM-16(r1) | |
144 | bl get_fpr | |
145 | 1: li r9,-EFAULT | |
146 | 2: stfs fr0,0(r4) | |
147 | li r9,0 | |
148 | 3: beq cr7,4f | |
149 | lfd fr0,STKFRM-16(r1) | |
150 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) | |
151 | mtlr r0 | |
cd64d169 | 152 | MTMSRD(r6) |
0016a4cf PM |
153 | isync |
154 | mr r3,r9 | |
155 | addi r1,r1,STKFRM | |
156 | blr | |
157 | extab 2b,3b | |
158 | ||
159 | /* Store FP reg N to double at *p. N is in r3, p in r4. */ | |
160 | _GLOBAL(do_stfd) | |
161 | PPC_STLU r1,-STKFRM(r1) | |
162 | mflr r0 | |
163 | PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1) | |
164 | mfmsr r6 | |
165 | ori r7,r6,MSR_FP | |
166 | cmpwi cr7,r3,0 | |
cd64d169 | 167 | MTMSRD(r7) |
0016a4cf PM |
168 | isync |
169 | beq cr7,1f | |
170 | stfd fr0,STKFRM-16(r1) | |
171 | bl get_fpr | |
172 | 1: li r9,-EFAULT | |
173 | 2: stfd fr0,0(r4) | |
174 | li r9,0 | |
175 | 3: beq cr7,4f | |
176 | lfd fr0,STKFRM-16(r1) | |
177 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) | |
178 | mtlr r0 | |
cd64d169 | 179 | MTMSRD(r6) |
0016a4cf PM |
180 | isync |
181 | mr r3,r9 | |
182 | addi r1,r1,STKFRM | |
183 | blr | |
184 | extab 2b,3b | |
185 | ||
186 | #ifdef CONFIG_ALTIVEC | |
187 | /* Get the contents of vrN into vr0; N is in r3. */ | |
188 | _GLOBAL(get_vr) | |
189 | mflr r0 | |
190 | rlwinm r3,r3,3,0xf8 | |
191 | bcl 20,31,1f | |
192 | blr /* vr0 is already in vr0 */ | |
193 | nop | |
194 | reg = 1 | |
195 | .rept 31 | |
196 | vor vr0,reg,reg /* assembler doesn't know vmr? */ | |
197 | blr | |
198 | reg = reg + 1 | |
199 | .endr | |
200 | 1: mflr r5 | |
201 | add r5,r3,r5 | |
202 | mtctr r5 | |
203 | mtlr r0 | |
204 | bctr | |
205 | ||
206 | /* Put the contents of vr0 into vrN; N is in r3. */ | |
207 | _GLOBAL(put_vr) | |
208 | mflr r0 | |
209 | rlwinm r3,r3,3,0xf8 | |
210 | bcl 20,31,1f | |
211 | blr /* vr0 is already in vr0 */ | |
212 | nop | |
213 | reg = 1 | |
214 | .rept 31 | |
215 | vor reg,vr0,vr0 | |
216 | blr | |
217 | reg = reg + 1 | |
218 | .endr | |
219 | 1: mflr r5 | |
220 | add r5,r3,r5 | |
221 | mtctr r5 | |
222 | mtlr r0 | |
223 | bctr | |
224 | ||
225 | /* Load vector reg N from *p. N is in r3, p in r4. */ | |
226 | _GLOBAL(do_lvx) | |
227 | PPC_STLU r1,-STKFRM(r1) | |
228 | mflr r0 | |
229 | PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1) | |
230 | mfmsr r6 | |
231 | oris r7,r6,MSR_VEC@h | |
232 | cmpwi cr7,r3,0 | |
233 | li r8,STKFRM-16 | |
cd64d169 | 234 | MTMSRD(r7) |
0016a4cf PM |
235 | isync |
236 | beq cr7,1f | |
237 | stvx vr0,r1,r8 | |
238 | 1: li r9,-EFAULT | |
239 | 2: lvx vr0,0,r4 | |
240 | li r9,0 | |
241 | 3: beq cr7,4f | |
242 | bl put_vr | |
243 | lvx vr0,r1,r8 | |
244 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) | |
245 | mtlr r0 | |
cd64d169 | 246 | MTMSRD(r6) |
0016a4cf PM |
247 | isync |
248 | mr r3,r9 | |
249 | addi r1,r1,STKFRM | |
250 | blr | |
251 | extab 2b,3b | |
252 | ||
253 | /* Store vector reg N to *p. N is in r3, p in r4. */ | |
254 | _GLOBAL(do_stvx) | |
255 | PPC_STLU r1,-STKFRM(r1) | |
256 | mflr r0 | |
257 | PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1) | |
258 | mfmsr r6 | |
259 | oris r7,r6,MSR_VEC@h | |
260 | cmpwi cr7,r3,0 | |
261 | li r8,STKFRM-16 | |
cd64d169 | 262 | MTMSRD(r7) |
0016a4cf PM |
263 | isync |
264 | beq cr7,1f | |
265 | stvx vr0,r1,r8 | |
266 | bl get_vr | |
267 | 1: li r9,-EFAULT | |
268 | 2: stvx vr0,0,r4 | |
269 | li r9,0 | |
270 | 3: beq cr7,4f | |
271 | lvx vr0,r1,r8 | |
272 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) | |
273 | mtlr r0 | |
cd64d169 | 274 | MTMSRD(r6) |
0016a4cf PM |
275 | isync |
276 | mr r3,r9 | |
277 | addi r1,r1,STKFRM | |
278 | blr | |
279 | extab 2b,3b | |
280 | #endif /* CONFIG_ALTIVEC */ | |
281 | ||
282 | #ifdef CONFIG_VSX | |
283 | /* Get the contents of vsrN into vsr0; N is in r3. */ | |
284 | _GLOBAL(get_vsr) | |
285 | mflr r0 | |
286 | rlwinm r3,r3,3,0x1f8 | |
287 | bcl 20,31,1f | |
288 | blr /* vsr0 is already in vsr0 */ | |
289 | nop | |
290 | reg = 1 | |
291 | .rept 63 | |
292 | XXLOR(0,reg,reg) | |
293 | blr | |
294 | reg = reg + 1 | |
295 | .endr | |
296 | 1: mflr r5 | |
297 | add r5,r3,r5 | |
298 | mtctr r5 | |
299 | mtlr r0 | |
300 | bctr | |
301 | ||
302 | /* Put the contents of vsr0 into vsrN; N is in r3. */ | |
303 | _GLOBAL(put_vsr) | |
304 | mflr r0 | |
305 | rlwinm r3,r3,3,0x1f8 | |
306 | bcl 20,31,1f | |
307 | blr /* vr0 is already in vr0 */ | |
308 | nop | |
309 | reg = 1 | |
310 | .rept 63 | |
311 | XXLOR(reg,0,0) | |
312 | blr | |
313 | reg = reg + 1 | |
314 | .endr | |
315 | 1: mflr r5 | |
316 | add r5,r3,r5 | |
317 | mtctr r5 | |
318 | mtlr r0 | |
319 | bctr | |
320 | ||
321 | /* Load VSX reg N from vector doubleword *p. N is in r3, p in r4. */ | |
322 | _GLOBAL(do_lxvd2x) | |
323 | PPC_STLU r1,-STKFRM(r1) | |
324 | mflr r0 | |
325 | PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1) | |
326 | mfmsr r6 | |
327 | oris r7,r6,MSR_VSX@h | |
328 | cmpwi cr7,r3,0 | |
329 | li r8,STKFRM-16 | |
cd64d169 | 330 | MTMSRD(r7) |
0016a4cf PM |
331 | isync |
332 | beq cr7,1f | |
c75df6f9 | 333 | STXVD2X(0,R1,R8) |
0016a4cf | 334 | 1: li r9,-EFAULT |
e55174e9 | 335 | 2: LXVD2X(0,R0,R4) |
0016a4cf PM |
336 | li r9,0 |
337 | 3: beq cr7,4f | |
338 | bl put_vsr | |
c75df6f9 | 339 | LXVD2X(0,R1,R8) |
0016a4cf PM |
340 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) |
341 | mtlr r0 | |
cd64d169 | 342 | MTMSRD(r6) |
0016a4cf PM |
343 | isync |
344 | mr r3,r9 | |
345 | addi r1,r1,STKFRM | |
346 | blr | |
347 | extab 2b,3b | |
348 | ||
349 | /* Store VSX reg N to vector doubleword *p. N is in r3, p in r4. */ | |
350 | _GLOBAL(do_stxvd2x) | |
351 | PPC_STLU r1,-STKFRM(r1) | |
352 | mflr r0 | |
353 | PPC_STL r0,STKFRM+PPC_LR_STKOFF(r1) | |
354 | mfmsr r6 | |
355 | oris r7,r6,MSR_VSX@h | |
356 | cmpwi cr7,r3,0 | |
357 | li r8,STKFRM-16 | |
cd64d169 | 358 | MTMSRD(r7) |
0016a4cf PM |
359 | isync |
360 | beq cr7,1f | |
c75df6f9 | 361 | STXVD2X(0,R1,R8) |
0016a4cf PM |
362 | bl get_vsr |
363 | 1: li r9,-EFAULT | |
e55174e9 | 364 | 2: STXVD2X(0,R0,R4) |
0016a4cf PM |
365 | li r9,0 |
366 | 3: beq cr7,4f | |
c75df6f9 | 367 | LXVD2X(0,R1,R8) |
0016a4cf PM |
368 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) |
369 | mtlr r0 | |
cd64d169 | 370 | MTMSRD(r6) |
0016a4cf PM |
371 | isync |
372 | mr r3,r9 | |
373 | addi r1,r1,STKFRM | |
374 | blr | |
375 | extab 2b,3b | |
376 | ||
377 | #endif /* CONFIG_VSX */ | |
cd64d169 SM |
378 | |
379 | #endif /* CONFIG_PPC_FPU */ |