Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | // |
2 | // assembly portion of the IA64 MCA handling | |
3 | // | |
4 | // Mods by cfleck to integrate into kernel build | |
5 | // 00/03/15 davidm Added various stop bits to get a clean compile | |
6 | // | |
7 | // 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp | |
8 | // kstack, switch modes, jump to C INIT handler | |
9 | // | |
10 | // 02/01/04 J.Hall <jenna.s.hall@intel.com> | |
11 | // Before entering virtual mode code: | |
12 | // 1. Check for TLB CPU error | |
13 | // 2. Restore current thread pointer to kr6 | |
14 | // 3. Move stack ptr 16 bytes to conform to C calling convention | |
15 | // | |
16 | // 04/11/12 Russ Anderson <rja@sgi.com> | |
17 | // Added per cpu MCA/INIT stack save areas. | |
18 | // | |
19 | #include <linux/config.h> | |
20 | #include <linux/threads.h> | |
21 | ||
22 | #include <asm/asmmacro.h> | |
23 | #include <asm/pgtable.h> | |
24 | #include <asm/processor.h> | |
25 | #include <asm/mca_asm.h> | |
26 | #include <asm/mca.h> | |
27 | ||
28 | /* | |
29 | * When we get a machine check, the kernel stack pointer is no longer | |
30 | * valid, so we need to set a new stack pointer. | |
31 | */ | |
32 | #define MINSTATE_PHYS /* Make sure stack access is physical for MINSTATE */ | |
33 | ||
34 | /* | |
35 | * Needed for return context to SAL | |
36 | */ | |
37 | #define IA64_MCA_SAME_CONTEXT 0 | |
38 | #define IA64_MCA_COLD_BOOT -2 | |
39 | ||
40 | #include "minstate.h" | |
41 | ||
42 | /* | |
43 | * SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec) | |
44 | * 1. GR1 = OS GP | |
45 | * 2. GR8 = PAL_PROC physical address | |
46 | * 3. GR9 = SAL_PROC physical address | |
47 | * 4. GR10 = SAL GP (physical) | |
48 | * 5. GR11 = Rendez state | |
49 | * 6. GR12 = Return address to location within SAL_CHECK | |
50 | */ | |
51 | #define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \ | |
52 | LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \ | |
53 | st8 [_tmp]=r1,0x08;; \ | |
54 | st8 [_tmp]=r8,0x08;; \ | |
55 | st8 [_tmp]=r9,0x08;; \ | |
56 | st8 [_tmp]=r10,0x08;; \ | |
57 | st8 [_tmp]=r11,0x08;; \ | |
58 | st8 [_tmp]=r12,0x08;; \ | |
59 | st8 [_tmp]=r17,0x08;; \ | |
60 | st8 [_tmp]=r18,0x08 | |
61 | ||
62 | /* | |
63 | * OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec) | |
64 | * (p6) is executed if we never entered virtual mode (TLB error) | |
65 | * (p7) is executed if we entered virtual mode as expected (normal case) | |
66 | * 1. GR8 = OS_MCA return status | |
67 | * 2. GR9 = SAL GP (physical) | |
68 | * 3. GR10 = 0/1 returning same/new context | |
69 | * 4. GR22 = New min state save area pointer | |
70 | * returns ptr to SAL rtn save loc in _tmp | |
71 | */ | |
72 | #define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp) \ | |
73 | movl _tmp=ia64_os_to_sal_handoff_state;; \ | |
74 | DATA_VA_TO_PA(_tmp);; \ | |
75 | ld8 r8=[_tmp],0x08;; \ | |
76 | ld8 r9=[_tmp],0x08;; \ | |
77 | ld8 r10=[_tmp],0x08;; \ | |
78 | ld8 r22=[_tmp],0x08;; | |
79 | // now _tmp is pointing to SAL rtn save location | |
80 | ||
81 | /* | |
82 | * COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state | |
83 | * imots_os_status=IA64_MCA_COLD_BOOT | |
84 | * imots_sal_gp=SAL GP | |
85 | * imots_context=IA64_MCA_SAME_CONTEXT | |
86 | * imots_new_min_state=Min state save area pointer | |
87 | * imots_sal_check_ra=Return address to location within SAL_CHECK | |
88 | * | |
89 | */ | |
90 | #define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\ | |
91 | movl tmp=IA64_MCA_COLD_BOOT; \ | |
92 | movl sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state); \ | |
93 | movl os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);; \ | |
94 | st8 [os_to_sal_handoff]=tmp,8;; \ | |
95 | ld8 tmp=[sal_to_os_handoff],48;; \ | |
96 | st8 [os_to_sal_handoff]=tmp,8;; \ | |
97 | movl tmp=IA64_MCA_SAME_CONTEXT;; \ | |
98 | st8 [os_to_sal_handoff]=tmp,8;; \ | |
99 | ld8 tmp=[sal_to_os_handoff],-8;; \ | |
100 | st8 [os_to_sal_handoff]=tmp,8;; \ | |
101 | ld8 tmp=[sal_to_os_handoff];; \ | |
102 | st8 [os_to_sal_handoff]=tmp;; | |
103 | ||
104 | #define GET_IA64_MCA_DATA(reg) \ | |
105 | GET_THIS_PADDR(reg, ia64_mca_data) \ | |
106 | ;; \ | |
107 | ld8 reg=[reg] | |
108 | ||
109 | .global ia64_os_mca_dispatch | |
110 | .global ia64_os_mca_dispatch_end | |
111 | .global ia64_sal_to_os_handoff_state | |
112 | .global ia64_os_to_sal_handoff_state | |
b8d8b883 | 113 | .global ia64_do_tlb_purge |
1da177e4 LT |
114 | |
115 | .text | |
116 | .align 16 | |
117 | ||
b8d8b883 AR |
118 | /* |
119 | * Just the TLB purge part is moved to a separate function | |
120 | * so we can re-use the code for cpu hotplug code as well | |
121 | * Caller should now setup b1, so we can branch once the | |
122 | * tlb flush is complete. | |
123 | */ | |
1da177e4 | 124 | |
b8d8b883 | 125 | ia64_do_tlb_purge: |
1da177e4 LT |
126 | #define O(member) IA64_CPUINFO_##member##_OFFSET |
127 | ||
128 | GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2 | |
129 | ;; | |
130 | addl r17=O(PTCE_STRIDE),r2 | |
131 | addl r2=O(PTCE_BASE),r2 | |
132 | ;; | |
133 | ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base | |
134 | ld4 r19=[r2],4 // r19=ptce_count[0] | |
135 | ld4 r21=[r17],4 // r21=ptce_stride[0] | |
136 | ;; | |
137 | ld4 r20=[r2] // r20=ptce_count[1] | |
138 | ld4 r22=[r17] // r22=ptce_stride[1] | |
139 | mov r24=0 | |
140 | ;; | |
141 | adds r20=-1,r20 | |
142 | ;; | |
143 | #undef O | |
144 | ||
145 | 2: | |
146 | cmp.ltu p6,p7=r24,r19 | |
147 | (p7) br.cond.dpnt.few 4f | |
148 | mov ar.lc=r20 | |
149 | 3: | |
150 | ptc.e r18 | |
151 | ;; | |
152 | add r18=r22,r18 | |
153 | br.cloop.sptk.few 3b | |
154 | ;; | |
155 | add r18=r21,r18 | |
156 | add r24=1,r24 | |
157 | ;; | |
158 | br.sptk.few 2b | |
159 | 4: | |
160 | srlz.i // srlz.i implies srlz.d | |
161 | ;; | |
162 | ||
163 | // Now purge addresses formerly mapped by TR registers | |
164 | // 1. Purge ITR&DTR for kernel. | |
165 | movl r16=KERNEL_START | |
166 | mov r18=KERNEL_TR_PAGE_SHIFT<<2 | |
167 | ;; | |
168 | ptr.i r16, r18 | |
169 | ptr.d r16, r18 | |
170 | ;; | |
171 | srlz.i | |
172 | ;; | |
173 | srlz.d | |
174 | ;; | |
175 | // 2. Purge DTR for PERCPU data. | |
176 | movl r16=PERCPU_ADDR | |
177 | mov r18=PERCPU_PAGE_SHIFT<<2 | |
178 | ;; | |
179 | ptr.d r16,r18 | |
180 | ;; | |
181 | srlz.d | |
182 | ;; | |
183 | // 3. Purge ITR for PAL code. | |
184 | GET_THIS_PADDR(r2, ia64_mca_pal_base) | |
185 | ;; | |
186 | ld8 r16=[r2] | |
187 | mov r18=IA64_GRANULE_SHIFT<<2 | |
188 | ;; | |
189 | ptr.i r16,r18 | |
190 | ;; | |
191 | srlz.i | |
192 | ;; | |
193 | // 4. Purge DTR for stack. | |
194 | mov r16=IA64_KR(CURRENT_STACK) | |
195 | ;; | |
196 | shl r16=r16,IA64_GRANULE_SHIFT | |
197 | movl r19=PAGE_OFFSET | |
198 | ;; | |
199 | add r16=r19,r16 | |
200 | mov r18=IA64_GRANULE_SHIFT<<2 | |
201 | ;; | |
202 | ptr.d r16,r18 | |
203 | ;; | |
204 | srlz.i | |
205 | ;; | |
b8d8b883 AR |
206 | // Now branch away to caller. |
207 | br.sptk.many b1 | |
208 | ;; | |
209 | ||
210 | ia64_os_mca_dispatch: | |
211 | ||
212 | // Serialize all MCA processing | |
213 | mov r3=1;; | |
214 | LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);; | |
215 | ia64_os_mca_spin: | |
216 | xchg8 r4=[r2],r3;; | |
217 | cmp.ne p6,p0=r4,r0 | |
218 | (p6) br ia64_os_mca_spin | |
219 | ||
220 | // Save the SAL to OS MCA handoff state as defined | |
221 | // by SAL SPEC 3.0 | |
222 | // NOTE : The order in which the state gets saved | |
223 | // is dependent on the way the C-structure | |
224 | // for ia64_mca_sal_to_os_state_t has been | |
225 | // defined in include/asm/mca.h | |
226 | SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2) | |
227 | ;; | |
228 | ||
229 | // LOG PROCESSOR STATE INFO FROM HERE ON.. | |
230 | begin_os_mca_dump: | |
231 | br ia64_os_mca_proc_state_dump;; | |
232 | ||
233 | ia64_os_mca_done_dump: | |
234 | ||
235 | LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56) | |
236 | ;; | |
237 | ld8 r18=[r16] // Get processor state parameter on existing PALE_CHECK. | |
238 | ;; | |
239 | tbit.nz p6,p7=r18,60 | |
240 | (p7) br.spnt done_tlb_purge_and_reload | |
241 | ||
242 | // The following code purges TC and TR entries. Then reload all TC entries. | |
243 | // Purge percpu data TC entries. | |
244 | begin_tlb_purge_and_reload: | |
245 | movl r18=ia64_reload_tr;; | |
246 | LOAD_PHYSICAL(p0,r18,ia64_reload_tr);; | |
247 | mov b1=r18;; | |
248 | br.sptk.many ia64_do_tlb_purge;; | |
249 | ||
250 | ia64_reload_tr: | |
1da177e4 LT |
251 | // Finally reload the TR registers. |
252 | // 1. Reload DTR/ITR registers for kernel. | |
253 | mov r18=KERNEL_TR_PAGE_SHIFT<<2 | |
254 | movl r17=KERNEL_START | |
255 | ;; | |
256 | mov cr.itir=r18 | |
257 | mov cr.ifa=r17 | |
258 | mov r16=IA64_TR_KERNEL | |
259 | mov r19=ip | |
260 | movl r18=PAGE_KERNEL | |
261 | ;; | |
262 | dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT | |
263 | ;; | |
264 | or r18=r17,r18 | |
265 | ;; | |
266 | itr.i itr[r16]=r18 | |
267 | ;; | |
268 | itr.d dtr[r16]=r18 | |
269 | ;; | |
270 | srlz.i | |
271 | srlz.d | |
272 | ;; | |
273 | // 2. Reload DTR register for PERCPU data. | |
274 | GET_THIS_PADDR(r2, ia64_mca_per_cpu_pte) | |
275 | ;; | |
276 | movl r16=PERCPU_ADDR // vaddr | |
277 | movl r18=PERCPU_PAGE_SHIFT<<2 | |
278 | ;; | |
279 | mov cr.itir=r18 | |
280 | mov cr.ifa=r16 | |
281 | ;; | |
282 | ld8 r18=[r2] // load per-CPU PTE | |
283 | mov r16=IA64_TR_PERCPU_DATA; | |
284 | ;; | |
285 | itr.d dtr[r16]=r18 | |
286 | ;; | |
287 | srlz.d | |
288 | ;; | |
289 | // 3. Reload ITR for PAL code. | |
290 | GET_THIS_PADDR(r2, ia64_mca_pal_pte) | |
291 | ;; | |
292 | ld8 r18=[r2] // load PAL PTE | |
293 | ;; | |
294 | GET_THIS_PADDR(r2, ia64_mca_pal_base) | |
295 | ;; | |
296 | ld8 r16=[r2] // load PAL vaddr | |
297 | mov r19=IA64_GRANULE_SHIFT<<2 | |
298 | ;; | |
299 | mov cr.itir=r19 | |
300 | mov cr.ifa=r16 | |
301 | mov r20=IA64_TR_PALCODE | |
302 | ;; | |
303 | itr.i itr[r20]=r18 | |
304 | ;; | |
305 | srlz.i | |
306 | ;; | |
307 | // 4. Reload DTR for stack. | |
308 | mov r16=IA64_KR(CURRENT_STACK) | |
309 | ;; | |
310 | shl r16=r16,IA64_GRANULE_SHIFT | |
311 | movl r19=PAGE_OFFSET | |
312 | ;; | |
313 | add r18=r19,r16 | |
314 | movl r20=PAGE_KERNEL | |
315 | ;; | |
316 | add r16=r20,r16 | |
317 | mov r19=IA64_GRANULE_SHIFT<<2 | |
318 | ;; | |
319 | mov cr.itir=r19 | |
320 | mov cr.ifa=r18 | |
321 | mov r20=IA64_TR_CURRENT_STACK | |
322 | ;; | |
323 | itr.d dtr[r20]=r16 | |
324 | ;; | |
325 | srlz.d | |
326 | ;; | |
327 | br.sptk.many done_tlb_purge_and_reload | |
328 | err: | |
329 | COLD_BOOT_HANDOFF_STATE(r20,r21,r22) | |
330 | br.sptk.many ia64_os_mca_done_restore | |
331 | ||
332 | done_tlb_purge_and_reload: | |
333 | ||
334 | // Setup new stack frame for OS_MCA handling | |
335 | GET_IA64_MCA_DATA(r2) | |
336 | ;; | |
337 | add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2 | |
338 | add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2 | |
339 | ;; | |
340 | rse_switch_context(r6,r3,r2);; // RSC management in this new context | |
341 | ||
342 | GET_IA64_MCA_DATA(r2) | |
343 | ;; | |
344 | add r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2 | |
345 | ;; | |
346 | mov r12=r2 // establish new stack-pointer | |
347 | ||
348 | // Enter virtual mode from physical mode | |
349 | VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4) | |
350 | ia64_os_mca_virtual_begin: | |
351 | ||
352 | // Call virtual mode handler | |
353 | movl r2=ia64_mca_ucmc_handler;; | |
354 | mov b6=r2;; | |
355 | br.call.sptk.many b0=b6;; | |
356 | .ret0: | |
357 | // Revert back to physical mode before going back to SAL | |
358 | PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4) | |
359 | ia64_os_mca_virtual_end: | |
360 | ||
361 | // restore the original stack frame here | |
362 | GET_IA64_MCA_DATA(r2) | |
363 | ;; | |
364 | add r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2 | |
365 | ;; | |
366 | movl r4=IA64_PSR_MC | |
367 | ;; | |
368 | rse_return_context(r4,r3,r2) // switch from interrupt context for RSE | |
369 | ||
370 | // let us restore all the registers from our PSI structure | |
371 | mov r8=gp | |
372 | ;; | |
373 | begin_os_mca_restore: | |
374 | br ia64_os_mca_proc_state_restore;; | |
375 | ||
376 | ia64_os_mca_done_restore: | |
377 | OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);; | |
378 | // branch back to SALE_CHECK | |
379 | ld8 r3=[r2];; | |
380 | mov b0=r3;; // SAL_CHECK return address | |
381 | ||
382 | // release lock | |
383 | movl r3=ia64_mca_serialize;; | |
384 | DATA_VA_TO_PA(r3);; | |
385 | st8.rel [r3]=r0 | |
386 | ||
387 | br b0 | |
388 | ;; | |
389 | ia64_os_mca_dispatch_end: | |
390 | //EndMain////////////////////////////////////////////////////////////////////// | |
391 | ||
392 | ||
393 | //++ | |
394 | // Name: | |
395 | // ia64_os_mca_proc_state_dump() | |
396 | // | |
397 | // Stub Description: | |
398 | // | |
399 | // This stub dumps the processor state during MCHK to a data area | |
400 | // | |
401 | //-- | |
402 | ||
403 | ia64_os_mca_proc_state_dump: | |
404 | // Save bank 1 GRs 16-31 which will be used by c-language code when we switch | |
405 | // to virtual addressing mode. | |
406 | GET_IA64_MCA_DATA(r2) | |
407 | ;; | |
408 | add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2 | |
409 | ;; | |
410 | // save ar.NaT | |
411 | mov r5=ar.unat // ar.unat | |
412 | ||
413 | // save banked GRs 16-31 along with NaT bits | |
414 | bsw.1;; | |
415 | st8.spill [r2]=r16,8;; | |
416 | st8.spill [r2]=r17,8;; | |
417 | st8.spill [r2]=r18,8;; | |
418 | st8.spill [r2]=r19,8;; | |
419 | st8.spill [r2]=r20,8;; | |
420 | st8.spill [r2]=r21,8;; | |
421 | st8.spill [r2]=r22,8;; | |
422 | st8.spill [r2]=r23,8;; | |
423 | st8.spill [r2]=r24,8;; | |
424 | st8.spill [r2]=r25,8;; | |
425 | st8.spill [r2]=r26,8;; | |
426 | st8.spill [r2]=r27,8;; | |
427 | st8.spill [r2]=r28,8;; | |
428 | st8.spill [r2]=r29,8;; | |
429 | st8.spill [r2]=r30,8;; | |
430 | st8.spill [r2]=r31,8;; | |
431 | ||
432 | mov r4=ar.unat;; | |
433 | st8 [r2]=r4,8 // save User NaT bits for r16-r31 | |
434 | mov ar.unat=r5 // restore original unat | |
435 | bsw.0;; | |
436 | ||
437 | //save BRs | |
438 | add r4=8,r2 // duplicate r2 in r4 | |
439 | add r6=2*8,r2 // duplicate r2 in r6 | |
440 | ||
441 | mov r3=b0 | |
442 | mov r5=b1 | |
443 | mov r7=b2;; | |
444 | st8 [r2]=r3,3*8 | |
445 | st8 [r4]=r5,3*8 | |
446 | st8 [r6]=r7,3*8;; | |
447 | ||
448 | mov r3=b3 | |
449 | mov r5=b4 | |
450 | mov r7=b5;; | |
451 | st8 [r2]=r3,3*8 | |
452 | st8 [r4]=r5,3*8 | |
453 | st8 [r6]=r7,3*8;; | |
454 | ||
455 | mov r3=b6 | |
456 | mov r5=b7;; | |
457 | st8 [r2]=r3,2*8 | |
458 | st8 [r4]=r5,2*8;; | |
459 | ||
460 | cSaveCRs: | |
461 | // save CRs | |
462 | add r4=8,r2 // duplicate r2 in r4 | |
463 | add r6=2*8,r2 // duplicate r2 in r6 | |
464 | ||
465 | mov r3=cr.dcr | |
466 | mov r5=cr.itm | |
467 | mov r7=cr.iva;; | |
468 | ||
469 | st8 [r2]=r3,8*8 | |
470 | st8 [r4]=r5,3*8 | |
471 | st8 [r6]=r7,3*8;; // 48 byte increments | |
472 | ||
473 | mov r3=cr.pta;; | |
474 | st8 [r2]=r3,8*8;; // 64 byte increments | |
475 | ||
476 | // if PSR.ic=1, reading interruption registers causes an illegal operation fault | |
477 | mov r3=psr;; | |
478 | tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test | |
479 | (p6) st8 [r2]=r0,9*8+160 // increment by 232 byte inc. | |
480 | begin_skip_intr_regs: | |
481 | (p6) br SkipIntrRegs;; | |
482 | ||
483 | add r4=8,r2 // duplicate r2 in r4 | |
484 | add r6=2*8,r2 // duplicate r2 in r6 | |
485 | ||
486 | mov r3=cr.ipsr | |
487 | mov r5=cr.isr | |
488 | mov r7=r0;; | |
489 | st8 [r2]=r3,3*8 | |
490 | st8 [r4]=r5,3*8 | |
491 | st8 [r6]=r7,3*8;; | |
492 | ||
493 | mov r3=cr.iip | |
494 | mov r5=cr.ifa | |
495 | mov r7=cr.itir;; | |
496 | st8 [r2]=r3,3*8 | |
497 | st8 [r4]=r5,3*8 | |
498 | st8 [r6]=r7,3*8;; | |
499 | ||
500 | mov r3=cr.iipa | |
501 | mov r5=cr.ifs | |
502 | mov r7=cr.iim;; | |
503 | st8 [r2]=r3,3*8 | |
504 | st8 [r4]=r5,3*8 | |
505 | st8 [r6]=r7,3*8;; | |
506 | ||
507 | mov r3=cr25;; // cr.iha | |
508 | st8 [r2]=r3,160;; // 160 byte increment | |
509 | ||
510 | SkipIntrRegs: | |
511 | st8 [r2]=r0,152;; // another 152 byte inc. | |
512 | ||
513 | add r4=8,r2 // duplicate r2 in r4 | |
514 | add r6=2*8,r2 // duplicate r2 in r6 | |
515 | ||
516 | mov r3=cr.lid | |
517 | // mov r5=cr.ivr // cr.ivr, don't read it | |
518 | mov r7=cr.tpr;; | |
519 | st8 [r2]=r3,3*8 | |
520 | st8 [r4]=r5,3*8 | |
521 | st8 [r6]=r7,3*8;; | |
522 | ||
523 | mov r3=r0 // cr.eoi => cr67 | |
524 | mov r5=r0 // cr.irr0 => cr68 | |
525 | mov r7=r0;; // cr.irr1 => cr69 | |
526 | st8 [r2]=r3,3*8 | |
527 | st8 [r4]=r5,3*8 | |
528 | st8 [r6]=r7,3*8;; | |
529 | ||
530 | mov r3=r0 // cr.irr2 => cr70 | |
531 | mov r5=r0 // cr.irr3 => cr71 | |
532 | mov r7=cr.itv;; | |
533 | st8 [r2]=r3,3*8 | |
534 | st8 [r4]=r5,3*8 | |
535 | st8 [r6]=r7,3*8;; | |
536 | ||
537 | mov r3=cr.pmv | |
538 | mov r5=cr.cmcv;; | |
539 | st8 [r2]=r3,7*8 | |
540 | st8 [r4]=r5,7*8;; | |
541 | ||
542 | mov r3=r0 // cr.lrr0 => cr80 | |
543 | mov r5=r0;; // cr.lrr1 => cr81 | |
544 | st8 [r2]=r3,23*8 | |
545 | st8 [r4]=r5,23*8;; | |
546 | ||
547 | adds r2=25*8,r2;; | |
548 | ||
549 | cSaveARs: | |
550 | // save ARs | |
551 | add r4=8,r2 // duplicate r2 in r4 | |
552 | add r6=2*8,r2 // duplicate r2 in r6 | |
553 | ||
554 | mov r3=ar.k0 | |
555 | mov r5=ar.k1 | |
556 | mov r7=ar.k2;; | |
557 | st8 [r2]=r3,3*8 | |
558 | st8 [r4]=r5,3*8 | |
559 | st8 [r6]=r7,3*8;; | |
560 | ||
561 | mov r3=ar.k3 | |
562 | mov r5=ar.k4 | |
563 | mov r7=ar.k5;; | |
564 | st8 [r2]=r3,3*8 | |
565 | st8 [r4]=r5,3*8 | |
566 | st8 [r6]=r7,3*8;; | |
567 | ||
568 | mov r3=ar.k6 | |
569 | mov r5=ar.k7 | |
570 | mov r7=r0;; // ar.kr8 | |
571 | st8 [r2]=r3,10*8 | |
572 | st8 [r4]=r5,10*8 | |
573 | st8 [r6]=r7,10*8;; // increment by 72 bytes | |
574 | ||
575 | mov r3=ar.rsc | |
576 | mov ar.rsc=r0 // put RSE in enforced lazy mode | |
577 | mov r5=ar.bsp | |
578 | ;; | |
579 | mov r7=ar.bspstore;; | |
580 | st8 [r2]=r3,3*8 | |
581 | st8 [r4]=r5,3*8 | |
582 | st8 [r6]=r7,3*8;; | |
583 | ||
584 | mov r3=ar.rnat;; | |
585 | st8 [r2]=r3,8*13 // increment by 13x8 bytes | |
586 | ||
587 | mov r3=ar.ccv;; | |
588 | st8 [r2]=r3,8*4 | |
589 | ||
590 | mov r3=ar.unat;; | |
591 | st8 [r2]=r3,8*4 | |
592 | ||
593 | mov r3=ar.fpsr;; | |
594 | st8 [r2]=r3,8*4 | |
595 | ||
596 | mov r3=ar.itc;; | |
597 | st8 [r2]=r3,160 // 160 | |
598 | ||
599 | mov r3=ar.pfs;; | |
600 | st8 [r2]=r3,8 | |
601 | ||
602 | mov r3=ar.lc;; | |
603 | st8 [r2]=r3,8 | |
604 | ||
605 | mov r3=ar.ec;; | |
606 | st8 [r2]=r3 | |
607 | add r2=8*62,r2 //padding | |
608 | ||
609 | // save RRs | |
610 | mov ar.lc=0x08-1 | |
611 | movl r4=0x00;; | |
612 | ||
613 | cStRR: | |
614 | dep.z r5=r4,61,3;; | |
615 | mov r3=rr[r5];; | |
616 | st8 [r2]=r3,8 | |
617 | add r4=1,r4 | |
618 | br.cloop.sptk.few cStRR | |
619 | ;; | |
620 | end_os_mca_dump: | |
621 | br ia64_os_mca_done_dump;; | |
622 | ||
623 | //EndStub////////////////////////////////////////////////////////////////////// | |
624 | ||
625 | ||
626 | //++ | |
627 | // Name: | |
628 | // ia64_os_mca_proc_state_restore() | |
629 | // | |
630 | // Stub Description: | |
631 | // | |
632 | // This is a stub to restore the saved processor state during MCHK | |
633 | // | |
634 | //-- | |
635 | ||
636 | ia64_os_mca_proc_state_restore: | |
637 | ||
638 | // Restore bank1 GR16-31 | |
639 | GET_IA64_MCA_DATA(r2) | |
640 | ;; | |
641 | add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2 | |
642 | ||
643 | restore_GRs: // restore bank-1 GRs 16-31 | |
644 | bsw.1;; | |
645 | add r3=16*8,r2;; // to get to NaT of GR 16-31 | |
646 | ld8 r3=[r3];; | |
647 | mov ar.unat=r3;; // first restore NaT | |
648 | ||
649 | ld8.fill r16=[r2],8;; | |
650 | ld8.fill r17=[r2],8;; | |
651 | ld8.fill r18=[r2],8;; | |
652 | ld8.fill r19=[r2],8;; | |
653 | ld8.fill r20=[r2],8;; | |
654 | ld8.fill r21=[r2],8;; | |
655 | ld8.fill r22=[r2],8;; | |
656 | ld8.fill r23=[r2],8;; | |
657 | ld8.fill r24=[r2],8;; | |
658 | ld8.fill r25=[r2],8;; | |
659 | ld8.fill r26=[r2],8;; | |
660 | ld8.fill r27=[r2],8;; | |
661 | ld8.fill r28=[r2],8;; | |
662 | ld8.fill r29=[r2],8;; | |
663 | ld8.fill r30=[r2],8;; | |
664 | ld8.fill r31=[r2],8;; | |
665 | ||
666 | ld8 r3=[r2],8;; // increment to skip NaT | |
667 | bsw.0;; | |
668 | ||
669 | restore_BRs: | |
670 | add r4=8,r2 // duplicate r2 in r4 | |
671 | add r6=2*8,r2;; // duplicate r2 in r6 | |
672 | ||
673 | ld8 r3=[r2],3*8 | |
674 | ld8 r5=[r4],3*8 | |
675 | ld8 r7=[r6],3*8;; | |
676 | mov b0=r3 | |
677 | mov b1=r5 | |
678 | mov b2=r7;; | |
679 | ||
680 | ld8 r3=[r2],3*8 | |
681 | ld8 r5=[r4],3*8 | |
682 | ld8 r7=[r6],3*8;; | |
683 | mov b3=r3 | |
684 | mov b4=r5 | |
685 | mov b5=r7;; | |
686 | ||
687 | ld8 r3=[r2],2*8 | |
688 | ld8 r5=[r4],2*8;; | |
689 | mov b6=r3 | |
690 | mov b7=r5;; | |
691 | ||
692 | restore_CRs: | |
693 | add r4=8,r2 // duplicate r2 in r4 | |
694 | add r6=2*8,r2;; // duplicate r2 in r6 | |
695 | ||
696 | ld8 r3=[r2],8*8 | |
697 | ld8 r5=[r4],3*8 | |
698 | ld8 r7=[r6],3*8;; // 48 byte increments | |
699 | mov cr.dcr=r3 | |
700 | mov cr.itm=r5 | |
701 | mov cr.iva=r7;; | |
702 | ||
703 | ld8 r3=[r2],8*8;; // 64 byte increments | |
704 | // mov cr.pta=r3 | |
705 | ||
706 | ||
707 | // if PSR.ic=1, reading interruption registers causes an illegal operation fault | |
708 | mov r3=psr;; | |
709 | tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test | |
710 | (p6) st8 [r2]=r0,9*8+160 // increment by 232 byte inc. | |
711 | ||
712 | begin_rskip_intr_regs: | |
713 | (p6) br rSkipIntrRegs;; | |
714 | ||
715 | add r4=8,r2 // duplicate r2 in r4 | |
716 | add r6=2*8,r2;; // duplicate r2 in r6 | |
717 | ||
718 | ld8 r3=[r2],3*8 | |
719 | ld8 r5=[r4],3*8 | |
720 | ld8 r7=[r6],3*8;; | |
721 | mov cr.ipsr=r3 | |
722 | // mov cr.isr=r5 // cr.isr is read only | |
723 | ||
724 | ld8 r3=[r2],3*8 | |
725 | ld8 r5=[r4],3*8 | |
726 | ld8 r7=[r6],3*8;; | |
727 | mov cr.iip=r3 | |
728 | mov cr.ifa=r5 | |
729 | mov cr.itir=r7;; | |
730 | ||
731 | ld8 r3=[r2],3*8 | |
732 | ld8 r5=[r4],3*8 | |
733 | ld8 r7=[r6],3*8;; | |
734 | mov cr.iipa=r3 | |
735 | mov cr.ifs=r5 | |
736 | mov cr.iim=r7 | |
737 | ||
738 | ld8 r3=[r2],160;; // 160 byte increment | |
739 | mov cr.iha=r3 | |
740 | ||
741 | rSkipIntrRegs: | |
742 | ld8 r3=[r2],152;; // another 152 byte inc. | |
743 | ||
744 | add r4=8,r2 // duplicate r2 in r4 | |
745 | add r6=2*8,r2;; // duplicate r2 in r6 | |
746 | ||
747 | ld8 r3=[r2],8*3 | |
748 | ld8 r5=[r4],8*3 | |
749 | ld8 r7=[r6],8*3;; | |
750 | mov cr.lid=r3 | |
751 | // mov cr.ivr=r5 // cr.ivr is read only | |
752 | mov cr.tpr=r7;; | |
753 | ||
754 | ld8 r3=[r2],8*3 | |
755 | ld8 r5=[r4],8*3 | |
756 | ld8 r7=[r6],8*3;; | |
757 | // mov cr.eoi=r3 | |
758 | // mov cr.irr0=r5 // cr.irr0 is read only | |
759 | // mov cr.irr1=r7;; // cr.irr1 is read only | |
760 | ||
761 | ld8 r3=[r2],8*3 | |
762 | ld8 r5=[r4],8*3 | |
763 | ld8 r7=[r6],8*3;; | |
764 | // mov cr.irr2=r3 // cr.irr2 is read only | |
765 | // mov cr.irr3=r5 // cr.irr3 is read only | |
766 | mov cr.itv=r7;; | |
767 | ||
768 | ld8 r3=[r2],8*7 | |
769 | ld8 r5=[r4],8*7;; | |
770 | mov cr.pmv=r3 | |
771 | mov cr.cmcv=r5;; | |
772 | ||
773 | ld8 r3=[r2],8*23 | |
774 | ld8 r5=[r4],8*23;; | |
775 | adds r2=8*23,r2 | |
776 | adds r4=8*23,r4;; | |
777 | // mov cr.lrr0=r3 | |
778 | // mov cr.lrr1=r5 | |
779 | ||
780 | adds r2=8*2,r2;; | |
781 | ||
782 | restore_ARs: | |
783 | add r4=8,r2 // duplicate r2 in r4 | |
784 | add r6=2*8,r2;; // duplicate r2 in r6 | |
785 | ||
786 | ld8 r3=[r2],3*8 | |
787 | ld8 r5=[r4],3*8 | |
788 | ld8 r7=[r6],3*8;; | |
789 | mov ar.k0=r3 | |
790 | mov ar.k1=r5 | |
791 | mov ar.k2=r7;; | |
792 | ||
793 | ld8 r3=[r2],3*8 | |
794 | ld8 r5=[r4],3*8 | |
795 | ld8 r7=[r6],3*8;; | |
796 | mov ar.k3=r3 | |
797 | mov ar.k4=r5 | |
798 | mov ar.k5=r7;; | |
799 | ||
800 | ld8 r3=[r2],10*8 | |
801 | ld8 r5=[r4],10*8 | |
802 | ld8 r7=[r6],10*8;; | |
803 | mov ar.k6=r3 | |
804 | mov ar.k7=r5 | |
805 | ;; | |
806 | ||
807 | ld8 r3=[r2],3*8 | |
808 | ld8 r5=[r4],3*8 | |
809 | ld8 r7=[r6],3*8;; | |
810 | // mov ar.rsc=r3 | |
811 | // mov ar.bsp=r5 // ar.bsp is read only | |
812 | mov ar.rsc=r0 // make sure that RSE is in enforced lazy mode | |
813 | ;; | |
814 | mov ar.bspstore=r7;; | |
815 | ||
816 | ld8 r9=[r2],8*13;; | |
817 | mov ar.rnat=r9 | |
818 | ||
819 | mov ar.rsc=r3 | |
820 | ld8 r3=[r2],8*4;; | |
821 | mov ar.ccv=r3 | |
822 | ||
823 | ld8 r3=[r2],8*4;; | |
824 | mov ar.unat=r3 | |
825 | ||
826 | ld8 r3=[r2],8*4;; | |
827 | mov ar.fpsr=r3 | |
828 | ||
829 | ld8 r3=[r2],160;; // 160 | |
830 | // mov ar.itc=r3 | |
831 | ||
832 | ld8 r3=[r2],8;; | |
833 | mov ar.pfs=r3 | |
834 | ||
835 | ld8 r3=[r2],8;; | |
836 | mov ar.lc=r3 | |
837 | ||
838 | ld8 r3=[r2];; | |
839 | mov ar.ec=r3 | |
840 | add r2=8*62,r2;; // padding | |
841 | ||
842 | restore_RRs: | |
843 | mov r5=ar.lc | |
844 | mov ar.lc=0x08-1 | |
845 | movl r4=0x00;; | |
846 | cStRRr: | |
847 | dep.z r7=r4,61,3 | |
848 | ld8 r3=[r2],8;; | |
849 | mov rr[r7]=r3 // what are its access privileges? | |
850 | add r4=1,r4 | |
851 | br.cloop.sptk.few cStRRr | |
852 | ;; | |
853 | mov ar.lc=r5 | |
854 | ;; | |
855 | end_os_mca_restore: | |
856 | br ia64_os_mca_done_restore;; | |
857 | ||
858 | //EndStub////////////////////////////////////////////////////////////////////// | |
859 | ||
860 | ||
861 | // ok, the issue here is that we need to save state information so | |
862 | // it can be useable by the kernel debugger and show regs routines. | |
863 | // In order to do this, our best bet is save the current state (plus | |
864 | // the state information obtain from the MIN_STATE_AREA) into a pt_regs | |
865 | // format. This way we can pass it on in a useable format. | |
866 | // | |
867 | ||
868 | // | |
869 | // SAL to OS entry point for INIT on the monarch processor | |
870 | // This has been defined for registration purposes with SAL | |
871 | // as a part of ia64_mca_init. | |
872 | // | |
873 | // When we get here, the following registers have been | |
874 | // set by the SAL for our use | |
875 | // | |
876 | // 1. GR1 = OS INIT GP | |
877 | // 2. GR8 = PAL_PROC physical address | |
878 | // 3. GR9 = SAL_PROC physical address | |
879 | // 4. GR10 = SAL GP (physical) | |
880 | // 5. GR11 = Init Reason | |
881 | // 0 = Received INIT for event other than crash dump switch | |
882 | // 1 = Received wakeup at the end of an OS_MCA corrected machine check | |
883 | // 2 = Received INIT due to CrashDump switch assertion | |
884 | // | |
885 | // 6. GR12 = Return address to location within SAL_INIT procedure | |
886 | ||
887 | ||
888 | GLOBAL_ENTRY(ia64_monarch_init_handler) | |
889 | .prologue | |
890 | // stash the information the SAL passed to os | |
891 | SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2) | |
892 | ;; | |
893 | SAVE_MIN_WITH_COVER | |
894 | ;; | |
895 | mov r8=cr.ifa | |
896 | mov r9=cr.isr | |
897 | adds r3=8,r2 // set up second base pointer | |
898 | ;; | |
899 | SAVE_REST | |
900 | ||
901 | // ok, enough should be saved at this point to be dangerous, and supply | |
902 | // information for a dump | |
903 | // We need to switch to Virtual mode before hitting the C functions. | |
904 | ||
905 | movl r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN | |
906 | mov r3=psr // get the current psr, minimum enabled at this point | |
907 | ;; | |
908 | or r2=r2,r3 | |
909 | ;; | |
910 | movl r3=IVirtual_Switch | |
911 | ;; | |
912 | mov cr.iip=r3 // short return to set the appropriate bits | |
913 | mov cr.ipsr=r2 // need to do an rfi to set appropriate bits | |
914 | ;; | |
915 | rfi | |
916 | ;; | |
917 | IVirtual_Switch: | |
918 | // | |
919 | // We should now be running virtual | |
920 | // | |
921 | // Let's call the C handler to get the rest of the state info | |
922 | // | |
923 | alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!) | |
924 | ;; | |
925 | adds out0=16,sp // out0 = pointer to pt_regs | |
926 | ;; | |
927 | DO_SAVE_SWITCH_STACK | |
928 | .body | |
929 | adds out1=16,sp // out0 = pointer to switch_stack | |
930 | ||
931 | br.call.sptk.many rp=ia64_init_handler | |
932 | .ret1: | |
933 | ||
934 | return_from_init: | |
935 | br.sptk return_from_init | |
936 | END(ia64_monarch_init_handler) | |
937 | ||
938 | // | |
939 | // SAL to OS entry point for INIT on the slave processor | |
940 | // This has been defined for registration purposes with SAL | |
941 | // as a part of ia64_mca_init. | |
942 | // | |
943 | ||
944 | GLOBAL_ENTRY(ia64_slave_init_handler) | |
945 | 1: br.sptk 1b | |
946 | END(ia64_slave_init_handler) |