/*
 * Machine check exception handling CPU-side for power7 and power8
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright 2013 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */
23 #define pr_fmt(fmt) "mce_power: " fmt
#include <linux/types.h>
#include <linux/ptrace.h>
#include <asm/mmu.h>
#include <asm/mce.h>
#include <asm/machdep.h>
#include <asm/pgtable.h>
#include <asm/pte-walk.h>
#include <asm/sstep.h>
#include <asm/exception-64s.h>
36 * Convert an address related to an mm to a PFN. NOTE: we are in real
37 * mode, we could potentially race with page table updates.
39 static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
50 local_irq_save(flags);
51 if (mm == current->mm)
52 ptep = find_current_mm_pte(mm->pgd, addr, NULL, NULL);
54 ptep = find_init_mm_pte(addr, NULL);
55 local_irq_restore(flags);
56 if (!ptep || pte_special(*ptep))
58 return pte_pfn(*ptep);
61 /* flush SLBs and reload */
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Invalidate every SLB entry in real mode, then reload the bolted
 * entries from the SLB shadow so the kernel can keep running after
 * an SLB parity/multihit machine check.
 */
void flush_and_reload_slb(void)
{
	/* Invalidate all SLBs */
	slb_flush_all_realmode();

#ifdef CONFIG_KVM_BOOK3S_HANDLER
	/*
	 * If machine check is hit when in guest or in transition, we will
	 * only flush the SLBs and continue.
	 */
	if (get_paca()->kvm_hstate.in_guest)
		return;
#endif
	/* Radix has no SLB to reload */
	if (early_radix_enabled())
		return;

	/*
	 * This probably shouldn't happen, but it may be possible it's
	 * called in early boot before SLB shadows are allocated.
	 */
	if (!get_slb_shadow())
		return;

	slb_restore_bolted_realmode();
}
#endif
90 static void flush_erat(void)
92 #ifdef CONFIG_PPC_BOOK3S_64
93 if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) {
94 flush_and_reload_slb();
98 /* PPC_INVALIDATE_ERAT can only be used on ISA v3 and newer */
99 asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
#define MCE_FLUSH_SLB 1
#define MCE_FLUSH_TLB 2
#define MCE_FLUSH_ERAT 3

/*
 * Attempt to correct a translation-structure machine check by flushing
 * the structure named by @what (one of MCE_FLUSH_*).
 *
 * Returns 1 if a flush was performed (error considered corrected),
 * 0 if the request is not supported in this configuration.
 */
static int mce_flush(int what)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (what == MCE_FLUSH_SLB) {
		flush_and_reload_slb();
		return 1;
	}
#endif
	if (what == MCE_FLUSH_ERAT) {
		flush_erat();
		return 1;
	}
	if (what == MCE_FLUSH_TLB) {
		tlbiel_all();
		return 1;
	}

	return 0;
}
126 #define SRR1_MC_LOADSTORE(srr1) ((srr1) & PPC_BIT(42))
128 struct mce_ierror_table {
129 unsigned long srr1_mask;
130 unsigned long srr1_value;
131 bool nip_valid; /* nip is a valid indicator of faulting address */
132 unsigned int error_type;
133 unsigned int error_subtype;
134 unsigned int initiator;
135 unsigned int severity;
/*
 * POWER7 instruction-side machine check decode table. Rows are tried
 * in order; the first (mask, value) match wins. The zero-mask row is
 * the end-of-table sentinel.
 */
static const struct mce_ierror_table mce_p7_ierror_table[] = {
{ 0x00000000001c0000, 0x0000000000040000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000001c0000, 0x0000000000080000, true,
  MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000001c0000, 0x00000000000c0000, true,
  MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000001c0000, 0x0000000000100000, true,
  MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_INDETERMINATE, /* BOTH */
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000001c0000, 0x0000000000140000, true,
  MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000001c0000, 0x0000000000180000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000001c0000, 0x00000000001c0000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0, 0, 0, 0, 0, 0 } };
/*
 * POWER8 instruction-side machine check decode table. Same layout as
 * the P7 table but the mask also covers SRR1 bit 36 (0x08000000),
 * which P8 uses for link (timeout) errors.
 */
static const struct mce_ierror_table mce_p8_ierror_table[] = {
{ 0x00000000081c0000, 0x0000000000040000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000080000, true,
  MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x00000000000c0000, true,
  MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000100000, true,
  MCE_ERROR_TYPE_ERAT,MCE_ERAT_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000140000, true,
  MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000180000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x00000000001c0000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000008000000, true,
  MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_IFETCH_TIMEOUT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000008040000, true,
  MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0, 0, 0, 0, 0, 0 } };
/*
 * POWER9 instruction-side machine check decode table. Adds the RA
 * (real address) error encodings; the two rows marked ASYNC are
 * asynchronous stores and therefore fatal (no valid NIP to recover at).
 */
static const struct mce_ierror_table mce_p9_ierror_table[] = {
{ 0x00000000081c0000, 0x0000000000040000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000080000, true,
  MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x00000000000c0000, true,
  MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000100000, true,
  MCE_ERROR_TYPE_ERAT,MCE_ERAT_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000140000, true,
  MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000000180000, true,
  MCE_ERROR_TYPE_UE,  MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x00000000001c0000, true,
  MCE_ERROR_TYPE_RA,  MCE_RA_ERROR_IFETCH_FOREIGN,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000008000000, true,
  MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_IFETCH_TIMEOUT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000008040000, true,
  MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x00000000080c0000, true,
  MCE_ERROR_TYPE_RA,  MCE_RA_ERROR_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000008100000, true,
  MCE_ERROR_TYPE_RA,  MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0x00000000081c0000, 0x0000000008140000, false,
  MCE_ERROR_TYPE_RA,  MCE_RA_ERROR_STORE,
  MCE_INITIATOR_CPU,  MCE_SEV_FATAL, }, /* ASYNC is fatal */
{ 0x00000000081c0000, 0x0000000008180000, false,
  MCE_ERROR_TYPE_LINK,MCE_LINK_ERROR_STORE_TIMEOUT,
  MCE_INITIATOR_CPU,  MCE_SEV_FATAL, }, /* ASYNC is fatal */
{ 0x00000000081c0000, 0x00000000081c0000, true,
  MCE_ERROR_TYPE_RA,  MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN,
  MCE_INITIATOR_CPU,  MCE_SEV_ERROR_SYNC, },
{ 0, 0, 0, 0, 0, 0 } };
237 struct mce_derror_table {
238 unsigned long dsisr_value;
239 bool dar_valid; /* dar is a valid indicator of faulting address */
240 unsigned int error_type;
241 unsigned int error_subtype;
242 unsigned int initiator;
243 unsigned int severity;
246 static const struct mce_derror_table mce_p7_derror_table[] = {
248 MCE_ERROR_TYPE_UE, MCE_UE_ERROR_LOAD_STORE,
249 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
251 MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
252 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
254 MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT,
255 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
257 MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
258 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
260 MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, /* Before PARITY */
261 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
263 MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
264 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
266 MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_INDETERMINATE, /* BOTH */
267 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
268 { 0, false, 0, 0, 0, 0 } };
270 static const struct mce_derror_table mce_p8_derror_table[] = {
272 MCE_ERROR_TYPE_UE, MCE_UE_ERROR_LOAD_STORE,
273 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
275 MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
276 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
278 MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_LOAD_TIMEOUT,
279 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
281 MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT,
282 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
284 MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT,
285 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
287 MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
288 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
290 MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, /* SECONDARY ERAT */
291 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
293 MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, /* Before PARITY */
294 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
296 MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
297 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
298 { 0, false, 0, 0, 0, 0 } };
300 static const struct mce_derror_table mce_p9_derror_table[] = {
302 MCE_ERROR_TYPE_UE, MCE_UE_ERROR_LOAD_STORE,
303 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
305 MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
306 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
308 MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_LOAD_TIMEOUT,
309 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
311 MCE_ERROR_TYPE_LINK, MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT,
312 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
314 MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT,
315 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
317 MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT,
318 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
320 MCE_ERROR_TYPE_USER, MCE_USER_ERROR_TLBIE,
321 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
323 MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, /* Before PARITY */
324 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
326 MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY,
327 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
329 MCE_ERROR_TYPE_RA, MCE_RA_ERROR_LOAD,
330 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
332 MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
333 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
335 MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN,
336 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
338 MCE_ERROR_TYPE_RA, MCE_RA_ERROR_LOAD_STORE_FOREIGN,
339 MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
340 { 0, false, 0, 0, 0, 0 } };
342 static int mce_find_instr_ea_and_pfn(struct pt_regs *regs, uint64_t *addr,
346 * Carefully look at the NIP to determine
347 * the instruction to analyse. Reading the NIP
348 * in real-mode is tricky and can lead to recursive
352 unsigned long pfn, instr_addr;
353 struct instruction_op op;
354 struct pt_regs tmp = *regs;
356 pfn = addr_to_pfn(regs, regs->nip);
357 if (pfn != ULONG_MAX) {
358 instr_addr = (pfn << PAGE_SHIFT) + (regs->nip & ~PAGE_MASK);
359 instr = *(unsigned int *)(instr_addr);
360 if (!analyse_instr(&op, &tmp, instr)) {
361 pfn = addr_to_pfn(regs, op.ea);
363 *phys_addr = (pfn << PAGE_SHIFT);
367 * analyse_instr() might fail if the instruction
368 * is not a load/store, although this is unexpected
369 * for load/store errors or if we got the NIP
377 static int mce_handle_ierror(struct pt_regs *regs,
378 const struct mce_ierror_table table[],
379 struct mce_error_info *mce_err, uint64_t *addr,
382 uint64_t srr1 = regs->msr;
388 for (i = 0; table[i].srr1_mask; i++) {
389 if ((srr1 & table[i].srr1_mask) != table[i].srr1_value)
392 /* attempt to correct the error */
393 switch (table[i].error_type) {
394 case MCE_ERROR_TYPE_SLB:
395 handled = mce_flush(MCE_FLUSH_SLB);
397 case MCE_ERROR_TYPE_ERAT:
398 handled = mce_flush(MCE_FLUSH_ERAT);
400 case MCE_ERROR_TYPE_TLB:
401 handled = mce_flush(MCE_FLUSH_TLB);
405 /* now fill in mce_error_info */
406 mce_err->error_type = table[i].error_type;
407 switch (table[i].error_type) {
408 case MCE_ERROR_TYPE_UE:
409 mce_err->u.ue_error_type = table[i].error_subtype;
411 case MCE_ERROR_TYPE_SLB:
412 mce_err->u.slb_error_type = table[i].error_subtype;
414 case MCE_ERROR_TYPE_ERAT:
415 mce_err->u.erat_error_type = table[i].error_subtype;
417 case MCE_ERROR_TYPE_TLB:
418 mce_err->u.tlb_error_type = table[i].error_subtype;
420 case MCE_ERROR_TYPE_USER:
421 mce_err->u.user_error_type = table[i].error_subtype;
423 case MCE_ERROR_TYPE_RA:
424 mce_err->u.ra_error_type = table[i].error_subtype;
426 case MCE_ERROR_TYPE_LINK:
427 mce_err->u.link_error_type = table[i].error_subtype;
430 mce_err->severity = table[i].severity;
431 mce_err->initiator = table[i].initiator;
432 if (table[i].nip_valid) {
434 if (mce_err->severity == MCE_SEV_ERROR_SYNC &&
435 table[i].error_type == MCE_ERROR_TYPE_UE) {
438 if (get_paca()->in_mce < MAX_MCE_DEPTH) {
439 pfn = addr_to_pfn(regs, regs->nip);
440 if (pfn != ULONG_MAX) {
450 mce_err->error_type = MCE_ERROR_TYPE_UNKNOWN;
451 mce_err->severity = MCE_SEV_ERROR_SYNC;
452 mce_err->initiator = MCE_INITIATOR_CPU;
457 static int mce_handle_derror(struct pt_regs *regs,
458 const struct mce_derror_table table[],
459 struct mce_error_info *mce_err, uint64_t *addr,
462 uint64_t dsisr = regs->dsisr;
469 for (i = 0; table[i].dsisr_value; i++) {
470 if (!(dsisr & table[i].dsisr_value))
473 /* attempt to correct the error */
474 switch (table[i].error_type) {
475 case MCE_ERROR_TYPE_SLB:
476 if (mce_flush(MCE_FLUSH_SLB))
479 case MCE_ERROR_TYPE_ERAT:
480 if (mce_flush(MCE_FLUSH_ERAT))
483 case MCE_ERROR_TYPE_TLB:
484 if (mce_flush(MCE_FLUSH_TLB))
490 * Attempt to handle multiple conditions, but only return
491 * one. Ensure uncorrectable errors are first in the table
497 /* now fill in mce_error_info */
498 mce_err->error_type = table[i].error_type;
499 switch (table[i].error_type) {
500 case MCE_ERROR_TYPE_UE:
501 mce_err->u.ue_error_type = table[i].error_subtype;
503 case MCE_ERROR_TYPE_SLB:
504 mce_err->u.slb_error_type = table[i].error_subtype;
506 case MCE_ERROR_TYPE_ERAT:
507 mce_err->u.erat_error_type = table[i].error_subtype;
509 case MCE_ERROR_TYPE_TLB:
510 mce_err->u.tlb_error_type = table[i].error_subtype;
512 case MCE_ERROR_TYPE_USER:
513 mce_err->u.user_error_type = table[i].error_subtype;
515 case MCE_ERROR_TYPE_RA:
516 mce_err->u.ra_error_type = table[i].error_subtype;
518 case MCE_ERROR_TYPE_LINK:
519 mce_err->u.link_error_type = table[i].error_subtype;
522 mce_err->severity = table[i].severity;
523 mce_err->initiator = table[i].initiator;
524 if (table[i].dar_valid)
526 else if (mce_err->severity == MCE_SEV_ERROR_SYNC &&
527 table[i].error_type == MCE_ERROR_TYPE_UE) {
529 * We do a maximum of 4 nested MCE calls, see
530 * kernel/exception-64s.h
532 if (get_paca()->in_mce < MAX_MCE_DEPTH)
533 mce_find_instr_ea_and_pfn(regs, addr, phys_addr);
541 mce_err->error_type = MCE_ERROR_TYPE_UNKNOWN;
542 mce_err->severity = MCE_SEV_ERROR_SYNC;
543 mce_err->initiator = MCE_INITIATOR_CPU;
548 static long mce_handle_ue_error(struct pt_regs *regs)
553 * On specific SCOM read via MMIO we may get a machine check
554 * exception with SRR0 pointing inside opal. If that is the
555 * case OPAL may have recovery address to re-read SCOM data in
556 * different way and hence we can recover from this MC.
559 if (ppc_md.mce_check_early_recovery) {
560 if (ppc_md.mce_check_early_recovery(regs))
566 static long mce_handle_error(struct pt_regs *regs,
567 const struct mce_derror_table dtable[],
568 const struct mce_ierror_table itable[])
570 struct mce_error_info mce_err = { 0 };
571 uint64_t addr, phys_addr = ULONG_MAX;
572 uint64_t srr1 = regs->msr;
575 if (SRR1_MC_LOADSTORE(srr1))
576 handled = mce_handle_derror(regs, dtable, &mce_err, &addr,
579 handled = mce_handle_ierror(regs, itable, &mce_err, &addr,
582 if (!handled && mce_err.error_type == MCE_ERROR_TYPE_UE)
583 handled = mce_handle_ue_error(regs);
585 save_mce_event(regs, handled, &mce_err, regs->nip, addr, phys_addr);
590 long __machine_check_early_realmode_p7(struct pt_regs *regs)
592 /* P7 DD1 leaves top bits of DSISR undefined */
593 regs->dsisr &= 0x0000ffff;
595 return mce_handle_error(regs, mce_p7_derror_table, mce_p7_ierror_table);
598 long __machine_check_early_realmode_p8(struct pt_regs *regs)
600 return mce_handle_error(regs, mce_p8_derror_table, mce_p8_ierror_table);
603 long __machine_check_early_realmode_p9(struct pt_regs *regs)
606 * On POWER9 DD2.1 and below, it's possible to get a machine check
607 * caused by a paste instruction where only DSISR bit 25 is set. This
608 * will result in the MCE handler seeing an unknown event and the kernel
609 * crashing. An MCE that occurs like this is spurious, so we don't need
610 * to do anything in terms of servicing it. If there is something that
611 * needs to be serviced, the CPU will raise the MCE again with the
612 * correct DSISR so that it can be serviced properly. So detect this
613 * case and mark it as handled.
615 if (SRR1_MC_LOADSTORE(regs->msr) && regs->dsisr == 0x02000000)
618 return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table);