[PATCH] x86-64: x86_64-make-the-numa-hash-function-nodemap-allocation fix fix
[linux-2.6-block.git] / arch / i386 / kernel / traps.c
CommitLineData
1da177e4
LT
1/*
2 * linux/arch/i386/traps.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * Pentium III FXSR, SSE support
7 * Gareth Hughes <gareth@valinux.com>, May 2000
8 */
9
10/*
11 * 'Traps.c' handles hardware traps and faults after we have saved some
12 * state in 'asm.s'.
13 */
1da177e4
LT
14#include <linux/sched.h>
15#include <linux/kernel.h>
16#include <linux/string.h>
17#include <linux/errno.h>
18#include <linux/timer.h>
19#include <linux/mm.h>
20#include <linux/init.h>
21#include <linux/delay.h>
22#include <linux/spinlock.h>
23#include <linux/interrupt.h>
24#include <linux/highmem.h>
25#include <linux/kallsyms.h>
26#include <linux/ptrace.h>
27#include <linux/utsname.h>
28#include <linux/kprobes.h>
6e274d14 29#include <linux/kexec.h>
176a2718 30#include <linux/unwind.h>
1e2af92e 31#include <linux/uaccess.h>
a36df98a 32#include <linux/nmi.h>
91768d6c 33#include <linux/bug.h>
1da177e4
LT
34
35#ifdef CONFIG_EISA
36#include <linux/ioport.h>
37#include <linux/eisa.h>
38#endif
39
40#ifdef CONFIG_MCA
41#include <linux/mca.h>
42#endif
43
44#include <asm/processor.h>
45#include <asm/system.h>
1da177e4
LT
46#include <asm/io.h>
47#include <asm/atomic.h>
48#include <asm/debugreg.h>
49#include <asm/desc.h>
50#include <asm/i387.h>
51#include <asm/nmi.h>
176a2718 52#include <asm/unwind.h>
1da177e4
LT
53#include <asm/smp.h>
54#include <asm/arch_hooks.h>
55#include <asm/kdebug.h>
2b14a78c 56#include <asm/stacktrace.h>
1da177e4 57
1da177e4
LT
58#include <linux/module.h>
59
60#include "mach_traps.h"
61
29cbc78b
AK
/* When set, an unrecovered NMI causes a panic instead of just a warning. */
int panic_on_unrecovered_nmi;

/* System-call entry point, implemented in assembly (entry.S). */
asmlinkage int system_call(void);

/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq = 0;

/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };

/* Assembly trap/exception entry stubs (entry.S); the IDT gates installed
 * by trap_init() point at these. */
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);

/* Number of stack words dumped by show_stack_log_lvl() during an oops. */
int kstack_depth_to_print = 24;
/* Notifier chain consulted on traps/oopses (kprobes, kdb, ...). */
ATOMIC_NOTIFIER_HEAD(i386die_chain);
1da177e4
LT
98
/*
 * Add a callback to the i386 die notifier chain.
 *
 * vmalloc_sync_all() is called first so that vmalloc-area page tables
 * are synced into every process's page directory before the notifier
 * can be invoked from a fault context.
 */
int register_die_notifier(struct notifier_block *nb)
{
	vmalloc_sync_all();
	return atomic_notifier_chain_register(&i386die_chain, nb);
}
EXPORT_SYMBOL(register_die_notifier);	/* used modular by kdb */
1da177e4 105
e041c683
AS
/* Remove a callback previously added with register_die_notifier(). */
int unregister_die_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&i386die_chain, nb);
}
EXPORT_SYMBOL(unregister_die_notifier);	/* used modular by kdb */
e041c683 111
1da177e4
LT
112static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
113{
114 return p > (void *)tinfo &&
115 p < (void *)tinfo + THREAD_SIZE - 3;
116}
117
/*
 * Walk one kernel stack (the region owned by @tinfo), reporting each
 * return address found to @ops->address().  Returns the final frame
 * pointer so the caller can continue on the next linked stack.
 */
static inline unsigned long print_context_stack(struct thread_info *tinfo,
				unsigned long *stack, unsigned long ebp,
				struct stacktrace_ops *ops, void *data)
{
	unsigned long addr;

#ifdef CONFIG_FRAME_POINTER
	/* Reliable walk: follow the saved-%ebp frame chain. */
	while (valid_stack_ptr(tinfo, (void *)ebp)) {
		unsigned long new_ebp;
		/* The return address sits one word above the saved frame ptr. */
		addr = *(unsigned long *)(ebp + 4);
		ops->address(data, addr);
		/*
		 * break out of recursive entries (such as
		 * end_of_stack_stop_unwind_function). Also,
		 * we can never allow a frame pointer to
		 * move downwards!
		 */
		new_ebp = *(unsigned long *)ebp;
		if (new_ebp <= ebp)
			break;
		ebp = new_ebp;
	}
#else
	/* No frame pointers: scan every stack word and report anything that
	 * looks like a kernel text address (may yield false positives). */
	while (valid_stack_ptr(tinfo, stack)) {
		addr = *stack++;
		if (__kernel_text_address(addr))
			ops->address(data, addr);
	}
#endif
	return ebp;
}
149
b615ebda
AK
/* Shorthand for emitting a warning through the ops table. */
#define MSG(msg) ops->warning(data, msg)

/*
 * Walk every stack reachable from @stack (or derived from @task / the
 * current frame when @stack is NULL), invoking the @ops callbacks on
 * each address.  Crosses from one stack to the next (e.g. IRQ stack ->
 * task stack) via thread_info->previous_esp.
 */
void dump_trace(struct task_struct *task, struct pt_regs *regs,
	        unsigned long *stack,
		struct stacktrace_ops *ops, void *data)
{
	unsigned long ebp = 0;

	if (!task)
		task = current;

	if (!stack) {
		/* Use the address of a local as a pointer into our own
		 * stack; for another task, use its saved %esp. */
		unsigned long dummy;
		stack = &dummy;
		if (task && task != current)
			stack = (unsigned long *)task->thread.esp;
	}

#ifdef CONFIG_FRAME_POINTER
	if (!ebp) {
		if (task == current) {
			/* Grab ebp right from our regs */
			asm ("movl %%ebp, %0" : "=r" (ebp) : );
		} else {
			/* ebp is the last reg pushed by switch_to */
			ebp = *(unsigned long *) task->thread.esp;
		}
	}
#endif

	while (1) {
		struct thread_info *context;
		/* thread_info sits at the base of the THREAD_SIZE-aligned
		 * stack containing @stack. */
		context = (struct thread_info *)
			((unsigned long)stack & (~(THREAD_SIZE - 1)));
		ebp = print_context_stack(context, stack, ebp, ops, data);
		/* Should be after the line below, but somewhere
		   in early boot context comes out corrupted and we
		   can't reference it -AK */
		if (ops->stack(data, "IRQ") < 0)
			break;
		stack = (unsigned long*)context->previous_esp;
		if (!stack)
			break;
		/* Long dumps can be slow; keep the NMI watchdog quiet. */
		touch_nmi_watchdog();
	}
}
EXPORT_SYMBOL(dump_trace);
197
/*
 * stacktrace_ops warning callback (symbol variant): print the log-level
 * prefix carried in @data, then @msg with @symbol resolved into it.
 */
static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/*
	 * Never pass a non-literal as the format string: the original
	 * printk(data) would misinterpret any '%' in the prefix as a
	 * conversion specifier and read garbage varargs.
	 */
	printk("%s", (char *)data);
	print_symbol(msg, symbol);
	printk("\n");
}
205
/* stacktrace_ops warning callback: prefix (from @data) + message. */
static void print_trace_warning(void *data, char *msg)
{
	const char *log_lvl = data;

	printk("%s%s\n", log_lvl, msg);
}
210
/*
 * stacktrace_ops stack-boundary callback: nothing to print; return 0 so
 * dump_trace() keeps walking onto the next stack.
 */
static int print_trace_stack(void *data, char *name)
{
	return 0;
}
215
/*
 * Print one address/symbol entries per line.
 */
static void print_trace_address(void *data, unsigned long addr)
{
	char *log_lvl = data;

	printk("%s [<%08lx>] ", log_lvl, addr);
	print_symbol("%s\n", addr);
}
224
/* Callback table wiring dump_trace() to the printk-based printers above. */
static struct stacktrace_ops print_trace_ops = {
	.warning = print_trace_warning,
	.warning_symbol = print_trace_warning_symbol,
	.stack = print_trace_stack,
	.address = print_trace_address,
};
231
/* Dump a backtrace with every output line prefixed by @log_lvl. */
static void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
		   unsigned long * stack, char *log_lvl)
{
	dump_trace(task, regs, stack, &print_trace_ops, log_lvl);
	printk("%s =======================\n", log_lvl);
}
1da177e4 239
2b14a78c
AK
/* Public backtrace entry point: dump with no log-level prefix. */
void show_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long * stack)
{
	show_trace_log_lvl(task, regs, stack, "");
}
245
176a2718
JB
246static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
247 unsigned long *esp, char *log_lvl)
1da177e4
LT
248{
249 unsigned long *stack;
250 int i;
251
252 if (esp == NULL) {
253 if (task)
254 esp = (unsigned long*)task->thread.esp;
255 else
256 esp = (unsigned long *)&esp;
257 }
258
259 stack = esp;
260 for(i = 0; i < kstack_depth_to_print; i++) {
261 if (kstack_end(stack))
262 break;
75874d5c
CE
263 if (i && ((i % 8) == 0))
264 printk("\n%s ", log_lvl);
1da177e4
LT
265 printk("%08lx ", *stack++);
266 }
75874d5c 267 printk("\n%sCall Trace:\n", log_lvl);
176a2718 268 show_trace_log_lvl(task, regs, esp, log_lvl);
7aa89746
CE
269}
270
/* Dump raw stack words and a call trace with no log-level prefix. */
void show_stack(struct task_struct *task, unsigned long *esp)
{
	printk(" ");
	show_stack_log_lvl(task, NULL, esp, "");
}
276
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	/* The address of this local is a pointer into the current stack. */
	unsigned long stack;

	show_trace(current, NULL, &stack);
}

EXPORT_SYMBOL(dump_stack);
288
/*
 * Print the full register state for an oops report.  For kernel-mode
 * faults the stack contents and the code bytes around EIP are dumped
 * as well.
 */
void show_registers(struct pt_regs *regs)
{
	int i;
	int in_kernel = 1;
	unsigned long esp;
	unsigned short ss;

	/* In-kernel fault: no stack switch occurred, so the pre-exception
	 * %esp is the address of the esp slot in the exception frame. */
	esp = (unsigned long) (&regs->esp);
	savesegment(ss, ss);
	if (user_mode_vm(regs)) {
		in_kernel = 0;
		esp = regs->esp;
		ss = regs->xss & 0xffff;
	}
	print_modules();
	printk(KERN_EMERG "CPU: %d\n"
		KERN_EMERG "EIP: %04x:[<%08lx>] %s VLI\n"
		KERN_EMERG "EFLAGS: %08lx (%s %.*s)\n",
		smp_processor_id(), 0xffff & regs->xcs, regs->eip,
		print_tainted(), regs->eflags, init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
	printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
		regs->eax, regs->ebx, regs->ecx, regs->edx);
	printk(KERN_EMERG "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
		regs->esi, regs->edi, regs->ebp, esp);
	printk(KERN_EMERG "ds: %04x es: %04x ss: %04x\n",
		regs->xds & 0xffff, regs->xes & 0xffff, ss);
	printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
		TASK_COMM_LEN, current->comm, current->pid,
		current_thread_info(), current, current->thread_info);
	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (in_kernel) {
		u8 *eip;
		int code_bytes = 64;
		unsigned char c;

		printk("\n" KERN_EMERG "Stack: ");
		show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);

		printk(KERN_EMERG "Code: ");

		/* Start the dump well before EIP for context; fall back to
		 * EIP itself if that address is bad or unreadable. */
		eip = (u8 *)regs->eip - 43;
		if (eip < (u8 *)PAGE_OFFSET ||
			probe_kernel_address(eip, c)) {
			/* try starting at EIP */
			eip = (u8 *)regs->eip;
			code_bytes = 32;
		}
		for (i = 0; i < code_bytes; i++, eip++) {
			if (eip < (u8 *)PAGE_OFFSET ||
				probe_kernel_address(eip, c)) {
				printk(" Bad EIP value.");
				break;
			}
			/* Highlight the faulting byte with <..> brackets. */
			if (eip == (u8 *)regs->eip)
				printk("<%02x> ", c);
			else
				printk("%02x ", c);
		}
	}
	printk("\n");
}
356
91768d6c 357int is_valid_bugaddr(unsigned long eip)
1da177e4
LT
358{
359 unsigned short ud2;
1da177e4
LT
360
361 if (eip < PAGE_OFFSET)
91768d6c 362 return 0;
11a4180c 363 if (probe_kernel_address((unsigned short *)eip, ud2))
91768d6c 364 return 0;
1da177e4 365
91768d6c 366 return ud2 == 0x0b0f;
1da177e4
LT
367}
368
91768d6c
JF
/*
 * This is gone through when something in the kernel has done something bad and
 * is about to be terminated.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
	/* Recursion-aware oops lock: one CPU owns it; re-entry on the same
	 * CPU only increments the depth counter instead of deadlocking. */
	static struct {
		spinlock_t lock;
		u32 lock_owner;
		int lock_owner_depth;
	} die = {
		.lock = __SPIN_LOCK_UNLOCKED(die.lock),
		.lock_owner = -1,
		.lock_owner_depth = 0
	};
	static int die_counter;
	unsigned long flags;

	oops_enter();

	if (die.lock_owner != raw_smp_processor_id()) {
		console_verbose();
		spin_lock_irqsave(&die.lock, flags);
		die.lock_owner = smp_processor_id();
		die.lock_owner_depth = 0;
		bust_spinlocks(1);
	}
	else
		/* Nested die() on the same CPU: we already hold the lock;
		 * just capture flags for the matching unlock below. */
		local_save_flags(flags);

	if (++die.lock_owner_depth < 3) {
		int nl = 0;
		unsigned long esp;
		unsigned short ss;

		report_bug(regs->eip);

		printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
		printk(KERN_EMERG "PREEMPT ");
		nl = 1;
#endif
#ifdef CONFIG_SMP
		if (!nl)
			printk(KERN_EMERG);
		printk("SMP ");
		nl = 1;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
		if (!nl)
			printk(KERN_EMERG);
		printk("DEBUG_PAGEALLOC");
		nl = 1;
#endif
		if (nl)
			printk("\n");
		if (notify_die(DIE_OOPS, str, regs, err,
			current->thread.trap_no, SIGSEGV) !=
				NOTIFY_STOP) {
			show_registers(regs);
			/* Executive summary in case the oops scrolled away */
			esp = (unsigned long) (&regs->esp);
			savesegment(ss, ss);
			if (user_mode(regs)) {
				esp = regs->esp;
				ss = regs->xss & 0xffff;
			}
			printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
			print_symbol("%s", regs->eip);
			printk(" SS:ESP %04x:%08lx\n", ss, esp);
		}
		else
			/* A notifier handled it: NULL regs skips the kill
			 * path after the lock is released. */
			regs = NULL;
	} else
		printk(KERN_EMERG "Recursive die() failure, output suppressed\n");

	bust_spinlocks(0);
	die.lock_owner = -1;
	spin_unlock_irqrestore(&die.lock, flags);

	if (!regs)
		return;

	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	do_exit(SIGSEGV);
}
464
/* die() only when the fault did not come from (vm86 or real) user mode. */
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
	if (user_mode_vm(regs))
		return;

	die(str, regs, err);
}
470
3d97ae5b
PP
/*
 * Common trap dispatcher: record the trap in the current task, then
 * route to vm86 fixup, a signal for user mode, or exception-table
 * fixup / die() for kernel mode.
 */
static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
			      struct pt_regs * regs, long error_code,
			      siginfo_t *info)
{
	struct task_struct *tsk = current;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

	/* Traps raised in virtual-8086 mode get vm86 handling when the
	 * generator asked for it; otherwise they fall through to a signal. */
	if (regs->eflags & VM_MASK) {
		if (vm86)
			goto vm86_trap;
		goto trap_signal;
	}

	if (!user_mode(regs))
		goto kernel_trap;

	trap_signal: {
		/* Deliver with detailed siginfo when the caller provided it. */
		if (info)
			force_sig_info(signr, info, tsk);
		else
			force_sig(signr, tsk);
		return;
	}

	kernel_trap: {
		/* Kernel fault: try the exception table; otherwise fatal. */
		if (!fixup_exception(regs))
			die(str, regs, error_code);
		return;
	}

	vm86_trap: {
		int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
		if (ret) goto trap_signal;
		return;
	}
}
508
/*
 * Trap-handler generators: each macro expands to a do_<name>() handler
 * that first offers the event to the die notifier chain and, unless a
 * notifier returns NOTIFY_STOP, hands off to do_trap().
 *
 *   DO_ERROR           - no siginfo, no vm86 special-casing
 *   DO_ERROR_INFO      - fills in a siginfo_t (sicode/siaddr)
 *   DO_VM86_ERROR      - no siginfo, vm86-aware
 *   DO_VM86_ERROR_INFO - siginfo + vm86-aware
 */
#define DO_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
}

#define DO_VM86_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
						== NOTIFY_STOP) \
		return; \
	do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}
554
/* Instantiate the trap handlers.  Where given, siaddr is the faulting
 * address reported in siginfo.  int3 gets a generated handler only when
 * kprobes is disabled (kprobes supplies its own do_int3 below). */
DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->eip)
#ifndef CONFIG_KPROBES
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
#endif
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
1da177e4 568
3d97ae5b
PP
569fastcall void __kprobes do_general_protection(struct pt_regs * regs,
570 long error_code)
1da177e4
LT
571{
572 int cpu = get_cpu();
573 struct tss_struct *tss = &per_cpu(init_tss, cpu);
574 struct thread_struct *thread = &current->thread;
575
576 /*
577 * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
578 * invalid offset set (the LAZY one) and the faulting thread has
579 * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS
580 * and we set the offset field correctly. Then we let the CPU to
581 * restart the faulting instruction.
582 */
583 if (tss->io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
584 thread->io_bitmap_ptr) {
585 memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
586 thread->io_bitmap_max);
587 /*
588 * If the previously set map was extending to higher ports
589 * than the current one, pad extra space with 0xff (no access).
590 */
591 if (thread->io_bitmap_max < tss->io_bitmap_max)
592 memset((char *) tss->io_bitmap +
593 thread->io_bitmap_max, 0xff,
594 tss->io_bitmap_max - thread->io_bitmap_max);
595 tss->io_bitmap_max = thread->io_bitmap_max;
596 tss->io_bitmap_base = IO_BITMAP_OFFSET;
d5cd4aad 597 tss->io_bitmap_owner = thread;
1da177e4
LT
598 put_cpu();
599 return;
600 }
601 put_cpu();
602
4f339ecb
AN
603 current->thread.error_code = error_code;
604 current->thread.trap_no = 13;
605
1da177e4
LT
606 if (regs->eflags & VM_MASK)
607 goto gp_in_vm86;
608
717b594a 609 if (!user_mode(regs))
1da177e4
LT
610 goto gp_in_kernel;
611
612 current->thread.error_code = error_code;
613 current->thread.trap_no = 13;
614 force_sig(SIGSEGV, current);
615 return;
616
617gp_in_vm86:
618 local_irq_enable();
619 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
620 return;
621
622gp_in_kernel:
623 if (!fixup_exception(regs)) {
624 if (notify_die(DIE_GPF, "general protection fault", regs,
625 error_code, 13, SIGSEGV) == NOTIFY_STOP)
626 return;
627 die("general protection fault", regs, error_code);
628 }
629}
630