#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/exec.h>

#include <trace/events/task.h>
#include "internal.h"
#include "coredump.h"

#include <trace/events/sched.h>

int core_uses_pid;
char core_pattern[CORENAME_MAX_SIZE] = "core";
unsigned int core_pipe_limit;
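
/*
 * All three knobs are runtime-tunable through sysctl; for example
 * (the values here are only illustrative):
 *
 *	echo "core.%e.%p" > /proc/sys/kernel/core_pattern
 *	echo 1 > /proc/sys/kernel/core_uses_pid
 *	echo 4 > /proc/sys/kernel/core_pipe_limit
 */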

struct core_name {
        char *corename;
        int used, size;
};
static atomic_t call_count = ATOMIC_INIT(1);

/* The maximal length of core_pattern is also specified in sysctl.c */

static int expand_corename(struct core_name *cn)
{
        char *old_corename = cn->corename;

        cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
        cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);

        if (!cn->corename) {
                kfree(old_corename);
                return -ENOMEM;
        }

        return 0;
}

static int cn_printf(struct core_name *cn, const char *fmt, ...)
{
        char *cur;
        int need;
        int ret;
        va_list arg;

        va_start(arg, fmt);
        need = vsnprintf(NULL, 0, fmt, arg);
        va_end(arg);

        if (likely(need < cn->size - cn->used - 1))
                goto out_printf;

        ret = expand_corename(cn);
        if (ret)
                goto expand_fail;

out_printf:
        cur = cn->corename + cn->used;
        va_start(arg, fmt);
        vsnprintf(cur, need + 1, fmt, arg);
        va_end(arg);
        cn->used += need;
        return 0;

expand_fail:
        return ret;
}

static void cn_escape(char *str)
{
        for (; *str; str++)
                if (*str == '/')
                        *str = '!';
}
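
/*
 * cn_escape() keeps an expanded value from adding extra path components
 * to the core file name: e.g. a %E value of "/usr/bin/foo" (a
 * hypothetical path) comes out as "!usr!bin!foo".
 */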

static int cn_print_exe_file(struct core_name *cn)
{
        struct file *exe_file;
        char *pathbuf, *path;
        int ret;

        exe_file = get_mm_exe_file(current->mm);
        if (!exe_file) {
                char *commstart = cn->corename + cn->used;
                ret = cn_printf(cn, "%s (path unknown)", current->comm);
                cn_escape(commstart);
                return ret;
        }

        pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
        if (!pathbuf) {
                ret = -ENOMEM;
                goto put_exe_file;
        }

        path = d_path(&exe_file->f_path, pathbuf, PATH_MAX);
        if (IS_ERR(path)) {
                ret = PTR_ERR(path);
                goto free_buf;
        }

        cn_escape(path);

        ret = cn_printf(cn, "%s", path);

free_buf:
        kfree(pathbuf);
put_exe_file:
        fput(exe_file);
        return ret;
}

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
static int format_corename(struct core_name *cn, long signr)
{
        const struct cred *cred = current_cred();
        const char *pat_ptr = core_pattern;
        int ispipe = (*pat_ptr == '|');
        int pid_in_pattern = 0;
        int err = 0;

        cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
        cn->corename = kmalloc(cn->size, GFP_KERNEL);
        cn->used = 0;

        if (!cn->corename)
                return -ENOMEM;

        /* Repeat as long as we have more pattern to process and more output
           space */
        while (*pat_ptr) {
                if (*pat_ptr != '%') {
                        if (*pat_ptr == 0)
                                goto out;
                        err = cn_printf(cn, "%c", *pat_ptr++);
                } else {
                        switch (*++pat_ptr) {
                        /* single % at the end, drop that */
                        case 0:
                                goto out;
                        /* Double percent, output one percent */
                        case '%':
                                err = cn_printf(cn, "%c", '%');
                                break;
                        /* pid */
                        case 'p':
                                pid_in_pattern = 1;
                                err = cn_printf(cn, "%d",
                                                task_tgid_vnr(current));
                                break;
                        /* uid */
                        case 'u':
                                err = cn_printf(cn, "%d",
                                                from_kuid(&init_user_ns,
                                                          cred->uid));
                                break;
                        /* gid */
                        case 'g':
                                err = cn_printf(cn, "%d",
                                                from_kgid(&init_user_ns,
                                                          cred->gid));
                                break;
                        /* signal that caused the coredump */
                        case 's':
                                err = cn_printf(cn, "%ld", signr);
                                break;
                        /* UNIX time of coredump */
                        case 't': {
                                struct timeval tv;
                                do_gettimeofday(&tv);
                                err = cn_printf(cn, "%lu", tv.tv_sec);
                                break;
                        }
                        /* hostname */
                        case 'h': {
                                char *namestart = cn->corename + cn->used;
                                down_read(&uts_sem);
                                err = cn_printf(cn, "%s",
                                                utsname()->nodename);
                                up_read(&uts_sem);
                                cn_escape(namestart);
                                break;
                        }
                        /* executable */
                        case 'e': {
                                char *commstart = cn->corename + cn->used;
                                err = cn_printf(cn, "%s", current->comm);
                                cn_escape(commstart);
                                break;
                        }
                        case 'E':
                                err = cn_print_exe_file(cn);
                                break;
                        /* core limit size */
                        case 'c':
                                err = cn_printf(cn, "%lu",
                                                rlimit(RLIMIT_CORE));
                                break;
                        default:
                                break;
                        }
                        ++pat_ptr;
                }

                if (err)
                        return err;
        }

        /* Backward compatibility with core_uses_pid:
         *
         * If core_pattern does not include a %p (as is the default)
         * and core_uses_pid is set, then .%pid will be appended to
         * the filename. Do not do this for piped commands. */
        if (!ispipe && !pid_in_pattern && core_uses_pid) {
                err = cn_printf(cn, ".%d", task_tgid_vnr(current));
                if (err)
                        return err;
        }
out:
        return ispipe;
}
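
/*
 * Illustration (names and numbers made up): with core_pattern set to
 * "core.%e.%p", a crash of a task named "bash" with tgid 1234 expands
 * to "core.bash.1234".  A leading '|' is not expanded; it makes
 * format_corename() return 1 (ispipe), so do_coredump() treats the
 * rest of the pattern as a usermode helper command line instead of a
 * file name.
 */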

static int zap_process(struct task_struct *start, int exit_code)
{
        struct task_struct *t;
        int nr = 0;

        start->signal->flags = SIGNAL_GROUP_EXIT;
        start->signal->group_exit_code = exit_code;
        start->signal->group_stop_count = 0;

        t = start;
        do {
                task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
                if (t != current && t->mm) {
                        sigaddset(&t->pending.signal, SIGKILL);
                        signal_wake_up(t, 1);
                        nr++;
                }
        } while_each_thread(start, t);

        return nr;
}

static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
                              struct core_state *core_state, int exit_code)
{
        struct task_struct *g, *p;
        unsigned long flags;
        int nr = -EAGAIN;

        spin_lock_irq(&tsk->sighand->siglock);
        if (!signal_group_exit(tsk->signal)) {
                mm->core_state = core_state;
                nr = zap_process(tsk, exit_code);
        }
        spin_unlock_irq(&tsk->sighand->siglock);
        if (unlikely(nr < 0))
                return nr;

        if (atomic_read(&mm->mm_users) == nr + 1)
                goto done;
        /*
         * We should find and kill all tasks which use this mm, and we should
         * count them correctly into ->nr_threads. We don't take tasklist
         * lock, but this is safe wrt:
         *
         * fork:
         *      None of sub-threads can fork after zap_process(leader). All
         *      processes which were created before this point should be
         *      visible to zap_threads() because copy_process() adds the new
         *      process to the tail of init_task.tasks list, and lock/unlock
         *      of ->siglock provides a memory barrier.
         *
         * do_exit:
         *      The caller holds mm->mmap_sem. This means that the task which
         *      uses this mm can't pass exit_mm(), so it can't exit or clear
         *      its ->mm.
         *
         * de_thread:
         *      It does list_replace_rcu(&leader->tasks, &current->tasks),
         *      we must see either old or new leader, this does not matter.
         *      However, it can change p->sighand, so lock_task_sighand(p)
         *      must be used. Since p->mm != NULL and we hold ->mmap_sem
         *      it can't fail.
         *
         *      Note also that "g" can be the old leader with ->mm == NULL
         *      and already unhashed and thus removed from ->thread_group.
         *      This is OK, __unhash_process()->list_del_rcu() does not
         *      clear the ->next pointer, we will find the new leader via
         *      next_thread().
         */
        rcu_read_lock();
        for_each_process(g) {
                if (g == tsk->group_leader)
                        continue;
                if (g->flags & PF_KTHREAD)
                        continue;
                p = g;
                do {
                        if (p->mm) {
                                if (unlikely(p->mm == mm)) {
                                        lock_task_sighand(p, &flags);
                                        nr += zap_process(p, exit_code);
                                        unlock_task_sighand(p, &flags);
                                }
                                break;
                        }
                } while_each_thread(g, p);
        }
        rcu_read_unlock();
done:
        atomic_set(&core_state->nr_threads, nr);
        return nr;
}

static int coredump_wait(int exit_code, struct core_state *core_state)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        int core_waiters = -EBUSY;

        init_completion(&core_state->startup);
        core_state->dumper.task = tsk;
        core_state->dumper.next = NULL;

        down_write(&mm->mmap_sem);
        if (!mm->core_state)
                core_waiters = zap_threads(tsk, mm, core_state, exit_code);
        up_write(&mm->mmap_sem);

        if (core_waiters > 0) {
                struct core_thread *ptr;

                wait_for_completion(&core_state->startup);
                /*
                 * Wait for all the threads to become inactive, so that
                 * all the thread context (extended register state, like
                 * fpu etc) gets copied to the memory.
                 */
                ptr = core_state->dumper.next;
                while (ptr != NULL) {
                        wait_task_inactive(ptr->task, 0);
                        ptr = ptr->next;
                }
        }

        return core_waiters;
}

static void coredump_finish(struct mm_struct *mm)
{
        struct core_thread *curr, *next;
        struct task_struct *task;

        next = mm->core_state->dumper.next;
        while ((curr = next) != NULL) {
                next = curr->next;
                task = curr->task;
                /*
                 * see exit_mm(), curr->task must not see
                 * ->task == NULL before we read ->next.
                 */
                smp_mb();
                curr->task = NULL;
                wake_up_process(task);
        }

        mm->core_state = NULL;
}

static void wait_for_dump_helpers(struct file *file)
{
        struct pipe_inode_info *pipe;

        pipe = file->f_path.dentry->d_inode->i_pipe;

        pipe_lock(pipe);
        pipe->readers++;
        pipe->writers--;

        while ((pipe->readers > 1) && (!signal_pending(current))) {
                wake_up_interruptible_sync(&pipe->wait);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                pipe_wait(pipe);
        }

        pipe->readers--;
        pipe->writers++;
        pipe_unlock(pipe);
}

/*
 * umh_pipe_setup
 * helper function to customize the process used
 * to collect the core in userspace. Specifically
 * it sets up a pipe and installs it as fd 0 (stdin)
 * for the process. Returns 0 on success, or a
 * negative error code on failure.
 * Note that it also sets the core limit to 1. This
 * is a special value that we use to trap recursive
 * core dumps
 */
static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
        struct file *files[2];
        struct coredump_params *cp = (struct coredump_params *)info->data;
        int err = create_pipe_files(files, 0);
        if (err)
                return err;

        cp->file = files[1];

        replace_fd(0, files[0], 0);
        /* and disallow core files too */
        current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

        return 0;
}
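
/*
 * Because umh_pipe_setup() installs the pipe's read end as fd 0, a
 * helper registered with a pattern such as (hypothetical path)
 *
 *	core_pattern = "|/usr/local/bin/core-collector %p"
 *
 * simply reads the core image from its stdin until EOF; the kernel
 * writes the dump into cp->file, the write end, from ->core_dump().
 */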

void do_coredump(long signr, int exit_code, struct pt_regs *regs)
{
        struct core_state core_state;
        struct core_name cn;
        struct mm_struct *mm = current->mm;
        struct linux_binfmt *binfmt;
        const struct cred *old_cred;
        struct cred *cred;
        int retval = 0;
        int flag = 0;
        int ispipe;
        struct files_struct *displaced;
        bool need_nonrelative = false;
        static atomic_t core_dump_count = ATOMIC_INIT(0);
        struct coredump_params cprm = {
                .signr = signr,
                .regs = regs,
                .limit = rlimit(RLIMIT_CORE),
                /*
                 * We must use the same mm->flags while dumping core to avoid
                 * inconsistency of bit flags, since this flag is not protected
                 * by any locks.
                 */
                .mm_flags = mm->flags,
        };

        audit_core_dumps(signr);

        binfmt = mm->binfmt;
        if (!binfmt || !binfmt->core_dump)
                goto fail;
        if (!__get_dumpable(cprm.mm_flags))
                goto fail;

        cred = prepare_creds();
        if (!cred)
                goto fail;
        /*
         * We cannot trust fsuid as being the "true" uid of the process
         * nor do we know its entire history. We only know it was tainted
         * so we dump it as root in mode 2, and only into a controlled
         * environment (pipe handler or fully qualified path).
         */
        if (__get_dumpable(cprm.mm_flags) == SUID_DUMPABLE_SAFE) {
                /* Setuid core dump mode */
                flag = O_EXCL;          /* Stop rewrite attacks */
                cred->fsuid = GLOBAL_ROOT_UID;  /* Dump root private */
                need_nonrelative = true;
        }

        retval = coredump_wait(exit_code, &core_state);
        if (retval < 0)
                goto fail_creds;

        old_cred = override_creds(cred);

        /*
         * Clear any false indication of pending signals that might
         * be seen by the filesystem code called to write the core file.
         */
        clear_thread_flag(TIF_SIGPENDING);

        ispipe = format_corename(&cn, signr);

        if (ispipe) {
                int dump_count;
                char **helper_argv;

                if (ispipe < 0) {
                        printk(KERN_WARNING "format_corename failed\n");
                        printk(KERN_WARNING "Aborting core\n");
                        goto fail_corename;
                }

                if (cprm.limit == 1) {
                        /* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
                         *
                         * Normally core limits are irrelevant to pipes, since
                         * we're not writing to the file system, but we use
                         * cprm.limit of 1 here as a special value, this is a
                         * consistent way to catch recursive crashes.
                         * We can still crash if the core_pattern binary sets
                         * RLIMIT_CORE = !1, but it runs as root, and can do
                         * lots of stupid things.
                         *
                         * Note that we use task_tgid_vnr here to grab the pid
                         * of the process group leader. That way we get the
                         * right pid if a thread in a multi-threaded
                         * core_pattern process dies.
                         */
                        printk(KERN_WARNING
                                "Process %d(%s) has RLIMIT_CORE set to 1\n",
                                task_tgid_vnr(current), current->comm);
                        printk(KERN_WARNING "Aborting core\n");
                        goto fail_unlock;
                }
                cprm.limit = RLIM_INFINITY;

                dump_count = atomic_inc_return(&core_dump_count);
                if (core_pipe_limit && (core_pipe_limit < dump_count)) {
                        printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
                               task_tgid_vnr(current), current->comm);
                        printk(KERN_WARNING "Skipping core dump\n");
                        goto fail_dropcount;
                }

                helper_argv = argv_split(GFP_KERNEL, cn.corename+1, NULL);
                if (!helper_argv) {
                        printk(KERN_WARNING "%s failed to allocate memory\n",
                               __func__);
                        goto fail_dropcount;
                }

                retval = call_usermodehelper_fns(helper_argv[0], helper_argv,
                                        NULL, UMH_WAIT_EXEC, umh_pipe_setup,
                                        NULL, &cprm);
                argv_free(helper_argv);
                if (retval) {
                        printk(KERN_INFO "Core dump to %s pipe failed\n",
                               cn.corename);
                        goto close_fail;
                }
        } else {
                struct inode *inode;

                if (cprm.limit < binfmt->min_coredump)
                        goto fail_unlock;

                if (need_nonrelative && cn.corename[0] != '/') {
                        printk(KERN_WARNING "Pid %d(%s) can only dump core "
                                "to fully qualified path!\n",
                                task_tgid_vnr(current), current->comm);
                        printk(KERN_WARNING "Skipping core dump\n");
                        goto fail_unlock;
                }

                cprm.file = filp_open(cn.corename,
                                O_CREAT | O_RDWR | O_NOFOLLOW | O_LARGEFILE | flag,
                                0600);
                if (IS_ERR(cprm.file))
                        goto fail_unlock;

                inode = cprm.file->f_path.dentry->d_inode;
                if (inode->i_nlink > 1)
                        goto close_fail;
                if (d_unhashed(cprm.file->f_path.dentry))
                        goto close_fail;
                /*
                 * AK: actually i see no reason to not allow this for named
                 * pipes etc, but keep the previous behaviour for now.
                 */
                if (!S_ISREG(inode->i_mode))
                        goto close_fail;
                /*
                 * Don't allow local users to get cute and trick others into
                 * dumping core into their pre-created files.
                 */
                if (!uid_eq(inode->i_uid, current_fsuid()))
                        goto close_fail;
                if (!cprm.file->f_op || !cprm.file->f_op->write)
                        goto close_fail;
                if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
                        goto close_fail;
        }

        /* get us an unshared descriptor table; almost always a no-op */
        retval = unshare_files(&displaced);
        if (retval)
                goto close_fail;
        if (displaced)
                put_files_struct(displaced);
        retval = binfmt->core_dump(&cprm);
        if (retval)
                current->signal->group_exit_code |= 0x80;

        if (ispipe && core_pipe_limit)
                wait_for_dump_helpers(cprm.file);
close_fail:
        if (cprm.file)
                filp_close(cprm.file, NULL);
fail_dropcount:
        if (ispipe)
                atomic_dec(&core_dump_count);
fail_unlock:
        kfree(cn.corename);
fail_corename:
        coredump_finish(mm);
        revert_creds(old_cred);
fail_creds:
        put_cred(cred);
fail:
        return;
}

/*
 * Core dumping helper functions. These are the only things you should
 * do on a core-file: use only these functions to write out all the
 * necessary info.
 */
int dump_write(struct file *file, const void *addr, int nr)
{
        return access_ok(VERIFY_READ, addr, nr) &&
                file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}
EXPORT_SYMBOL(dump_write);

int dump_seek(struct file *file, loff_t off)
{
        int ret = 1;

        if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
                if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
                        return 0;
        } else {
                char *buf = (char *)get_zeroed_page(GFP_KERNEL);

                if (!buf)
                        return 0;
                while (off > 0) {
                        unsigned long n = off;

                        if (n > PAGE_SIZE)
                                n = PAGE_SIZE;
                        if (!dump_write(file, buf, n)) {
                                ret = 0;
                                break;
                        }
                        off -= n;
                }
                free_page((unsigned long)buf);
        }
        return ret;
}
EXPORT_SYMBOL(dump_seek);
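
/*
 * A binfmt ->core_dump() implementation alternates these two helpers:
 * dump_write() for data it already has in memory, dump_seek() to skip
 * over holes.  A minimal sketch of such a caller (example_emit and its
 * parameters are hypothetical, kept out of the build):
 */
#if 0
static int example_emit(struct coredump_params *cprm,
                        const void *hdr, int hdr_len, loff_t hole)
{
        /*
         * Write an in-memory header, then pad with 'hole' bytes of
         * zeroes.  Assumes the usual set_fs(KERNEL_DS) context that
         * ->core_dump() handlers run in, so access_ok() in dump_write()
         * accepts kernel pointers.
         */
        if (!dump_write(cprm->file, hdr, hdr_len))
                return 0;
        return dump_seek(cprm->file, hole);
}
#endif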