// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/utsname.h>
#include <linux/coredump.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/cred.h>
#include <linux/dax.h>
#include <linux/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>

#ifndef user_long_t
#define user_long_t long
#endif
#ifndef user_siginfo_t
#define user_siginfo_t siginfo_t
#endif

/* That's for binfmt_elf_fdpic to deal with */
#ifndef elf_check_fdpic
#define elf_check_fdpic(ex) false
#endif

static int load_elf_binary(struct linux_binprm *bprm);

#ifdef CONFIG_USELIB
static int load_elf_library(struct file *);
#else
#define load_elf_library NULL
#endif

/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#ifdef CONFIG_ELF_CORE
static int elf_core_dump(struct coredump_params *cprm);
#else
#define elf_core_dump	NULL
#endif

#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN	PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))

static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE,
};

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)

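/*
 * Map anonymous pages to cover the gap between the end of the file
 * mapping and the page-aligned end of the bss, then record that end
 * as the initial program break (mm->start_brk/brk).
 */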
static int set_brk(unsigned long start, unsigned long end, int prot)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		/*
		 * Map the last of the bss segment.
		 * If the header is requesting these pages to be
		 * executable, honour that (ppc32 needs this).
		 */
		int error = vm_brk_flags(start, end - start,
				prot & PROT_EXEC ? VM_EXEC : 0);
		if (error)
			return error;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}

/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory.
 */
static int padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		if (clear_user((void __user *) elf_bss, nbyte))
			return -EFAULT;
	}
	return 0;
}

/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
	elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
	old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif

#ifndef ELF_BASE_PLATFORM
/*
 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
 * will be copied to the user stack in the same manner as AT_PLATFORM.
 */
#define ELF_BASE_PLATFORM NULL
#endif

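/*
 * Lay out the initial user stack image for the new program: argc, the
 * argv and envp pointer arrays, and the ELF auxiliary vector, together
 * with the platform strings and AT_RANDOM bytes those entries point at.
 */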
static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
		unsigned long load_addr, unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	elf_addr_t __user *u_base_platform;
	elf_addr_t __user *u_rand_bytes;
	const char *k_platform = ELF_PLATFORM;
	const char *k_base_platform = ELF_BASE_PLATFORM;
	unsigned char k_rand_bytes[16];
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	const struct cred *cred = current_cred();
	struct vm_area_struct *vma;

	/*
	 * In some cases (e.g. Hyper-Threading), we want to avoid L1
	 * evictions by the processes running on the same package. One
	 * thing we can do is to shuffle the initial stack for them.
	 */

	p = arch_align_stack(p);

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/*
	 * If this architecture has a "base" platform capability
	 * string, copy it to userspace.
	 */
	u_base_platform = NULL;
	if (k_base_platform) {
		size_t len = strlen(k_base_platform) + 1;

		u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_base_platform, k_base_platform, len))
			return -EFAULT;
	}

	/*
	 * Generate 16 random bytes for userspace PRNG seeding.
	 */
	get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
	u_rand_bytes = (elf_addr_t __user *)
		       STACK_ALLOC(p, sizeof(k_rand_bytes));
	if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
		return -EFAULT;

	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *)current->mm->saved_auxv;
	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
	do { \
		elf_info[ei_index++] = id; \
		elf_info[ei_index++] = val; \
	} while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
	 * ARCH_DLINFO changes
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
	NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
	NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
	NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
	NEW_AUX_ENT(AT_SECURE, bprm->secureexec);
	NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
#ifdef ELF_HWCAP2
	NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
#endif
	NEW_AUX_ENT(AT_EXECFN, bprm->exec);
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (k_base_platform) {
		NEW_AUX_ENT(AT_BASE_PLATFORM,
			    (elf_addr_t)(unsigned long)u_base_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
	}
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

	/* And advance past the AT_NULL entry. */
	ei_index += 2;

	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1) + 1;
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif

	/*
	 * Grow the stack manually; some architectures have a limit on how
	 * far ahead a user-space access may be in order to grow the stack.
	 */
	vma = find_extend_vma(current->mm, bprm->p);
	if (!vma)
		return -EFAULT;

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;

	/* Populate list of argv pointers back to argv strings. */
	p = current->mm->arg_end = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (__put_user(0, sp++))
		return -EFAULT;
	current->mm->arg_end = p;

	/* Populate list of envp pointers back to envp strings. */
	current->mm->env_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (__put_user(0, sp++))
		return -EFAULT;
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}

#ifndef elf_map

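/*
 * Generic mapping of one PT_LOAD segment; architectures may supply
 * their own elf_map() instead.  Addresses and offsets are truncated to
 * ELF_MIN_ALIGN boundaries, and a non-zero total_size makes the first
 * mapping reserve room for the whole image (see below).
 */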
static unsigned long elf_map(struct file *filep, unsigned long addr,
		const struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	unsigned long map_addr;
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (!size)
		return addr;

	/*
	 * total_size is the size of the ELF (interpreter) image.
	 * The _first_ mmap needs to know the full size, otherwise
	 * randomization might put this image into an overlapping
	 * position with the ELF binary image. (since size < total_size)
	 * So we first map the 'big' image - and unmap the remainder at
	 * the end. (which unmap is needed for ELF images with holes.)
	 */
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			vm_munmap(map_addr+size, total_size-size);
	} else
		map_addr = vm_mmap(filep, addr, size, prot, type, off);

	if ((type & MAP_FIXED_NOREPLACE) &&
	    PTR_ERR((void *)map_addr) == -EEXIST)
		pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n",
			task_pid_nr(current), current->comm, (void *)addr);

	return(map_addr);
}

#endif /* !elf_map */

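/*
 * Size of the address range spanned by all PT_LOAD segments: from the
 * page-aligned start of the first one to the end of the last one.
 * Returns 0 if there is no PT_LOAD header at all.
 */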
static unsigned long total_mapping_size(const struct elf_phdr *cmds, int nr)
{
	int i, first_idx = -1, last_idx = -1;

	for (i = 0; i < nr; i++) {
		if (cmds[i].p_type == PT_LOAD) {
			last_idx = i;
			if (first_idx == -1)
				first_idx = i;
		}
	}
	if (first_idx == -1)
		return 0;

	return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
				ELF_PAGESTART(cmds[first_idx].p_vaddr);
}

/**
 * load_elf_phdrs() - load ELF program headers
 * @elf_ex:   ELF header of the binary whose program headers should be loaded
 * @elf_file: the opened ELF binary file
 *
 * Loads ELF program headers from the binary file elf_file, which has the ELF
 * header pointed to by elf_ex, into a newly allocated array. The caller is
 * responsible for freeing the allocated data. Returns NULL upon failure.
 */
static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
				       struct file *elf_file)
{
	struct elf_phdr *elf_phdata = NULL;
	int retval, err = -1;
	loff_t pos = elf_ex->e_phoff;
	unsigned int size;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;

	/* Sanity check the number of program headers... */
	/* ...and their total size. */
	size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
	if (size == 0 || size > 65536 || size > ELF_MIN_ALIGN)
		goto out;

	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	/* Read in the program headers */
	retval = kernel_read(elf_file, elf_phdata, size, &pos);
	if (retval != size) {
		err = (retval < 0) ? retval : -EIO;
		goto out;
	}

	/* Success! */
	err = 0;
out:
	if (err) {
		kfree(elf_phdata);
		elf_phdata = NULL;
	}
	return elf_phdata;
}

#ifndef CONFIG_ARCH_BINFMT_ELF_STATE

/**
 * struct arch_elf_state - arch-specific ELF loading state
 *
 * This structure is used to preserve architecture specific data during
 * the loading of an ELF file, throughout the checking of architecture
 * specific ELF headers & through to the point where the ELF load is
 * known to be proceeding (ie. SET_PERSONALITY).
 *
 * This implementation is a dummy for architectures which require no
 * specific state.
 */
struct arch_elf_state {
};

#define INIT_ARCH_ELF_STATE {}

/**
 * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
 * @ehdr:	The main ELF header
 * @phdr:	The program header to check
 * @elf:	The open ELF file
 * @is_interp:	True if the phdr is from the interpreter of the ELF being
 *		loaded, else false.
 * @state:	Architecture-specific state preserved throughout the process
 *		of loading the ELF.
 *
 * Inspects the program header phdr to validate its correctness and/or
 * suitability for the system. Called once per ELF program header in the
 * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
 * interpreter.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
				   struct elf_phdr *phdr,
				   struct file *elf, bool is_interp,
				   struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}

/**
 * arch_check_elf() - check an ELF executable
 * @ehdr:	The main ELF header
 * @has_interp:	True if the ELF has an interpreter, else false.
 * @interp_ehdr: The interpreter's ELF header
 * @state:	Architecture-specific state preserved throughout the process
 *		of loading the ELF.
 *
 * Provides a final opportunity for architecture code to reject the loading
 * of the ELF & cause an exec syscall to return an error. This is called after
 * all program headers to be checked by arch_elf_pt_proc have been.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
				 struct elfhdr *interp_ehdr,
				 struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}

#endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */

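/* Translate ELF segment flags (PF_R/W/X) into mmap protection bits. */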
static inline int make_prot(u32 p_flags)
{
	int prot = 0;

	if (p_flags & PF_R)
		prot |= PROT_READ;
	if (p_flags & PF_W)
		prot |= PROT_WRITE;
	if (p_flags & PF_X)
		prot |= PROT_EXEC;
	return prot;
}

/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter, unsigned long *interp_map_addr,
		unsigned long no_base, struct elf_phdr *interp_elf_phdata)
{
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	int bss_prot = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;
	int i;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex) ||
	    elf_check_fdpic(interp_elf_ex))
		goto out;
	if (!interpreter->f_op->mmap)
		goto out;

	total_size = total_mapping_size(interp_elf_phdata,
					interp_elf_ex->e_phnum);
	if (!total_size) {
		error = -EINVAL;
		goto out;
	}

	eppnt = interp_elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = make_prot(eppnt->p_flags);
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED_NOREPLACE;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)
				load_addr = -vaddr;

			map_addr = elf_map(interpreter, load_addr + vaddr,
					eppnt, elf_prot, elf_type, total_size);
			total_size = 0;
			if (!*interp_map_addr)
				*interp_map_addr = map_addr;
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out;
			}

			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
			if (k > last_bss) {
				last_bss = k;
				bss_prot = elf_prot;
			}
		}
	}

	/*
	 * Now fill out the bss section: first pad the last page from
	 * the file up to the page boundary, and zero it from elf_bss
	 * up to the end of the page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out;
	}
	/*
	 * Next, align both the file and mem bss up to the page size,
	 * since this is where elf_bss was just zeroed up to, and where
	 * last_bss will end after the vm_brk_flags() below.
	 */
	elf_bss = ELF_PAGEALIGN(elf_bss);
	last_bss = ELF_PAGEALIGN(last_bss);
	/* Finally, if there is still more bss to allocate, do it. */
	if (last_bss > elf_bss) {
		error = vm_brk_flags(elf_bss, last_bss - elf_bss,
				bss_prot & PROT_EXEC ? VM_EXEC : 0);
		if (error)
			goto out;
	}

	error = load_addr;
out:
	return error;
}

/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

static int load_elf_binary(struct linux_binprm *bprm)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
	unsigned long elf_bss, elf_brk;
	int bss_prot = 0;
	int retval, i;
	unsigned long elf_entry;
	unsigned long interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc __maybe_unused = 0;
	int executable_stack = EXSTACK_DEFAULT;
	struct {
		struct elfhdr elf_ex;
		struct elfhdr interp_elf_ex;
	} *loc;
	struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
	struct pt_regs *regs;

	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}

	/* Get the exec-header */
	loc->elf_ex = *((struct elfhdr *)bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&loc->elf_ex))
		goto out;
	if (elf_check_fdpic(&loc->elf_ex))
		goto out;
	if (!bprm->file->f_op->mmap)
		goto out;

	elf_phdata = load_elf_phdrs(&loc->elf_ex, bprm->file);
	if (!elf_phdata)
		goto out;

	elf_ppnt = elf_phdata;
	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		char *elf_interpreter;
		loff_t pos;

		if (elf_ppnt->p_type != PT_INTERP)
			continue;

		/*
		 * This is the program interpreter used for shared libraries -
		 * for now assume that this is an a.out format binary.
		 */
		retval = -ENOEXEC;
		if (elf_ppnt->p_filesz > PATH_MAX || elf_ppnt->p_filesz < 2)
			goto out_free_ph;

		retval = -ENOMEM;
		elf_interpreter = kmalloc(elf_ppnt->p_filesz, GFP_KERNEL);
		if (!elf_interpreter)
			goto out_free_ph;

		pos = elf_ppnt->p_offset;
		retval = kernel_read(bprm->file, elf_interpreter,
				     elf_ppnt->p_filesz, &pos);
		if (retval != elf_ppnt->p_filesz) {
			if (retval >= 0)
				retval = -EIO;
			goto out_free_interp;
		}
		/* make sure path is NULL terminated */
		retval = -ENOEXEC;
		if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
			goto out_free_interp;

		interpreter = open_exec(elf_interpreter);
		kfree(elf_interpreter);
		retval = PTR_ERR(interpreter);
		if (IS_ERR(interpreter))
			goto out_free_ph;

		/*
		 * If the binary is not readable then enforce mm->dumpable = 0
		 * regardless of the interpreter's permissions.
		 */
		would_dump(bprm, interpreter);

		/* Get the exec headers */
		pos = 0;
		retval = kernel_read(interpreter, &loc->interp_elf_ex,
				     sizeof(loc->interp_elf_ex), &pos);
		if (retval != sizeof(loc->interp_elf_ex)) {
			if (retval >= 0)
				retval = -EIO;
			goto out_free_dentry;
		}

		break;

out_free_interp:
		kfree(elf_interpreter);
		goto out_free_ph;
	}

	elf_ppnt = elf_phdata;
	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
		switch (elf_ppnt->p_type) {
		case PT_GNU_STACK:
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;

		case PT_LOPROC ... PT_HIPROC:
			retval = arch_elf_pt_proc(&loc->elf_ex, elf_ppnt,
						  bprm->file, false,
						  &arch_state);
			if (retval)
				goto out_free_dentry;
			break;
		}

	/* Some simple consistency checks for the interpreter */
	if (interpreter) {
		retval = -ELIBBAD;
		/* Not an ELF interpreter */
		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			goto out_free_dentry;
		/* Verify the interpreter has a valid arch */
		if (!elf_check_arch(&loc->interp_elf_ex) ||
		    elf_check_fdpic(&loc->interp_elf_ex))
			goto out_free_dentry;

		/* Load the interpreter program headers */
		interp_elf_phdata = load_elf_phdrs(&loc->interp_elf_ex,
						   interpreter);
		if (!interp_elf_phdata)
			goto out_free_dentry;

		/* Pass PT_LOPROC..PT_HIPROC headers to arch code */
		elf_ppnt = interp_elf_phdata;
		for (i = 0; i < loc->interp_elf_ex.e_phnum; i++, elf_ppnt++)
			switch (elf_ppnt->p_type) {
			case PT_LOPROC ... PT_HIPROC:
				retval = arch_elf_pt_proc(&loc->interp_elf_ex,
							  elf_ppnt, interpreter,
							  true, &arch_state);
				if (retval)
					goto out_free_dentry;
				break;
			}
	}

	/*
	 * Allow arch code to reject the ELF at this point, whilst it's
	 * still possible to return an error to the code that invoked
	 * the exec syscall.
	 */
	retval = arch_check_elf(&loc->elf_ex,
				!!interpreter, &loc->interp_elf_ex,
				&arch_state);
	if (retval)
		goto out_free_dentry;

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY2(loc->elf_ex, &arch_state);
	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;

	setup_new_exec(bprm);
	install_exec_creds(bprm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0)
		goto out_free_dentry;

	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	/* Now we do a little grungy work by mmapping the ELF image into
	   the correct location in memory. */
	for(i = 0, elf_ppnt = elf_phdata;
	    i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot, elf_flags, elf_fixed = MAP_FIXED_NOREPLACE;
		unsigned long k, vaddr;
		unsigned long total_size = 0;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk(elf_bss + load_bias,
					 elf_brk + load_bias,
					 bss_prot);
			if (retval)
				goto out_free_dentry;
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}

			/*
			 * Some binaries have overlapping elf segments and then
			 * we have to forcefully map over an existing mapping
			 * e.g. over this newly established brk mapping.
			 */
			elf_fixed = MAP_FIXED;
		}

		elf_prot = make_prot(elf_ppnt->p_flags);

		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		/*
		 * If we are loading ET_EXEC or we have already performed
		 * the ET_DYN load_addr calculations, proceed normally.
		 */
		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= elf_fixed;
		} else if (loc->elf_ex.e_type == ET_DYN) {
			/*
			 * This logic is run once for the first LOAD Program
			 * Header for ET_DYN binaries to calculate the
			 * randomization (load_bias) for all the LOAD
			 * Program Headers, and to calculate the entire
			 * size of the ELF mapping (total_size). (Note that
			 * load_addr_set is set to true later once the
			 * initial mapping is performed.)
			 *
			 * There are effectively two types of ET_DYN
			 * binaries: programs (i.e. PIE: ET_DYN with INTERP)
			 * and loaders (ET_DYN without INTERP, since they
			 * _are_ the ELF interpreter). The loaders must
			 * be loaded away from programs since the program
			 * may otherwise collide with the loader (especially
			 * for ET_EXEC which does not have a randomized
			 * position). For example to handle invocations of
			 * "./ld.so someprog" to test out a new version of
			 * the loader, the subsequent program that the
			 * loader loads must avoid the loader itself, so
			 * they cannot share the same load range. Sufficient
			 * room for the brk must be allocated with the
			 * loader as well, since brk must be available with
			 * the loader.
			 *
			 * Therefore, programs are loaded offset from
			 * ELF_ET_DYN_BASE and loaders are loaded into the
			 * independently randomized mmap region (0 load_bias
			 * without MAP_FIXED).
			 */
			if (interpreter) {
				load_bias = ELF_ET_DYN_BASE;
				if (current->flags & PF_RANDOMIZE)
					load_bias += arch_mmap_rnd();
				elf_flags |= elf_fixed;
			} else
				load_bias = 0;

			/*
			 * Since load_bias is used for all subsequent loading
			 * calculations, we must lower it by the first vaddr
			 * so that the remaining calculations based on the
			 * ELF vaddrs will be correctly offset. The result
			 * is then page aligned.
			 */
			load_bias = ELF_PAGESTART(load_bias - vaddr);

			total_size = total_mapping_size(elf_phdata,
							loc->elf_ex.e_phnum);
			if (!total_size) {
				retval = -EINVAL;
				goto out_free_dentry;
			}
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags, total_size);
		if (BAD_ADDR(error)) {
			retval = IS_ERR((void *)error) ?
				PTR_ERR((void*)error) : -EINVAL;
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (loc->elf_ex.e_type == ET_DYN) {
				load_bias += error -
				             ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			retval = -EINVAL;
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk) {
			bss_prot = elf_prot;
			elf_brk = k;
		}
	}

	loc->elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk, bss_prot);
	if (retval)
		goto out_free_dentry;
	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}

	if (interpreter) {
		unsigned long interp_map_addr = 0;

		elf_entry = load_elf_interp(&loc->interp_elf_ex,
					    interpreter,
					    &interp_map_addr,
					    load_bias, interp_elf_phdata);
		if (!IS_ERR((void *)elf_entry)) {
			/*
			 * load_elf_interp() returns relocation
			 * adjustment
			 */
			interp_load_addr = elf_entry;
			elf_entry += loc->interp_elf_ex.e_entry;
		}
		if (BAD_ADDR(elf_entry)) {
			retval = IS_ERR((void *)elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
	} else {
		elf_entry = loc->elf_ex.e_entry;
		if (BAD_ADDR(elf_entry)) {
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}

	kfree(interp_elf_phdata);
	kfree(elf_phdata);

	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, !!interpreter);
	if (retval < 0)
		goto out;
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	retval = create_elf_tables(bprm, &loc->elf_ex,
			  load_addr, interp_load_addr);
	if (retval < 0)
		goto out;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

	if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
		/*
		 * For architectures with ELF randomization, when executing
		 * a loader directly (i.e. no interpreter listed in ELF
		 * headers), move the brk area out of the mmap region
		 * (since it grows up, and may collide early with the stack
		 * growing down), and into the unused ELF_ET_DYN_BASE region.
		 */
		if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) && !interpreter)
			current->mm->brk = current->mm->start_brk =
				ELF_ET_DYN_BASE;

		current->mm->brk = current->mm->start_brk =
			arch_randomize_brk(current->mm);
#ifdef compat_brk_randomized
		current->brk_randomized = 1;
#endif
	}

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior. Sigh. */
		error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
	}

	regs = current_pt_regs();
#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example).  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	finalize_exec(bprm);
	start_thread(regs, elf_entry, bprm->p);
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;

	/* error cleanup */
out_free_dentry:
	kfree(interp_elf_phdata);
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}

#ifdef CONFIG_USELIB
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;
	loff_t pos = 0;

	error = -ENOEXEC;
	retval = kernel_read(file, &elf_ex, sizeof(elf_ex), &pos);
	if (retval != sizeof(elf_ex))
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op->mmap)
		goto out;
	if (elf_check_fdpic(&elf_ex))
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	pos = elf_ex.e_phoff;
	retval = kernel_read(file, eppnt, j, &pos);
	if (retval != j)
		goto out_free_ph;

	for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	error = vm_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr);
	bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr);
	if (bss > len) {
		error = vm_brk(len, bss - len);
		if (error)
			goto out_free_ph;
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
#endif /* #ifdef CONFIG_USELIB */

#ifdef CONFIG_ELF_CORE
/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */

/*
 * The purpose of always_dump_vma() is to make sure that special kernel mappings
 * that are useful for post-mortem analysis are included in every core dump.
 * In that way we ensure that the core dump is fully interpretable later
 * without matching up the same kernel and hardware config to see what PC values
 * meant. These special mappings include - vDSO, vsyscall, and other
 * architecture specific mappings
 */
static bool always_dump_vma(struct vm_area_struct *vma)
{
	/* Any vsyscall mappings? */
	if (vma == get_gate_vma(vma->vm_mm))
		return true;

	/*
	 * Assume that all vmas with a .name op should always be dumped.
	 * If this changes, a new vm_ops field can easily be added.
	 */
	if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
		return true;

	/*
	 * arch_vma_name() returns non-NULL for special architecture mappings,
	 * such as vDSO sections.
	 */
	if (arch_vma_name(vma))
		return true;

	return false;
}

/*
 * Decide what to dump of a segment, part, all or none.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))

	/* always dump the vdso and vsyscall sections */
	if (always_dump_vma(vma))
		goto whole;

	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	/* support for DAX */
	if (vma_is_dax(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
			goto whole;
		return 0;
	}

	/* Hugetlb memory check */
	if (vma->vm_flags & VM_HUGETLB) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
			goto whole;
		return 0;
	}

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & VM_IO)
		return 0;

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		if (file_inode(vma->vm_file)->i_nlink == 0 ?
		    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to.  */
	if (vma->anon_vma && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;

	/*
	 * If this looks like the beginning of a DSO or executable mapping,
	 * check for an ELF header.  If we find one, dump the first page to
	 * aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) &&
	    vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
		u32 __user *header = (u32 __user *) vma->vm_start;
		u32 word;
		mm_segment_t fs = get_fs();
		/*
		 * Doing it this way gets the constant folded by GCC.
		 */
		union {
			u32 cmp;
			char elfmag[SELFMAG];
		} magic;
		BUILD_BUG_ON(SELFMAG != sizeof word);
		magic.elfmag[EI_MAG0] = ELFMAG0;
		magic.elfmag[EI_MAG1] = ELFMAG1;
		magic.elfmag[EI_MAG2] = ELFMAG2;
		magic.elfmag[EI_MAG3] = ELFMAG3;
		/*
		 * Switch to the user "segment" for get_user(),
		 * then put back what elf_core_dump() had in place.
		 */
		set_fs(USER_DS);
		if (unlikely(get_user(word, header)))
			word = 0;
		set_fs(fs);
		if (word == magic.cmp)
			return PAGE_SIZE;
	}

#undef FILTER

	return 0;

whole:
	return vma->vm_end - vma->vm_start;
}

/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

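/* On-file size of a note: header plus name and payload, each padded to 4. */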
static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}

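/* Emit one note to the dump: header, then 4-byte-aligned name and data. */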
static int writenote(struct memelfnote *men, struct coredump_params *cprm)
{
	struct elf_note en;
	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	return dump_emit(cprm, &en, sizeof(en)) &&
	    dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
	    dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
}

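/* Fill in the ELF file header for a core dump (ET_CORE). */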
static void fill_elf_header(struct elfhdr *elf, int segs,
			    u16 machine, u32 flags)
{
	memset(elf, 0, sizeof(*elf));

	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;

	elf->e_type = ET_CORE;
	elf->e_machine = machine;
	elf->e_version = EV_CURRENT;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_flags = flags;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
}

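/* Describe the note area with a PT_NOTE program header at @offset. */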
static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 0;
}

static void fill_note(struct memelfnote *note, const char *name, int type,
		unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
}

/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
		struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	rcu_read_lock();
	prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	prstatus->pr_pid = task_pid_vnr(p);
	prstatus->pr_pgrp = task_pgrp_vnr(p);
	prstatus->pr_sid = task_session_vnr(p);
	if (thread_group_leader(p)) {
		struct task_cputime cputime;

		/*
		 * This is the record for the group leader.  It shows the
		 * group-wide total, not its individual thread total.
		 */
		thread_group_cputime(p, &cputime);
		prstatus->pr_utime = ns_to_timeval(cputime.utime);
		prstatus->pr_stime = ns_to_timeval(cputime.stime);
	} else {
		u64 utime, stime;

		task_cputime(p, &utime, &stime);
		prstatus->pr_utime = ns_to_timeval(utime);
		prstatus->pr_stime = ns_to_timeval(stime);
	}

	prstatus->pr_cutime = ns_to_timeval(p->signal->cutime);
	prstatus->pr_cstime = ns_to_timeval(p->signal->cstime);
}

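/*
 * Fill the NT_PRPSINFO data: command line (read back from user memory),
 * pids, credentials, state and command name of the dumping process.
 */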
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	const struct cred *cred;
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	if (copy_from_user(&psinfo->pr_psargs,
		           (const char __user *)mm->arg_start, len))
		return -EFAULT;
	for(i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	rcu_read_lock();
	psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	psinfo->pr_pid = task_pid_vnr(p);
	psinfo->pr_pgrp = task_pgrp_vnr(p);
	psinfo->pr_sid = task_session_vnr(p);

	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	rcu_read_lock();
	cred = __task_cred(p);
	SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
	SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
	rcu_read_unlock();
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}

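/* The auxv was saved at exec time; walk it until AT_NULL to size the note. */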
static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
{
	elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
	int i = 0;
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);
	fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}

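/*
 * copy_siginfo_to_user() expects a user pointer, so switch to KERNEL_DS
 * while converting the siginfo into the kernel-resident csigdata buffer.
 */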
static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
		const kernel_siginfo_t *siginfo)
{
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
	set_fs(old_fs);
	fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
}

#define MAX_FILE_NOTE_SIZE (4*1024*1024)
/*
 * Format of NT_FILE note:
 *
 * long count     -- how many files are mapped
 * long page_size -- units for file_ofs
 * array of [COUNT] elements of
 *   long start
 *   long end
 *   long file_ofs
 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
 */
static int fill_files_note(struct memelfnote *note)
{
	struct vm_area_struct *vma;
	unsigned count, size, names_ofs, remaining, n;
	user_long_t *data;
	user_long_t *start_end_ofs;
	char *name_base, *name_curpos;

	/* *Estimated* file count and total data size needed */
	count = current->mm->map_count;
	if (count > UINT_MAX / 64)
		return -EINVAL;
	size = count * 64;

	names_ofs = (2 + 3 * count) * sizeof(data[0]);
 alloc:
	if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
		return -EINVAL;
	size = round_up(size, PAGE_SIZE);
	data = kvmalloc(size, GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(data))
		return -ENOMEM;

	start_end_ofs = data + 2;
	name_base = name_curpos = ((char *)data) + names_ofs;
	remaining = size - names_ofs;
	count = 0;
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct file *file;
		const char *filename;

		file = vma->vm_file;
		if (!file)
			continue;
		filename = file_path(file, name_curpos, remaining);
		if (IS_ERR(filename)) {
			if (PTR_ERR(filename) == -ENAMETOOLONG) {
				kvfree(data);
				size = size * 5 / 4;
				goto alloc;
			}
			continue;
		}

		/* file_path() fills at the end, move name down */
		/* n = strlen(filename) + 1: */
		n = (name_curpos + remaining) - filename;
		remaining = filename - name_curpos;
		memmove(name_curpos, filename, n);
		name_curpos += n;

		*start_end_ofs++ = vma->vm_start;
		*start_end_ofs++ = vma->vm_end;
		*start_end_ofs++ = vma->vm_pgoff;
		count++;
	}

	/* Now we know exact count of files, can store it */
	data[0] = count;
	data[1] = PAGE_SIZE;
	/*
	 * Count usually is less than current->mm->map_count,
	 * we need to move filenames down.
	 */
	n = current->mm->map_count - count;
	if (n != 0) {
		unsigned shift_bytes = n * 3 * sizeof(data[0]);
		memmove(name_base - shift_bytes, name_base,
			name_curpos - name_base);
		name_curpos -= shift_bytes;
	}

	size = name_curpos - (char *)data;
	fill_note(note, "CORE", NT_FILE, size, data);
	return 0;
}

#ifdef CORE_DUMP_USE_REGSET
#include <linux/regset.h>

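/*
 * Per-thread data for the regset-based dumper: the NT_PRSTATUS note
 * plus one potential note per additional regset, chained per thread.
 */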
1667struct elf_thread_core_info {
1668 struct elf_thread_core_info *next;
1669 struct task_struct *task;
1670 struct elf_prstatus prstatus;
1671 struct memelfnote notes[0];
1672};
1673
1674struct elf_note_info {
1675 struct elf_thread_core_info *thread;
1676 struct memelfnote psinfo;
49ae4d4b 1677 struct memelfnote signote;
4206d3aa 1678 struct memelfnote auxv;
2aa362c4 1679 struct memelfnote files;
49ae4d4b 1680 user_siginfo_t csigdata;
4206d3aa
RM
1681 size_t size;
1682 int thread_notes;
1683};
1684
d31472b6
RM
1685/*
1686 * When a regset has a writeback hook, we call it on each thread before
1687 * dumping user memory. On register window machines, this makes sure the
1688 * user memory backing the register data is up to date before we read it.
1689 */
1690static void do_thread_regset_writeback(struct task_struct *task,
1691 const struct user_regset *regset)
1692{
1693 if (regset->writeback)
1694 regset->writeback(task, regset, 1);
1695}

#ifndef PRSTATUS_SIZE
#define PRSTATUS_SIZE(S, R) sizeof(S)
#endif

#ifndef SET_PR_FPVALID
#define SET_PR_FPVALID(S, V, R) ((S)->pr_fpvalid = (V))
#endif

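/*
 * Both macros take the size of regset 0 as an extra argument so that an
 * architecture can override them, e.g. to pick a compat-sized prstatus
 * layout when a 64-bit kernel dumps a 32-bit task; the generic versions
 * above simply ignore that argument.
 */
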
static int fill_thread_core_info(struct elf_thread_core_info *t,
				 const struct user_regset_view *view,
				 long signr, size_t *total)
{
	unsigned int i;
	unsigned int regset0_size = regset_size(t->task, &view->regsets[0]);

	/*
	 * NT_PRSTATUS is the one special case, because the regset data
	 * goes into the pr_reg field inside the note contents, rather
	 * than being the whole note contents. We fill the rest in here.
	 * We assume that regset 0 is NT_PRSTATUS.
	 */
	fill_prstatus(&t->prstatus, t->task, signr);
	(void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset0_size,
				    &t->prstatus.pr_reg, NULL);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
		  PRSTATUS_SIZE(t->prstatus, regset0_size), &t->prstatus);
	*total += notesize(&t->notes[0]);

	do_thread_regset_writeback(t->task, &view->regsets[0]);

	/*
	 * Each other regset might generate a note too. For each regset
	 * that has no core_note_type or is inactive, we leave t->notes[i]
	 * all zero and we'll know to skip writing it later.
	 */
	for (i = 1; i < view->n; ++i) {
		const struct user_regset *regset = &view->regsets[i];
		do_thread_regset_writeback(t->task, regset);
		if (regset->core_note_type && regset->get &&
		    (!regset->active || regset->active(t->task, regset) > 0)) {
			int ret;
			size_t size = regset_size(t->task, regset);
			void *data = kmalloc(size, GFP_KERNEL);
			if (unlikely(!data))
				return 0;
			ret = regset->get(t->task, regset,
					  0, size, data, NULL);
			if (unlikely(ret))
				kfree(data);
			else {
				if (regset->core_note_type != NT_PRFPREG)
					fill_note(&t->notes[i], "LINUX",
						  regset->core_note_type,
						  size, data);
				else {
					SET_PR_FPVALID(&t->prstatus,
						       1, regset0_size);
					fill_note(&t->notes[i], "CORE",
						  NT_PRFPREG, size, data);
				}
				*total += notesize(&t->notes[i]);
			}
		}
	}

	return 1;
}

static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  const kernel_siginfo_t *siginfo, struct pt_regs *regs)
{
	struct task_struct *dump_task = current;
	const struct user_regset_view *view = task_user_regset_view(dump_task);
	struct elf_thread_core_info *t;
	struct elf_prpsinfo *psinfo;
	struct core_thread *ct;
	unsigned int i;

	info->size = 0;
	info->thread = NULL;

	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (psinfo == NULL) {
		info->psinfo.data = NULL; /* So we don't free this wrongly */
		return 0;
	}

	fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	/*
	 * Figure out how many notes we're going to need for each thread.
	 */
	info->thread_notes = 0;
	for (i = 0; i < view->n; ++i)
		if (view->regsets[i].core_note_type != 0)
			++info->thread_notes;

	/*
	 * Sanity check. We rely on regset 0 being NT_PRSTATUS,
	 * since it is our one special case.
	 */
	if (unlikely(info->thread_notes == 0) ||
	    unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * Initialize the ELF file header.
	 */
	fill_elf_header(elf, phdrs,
			view->e_machine, view->e_flags);

	/*
	 * Allocate a structure for each thread.
	 */
	for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
		t = kzalloc(offsetof(struct elf_thread_core_info,
				     notes[info->thread_notes]),
			    GFP_KERNEL);
		if (unlikely(!t))
			return 0;

		t->task = ct->task;
		if (ct->task == dump_task || !info->thread) {
			t->next = info->thread;
			info->thread = t;
		} else {
			/*
			 * Make sure to keep the original task at
			 * the head of the list.
			 */
			t->next = info->thread->next;
			info->thread->next = t;
		}
	}

	/*
	 * Now fill in each thread's information.
	 */
	for (t = info->thread; t != NULL; t = t->next)
		if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size))
			return 0;

	/*
	 * Fill in the process-wide notes.
	 */
	fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
	info->size += notesize(&info->psinfo);

	fill_siginfo_note(&info->signote, &info->csigdata, siginfo);
	info->size += notesize(&info->signote);

	fill_auxv_note(&info->auxv, current->mm);
	info->size += notesize(&info->auxv);

	if (fill_files_note(&info->files) == 0)
		info->size += notesize(&info->files);

	return 1;
}

static size_t get_note_info_size(struct elf_note_info *info)
{
	return info->size;
}

/*
 * Write all the notes for each thread. When writing the first thread, the
 * process-wide notes are interleaved after the first thread-specific note.
 */
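/*
 * For a two-thread process (T0 being the dumping thread) the resulting
 * order is therefore roughly:
 *
 *	T0 NT_PRSTATUS,
 *	NT_PRPSINFO, NT_SIGINFO, NT_AUXV, NT_FILE,	(process-wide)
 *	T0 remaining per-regset notes,
 *	T1 NT_PRSTATUS, T1 remaining per-regset notes
 */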
static int write_note_info(struct elf_note_info *info,
			   struct coredump_params *cprm)
{
	bool first = true;
	struct elf_thread_core_info *t = info->thread;

	do {
		int i;

		if (!writenote(&t->notes[0], cprm))
			return 0;

		if (first && !writenote(&info->psinfo, cprm))
			return 0;
		if (first && !writenote(&info->signote, cprm))
			return 0;
		if (first && !writenote(&info->auxv, cprm))
			return 0;
		if (first && info->files.data &&
		    !writenote(&info->files, cprm))
			return 0;

		for (i = 1; i < info->thread_notes; ++i)
			if (t->notes[i].data &&
			    !writenote(&t->notes[i], cprm))
				return 0;

		first = false;
		t = t->next;
	} while (t);

	return 1;
}

static void free_note_info(struct elf_note_info *info)
{
	struct elf_thread_core_info *threads = info->thread;
	while (threads) {
		unsigned int i;
		struct elf_thread_core_info *t = threads;
		threads = t->next;
		WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
		for (i = 1; i < info->thread_notes; ++i)
			kfree(t->notes[i].data);
		kfree(t);
	}
	kfree(info->psinfo.data);
	kvfree(info->files.data);
}

#else

/* Here is the structure in which the status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* ELF_CORE_XFPREG_TYPE */
#endif
	struct memelfnote notes[3];
	int num_notes;
};

/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then create
 * a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	int sz = 0;
	struct task_struct *p = t->thread;
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
		  &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
								&t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
			  &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(t->xfpu), &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	return sz;
}
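/*
 * Each thread therefore contributes up to the three entries of
 * t->notes[]: NT_PRSTATUS always, NT_PRFPREG when the FPU state is
 * valid, and ELF_CORE_XFPREG_TYPE when the architecture provides it.
 */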

struct elf_note_info {
	struct memelfnote *notes;
	struct memelfnote *notes_files;
	struct elf_prstatus *prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo;	/* NT_PRPSINFO */
	struct list_head thread_list;
	elf_fpregset_t *fpu;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu;
#endif
	user_siginfo_t csigdata;
	int thread_status_size;
	int numnote;
};

static int elf_note_info_init(struct elf_note_info *info)
{
	memset(info, 0, sizeof(*info));
	INIT_LIST_HEAD(&info->thread_list);

	/* Allocate space for ELF notes */
	info->notes = kmalloc_array(8, sizeof(struct memelfnote), GFP_KERNEL);
	if (!info->notes)
		return 0;
	info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
	if (!info->psinfo)
		return 0;
	info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
	if (!info->prstatus)
		return 0;
	info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
	if (!info->fpu)
		return 0;
#ifdef ELF_CORE_COPY_XFPREGS
	info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
	if (!info->xfpu)
		return 0;
#endif
	return 1;
}

static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  const kernel_siginfo_t *siginfo, struct pt_regs *regs)
{
	struct core_thread *ct;
	struct elf_thread_status *ets;

	if (!elf_note_info_init(info))
		return 0;

	for (ct = current->mm->core_state->dumper.next;
	     ct; ct = ct->next) {
		ets = kzalloc(sizeof(*ets), GFP_KERNEL);
		if (!ets)
			return 0;

		ets->thread = ct->task;
		list_add(&ets->list, &info->thread_list);
	}

	list_for_each_entry(ets, &info->thread_list, list) {
		int sz;

		sz = elf_dump_thread_status(siginfo->si_signo, ets);
		info->thread_status_size += sz;
	}
	/* now collect the dump for the current task */
	memset(info->prstatus, 0, sizeof(*info->prstatus));
	fill_prstatus(info->prstatus, current, siginfo->si_signo);
	elf_core_copy_regs(&info->prstatus->pr_reg, regs);

	/* Set up header */
	fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
		  sizeof(*info->prstatus), info->prstatus);
	fill_psinfo(info->psinfo, current->group_leader, current->mm);
	fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
		  sizeof(*info->psinfo), info->psinfo);

	fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
	fill_auxv_note(info->notes + 3, current->mm);
	info->numnote = 4;

	if (fill_files_note(info->notes + info->numnote) == 0) {
		info->notes_files = info->notes + info->numnote;
		info->numnote++;
	}

	/* Try to dump the FPU. */
	info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
							       info->fpu);
	if (info->prstatus->pr_fpvalid)
		fill_note(info->notes + info->numnote++,
			  "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, info->xfpu))
		fill_note(info->notes + info->numnote++,
			  "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(*info->xfpu), info->xfpu);
#endif

	return 1;
}

static size_t get_note_info_size(struct elf_note_info *info)
{
	int sz = 0;
	int i;

	for (i = 0; i < info->numnote; i++)
		sz += notesize(info->notes + i);

	sz += info->thread_status_size;

	return sz;
}

static int write_note_info(struct elf_note_info *info,
			   struct coredump_params *cprm)
{
	struct elf_thread_status *ets;
	int i;

	for (i = 0; i < info->numnote; i++)
		if (!writenote(info->notes + i, cprm))
			return 0;

	/* write out the thread status notes section */
	list_for_each_entry(ets, &info->thread_list, list) {
		for (i = 0; i < ets->num_notes; i++)
			if (!writenote(&ets->notes[i], cprm))
				return 0;
	}

	return 1;
}

static void free_note_info(struct elf_note_info *info)
{
	while (!list_empty(&info->thread_list)) {
		struct list_head *tmp = info->thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	/* Free data possibly allocated by fill_files_note(): */
	if (info->notes_files)
		kvfree(info->notes_files->data);

	kfree(info->prstatus);
	kfree(info->psinfo);
	kfree(info->notes);
	kfree(info->fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	kfree(info->xfpu);
#endif
}

#endif

static struct vm_area_struct *first_vma(struct task_struct *tsk,
					struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret = tsk->mm->mmap;

	if (ret)
		return ret;
	return gate_vma;
}

/*
 * Helper function for iterating across a vma list. It ensures that the caller
 * will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
				       struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret;

	ret = this_vma->vm_next;
	if (ret)
		return ret;
	if (this_vma == gate_vma)
		return NULL;
	return gate_vma;
}
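/*
 * Typical iteration, as used by elf_core_dump() below:
 *
 *	for (vma = first_vma(current, gate_vma); vma != NULL;
 *	     vma = next_vma(vma, gate_vma))
 *		...;
 */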

static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
			     elf_addr_t e_shoff, int segs)
{
	elf->e_shoff = e_shoff;
	elf->e_shentsize = sizeof(*shdr4extnum);
	elf->e_shnum = 1;
	elf->e_shstrndx = SHN_UNDEF;

	memset(shdr4extnum, 0, sizeof(*shdr4extnum));

	shdr4extnum->sh_type = SHT_NULL;
	shdr4extnum->sh_size = elf->e_shnum;
	shdr4extnum->sh_link = elf->e_shstrndx;
	shdr4extnum->sh_info = segs;
}
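/*
 * With extended numbering, e_phnum in the ELF header holds the escape
 * value PN_XNUM (0xffff) and the real segment count is carried in the
 * sh_info field of this first, otherwise unused, section header.
 */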

/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out. If we run out of core limit
 * we just truncate.
 */
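/*
 * The resulting file layout is roughly:
 *
 *	ELF header
 *	program headers (one note phdr + one PT_LOAD phdr per segment)
 *	note data
 *	padding up to ELF_EXEC_PAGESIZE
 *	segment data, in program-header order
 *	extra section header, only when e_phnum == PN_XNUM
 */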
static int elf_core_dump(struct coredump_params *cprm)
{
	int has_dumped = 0;
	mm_segment_t fs;
	int segs, i;
	size_t vma_data_size = 0;
	struct vm_area_struct *vma, *gate_vma;
	struct elfhdr *elf = NULL;
	loff_t offset = 0, dataoff;
	struct elf_note_info info = { };
	struct elf_phdr *phdr4note = NULL;
	struct elf_shdr *shdr4extnum = NULL;
	Elf_Half e_phnum;
	elf_addr_t e_shoff;
	elf_addr_t *vma_filesz = NULL;

	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated. So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */

	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	if (!elf)
		goto out;
	/*
	 * The number of segs is recorded in the ELF header as a 16-bit value.
	 * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here.
	 */
	segs = current->mm->map_count;
	segs += elf_core_extra_phdrs();

	gate_vma = get_gate_vma(current->mm);
	if (gate_vma != NULL)
		segs++;

	/* for the notes section */
	segs++;

	/* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
	 * this, the kernel supports extended numbering. Have a look at
	 * include/linux/elf.h for further information. */
	e_phnum = segs > PN_XNUM ? PN_XNUM : segs;

	/*
	 * Collect all the non-memory information about the process for the
	 * notes. This also sets up the file header.
	 */
	if (!fill_note_info(elf, e_phnum, &info, cprm->siginfo, cprm->regs))
		goto cleanup;

	has_dumped = 1;

	fs = get_fs();
	set_fs(KERNEL_DS);

	offset += sizeof(*elf);				/* ELF header */
	offset += segs * sizeof(struct elf_phdr);	/* Program headers */

	/* Write notes phdr entry */
	{
		size_t sz = get_note_info_size(&info);

		sz += elf_coredump_extra_notes_size();

		phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
		if (!phdr4note)
			goto end_coredump;

		fill_elf_note_phdr(phdr4note, sz, offset);
		offset += sz;
	}

	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

	if (segs - 1 > ULONG_MAX / sizeof(*vma_filesz))
		goto end_coredump;
	vma_filesz = kvmalloc(array_size(sizeof(*vma_filesz), (segs - 1)),
			      GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(vma_filesz))
		goto end_coredump;

	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
	     vma = next_vma(vma, gate_vma)) {
		unsigned long dump_size;

		dump_size = vma_dump_size(vma, cprm->mm_flags);
		vma_filesz[i++] = dump_size;
		vma_data_size += dump_size;
	}

	offset += vma_data_size;
	offset += elf_core_extra_data_size();
	e_shoff = offset;

	if (e_phnum == PN_XNUM) {
		shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
		if (!shdr4extnum)
			goto end_coredump;
		fill_extnum_info(elf, shdr4extnum, e_shoff, segs);
	}

	offset = dataoff;

	if (!dump_emit(cprm, elf, sizeof(*elf)))
		goto end_coredump;

	if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
		goto end_coredump;

	/* Write program headers for segments dump */
	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
	     vma = next_vma(vma, gate_vma)) {
		struct elf_phdr phdr;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = vma_filesz[i++];
		phdr.p_memsz = vma->vm_end - vma->vm_start;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE)
			phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC)
			phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		if (!dump_emit(cprm, &phdr, sizeof(phdr)))
			goto end_coredump;
	}

	if (!elf_core_write_extra_phdrs(cprm, offset))
		goto end_coredump;

	/* write out the notes section */
	if (!write_note_info(&info, cprm))
		goto end_coredump;

	if (elf_coredump_extra_notes_write(cprm))
		goto end_coredump;

	/* Align to page */
	if (!dump_skip(cprm, dataoff - cprm->pos))
		goto end_coredump;

	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
	     vma = next_vma(vma, gate_vma)) {
		unsigned long addr;
		unsigned long end;

		end = vma->vm_start + vma_filesz[i++];

		for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
			struct page *page;
			int stop;

			page = get_dump_page(addr);
			if (page) {
				void *kaddr = kmap(page);
				stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
				kunmap(page);
				put_page(page);
			} else
				stop = !dump_skip(cprm, PAGE_SIZE);
			if (stop)
				goto end_coredump;
		}
	}
	dump_truncate(cprm);

	if (!elf_core_write_extra_data(cprm))
		goto end_coredump;

	if (e_phnum == PN_XNUM) {
		if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
			goto end_coredump;
	}

end_coredump:
	set_fs(fs);

cleanup:
	free_note_info(&info);
	kfree(shdr4extnum);
	kvfree(vma_filesz);
	kfree(phdr4note);
	kfree(elf);
out:
	return has_dumped;
}

#endif /* CONFIG_ELF_CORE */

static int __init init_elf_binfmt(void)
{
	register_binfmt(&elf_format);
	return 0;
}

static void __exit exit_elf_binfmt(void)
{
	/* Remove the ELF loader. */
	unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");