Merge branch 'core-objtool-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-block.git] / arch / mips / kernel / vpe.c
CommitLineData
e01402b1 1/*
5792bf64
SH
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
e01402b1 5 *
5792bf64
SH
6 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
7 * Copyright (C) 2013 Imagination Technologies Ltd.
e01402b1 8 *
5792bf64
SH
 9 * VPE support module for loading a MIPS SP program into VPE1. The SP
10 * environment is rather simple since there are no TLBs. It needs
 11 * to be relocatable (or partially linked). Initialize your stack in
12 * the startup-code. The loader looks for the symbol __start and sets
13 * up the execution to resume from there. To load and run, simply do
14 * a cat SP 'binary' to the /dev/vpe1 device.
e01402b1 15 */
e01402b1 16#include <linux/kernel.h>
27a3bbaf 17#include <linux/device.h>
e01402b1
RB
18#include <linux/fs.h>
19#include <linux/init.h>
e01402b1
RB
20#include <linux/slab.h>
21#include <linux/list.h>
22#include <linux/vmalloc.h>
23#include <linux/elf.h>
24#include <linux/seq_file.h>
25#include <linux/syscalls.h>
26#include <linux/moduleloader.h>
27#include <linux/interrupt.h>
28#include <linux/poll.h>
57c8a661 29#include <linux/memblock.h>
e01402b1 30#include <asm/mipsregs.h>
340ee4b9 31#include <asm/mipsmtregs.h>
e01402b1 32#include <asm/cacheflush.h>
60063497 33#include <linux/atomic.h>
27a3bbaf 34#include <asm/mips_mt.h>
e01402b1 35#include <asm/processor.h>
2600990e 36#include <asm/vpe.h>
e01402b1 37
e01402b1
RB
38#ifndef ARCH_SHF_SMALL
39#define ARCH_SHF_SMALL 0
40#endif
41
42/* If this is set, the section belongs in the init part of the module */
43#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
44
1a2a6d7e 45struct vpe_control vpecontrol = {
52bd080d 46 .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock),
1bbfc20d 47 .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
52bd080d 48 .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock),
1bbfc20d 49 .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
9cfdf6f1 50};
e01402b1 51
e01402b1 52/* get the vpe associated with this minor */
1a2a6d7e 53struct vpe *get_vpe(int minor)
e01402b1 54{
1bbfc20d 55 struct vpe *res, *v;
e01402b1 56
2600990e
RB
57 if (!cpu_has_mipsmt)
58 return NULL;
59
1bbfc20d
RB
60 res = NULL;
61 spin_lock(&vpecontrol.vpe_list_lock);
e01402b1 62 list_for_each_entry(v, &vpecontrol.vpe_list, list) {
1a2a6d7e 63 if (v->minor == VPE_MODULE_MINOR) {
1bbfc20d
RB
64 res = v;
65 break;
66 }
e01402b1 67 }
1bbfc20d 68 spin_unlock(&vpecontrol.vpe_list_lock);
e01402b1 69
1bbfc20d 70 return res;
e01402b1
RB
71}
72
73/* get the vpe associated with this minor */
1a2a6d7e 74struct tc *get_tc(int index)
e01402b1 75{
1bbfc20d 76 struct tc *res, *t;
e01402b1 77
1bbfc20d
RB
78 res = NULL;
79 spin_lock(&vpecontrol.tc_list_lock);
e01402b1 80 list_for_each_entry(t, &vpecontrol.tc_list, list) {
1bbfc20d
RB
81 if (t->index == index) {
82 res = t;
83 break;
84 }
e01402b1 85 }
1bbfc20d 86 spin_unlock(&vpecontrol.tc_list_lock);
e01402b1 87
9fbcbd7e 88 return res;
e01402b1
RB
89}
90
e01402b1 91/* allocate a vpe and associate it with this minor (or index) */
1a2a6d7e 92struct vpe *alloc_vpe(int minor)
e01402b1
RB
93{
94 struct vpe *v;
95
5792bf64
SH
96 v = kzalloc(sizeof(struct vpe), GFP_KERNEL);
97 if (v == NULL)
98 goto out;
e01402b1 99
e01402b1 100 INIT_LIST_HEAD(&v->tc);
1bbfc20d 101 spin_lock(&vpecontrol.vpe_list_lock);
e01402b1 102 list_add_tail(&v->list, &vpecontrol.vpe_list);
1bbfc20d 103 spin_unlock(&vpecontrol.vpe_list_lock);
e01402b1 104
2600990e 105 INIT_LIST_HEAD(&v->notify);
1a2a6d7e 106 v->minor = VPE_MODULE_MINOR;
1bbfc20d 107
5792bf64 108out:
e01402b1
RB
109 return v;
110}
111
112/* allocate a tc. At startup only tc0 is running, all other can be halted. */
1a2a6d7e 113struct tc *alloc_tc(int index)
e01402b1 114{
07cc0c9e 115 struct tc *tc;
e01402b1 116
5792bf64
SH
117 tc = kzalloc(sizeof(struct tc), GFP_KERNEL);
118 if (tc == NULL)
07cc0c9e 119 goto out;
e01402b1 120
07cc0c9e
RB
121 INIT_LIST_HEAD(&tc->tc);
122 tc->index = index;
1bbfc20d
RB
123
124 spin_lock(&vpecontrol.tc_list_lock);
07cc0c9e 125 list_add_tail(&tc->list, &vpecontrol.tc_list);
1bbfc20d 126 spin_unlock(&vpecontrol.tc_list_lock);
e01402b1 127
07cc0c9e
RB
128out:
129 return tc;
e01402b1
RB
130}
131
132/* clean up and free everything */
1a2a6d7e 133void release_vpe(struct vpe *v)
e01402b1
RB
134{
135 list_del(&v->list);
136 if (v->load_addr)
bef8e2df 137 release_progmem(v->load_addr);
e01402b1
RB
138 kfree(v);
139}
140
5792bf64 141/* Find some VPE program space */
1a2a6d7e 142void *alloc_progmem(unsigned long len)
e01402b1 143{
5408c490
RB
144 void *addr;
145
e01402b1 146#ifdef CONFIG_MIPS_VPE_LOADER_TOM
5408c490
RB
147 /*
148 * This means you must tell Linux to use less memory than you
149 * physically have, for example by passing a mem= boot argument.
150 */
9f2546ad 151 addr = pfn_to_kaddr(max_low_pfn);
5408c490 152 memset(addr, 0, len);
e01402b1 153#else
5408c490
RB
154 /* simple grab some mem for now */
155 addr = kzalloc(len, GFP_KERNEL);
e01402b1 156#endif
5408c490
RB
157
158 return addr;
e01402b1
RB
159}
160
/*
 * Free program space obtained from alloc_progmem().  A no-op when the
 * image lives in reserved physical memory (LOADER_TOM).
 */
void release_progmem(void *ptr)
{
#ifndef CONFIG_MIPS_VPE_LOADER_TOM
	kfree(ptr);
#endif
}
167
168/* Update size with this section: return offset. */
5792bf64 169static long get_offset(unsigned long *size, Elf_Shdr *sechdr)
e01402b1
RB
170{
171 long ret;
172
173 ret = ALIGN(*size, sechdr->sh_addralign ? : 1);
174 *size = ret + sechdr->sh_size;
175 return ret;
176}
177
178/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
70342287 179 might -- code, read-only data, read-write data, small data. Tally
e01402b1
RB
180 sizes, and place the offsets into sh_entsize fields: high bit means it
181 belongs in init. */
5792bf64
SH
182static void layout_sections(struct module *mod, const Elf_Ehdr *hdr,
183 Elf_Shdr *sechdrs, const char *secstrings)
e01402b1
RB
184{
185 static unsigned long const masks[][2] = {
186 /* NOTE: all executable code must be the first section
187 * in this array; otherwise modify the text_size
188 * finder in the two loops below */
189 {SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL},
190 {SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL},
191 {SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL},
192 {ARCH_SHF_SMALL | SHF_ALLOC, 0}
193 };
194 unsigned int m, i;
195
196 for (i = 0; i < hdr->e_shnum; i++)
197 sechdrs[i].sh_entsize = ~0UL;
198
199 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
200 for (i = 0; i < hdr->e_shnum; ++i) {
201 Elf_Shdr *s = &sechdrs[i];
202
e01402b1
RB
203 if ((s->sh_flags & masks[m][0]) != masks[m][0]
204 || (s->sh_flags & masks[m][1])
205 || s->sh_entsize != ~0UL)
206 continue;
e2a9cf96 207 s->sh_entsize =
7523e4dc 208 get_offset((unsigned long *)&mod->core_layout.size, s);
e01402b1
RB
209 }
210
211 if (m == 0)
7523e4dc 212 mod->core_layout.text_size = mod->core_layout.size;
e01402b1
RB
213
214 }
215}
216
e01402b1
RB
217/* from module-elf32.c, but subverted a little */
218
219struct mips_hi16 {
220 struct mips_hi16 *next;
221 Elf32_Addr *addr;
222 Elf32_Addr value;
223};
224
225static struct mips_hi16 *mips_hi16_list;
226static unsigned int gp_offs, gp_addr;
227
/* R_MIPS_NONE: nothing to patch. */
static int apply_r_mips_none(struct module *me, uint32_t *location,
			     Elf32_Addr v)
{
	return 0;
}
233
234static int apply_r_mips_gprel16(struct module *me, uint32_t *location,
235 Elf32_Addr v)
236{
237 int rel;
238
5792bf64 239 if (!(*location & 0xffff)) {
e01402b1 240 rel = (int)v - gp_addr;
5792bf64 241 } else {
e01402b1
RB
242 /* .sbss + gp(relative) + offset */
243 /* kludge! */
244 rel = (int)(short)((int)v + gp_offs +
245 (int)(short)(*location & 0xffff) - gp_addr);
246 }
247
5792bf64
SH
248 if ((rel > 32768) || (rel < -32768)) {
249 pr_debug("VPE loader: apply_r_mips_gprel16: relative address 0x%x out of range of gp register\n",
250 rel);
e01402b1
RB
251 return -ENOEXEC;
252 }
253
254 *location = (*location & 0xffff0000) | (rel & 0xffff);
255
256 return 0;
257}
258
259static int apply_r_mips_pc16(struct module *me, uint32_t *location,
260 Elf32_Addr v)
261{
262 int rel;
263 rel = (((unsigned int)v - (unsigned int)location));
5792bf64
SH
264 rel >>= 2; /* because the offset is in _instructions_ not bytes. */
265 rel -= 1; /* and one instruction less due to the branch delay slot. */
e01402b1 266
5792bf64
SH
267 if ((rel > 32768) || (rel < -32768)) {
268 pr_debug("VPE loader: apply_r_mips_pc16: relative address out of range 0x%x\n",
269 rel);
e01402b1
RB
270 return -ENOEXEC;
271 }
272
273 *location = (*location & 0xffff0000) | (rel & 0xffff);
274
275 return 0;
276}
277
/* R_MIPS_32: add the symbol value into the 32-bit word in place. */
static int apply_r_mips_32(struct module *me, uint32_t *location,
			   Elf32_Addr v)
{
	*location += v;

	return 0;
}
285
286static int apply_r_mips_26(struct module *me, uint32_t *location,
287 Elf32_Addr v)
288{
289 if (v % 4) {
5792bf64 290 pr_debug("VPE loader: apply_r_mips_26: unaligned relocation\n");
e01402b1
RB
291 return -ENOEXEC;
292 }
293
307bd284
RB
294/*
295 * Not desperately convinced this is a good check of an overflow condition
296 * anyway. But it gets in the way of handling undefined weak symbols which
297 * we want to set to zero.
298 * if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
299 * printk(KERN_ERR
300 * "module %s: relocation overflow\n",
301 * me->name);
302 * return -ENOEXEC;
303 * }
304 */
e01402b1
RB
305
306 *location = (*location & ~0x03ffffff) |
307 ((*location + (v >> 2)) & 0x03ffffff);
308 return 0;
309}
310
311static int apply_r_mips_hi16(struct module *me, uint32_t *location,
312 Elf32_Addr v)
313{
314 struct mips_hi16 *n;
315
316 /*
317 * We cannot relocate this one now because we don't know the value of
318 * the carry we need to add. Save the information, and let LO16 do the
319 * actual relocation.
320 */
5792bf64 321 n = kmalloc(sizeof(*n), GFP_KERNEL);
e01402b1
RB
322 if (!n)
323 return -ENOMEM;
324
325 n->addr = location;
326 n->value = v;
327 n->next = mips_hi16_list;
328 mips_hi16_list = n;
329
330 return 0;
331}
332
333static int apply_r_mips_lo16(struct module *me, uint32_t *location,
334 Elf32_Addr v)
335{
336 unsigned long insnlo = *location;
337 Elf32_Addr val, vallo;
477c4b07 338 struct mips_hi16 *l, *next;
e01402b1 339
70342287 340 /* Sign extend the addend we extract from the lo insn. */
e01402b1
RB
341 vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
342
343 if (mips_hi16_list != NULL) {
e01402b1
RB
344
345 l = mips_hi16_list;
346 while (l != NULL) {
e01402b1
RB
347 unsigned long insn;
348
349 /*
350 * The value for the HI16 had best be the same.
351 */
70342287 352 if (v != l->value) {
5792bf64 353 pr_debug("VPE loader: apply_r_mips_lo16/hi16: inconsistent value information\n");
477c4b07 354 goto out_free;
e01402b1
RB
355 }
356
e01402b1
RB
357 /*
358 * Do the HI16 relocation. Note that we actually don't
359 * need to know anything about the LO16 itself, except
360 * where to find the low 16 bits of the addend needed
361 * by the LO16.
362 */
363 insn = *l->addr;
364 val = ((insn & 0xffff) << 16) + vallo;
365 val += v;
366
367 /*
368 * Account for the sign extension that will happen in
369 * the low bits.
370 */
371 val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;
372
373 insn = (insn & ~0xffff) | val;
374 *l->addr = insn;
375
376 next = l->next;
377 kfree(l);
378 l = next;
379 }
380
381 mips_hi16_list = NULL;
382 }
383
384 /*
70342287 385 * Ok, we're done with the HI16 relocs. Now deal with the LO16.
e01402b1
RB
386 */
387 val = v + vallo;
388 insnlo = (insnlo & ~0xffff) | (val & 0xffff);
389 *location = insnlo;
390
391 return 0;
477c4b07
RB
392
393out_free:
394 while (l != NULL) {
395 next = l->next;
396 kfree(l);
397 l = next;
398 }
399 mips_hi16_list = NULL;
400
401 return -ENOEXEC;
e01402b1
RB
402}
403
404static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
405 Elf32_Addr v) = {
406 [R_MIPS_NONE] = apply_r_mips_none,
407 [R_MIPS_32] = apply_r_mips_32,
408 [R_MIPS_26] = apply_r_mips_26,
409 [R_MIPS_HI16] = apply_r_mips_hi16,
410 [R_MIPS_LO16] = apply_r_mips_lo16,
411 [R_MIPS_GPREL16] = apply_r_mips_gprel16,
412 [R_MIPS_PC16] = apply_r_mips_pc16
413};
414
2600990e 415static char *rstrs[] = {
e0daad44 416 [R_MIPS_NONE] = "MIPS_NONE",
2600990e
RB
417 [R_MIPS_32] = "MIPS_32",
418 [R_MIPS_26] = "MIPS_26",
419 [R_MIPS_HI16] = "MIPS_HI16",
420 [R_MIPS_LO16] = "MIPS_LO16",
421 [R_MIPS_GPREL16] = "MIPS_GPREL16",
422 [R_MIPS_PC16] = "MIPS_PC16"
423};
e01402b1 424
f18b51cc 425static int apply_relocations(Elf32_Shdr *sechdrs,
e01402b1
RB
426 const char *strtab,
427 unsigned int symindex,
428 unsigned int relsec,
429 struct module *me)
430{
431 Elf32_Rel *rel = (void *) sechdrs[relsec].sh_addr;
432 Elf32_Sym *sym;
433 uint32_t *location;
434 unsigned int i;
435 Elf32_Addr v;
436 int res;
437
438 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
439 Elf32_Word r_info = rel[i].r_info;
440
441 /* This is where to make the change */
442 location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
443 + rel[i].r_offset;
444 /* This is the symbol it is referring to */
445 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
446 + ELF32_R_SYM(r_info);
447
448 if (!sym->st_value) {
5792bf64
SH
449 pr_debug("%s: undefined weak symbol %s\n",
450 me->name, strtab + sym->st_name);
e01402b1
RB
451 /* just print the warning, dont barf */
452 }
453
454 v = sym->st_value;
455
456 res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
5792bf64 457 if (res) {
2600990e 458 char *r = rstrs[ELF32_R_TYPE(r_info)];
5792bf64
SH
459 pr_warn("VPE loader: .text+0x%x relocation type %s for symbol \"%s\" failed\n",
460 rel[i].r_offset, r ? r : "UNKNOWN",
461 strtab + sym->st_name);
e01402b1 462 return res;
2600990e 463 }
e01402b1
RB
464 }
465
466 return 0;
467}
468
f18b51cc 469static inline void save_gp_address(unsigned int secbase, unsigned int rel)
e01402b1
RB
470{
471 gp_addr = secbase + rel;
472 gp_offs = gp_addr - (secbase & 0xffff0000);
473}
474/* end module-elf32.c */
475
e01402b1 476/* Change all symbols so that sh_value encodes the pointer directly. */
5792bf64 477static void simplify_symbols(Elf_Shdr *sechdrs,
e01402b1
RB
478 unsigned int symindex,
479 const char *strtab,
480 const char *secstrings,
481 unsigned int nsecs, struct module *mod)
482{
483 Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
484 unsigned long secbase, bssbase = 0;
485 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
2600990e 486 int size;
e01402b1
RB
487
488 /* find the .bss section for COMMON symbols */
489 for (i = 0; i < nsecs; i++) {
2600990e 490 if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0) {
e01402b1 491 bssbase = sechdrs[i].sh_addr;
2600990e
RB
492 break;
493 }
e01402b1
RB
494 }
495
496 for (i = 1; i < n; i++) {
497 switch (sym[i].st_shndx) {
498 case SHN_COMMON:
2600990e
RB
499 /* Allocate space for the symbol in the .bss section.
500 st_value is currently size.
e01402b1
RB
501 We want it to have the address of the symbol. */
502
503 size = sym[i].st_value;
504 sym[i].st_value = bssbase;
505
506 bssbase += size;
507 break;
508
509 case SHN_ABS:
510 /* Don't need to do anything */
511 break;
512
513 case SHN_UNDEF:
514 /* ret = -ENOENT; */
515 break;
516
517 case SHN_MIPS_SCOMMON:
5792bf64
SH
518 pr_debug("simplify_symbols: ignoring SHN_MIPS_SCOMMON symbol <%s> st_shndx %d\n",
519 strtab + sym[i].st_name, sym[i].st_shndx);
520 /* .sbss section */
e01402b1
RB
521 break;
522
523 default:
524 secbase = sechdrs[sym[i].st_shndx].sh_addr;
525
5792bf64 526 if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0)
e01402b1 527 save_gp_address(secbase, sym[i].st_value);
e01402b1
RB
528
529 sym[i].st_value += secbase;
530 break;
531 }
e01402b1 532 }
e01402b1
RB
533}
534
#ifdef DEBUG_ELFLOADER
/* Debug helper: print every symbol in the given symtab section. */
static void dump_elfsymbols(Elf_Shdr *sechdrs, unsigned int symindex,
			    const char *strtab, struct module *mod)
{
	Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
	unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);

	pr_debug("dump_elfsymbols: n %d\n", n);
	for (i = 1; i < n; i++) {
		pr_debug(" i %d name <%s> 0x%x\n", i, strtab + sym[i].st_name,
			 sym[i].st_value);
	}
}
#endif
549
5792bf64 550static int find_vpe_symbols(struct vpe *v, Elf_Shdr *sechdrs,
e01402b1
RB
551 unsigned int symindex, const char *strtab,
552 struct module *mod)
553{
554 Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
555 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
556
557 for (i = 1; i < n; i++) {
5792bf64 558 if (strcmp(strtab + sym[i].st_name, "__start") == 0)
e01402b1 559 v->__start = sym[i].st_value;
e01402b1 560
5792bf64 561 if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0)
e01402b1 562 v->shared_ptr = (void *)sym[i].st_value;
e01402b1
RB
563 }
564
5792bf64 565 if ((v->__start == 0) || (v->shared_ptr == NULL))
2600990e
RB
566 return -1;
567
e01402b1
RB
568 return 0;
569}
570
307bd284 571/*
2600990e
RB
572 * Allocates a VPE with some program code space(the load address), copies the
 573 * contents of the program (p)buffer performing relocations/etc, frees it
574 * when finished.
575 */
5792bf64 576static int vpe_elfload(struct vpe *v)
e01402b1
RB
577{
578 Elf_Ehdr *hdr;
579 Elf_Shdr *sechdrs;
580 long err = 0;
581 char *secstrings, *strtab = NULL;
2600990e 582 unsigned int len, i, symindex = 0, strindex = 0, relocate = 0;
5792bf64 583 struct module mod; /* so we can re-use the relocations code */
e01402b1
RB
584
585 memset(&mod, 0, sizeof(struct module));
2600990e 586 strcpy(mod.name, "VPE loader");
e01402b1
RB
587
588 hdr = (Elf_Ehdr *) v->pbuffer;
589 len = v->plen;
590
591 /* Sanity checks against insmoding binaries or wrong arch,
592 weird elf version */
d303f4a1 593 if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0
2600990e
RB
594 || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC)
595 || !elf_check_arch(hdr)
e01402b1 596 || hdr->e_shentsize != sizeof(*sechdrs)) {
5792bf64 597 pr_warn("VPE loader: program wrong arch or weird elf version\n");
e01402b1
RB
598
599 return -ENOEXEC;
600 }
601
2600990e
RB
602 if (hdr->e_type == ET_REL)
603 relocate = 1;
604
e01402b1 605 if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
5792bf64 606 pr_err("VPE loader: program length %u truncated\n", len);
2600990e 607
e01402b1
RB
608 return -ENOEXEC;
609 }
610
611 /* Convenience variables */
612 sechdrs = (void *)hdr + hdr->e_shoff;
613 secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
614 sechdrs[0].sh_addr = 0;
615
616 /* And these should exist, but gcc whinges if we don't init them */
617 symindex = strindex = 0;
618
2600990e
RB
619 if (relocate) {
620 for (i = 1; i < hdr->e_shnum; i++) {
5792bf64
SH
621 if ((sechdrs[i].sh_type != SHT_NOBITS) &&
622 (len < sechdrs[i].sh_offset + sechdrs[i].sh_size)) {
623 pr_err("VPE program length %u truncated\n",
2600990e
RB
624 len);
625 return -ENOEXEC;
626 }
e01402b1 627
2600990e
RB
628 /* Mark all sections sh_addr with their address in the
629 temporary image. */
5792bf64
SH
630 sechdrs[i].sh_addr = (size_t) hdr +
631 sechdrs[i].sh_offset;
e01402b1 632
2600990e
RB
633 /* Internal symbols and strings. */
634 if (sechdrs[i].sh_type == SHT_SYMTAB) {
635 symindex = i;
636 strindex = sechdrs[i].sh_link;
5792bf64
SH
637 strtab = (char *)hdr +
638 sechdrs[strindex].sh_offset;
2600990e 639 }
e01402b1 640 }
2600990e 641 layout_sections(&mod, hdr, sechdrs, secstrings);
e01402b1
RB
642 }
643
7523e4dc 644 v->load_addr = alloc_progmem(mod.core_layout.size);
5408c490
RB
645 if (!v->load_addr)
646 return -ENOMEM;
e01402b1 647
5408c490 648 pr_info("VPE loader: loading to %p\n", v->load_addr);
e01402b1 649
2600990e
RB
650 if (relocate) {
651 for (i = 0; i < hdr->e_shnum; i++) {
652 void *dest;
e01402b1 653
2600990e
RB
654 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
655 continue;
e01402b1 656
2600990e 657 dest = v->load_addr + sechdrs[i].sh_entsize;
e01402b1 658
2600990e
RB
659 if (sechdrs[i].sh_type != SHT_NOBITS)
660 memcpy(dest, (void *)sechdrs[i].sh_addr,
661 sechdrs[i].sh_size);
662 /* Update sh_addr to point to copy in image. */
663 sechdrs[i].sh_addr = (unsigned long)dest;
e01402b1 664
5792bf64
SH
665 pr_debug(" section sh_name %s sh_addr 0x%x\n",
666 secstrings + sechdrs[i].sh_name,
667 sechdrs[i].sh_addr);
2600990e 668 }
e01402b1 669
70342287
RB
670 /* Fix up syms, so that st_value is a pointer to location. */
671 simplify_symbols(sechdrs, symindex, strtab, secstrings,
672 hdr->e_shnum, &mod);
673
674 /* Now do relocations. */
675 for (i = 1; i < hdr->e_shnum; i++) {
676 const char *strtab = (char *)sechdrs[strindex].sh_addr;
677 unsigned int info = sechdrs[i].sh_info;
678
679 /* Not a valid relocation section? */
680 if (info >= hdr->e_shnum)
681 continue;
682
683 /* Don't bother with non-allocated sections */
684 if (!(sechdrs[info].sh_flags & SHF_ALLOC))
685 continue;
686
687 if (sechdrs[i].sh_type == SHT_REL)
5792bf64
SH
688 err = apply_relocations(sechdrs, strtab,
689 symindex, i, &mod);
70342287 690 else if (sechdrs[i].sh_type == SHT_RELA)
5792bf64
SH
691 err = apply_relocate_add(sechdrs, strtab,
692 symindex, i, &mod);
70342287
RB
693 if (err < 0)
694 return err;
695
696 }
697 } else {
5792bf64
SH
698 struct elf_phdr *phdr = (struct elf_phdr *)
699 ((char *)hdr + hdr->e_phoff);
2600990e 700
bdf5d42c 701 for (i = 0; i < hdr->e_phnum; i++) {
b618336a
KK
702 if (phdr->p_type == PT_LOAD) {
703 memcpy((void *)phdr->p_paddr,
704 (char *)hdr + phdr->p_offset,
705 phdr->p_filesz);
706 memset((void *)phdr->p_paddr + phdr->p_filesz,
707 0, phdr->p_memsz - phdr->p_filesz);
708 }
709 phdr++;
bdf5d42c
RB
710 }
711
712 for (i = 0; i < hdr->e_shnum; i++) {
70342287
RB
713 /* Internal symbols and strings. */
714 if (sechdrs[i].sh_type == SHT_SYMTAB) {
715 symindex = i;
716 strindex = sechdrs[i].sh_link;
5792bf64
SH
717 strtab = (char *)hdr +
718 sechdrs[strindex].sh_offset;
70342287 719
5792bf64
SH
720 /*
721 * mark symtab's address for when we try
722 * to find the magic symbols
723 */
724 sechdrs[i].sh_addr = (size_t) hdr +
725 sechdrs[i].sh_offset;
70342287 726 }
e01402b1
RB
727 }
728 }
729
730 /* make sure it's physically written out */
731 flush_icache_range((unsigned long)v->load_addr,
732 (unsigned long)v->load_addr + v->len);
733
734 if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
2600990e 735 if (v->__start == 0) {
5792bf64 736 pr_warn("VPE loader: program does not contain a __start symbol\n");
2600990e
RB
737 return -ENOEXEC;
738 }
e01402b1 739
2600990e 740 if (v->shared_ptr == NULL)
5792bf64
SH
741 pr_warn("VPE loader: program does not contain vpe_shared symbol.\n"
742 " Unable to use AMVP (AP/SP) facilities.\n");
e01402b1
RB
743 }
744
5792bf64 745 pr_info(" elf loaded\n");
2600990e 746 return 0;
e01402b1
RB
747}
748
2600990e
RB
749static int getcwd(char *buff, int size)
750{
751 mm_segment_t old_fs;
752 int ret;
753
754 old_fs = get_fs();
755 set_fs(KERNEL_DS);
756
21a151d8 757 ret = sys_getcwd(buff, size);
2600990e
RB
758
759 set_fs(old_fs);
760
761 return ret;
762}
763
70342287 764/* checks VPE is unused and gets ready to load program */
e01402b1
RB
765static int vpe_open(struct inode *inode, struct file *filp)
766{
c4c4018b 767 enum vpe_state state;
5792bf64 768 struct vpe_notifications *notifier;
07cc0c9e 769 struct vpe *v;
1bbfc20d 770 int ret;
e01402b1 771
1a2a6d7e 772 if (VPE_MODULE_MINOR != iminor(inode)) {
07cc0c9e 773 /* assume only 1 device at the moment. */
5792bf64 774 pr_warn("VPE loader: only vpe1 is supported\n");
1bbfc20d
RB
775
776 return -ENODEV;
e01402b1
RB
777 }
778
5792bf64
SH
779 v = get_vpe(aprp_cpu_index());
780 if (v == NULL) {
781 pr_warn("VPE loader: unable to get vpe\n");
1bbfc20d
RB
782
783 return -ENODEV;
e01402b1
RB
784 }
785
c4c4018b
RB
786 state = xchg(&v->state, VPE_STATE_INUSE);
787 if (state != VPE_STATE_UNUSED) {
5792bf64 788 pr_debug("VPE loader: tc in use dumping regs\n");
e01402b1 789
5792bf64
SH
790 list_for_each_entry(notifier, &v->notify, list)
791 notifier->stop(aprp_cpu_index());
e01402b1 792
2600990e 793 release_progmem(v->load_addr);
1a2a6d7e 794 cleanup_tc(get_tc(aprp_cpu_index()));
e01402b1
RB
795 }
796
e01402b1
RB
797 /* this of-course trashes what was there before... */
798 v->pbuffer = vmalloc(P_SIZE);
863abad4 799 if (!v->pbuffer) {
5792bf64 800 pr_warn("VPE loader: unable to allocate memory\n");
863abad4
JJ
801 return -ENOMEM;
802 }
e01402b1
RB
803 v->plen = P_SIZE;
804 v->load_addr = NULL;
805 v->len = 0;
806
2600990e
RB
807 v->cwd[0] = 0;
808 ret = getcwd(v->cwd, VPE_PATH_MAX);
809 if (ret < 0)
5792bf64 810 pr_warn("VPE loader: open, getcwd returned %d\n", ret);
2600990e
RB
811
812 v->shared_ptr = NULL;
813 v->__start = 0;
07cc0c9e 814
e01402b1
RB
815 return 0;
816}
817
818static int vpe_release(struct inode *inode, struct file *filp)
819{
c60f9944 820#if defined(CONFIG_MIPS_VPE_LOADER_MT) || defined(CONFIG_MIPS_VPE_LOADER_CMP)
307bd284 821 struct vpe *v;
e01402b1 822 Elf_Ehdr *hdr;
07cc0c9e 823 int ret = 0;
e01402b1 824
1a2a6d7e 825 v = get_vpe(aprp_cpu_index());
07cc0c9e 826 if (v == NULL)
e01402b1
RB
827 return -ENODEV;
828
e01402b1 829 hdr = (Elf_Ehdr *) v->pbuffer;
d303f4a1 830 if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) {
1c205b9c 831 if (vpe_elfload(v) >= 0) {
e01402b1 832 vpe_run(v);
07cc0c9e 833 } else {
5792bf64 834 pr_warn("VPE loader: ELF load failed.\n");
e01402b1
RB
835 ret = -ENOEXEC;
836 }
837 } else {
5792bf64 838 pr_warn("VPE loader: only elf files are supported\n");
e01402b1
RB
839 ret = -ENOEXEC;
840 }
841
2600990e
RB
842 /* It's good to be able to run the SP and if it chokes have a look at
843 the /dev/rt?. But if we reset the pointer to the shared struct we
8ebcfc8b 844 lose what has happened. So perhaps if garbage is sent to the vpe
2600990e
RB
845 device, use it as a trigger for the reset. Hopefully a nice
846 executable will be along shortly. */
847 if (ret < 0)
848 v->shared_ptr = NULL;
849
863abad4 850 vfree(v->pbuffer);
e01402b1 851 v->plen = 0;
863abad4 852
e01402b1 853 return ret;
c60f9944
BH
854#else
855 pr_warn("VPE loader: ELF load failed.\n");
856 return -ENOEXEC;
857#endif
e01402b1
RB
858}
859
5792bf64
SH
860static ssize_t vpe_write(struct file *file, const char __user *buffer,
861 size_t count, loff_t *ppos)
e01402b1 862{
e01402b1 863 size_t ret = count;
307bd284 864 struct vpe *v;
e01402b1 865
1a2a6d7e 866 if (iminor(file_inode(file)) != VPE_MODULE_MINOR)
07cc0c9e
RB
867 return -ENODEV;
868
1a2a6d7e 869 v = get_vpe(aprp_cpu_index());
5792bf64 870
07cc0c9e 871 if (v == NULL)
e01402b1
RB
872 return -ENODEV;
873
e01402b1 874 if ((count + v->len) > v->plen) {
aae22f16 875 pr_warn("VPE loader: elf size too big. Perhaps strip unneeded symbols\n");
e01402b1
RB
876 return -ENOMEM;
877 }
878
879 count -= copy_from_user(v->pbuffer + v->len, buffer, count);
2600990e 880 if (!count)
e01402b1 881 return -EFAULT;
e01402b1
RB
882
883 v->len += count;
884 return ret;
885}
886
1a2a6d7e 887const struct file_operations vpe_fops = {
e01402b1
RB
888 .owner = THIS_MODULE,
889 .open = vpe_open,
890 .release = vpe_release,
6038f373
AB
891 .write = vpe_write,
892 .llseek = noop_llseek,
e01402b1
RB
893};
894
e01402b1
RB
895void *vpe_get_shared(int index)
896{
5792bf64 897 struct vpe *v = get_vpe(index);
e01402b1 898
5792bf64 899 if (v == NULL)
e01402b1 900 return NULL;
e01402b1
RB
901
902 return v->shared_ptr;
903}
e01402b1
RB
904EXPORT_SYMBOL(vpe_get_shared);
905
2600990e
RB
906int vpe_notify(int index, struct vpe_notifications *notify)
907{
5792bf64 908 struct vpe *v = get_vpe(index);
2600990e 909
5792bf64 910 if (v == NULL)
2600990e
RB
911 return -1;
912
913 list_add(&notify->list, &v->notify);
914 return 0;
915}
2600990e
RB
916EXPORT_SYMBOL(vpe_notify);
917
918char *vpe_getcwd(int index)
919{
5792bf64 920 struct vpe *v = get_vpe(index);
2600990e 921
5792bf64 922 if (v == NULL)
2600990e
RB
923 return NULL;
924
925 return v->cwd;
926}
2600990e
RB
927EXPORT_SYMBOL(vpe_getcwd);
928
e01402b1
RB
929module_init(vpe_module_init);
930module_exit(vpe_module_exit);
931MODULE_DESCRIPTION("MIPS VPE Loader");
2600990e 932MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
e01402b1 933MODULE_LICENSE("GPL");