// SPDX-License-Identifier: GPL-2.0
/*
 * IA-64-specific support for kernel module loader.
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Loosely based on patch by Rusty Russell.
 */

/* relocs tested so far:

   DIR64LSB
   FPTR64LSB
   GPREL22
   LDXMOV
   LDXMOV
   LTOFF22
   LTOFF22X
   LTOFF22X
   LTOFF_FPTR22
   PCREL21B	(for br.call only; br.cond is not supported out of modules!)
   PCREL60B	(for brl.cond only; brl.call is not supported for modules!)
   PCREL64LSB
   SECREL32LSB
   SEGREL64LSB
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include <asm/patch.h>
#include <asm/unaligned.h>
#include <asm/sections.h>

#define ARCH_MODULE_DEBUG 0

#if ARCH_MODULE_DEBUG
# define DEBUGP printk
# define inline
#else
# define DEBUGP(fmt , a...)
#endif

#ifdef CONFIG_ITANIUM
# define USE_BRL 0
#else
# define USE_BRL 1
#endif

#define MAX_LTOFF ((uint64_t) (1 << 22))	/* max. allowable linkage-table offset */

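/*
 * Note: MAX_LTOFF is 4MB because linkage-table entries are addressed
 * gp-relative with a 22-bit signed immediate (see apply_imm22() and
 * gp_addressable() below), i.e. a window of [-2MB, +2MB) around gp.
 */
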
/* Define some relocation helper macros/types: */

#define FORMAT_SHIFT	0
#define FORMAT_BITS	3
#define FORMAT_MASK	((1 << FORMAT_BITS) - 1)
#define VALUE_SHIFT	3
#define VALUE_BITS	5
#define VALUE_MASK	((1 << VALUE_BITS) - 1)

enum reloc_target_format {
	/* direct encoded formats: */
	RF_NONE = 0,
	RF_INSN14 = 1,
	RF_INSN22 = 2,
	RF_INSN64 = 3,
	RF_32MSB = 4,
	RF_32LSB = 5,
	RF_64MSB = 6,
	RF_64LSB = 7,

	/* formats that cannot be directly decoded: */
	RF_INSN60,
	RF_INSN21B,	/* imm21 form 1 */
	RF_INSN21M,	/* imm21 form 2 */
	RF_INSN21F	/* imm21 form 3 */
};

enum reloc_value_formula {
	RV_DIRECT = 4,		/* S + A */
	RV_GPREL = 5,		/* @gprel(S + A) */
	RV_LTREL = 6,		/* @ltoff(S + A) */
	RV_PLTREL = 7,		/* @pltoff(S + A) */
	RV_FPTR = 8,		/* @fptr(S + A) */
	RV_PCREL = 9,		/* S + A - P */
	RV_LTREL_FPTR = 10,	/* @ltoff(@fptr(S + A)) */
	RV_SEGREL = 11,		/* @segrel(S + A) */
	RV_SECREL = 12,		/* @secrel(S + A) */
	RV_BDREL = 13,		/* BD + A */
	RV_LTV = 14,		/* S + A (like RV_DIRECT, except frozen at static link-time) */
	RV_PCREL2 = 15,		/* S + A - P */
	RV_SPECIAL = 16,	/* various (see below) */
	RV_RSVD17 = 17,
	RV_TPREL = 18,		/* @tprel(S + A) */
	RV_LTREL_TPREL = 19,	/* @ltoff(@tprel(S + A)) */
	RV_DTPMOD = 20,		/* @dtpmod(S + A) */
	RV_LTREL_DTPMOD = 21,	/* @ltoff(@dtpmod(S + A)) */
	RV_DTPREL = 22,		/* @dtprel(S + A) */
	RV_LTREL_DTPREL = 23,	/* @ltoff(@dtprel(S + A)) */
	RV_RSVD24 = 24,
	RV_RSVD25 = 25,
	RV_RSVD26 = 26,
	RV_RSVD27 = 27
	/* 28-31 reserved for implementation-specific purposes. */
};

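/*
 * Every R_IA64_* relocation number encodes the target format in its low
 * FORMAT_BITS bits and the value formula in the next VALUE_BITS bits.  For
 * example, R_IA64_DIR64LSB (0x27 in the IA-64 ELF ABI) decomposes into
 * RF_64LSB (0x27 & 7) and RV_DIRECT (0x27 >> 3), i.e. "store S + A as a
 * little-endian 64-bit value".  R_IA64_PCREL21B (0x49) yields RV_PCREL, and
 * do_reloc() below overrides its format to RF_INSN21B, since the imm21
 * branch forms cannot be distinguished by the low three bits alone.
 */
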
#define N(reloc)	[R_IA64_##reloc] = #reloc

static const char *reloc_name[256] = {
	N(NONE),		N(IMM14),		N(IMM22),		N(IMM64),
	N(DIR32MSB),		N(DIR32LSB),		N(DIR64MSB),		N(DIR64LSB),
	N(GPREL22),		N(GPREL64I),		N(GPREL32MSB),		N(GPREL32LSB),
	N(GPREL64MSB),		N(GPREL64LSB),		N(LTOFF22),		N(LTOFF64I),
	N(PLTOFF22),		N(PLTOFF64I),		N(PLTOFF64MSB),		N(PLTOFF64LSB),
	N(FPTR64I),		N(FPTR32MSB),		N(FPTR32LSB),		N(FPTR64MSB),
	N(FPTR64LSB),		N(PCREL60B),		N(PCREL21B),		N(PCREL21M),
	N(PCREL21F),		N(PCREL32MSB),		N(PCREL32LSB),		N(PCREL64MSB),
	N(PCREL64LSB),		N(LTOFF_FPTR22),	N(LTOFF_FPTR64I),	N(LTOFF_FPTR32MSB),
	N(LTOFF_FPTR32LSB),	N(LTOFF_FPTR64MSB),	N(LTOFF_FPTR64LSB),	N(SEGREL32MSB),
	N(SEGREL32LSB),		N(SEGREL64MSB),		N(SEGREL64LSB),		N(SECREL32MSB),
	N(SECREL32LSB),		N(SECREL64MSB),		N(SECREL64LSB),		N(REL32MSB),
	N(REL32LSB),		N(REL64MSB),		N(REL64LSB),		N(LTV32MSB),
	N(LTV32LSB),		N(LTV64MSB),		N(LTV64LSB),		N(PCREL21BI),
	N(PCREL22),		N(PCREL64I),		N(IPLTMSB),		N(IPLTLSB),
	N(COPY),		N(LTOFF22X),		N(LDXMOV),		N(TPREL14),
	N(TPREL22),		N(TPREL64I),		N(TPREL64MSB),		N(TPREL64LSB),
	N(LTOFF_TPREL22),	N(DTPMOD64MSB),		N(DTPMOD64LSB),		N(LTOFF_DTPMOD22),
	N(DTPREL14),		N(DTPREL22),		N(DTPREL64I),		N(DTPREL32MSB),
	N(DTPREL32LSB),		N(DTPREL64MSB),		N(DTPREL64LSB),		N(LTOFF_DTPREL22)
};

#undef N

/* Opaque struct for insns, to protect against derefs. */
struct insn;

static inline uint64_t
bundle (const struct insn *insn)
{
	return (uint64_t) insn & ~0xfUL;
}

static inline int
slot (const struct insn *insn)
{
	return (uint64_t) insn & 0x3;
}

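/*
 * A "struct insn *" as used by the relocation code is not a plain pointer to
 * an instruction: IA-64 bundles are 16 bytes (and 16-byte aligned), so the
 * low bits of the location encode which of the three 41-bit slots inside the
 * bundle is meant.  bundle() masks those bits off to recover the bundle
 * address, and slot() extracts the slot number for the patch helpers below.
 */
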
static int
apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (slot(insn) != 1 && slot(insn) != 2) {
		printk(KERN_ERR "%s: invalid slot number %d for IMM64\n",
		       mod->name, slot(insn));
		return 0;
	}
	ia64_patch_imm64((u64) insn, val);
	return 1;
}

static int
apply_imm60 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (slot(insn) != 1 && slot(insn) != 2) {
		printk(KERN_ERR "%s: invalid slot number %d for IMM60\n",
		       mod->name, slot(insn));
		return 0;
	}
	if (val + ((uint64_t) 1 << 59) >= (1UL << 60)) {
		printk(KERN_ERR "%s: value %ld out of IMM60 range\n",
		       mod->name, (long) val);
		return 0;
	}
	ia64_patch_imm60((u64) insn, val);
	return 1;
}

static int
apply_imm22 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (val + (1 << 21) >= (1 << 22)) {
		printk(KERN_ERR "%s: value %li out of IMM22 range\n",
		       mod->name, (long)val);
		return 0;
	}
	ia64_patch((u64) insn, 0x01fffcfe000UL, (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
						 | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
						 | ((val & 0x00ff80UL) << 20) /* bit  7 -> 27 */
						 | ((val & 0x00007fUL) << 13) /* bit  0 -> 13 */));
	return 1;
}

static int
apply_imm21b (struct module *mod, struct insn *insn, uint64_t val)
{
	if (val + (1 << 20) >= (1 << 21)) {
		printk(KERN_ERR "%s: value %li out of IMM21b range\n",
		       mod->name, (long)val);
		return 0;
	}
	ia64_patch((u64) insn, 0x11ffffe000UL, (  ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
						| ((val & 0x0fffffUL) << 13) /* bit  0 -> 13 */));
	return 1;
}

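/*
 * The range checks above rely on unsigned wrap-around: for an n-bit signed
 * immediate, "val + (1 << (n-1)) >= (1 << n)" is true exactly when val lies
 * outside [-2^(n-1), 2^(n-1)), so e.g. apply_imm21b() rejects branch
 * displacements that do not fit the signed 21-bit field.
 */
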
#if USE_BRL

struct plt_entry {
	/* Two instruction bundles in PLT. */
	unsigned char bundle[2][16];
};

static const struct plt_entry ia64_plt_template = {
	{
		{
			0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*	      movl gp=TARGET_GP */
			0x00, 0x00, 0x00, 0x60
		},
		{
			0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*	      brl.many TARGET_IP */
			0x08, 0x00, 0x00, 0xc0
		}
	}
};

static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
	if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_gp)
	    && apply_imm60(mod, (struct insn *) (plt->bundle[1] + 2),
			   (target_ip - (int64_t) plt->bundle[1]) / 16))
		return 1;
	return 0;
}

unsigned long
plt_target (struct plt_entry *plt)
{
	uint64_t b0, b1, *b = (uint64_t *) plt->bundle[1];
	long off;

	b0 = b[0]; b1 = b[1];
	off = (  ((b1 & 0x00fffff000000000UL) >> 36)		/* imm20b -> bit 0 */
	       | ((b0 >> 48) << 20) | ((b1 & 0x7fffffUL) << 36)	/* imm39 -> bit 20 */
	       | ((b1 & 0x0800000000000000UL) <<  0));		/* i -> bit 59 */
	return (long) plt->bundle[1] + 16*off;
}

#else /* !USE_BRL */

struct plt_entry {
	/* Three instruction bundles in PLT. */
	unsigned char bundle[3][16];
};

static const struct plt_entry ia64_plt_template = {
	{
		{
			0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*	      movl r16=TARGET_IP */
			0x02, 0x00, 0x00, 0x60
		},
		{
			0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*	      movl gp=TARGET_GP */
			0x00, 0x00, 0x00, 0x60
		},
		{
			0x11, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MIB] nop.m 0 */
			0x60, 0x80, 0x04, 0x80, 0x03, 0x00, /*	      mov b6=r16 */
			0x60, 0x00, 0x80, 0x00		    /*	      br.few b6 */
		}
	}
};

static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
	if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_ip)
	    && apply_imm64(mod, (struct insn *) (plt->bundle[1] + 2), target_gp))
		return 1;
	return 0;
}

unsigned long
plt_target (struct plt_entry *plt)
{
	uint64_t b0, b1, *b = (uint64_t *) plt->bundle[0];

	b0 = b[0]; b1 = b[1];
	return (  ((b1 & 0x000007f000000000) >> 36)		/* imm7b -> bit 0 */
		| ((b1 & 0x07fc000000000000) >> 43)		/* imm9d -> bit 7 */
		| ((b1 & 0x0003e00000000000) >> 29)		/* imm5c -> bit 16 */
		| ((b1 & 0x0000100000000000) >> 23)		/* ic -> bit 21 */
		| ((b0 >> 46) << 22) | ((b1 & 0x7fffff) << 40)	/* imm41 -> bit 22 */
		| ((b1 & 0x0800000000000000) <<  4));		/* i -> bit 63 */
}

#endif /* !USE_BRL */

void
module_arch_freeing_init (struct module *mod)
{
	if (mod->arch.init_unw_table) {
		unw_remove_unwind_table(mod->arch.init_unw_table);
		mod->arch.init_unw_table = NULL;
	}
}

/* Have we already seen one of these relocations? */
/* FIXME: we could look in other sections, too --RR */
static int
duplicate_reloc (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		if (rela[i].r_info == rela[num].r_info && rela[i].r_addend == rela[num].r_addend)
			return 1;
	}
	return 0;
}

/* Count how many GOT entries we may need */
static unsigned int
count_gots (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not
	   time critical */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_IA64_LTOFF22:
		case R_IA64_LTOFF22X:
		case R_IA64_LTOFF64I:
		case R_IA64_LTOFF_FPTR22:
		case R_IA64_LTOFF_FPTR64I:
		case R_IA64_LTOFF_FPTR32MSB:
		case R_IA64_LTOFF_FPTR32LSB:
		case R_IA64_LTOFF_FPTR64MSB:
		case R_IA64_LTOFF_FPTR64LSB:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}

/* Count how many PLT entries we may need */
static unsigned int
count_plts (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not
	   time critical */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_IA64_PCREL21B:
		case R_IA64_PLTOFF22:
		case R_IA64_PLTOFF64I:
		case R_IA64_PLTOFF64MSB:
		case R_IA64_PLTOFF64LSB:
		case R_IA64_IPLTMSB:
		case R_IA64_IPLTLSB:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}

/* We need to create a function descriptor for any internal function
   which is referenced. */
static unsigned int
count_fdescs (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not time critical. */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_IA64_FPTR64I:
		case R_IA64_FPTR32LSB:
		case R_IA64_FPTR32MSB:
		case R_IA64_FPTR64LSB:
		case R_IA64_FPTR64MSB:
		case R_IA64_LTOFF_FPTR22:
		case R_IA64_LTOFF_FPTR32LSB:
		case R_IA64_LTOFF_FPTR32MSB:
		case R_IA64_LTOFF_FPTR64I:
		case R_IA64_LTOFF_FPTR64LSB:
		case R_IA64_LTOFF_FPTR64MSB:
		case R_IA64_IPLTMSB:
		case R_IA64_IPLTLSB:
			/*
			 * Jumps to static functions sometimes go straight to their
			 * offset.  Of course, that may not be possible if the jump is
			 * from init -> core or vice versa, so we need to generate an
			 * FDESC (and PLT etc) for that.
			 */
		case R_IA64_PCREL21B:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}

int
module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
			   struct module *mod)
{
	unsigned long core_plts = 0, init_plts = 0, gots = 0, fdescs = 0;
	Elf64_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;

	/*
	 * To store the PLTs and function-descriptors, we expand the .text section for
	 * core module-code and the .init.text section for initialization code.
	 */
	for (s = sechdrs; s < sechdrs_end; ++s)
		if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
			mod->arch.core_plt = s;
		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
			mod->arch.init_plt = s;
		else if (strcmp(".got", secstrings + s->sh_name) == 0)
			mod->arch.got = s;
		else if (strcmp(".opd", secstrings + s->sh_name) == 0)
			mod->arch.opd = s;
		else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0)
			mod->arch.unwind = s;

	if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) {
		printk(KERN_ERR "%s: sections missing\n", mod->name);
		return -ENOEXEC;
	}

	/* GOT and PLTs can occur in any relocated section... */
	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
		const Elf64_Rela *rels = (void *)ehdr + s->sh_offset;
		unsigned long numrels = s->sh_size/sizeof(Elf64_Rela);

		if (s->sh_type != SHT_RELA)
			continue;

		gots += count_gots(rels, numrels);
		fdescs += count_fdescs(rels, numrels);
		if (strstr(secstrings + s->sh_name, ".init"))
			init_plts += count_plts(rels, numrels);
		else
			core_plts += count_plts(rels, numrels);
	}

	mod->arch.core_plt->sh_type = SHT_NOBITS;
	mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.core_plt->sh_addralign = 16;
	mod->arch.core_plt->sh_size = core_plts * sizeof(struct plt_entry);
	mod->arch.init_plt->sh_type = SHT_NOBITS;
	mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.init_plt->sh_addralign = 16;
	mod->arch.init_plt->sh_size = init_plts * sizeof(struct plt_entry);
	mod->arch.got->sh_type = SHT_NOBITS;
	mod->arch.got->sh_flags = ARCH_SHF_SMALL | SHF_ALLOC;
	mod->arch.got->sh_addralign = 8;
	mod->arch.got->sh_size = gots * sizeof(struct got_entry);
	mod->arch.opd->sh_type = SHT_NOBITS;
	mod->arch.opd->sh_flags = SHF_ALLOC;
	mod->arch.opd->sh_addralign = 8;
	mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc);
	DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n",
	       __func__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
	       mod->arch.got->sh_size, mod->arch.opd->sh_size);
	return 0;
}

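/*
 * The sh_size values computed above are worst-case estimates: the GOT, PLT,
 * and OPD sections are SHT_NOBITS and their entries are only created on
 * demand by get_ltoff(), get_plt(), and get_fdesc() while relocations are
 * being applied (those helpers scan for the first still-zero entry).
 */
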
static inline int
in_init (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->init_layout.base < mod->init_layout.size;
}

static inline int
in_core (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->core_layout.base < mod->core_layout.size;
}

static inline int
is_internal (const struct module *mod, uint64_t value)
{
	return in_init(mod, value) || in_core(mod, value);
}

/*
 * Get gp-relative offset for the linkage-table entry of VALUE.
 */
static uint64_t
get_ltoff (struct module *mod, uint64_t value, int *okp)
{
	struct got_entry *got, *e;

	if (!*okp)
		return 0;

	got = (void *) mod->arch.got->sh_addr;
	for (e = got; e < got + mod->arch.next_got_entry; ++e)
		if (e->val == value)
			goto found;

	/* Not enough GOT entries? */
	BUG_ON(e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size));

	e->val = value;
	++mod->arch.next_got_entry;
  found:
	return (uint64_t) e - mod->arch.gp;
}

static inline int
gp_addressable (struct module *mod, uint64_t value)
{
	return value - mod->arch.gp + MAX_LTOFF/2 < MAX_LTOFF;
}

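/*
 * The unsigned comparison in gp_addressable() is equivalent to checking that
 * value lies in [gp - MAX_LTOFF/2, gp + MAX_LTOFF/2), i.e. that it can be
 * reached with a 22-bit signed gp-relative offset without going through the
 * linkage table.
 */
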
/* Get PC-relative PLT entry for this value.  Returns 0 on failure. */
static uint64_t
get_plt (struct module *mod, const struct insn *insn, uint64_t value, int *okp)
{
	struct plt_entry *plt, *plt_end;
	uint64_t target_ip, target_gp;

	if (!*okp)
		return 0;

	if (in_init(mod, (uint64_t) insn)) {
		plt = (void *) mod->arch.init_plt->sh_addr;
		plt_end = (void *) plt + mod->arch.init_plt->sh_size;
	} else {
		plt = (void *) mod->arch.core_plt->sh_addr;
		plt_end = (void *) plt + mod->arch.core_plt->sh_size;
	}

	/* "value" is a pointer to a function-descriptor; fetch the target ip/gp from it: */
	target_ip = ((uint64_t *) value)[0];
	target_gp = ((uint64_t *) value)[1];

	/* Look for existing PLT entry. */
	while (plt->bundle[0][0]) {
		if (plt_target(plt) == target_ip)
			goto found;
		if (++plt >= plt_end)
			BUG();
	}
	*plt = ia64_plt_template;
	if (!patch_plt(mod, plt, target_ip, target_gp)) {
		*okp = 0;
		return 0;
	}
#if ARCH_MODULE_DEBUG
	if (plt_target(plt) != target_ip) {
		printk("%s: mistargeted PLT: wanted %lx, got %lx\n",
		       __func__, target_ip, plt_target(plt));
		*okp = 0;
		return 0;
	}
#endif
  found:
	return (uint64_t) plt;
}

/* Get function descriptor for VALUE. */
static uint64_t
get_fdesc (struct module *mod, uint64_t value, int *okp)
{
	struct fdesc *fdesc = (void *) mod->arch.opd->sh_addr;

	if (!*okp)
		return 0;

	if (!value) {
		printk(KERN_ERR "%s: fdesc for zero requested!\n", mod->name);
		return 0;
	}

	if (!is_internal(mod, value))
		/*
		 * If it's not a module-local entry-point, "value" already points to a
		 * function-descriptor.
		 */
		return value;

	/* Look for existing function descriptor. */
	while (fdesc->ip) {
		if (fdesc->ip == value)
			return (uint64_t)fdesc;
		if ((uint64_t) ++fdesc >= mod->arch.opd->sh_addr + mod->arch.opd->sh_size)
			BUG();
	}

	/* Create new one */
	fdesc->ip = value;
	fdesc->gp = mod->arch.gp;
	return (uint64_t) fdesc;
}

static inline int
do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
	  Elf64_Shdr *sec, void *location)
{
	enum reloc_target_format format = (r_type >> FORMAT_SHIFT) & FORMAT_MASK;
	enum reloc_value_formula formula = (r_type >> VALUE_SHIFT) & VALUE_MASK;
	uint64_t val;
	int ok = 1;

	val = sym->st_value + addend;

	switch (formula) {
	case RV_SEGREL:	/* segment base is arbitrarily chosen to be 0 for kernel modules */
	case RV_DIRECT:
		break;

	case RV_GPREL:		val -= mod->arch.gp; break;
	case RV_LTREL:		val = get_ltoff(mod, val, &ok); break;
	case RV_PLTREL:		val = get_plt(mod, location, val, &ok); break;
	case RV_FPTR:		val = get_fdesc(mod, val, &ok); break;
	case RV_SECREL:		val -= sec->sh_addr; break;
	case RV_LTREL_FPTR:	val = get_ltoff(mod, get_fdesc(mod, val, &ok), &ok); break;

	case RV_PCREL:
		switch (r_type) {
		case R_IA64_PCREL21B:
			if ((in_init(mod, val) && in_core(mod, (uint64_t)location)) ||
			    (in_core(mod, val) && in_init(mod, (uint64_t)location))) {
				/*
				 * Init section may have been allocated far away from core,
				 * if the branch won't reach, then allocate a plt for it.
				 */
				uint64_t delta = ((int64_t)val - (int64_t)location) / 16;
				if (delta + (1 << 20) >= (1 << 21)) {
					val = get_fdesc(mod, val, &ok);
					val = get_plt(mod, location, val, &ok);
				}
			} else if (!is_internal(mod, val))
				val = get_plt(mod, location, val, &ok);
			fallthrough;
		default:
			val -= bundle(location);
			break;

		case R_IA64_PCREL32MSB:
		case R_IA64_PCREL32LSB:
		case R_IA64_PCREL64MSB:
		case R_IA64_PCREL64LSB:
			val -= (uint64_t) location;
			break;

		}
		switch (r_type) {
		case R_IA64_PCREL60B: format = RF_INSN60; break;
		case R_IA64_PCREL21B: format = RF_INSN21B; break;
		case R_IA64_PCREL21M: format = RF_INSN21M; break;
		case R_IA64_PCREL21F: format = RF_INSN21F; break;
		default: break;
		}
		break;

	case RV_BDREL:
		val -= (uint64_t) (in_init(mod, val) ? mod->init_layout.base : mod->core_layout.base);
		break;

	case RV_LTV:
		/* can link-time value relocs happen here? */
		BUG();
		break;

	case RV_PCREL2:
		if (r_type == R_IA64_PCREL21BI) {
			if (!is_internal(mod, val)) {
				printk(KERN_ERR "%s: %s reloc against "
				       "non-local symbol (%lx)\n", __func__,
				       reloc_name[r_type], (unsigned long)val);
				return -ENOEXEC;
			}
			format = RF_INSN21B;
		}
		val -= bundle(location);
		break;

	case RV_SPECIAL:
		switch (r_type) {
		case R_IA64_IPLTMSB:
		case R_IA64_IPLTLSB:
			val = get_fdesc(mod, get_plt(mod, location, val, &ok), &ok);
			format = RF_64LSB;
			if (r_type == R_IA64_IPLTMSB)
				format = RF_64MSB;
			break;

		case R_IA64_SUB:
			val = addend - sym->st_value;
			format = RF_INSN64;
			break;

		case R_IA64_LTOFF22X:
			if (gp_addressable(mod, val))
				val -= mod->arch.gp;
			else
				val = get_ltoff(mod, val, &ok);
			format = RF_INSN22;
			break;

		case R_IA64_LDXMOV:
			if (gp_addressable(mod, val)) {
				/* turn "ld8" into "mov": */
				DEBUGP("%s: patching ld8 at %p to mov\n", __func__, location);
				ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL);
			}
			return 0;

		default:
			if (reloc_name[r_type])
				printk(KERN_ERR "%s: special reloc %s not supported\n",
				       mod->name, reloc_name[r_type]);
			else
				printk(KERN_ERR "%s: unknown special reloc %x\n",
				       mod->name, r_type);
			return -ENOEXEC;
		}
		break;

	case RV_TPREL:
	case RV_LTREL_TPREL:
	case RV_DTPMOD:
	case RV_LTREL_DTPMOD:
	case RV_DTPREL:
	case RV_LTREL_DTPREL:
		printk(KERN_ERR "%s: %s reloc not supported\n",
		       mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?");
		return -ENOEXEC;

	default:
		printk(KERN_ERR "%s: unknown reloc %x\n", mod->name, r_type);
		return -ENOEXEC;
	}

	if (!ok)
		return -ENOEXEC;

	DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __func__, location, val,
	       reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend);

	switch (format) {
	case RF_INSN21B:	ok = apply_imm21b(mod, location, (int64_t) val / 16); break;
	case RF_INSN22:		ok = apply_imm22(mod, location, val); break;
	case RF_INSN64:		ok = apply_imm64(mod, location, val); break;
	case RF_INSN60:		ok = apply_imm60(mod, location, (int64_t) val / 16); break;
	case RF_32LSB:		put_unaligned(val, (uint32_t *) location); break;
	case RF_64LSB:		put_unaligned(val, (uint64_t *) location); break;
	case RF_32MSB:		/* ia64 Linux is little-endian... */
	case RF_64MSB:		/* ia64 Linux is little-endian... */
	case RF_INSN14:		/* must be within-module, i.e., resolved by "ld -r" */
	case RF_INSN21M:	/* must be within-module, i.e., resolved by "ld -r" */
	case RF_INSN21F:	/* must be within-module, i.e., resolved by "ld -r" */
		printk(KERN_ERR "%s: format %u needed by %s reloc is not supported\n",
		       mod->name, format, reloc_name[r_type] ? reloc_name[r_type] : "?");
		return -ENOEXEC;

	default:
		printk(KERN_ERR "%s: relocation %s resulted in unknown format %u\n",
		       mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?", format);
		return -ENOEXEC;
	}
	return ok ? 0 : -ENOEXEC;
}

int
apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
		    unsigned int relsec, struct module *mod)
{
	unsigned int i, n = sechdrs[relsec].sh_size / sizeof(Elf64_Rela);
	Elf64_Rela *rela = (void *) sechdrs[relsec].sh_addr;
	Elf64_Shdr *target_sec;
	int ret;

	DEBUGP("%s: applying section %u (%u relocs) to %u\n", __func__,
	       relsec, n, sechdrs[relsec].sh_info);

	target_sec = sechdrs + sechdrs[relsec].sh_info;

	if (target_sec->sh_entsize == ~0UL)
		/*
		 * If target section wasn't allocated, we don't need to relocate it.
		 * Happens, e.g., for debug sections.
		 */
		return 0;

	if (!mod->arch.gp) {
		/*
		 * XXX Should have an arch-hook for running this after final section
		 * addresses have been selected...
		 */
		uint64_t gp;
		if (mod->core_layout.size > MAX_LTOFF)
			/*
			 * This takes advantage of the fact that ARCH_SHF_SMALL sections get
			 * allocated at the end of the module.
			 */
			gp = mod->core_layout.size - MAX_LTOFF / 2;
		else
			gp = mod->core_layout.size / 2;
		gp = (uint64_t) mod->core_layout.base + ((gp + 7) & -8);
		mod->arch.gp = gp;
		DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
	}

	for (i = 0; i < n; i++) {
		ret = do_reloc(mod, ELF64_R_TYPE(rela[i].r_info),
			       ((Elf64_Sym *) sechdrs[symindex].sh_addr
				+ ELF64_R_SYM(rela[i].r_info)),
			       rela[i].r_addend, target_sec,
			       (void *) target_sec->sh_addr + rela[i].r_offset);
		if (ret < 0)
			return ret;
	}
	return 0;
}

/*
 * Modules contain a single unwind table which covers both the core and the init text
 * sections but since the two are not contiguous, we need to split this table up such that
 * we can register (and unregister) each "segment" separately.  Fortunately, this sounds
 * more complicated than it really is.
 */
static void
register_unwind_table (struct module *mod)
{
	struct unw_table_entry *start = (void *) mod->arch.unwind->sh_addr;
	struct unw_table_entry *end = start + mod->arch.unwind->sh_size / sizeof (*start);
	struct unw_table_entry tmp, *e1, *e2, *core, *init;
	unsigned long num_init = 0, num_core = 0;

	/* First, count how many init and core unwind-table entries there are. */
	for (e1 = start; e1 < end; ++e1)
		if (in_init(mod, e1->start_offset))
			++num_init;
		else
			++num_core;
	/*
	 * Second, sort the table such that all unwind-table entries for the init and core
	 * text sections are nicely separated.  We do this with a stupid bubble sort
	 * (unwind tables don't get ridiculously huge).
	 */
	for (e1 = start; e1 < end; ++e1) {
		for (e2 = e1 + 1; e2 < end; ++e2) {
			if (e2->start_offset < e1->start_offset) {
				tmp = *e1;
				*e1 = *e2;
				*e2 = tmp;
			}
		}
	}
	/*
	 * Third, locate the init and core segments in the unwind table:
	 */
	if (in_init(mod, start->start_offset)) {
		init = start;
		core = start + num_init;
	} else {
		core = start;
		init = start + num_core;
	}

	DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __func__,
	       mod->name, mod->arch.gp, num_init, num_core);

	/*
	 * Fourth, register both tables (if not empty).
	 */
	if (num_core > 0) {
		mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
								core, core + num_core);
		DEBUGP("%s: core: handle=%p [%p-%p)\n", __func__,
		       mod->arch.core_unw_table, core, core + num_core);
	}
	if (num_init > 0) {
		mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
								init, init + num_init);
		DEBUGP("%s: init: handle=%p [%p-%p)\n", __func__,
		       mod->arch.init_unw_table, init, init + num_init);
	}
}

int
module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
{
	DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
	if (mod->arch.unwind)
		register_unwind_table(mod);
	return 0;
}

void
module_arch_cleanup (struct module *mod)
{
	if (mod->arch.init_unw_table) {
		unw_remove_unwind_table(mod->arch.init_unw_table);
		mod->arch.init_unw_table = NULL;
	}
	if (mod->arch.core_unw_table) {
		unw_remove_unwind_table(mod->arch.core_unw_table);
		mod->arch.core_unw_table = NULL;
	}
}

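/*
 * On ia64 a C function pointer refers to a function descriptor (an {ip, gp}
 * pair, kept in the module's .opd section for module-local functions, see
 * get_fdesc() above) rather than to the code itself, so callers that want
 * the actual entry point must look through the descriptor.
 */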
void *dereference_module_function_descriptor(struct module *mod, void *ptr)
{
	Elf64_Shdr *opd = mod->arch.opd;

	if (ptr < (void *)opd->sh_addr ||
	    ptr >= (void *)(opd->sh_addr + opd->sh_size))
		return ptr;

	return dereference_function_descriptor(ptr);
}