// SPDX-License-Identifier: GPL-2.0-only
/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>

void *module_alloc(unsigned long size)
{
	u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	/* Silence the initial allocation */
	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		gfp_mask |= __GFP_NOWARN;

	if (IS_ENABLED(CONFIG_KASAN))
		/* don't exceed the static module region - see below */
		module_alloc_end = MODULES_END;

	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				 module_alloc_end, gfp_mask, PAGE_KERNEL, 0,
				 NUMA_NO_NODE, __builtin_return_address(0));

	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
	    !IS_ENABLED(CONFIG_KASAN))
		/*
		 * KASAN can only deal with module allocations being served
		 * from the reserved module region, since the remainder of
		 * the vmalloc region is already backed by zero shadow pages,
		 * and punching holes into it is non-trivial. Since the module
		 * region is not randomized when KASAN is enabled, it is even
		 * less likely that the module region gets exhausted, so we
		 * can simply omit this fallback in that case.
		 */
		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
					 module_alloc_base + SZ_2G, GFP_KERNEL,
					 PAGE_KERNEL, 0, NUMA_NO_NODE,
					 __builtin_return_address(0));

	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}
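
/*
 * Roughly, the two allocation attempts above behave as follows (assuming the
 * usual arm64 layout, where MODULES_VSIZE is 128 MiB): the first
 * __vmalloc_node_range() call is confined to
 * [module_alloc_base, module_alloc_base + 128M), which keeps plain B/BL
 * branches (+/-128 MiB reach) between modules and the core kernel in range.
 * Only if that window is exhausted, and PLT veneers are available to bridge
 * longer branches, is the search widened to a 2 GiB window starting at
 * module_alloc_base.
 */
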
enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}
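
/*
 * For example (with illustrative addresses), given a place P of
 * 0xffff000010001234 and a target S + A of 0xffff000010405678:
 *
 *   RELOC_OP_ABS  -> 0xffff000010405678   (the value itself)
 *   RELOC_OP_PREL -> 0x404444             (byte offset from P)
 *   RELOC_OP_PAGE -> 0x404000             (4 KiB page delta, as consumed
 *                                          by ADRP)
 */
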
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	/*
	 * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
	 * relative and absolute relocations as having a range of [-2^15, 2^16)
	 * or [-2^31, 2^32), respectively. However, in order to be able to
	 * detect overflows reliably, we have to choose whether we interpret
	 * such quantities as signed or as unsigned, and stick with it.
	 * The way we organize our address space requires a signed
	 * interpretation of 32-bit relative references, so let's use that
	 * for all R_AARCH64_PRELxx relocations. This means our upper
	 * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
	 */

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U16_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S16_MIN || sval > S16_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 16-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 32:
		*(s32 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U32_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S32_MIN || sval > S32_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 32-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}
	return 0;
}
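
/*
 * Concretely, the asymmetry described above means that for a 16-bit field an
 * absolute value (S + A) of 0xffff is accepted, since ABS relocations use the
 * unsigned bound [0, U16_MAX], whereas a place-relative delta of +0xffff is
 * rejected with -ERANGE, since PREL relocations use the signed bound
 * [S16_MIN, S16_MAX] and the stored s16 would read back as -1.
 */
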
enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};

static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 *     Since we've masked the opcode already, we
			 *     don't need to do anything other than
			 *     inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}
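
/*
 * A worked example of the MOVNZ handling above: for a R_AARCH64_MOVW_SABS_G0
 * relocation with S + A = -5, sval is negative, so the opcode bits are
 * rewritten to MOVN and the immediate becomes ~(-5 >> 0) = 4. At run time,
 * MOVN writes ~4 = -5 into the register, and the final range check passes
 * because 4 <= U16_MAX. A value whose remaining bits do not fit in the 16-bit
 * field would leave imm above U16_MAX and be reported as -ERANGE.
 */
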
static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}
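
/*
 * The overflow test above works by reducing everything above the immediate
 * field to either 0 or -1. For example, a R_AARCH64_CONDBR19 branch
 * (lsb = 2, len = 19) to a target +1 MiB away gives
 * sval = 0x100000 >> 2 = 0x40000; masking off the low 18 bits and
 * arithmetically shifting right by 18 leaves 1, so (u64)(1 + 1) >= 2 and
 * -ERANGE is returned -- correct, since a signed 19-bit word offset only
 * reaches about +/-1 MiB (at most +0xffffc bytes). For any value that does
 * fit, the upper bits collapse to 0 (non-negative) or -1 (negative) and the
 * check passes.
 */
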
static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
			   __le32 *place, u64 val)
{
	u32 insn;

	if (!is_forbidden_offset_for_adrp(place))
		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
				      AARCH64_INSN_IMM_ADR);

	/* patch ADRP to ADR if it is in range */
	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
			    AARCH64_INSN_IMM_ADR)) {
		insn = le32_to_cpu(*place);
		insn &= ~BIT(31);
	} else {
		/* out of range for ADR -> emit a veneer */
		val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
		if (!val)
			return -ENOEXEC;
		insn = aarch64_insn_gen_branch_imm((u64)place, val,
						   AARCH64_INSN_BRANCH_NOLINK);
	}

	*place = cpu_to_le32(insn);
	return 0;
}
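
/*
 * The special casing above only kicks in when is_forbidden_offset_for_adrp()
 * says the ADRP sits at one of the page offsets affected by the Cortex-A53
 * erratum 843419 workaround (typically the last two instruction slots of a
 * 4 KiB page). In that case the ADRP is either rewritten as an ADR against
 * the target page address (clearing bit 31 converts the encoding), provided
 * that page is within ADR's +/-1 MiB reach, or replaced with a branch to a
 * veneer that performs the ADRP from a safe location.
 */
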
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
			if (ovf && ovf != -ERANGE)
				return ovf;
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);

			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
			    ovf == -ERANGE) {
				val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
				if (!val)
					return -ENOEXEC;
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;

	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}
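
/*
 * As an illustration of the R_AARCH64_JUMP26/CALL26 fallback above: a BL
 * whose target ends up, say, 200 MiB away from the call site overflows the
 * signed 26-bit word offset (roughly +/-128 MiB), so the first
 * reloc_insn_imm() call returns -ERANGE. With CONFIG_ARM64_MODULE_PLTS
 * enabled, module_emit_plt_entry() then hands back the address of a PLT
 * veneer allocated alongside the module, and the branch is re-resolved
 * against that veneer, which is close enough by construction. Relocations
 * handled with overflow_check = false (the _NC "no check" variants, the G3
 * top-halfword cases and the full 64-bit data relocations) deliberately
 * ignore -ERANGE, since other relocations cover the remaining bits.
 */
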
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(".altinstructions", secstrs + s->sh_name) == 0)
			apply_alternatives_module((void *)s->sh_addr, s->sh_size);
#ifdef CONFIG_ARM64_MODULE_PLTS
		if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
		    !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
			me->arch.ftrace_trampoline = (void *)s->sh_addr;
#endif
	}

	return 0;
}