Commit | Line | Data |
---|---|---|
b11a64a4 JL |
1 | /* |
2 | * Copyright (C) 2013 Huawei Ltd. | |
3 | * Author: Jiang Liu <liuj97@gmail.com> | |
4 | * | |
617d2fbc ZSL |
5 | * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com> |
6 | * | |
b11a64a4 JL |
7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | |
9 | * published by the Free Software Foundation. | |
10 | * | |
11 | * This program is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 | * GNU General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
18 | */ | |
5c5bf25d | 19 | #include <linux/bitops.h> |
b11a64a4 JL |
20 | #include <linux/compiler.h> |
21 | #include <linux/kernel.h> | |
ae164807 JL |
22 | #include <linux/smp.h> |
23 | #include <linux/stop_machine.h> | |
24 | #include <linux/uaccess.h> | |
25 | #include <asm/cacheflush.h> | |
b11a64a4 JL |
26 | #include <asm/insn.h> |
27 | ||
617d2fbc | 28 | #define AARCH64_INSN_SF_BIT BIT(31) |
4a89d2c9 | 29 | #define AARCH64_INSN_N_BIT BIT(22) |
617d2fbc | 30 | |
b11a64a4 JL |
/*
 * Top-level A64 decode table, indexed by bits [28:25] of the instruction
 * word (see aarch64_get_insn_class() below).
 */
static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,	/* 0 0 0 0 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0 0 0 1 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0 0 1 0 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0 0 1 1 */
	AARCH64_INSN_CLS_LDST,		/* 0 1 0 0 */
	AARCH64_INSN_CLS_DP_REG,	/* 0 1 0 1 */
	AARCH64_INSN_CLS_LDST,		/* 0 1 1 0 */
	AARCH64_INSN_CLS_DP_FPSIMD,	/* 0 1 1 1 */
	AARCH64_INSN_CLS_DP_IMM,	/* 1 0 0 0 */
	AARCH64_INSN_CLS_DP_IMM,	/* 1 0 0 1 */
	AARCH64_INSN_CLS_BR_SYS,	/* 1 0 1 0 */
	AARCH64_INSN_CLS_BR_SYS,	/* 1 0 1 1 */
	AARCH64_INSN_CLS_LDST,		/* 1 1 0 0 */
	AARCH64_INSN_CLS_DP_REG,	/* 1 1 0 1 */
	AARCH64_INSN_CLS_LDST,		/* 1 1 1 0 */
	AARCH64_INSN_CLS_DP_FPSIMD,	/* 1 1 1 1 */
};
49 | ||
50 | enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn) | |
51 | { | |
52 | return aarch64_insn_encoding_class[(insn >> 25) & 0xf]; | |
53 | } | |
54 | ||
55 | /* NOP is an alias of HINT */ | |
56 | bool __kprobes aarch64_insn_is_nop(u32 insn) | |
57 | { | |
58 | if (!aarch64_insn_is_hint(insn)) | |
59 | return false; | |
60 | ||
61 | switch (insn & 0xFE0) { | |
62 | case AARCH64_INSN_HINT_YIELD: | |
63 | case AARCH64_INSN_HINT_WFE: | |
64 | case AARCH64_INSN_HINT_WFI: | |
65 | case AARCH64_INSN_HINT_SEV: | |
66 | case AARCH64_INSN_HINT_SEVL: | |
67 | return false; | |
68 | default: | |
69 | return true; | |
70 | } | |
71 | } | |
72 | ||
ae164807 JL |
73 | /* |
74 | * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always | |
75 | * little-endian. | |
76 | */ | |
77 | int __kprobes aarch64_insn_read(void *addr, u32 *insnp) | |
78 | { | |
79 | int ret; | |
80 | u32 val; | |
81 | ||
82 | ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE); | |
83 | if (!ret) | |
84 | *insnp = le32_to_cpu(val); | |
85 | ||
86 | return ret; | |
87 | } | |
88 | ||
89 | int __kprobes aarch64_insn_write(void *addr, u32 insn) | |
90 | { | |
91 | insn = cpu_to_le32(insn); | |
92 | return probe_kernel_write(addr, &insn, AARCH64_INSN_SIZE); | |
93 | } | |
94 | ||
b11a64a4 JL |
95 | static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn) |
96 | { | |
97 | if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS) | |
98 | return false; | |
99 | ||
100 | return aarch64_insn_is_b(insn) || | |
101 | aarch64_insn_is_bl(insn) || | |
102 | aarch64_insn_is_svc(insn) || | |
103 | aarch64_insn_is_hvc(insn) || | |
104 | aarch64_insn_is_smc(insn) || | |
105 | aarch64_insn_is_brk(insn) || | |
106 | aarch64_insn_is_nop(insn); | |
107 | } | |
108 | ||
109 | /* | |
110 | * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a | |
111 | * Section B2.6.5 "Concurrent modification and execution of instructions": | |
112 | * Concurrent modification and execution of instructions can lead to the | |
113 | * resulting instruction performing any behavior that can be achieved by | |
114 | * executing any sequence of instructions that can be executed from the | |
115 | * same Exception level, except where the instruction before modification | |
116 | * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC, | |
117 | * or SMC instruction. | |
118 | */ | |
119 | bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn) | |
120 | { | |
121 | return __aarch64_insn_hotpatch_safe(old_insn) && | |
122 | __aarch64_insn_hotpatch_safe(new_insn); | |
123 | } | |
ae164807 JL |
124 | |
125 | int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn) | |
126 | { | |
127 | u32 *tp = addr; | |
128 | int ret; | |
129 | ||
130 | /* A64 instructions must be word aligned */ | |
131 | if ((uintptr_t)tp & 0x3) | |
132 | return -EINVAL; | |
133 | ||
134 | ret = aarch64_insn_write(tp, insn); | |
135 | if (ret == 0) | |
136 | flush_icache_range((uintptr_t)tp, | |
137 | (uintptr_t)tp + AARCH64_INSN_SIZE); | |
138 | ||
139 | return ret; | |
140 | } | |
141 | ||
/* Work item handed to aarch64_insn_patch_text_cb() via stop_machine(). */
struct aarch64_insn_patch {
	void **text_addrs;	/* instruction addresses to patch */
	u32 *new_insns;		/* replacement instruction words */
	int insn_cnt;		/* number of entries in the arrays above */
	atomic_t cpu_count;	/* CPU rendezvous counter (see the callback) */
};
148 | ||
/*
 * stop_machine() callback: the first CPU to arrive applies all the
 * patches while every other CPU spins until the new instructions are
 * globally visible.
 */
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* -1 can never be an arrival count, so it works as release. */
		atomic_set(&pp->cpu_count, -1);
	} else {
		/* Secondary CPUs wait for the master's release above ... */
		while (atomic_read(&pp->cpu_count) != -1)
			cpu_relax();
		/* ... then discard any stale prefetched instructions. */
		isb();
	}

	return ret;
}
173 | ||
174 | int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt) | |
175 | { | |
176 | struct aarch64_insn_patch patch = { | |
177 | .text_addrs = addrs, | |
178 | .new_insns = insns, | |
179 | .insn_cnt = cnt, | |
180 | .cpu_count = ATOMIC_INIT(0), | |
181 | }; | |
182 | ||
183 | if (cnt <= 0) | |
184 | return -EINVAL; | |
185 | ||
186 | return stop_machine(aarch64_insn_patch_text_cb, &patch, | |
187 | cpu_online_mask); | |
188 | } | |
189 | ||
190 | int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt) | |
191 | { | |
192 | int ret; | |
193 | u32 insn; | |
194 | ||
195 | /* Unsafe to patch multiple instructions without synchronizaiton */ | |
196 | if (cnt == 1) { | |
197 | ret = aarch64_insn_read(addrs[0], &insn); | |
198 | if (ret) | |
199 | return ret; | |
200 | ||
201 | if (aarch64_insn_hotpatch_safe(insn, insns[0])) { | |
202 | /* | |
203 | * ARMv8 architecture doesn't guarantee all CPUs see | |
204 | * the new instruction after returning from function | |
205 | * aarch64_insn_patch_text_nosync(). So send IPIs to | |
206 | * all other CPUs to achieve instruction | |
207 | * synchronization. | |
208 | */ | |
209 | ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]); | |
210 | kick_all_cpus_sync(); | |
211 | return ret; | |
212 | } | |
213 | } | |
214 | ||
215 | return aarch64_insn_patch_text_sync(addrs, insns, cnt); | |
216 | } | |
c84fced8 JL |
217 | |
/*
 * Insert the immediate 'imm' into the field of 'insn' selected by 'type'
 * and return the updated instruction word.  High bits of 'imm' beyond the
 * field width are silently truncated; an unknown 'type' returns 0.
 */
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
				  u32 insn, u64 imm)
{
	u32 immlo, immhi, lomask, himask, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		/*
		 * ADR/ADRP split the immediate: with the common shift of 5
		 * applied below, immlo (2 bits) lands at bits [30:29] and
		 * immhi (19 bits) at bits [23:5].
		 */
		lomask = 0x3;
		himask = 0x7ffff;
		immlo = imm & lomask;
		imm >>= 2;
		immhi = imm & himask;
		imm = (immlo << 24) | (immhi);
		mask = (lomask << 24) | (himask);
		shift = 5;
		break;
	case AARCH64_INSN_IMM_26:
		/* imm26 at bits [25:0], e.g. B/BL */
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		/* imm19 at bits [23:5], e.g. CBZ/CBNZ/B.cond */
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		/* imm16 at bits [20:5], e.g. MOVZ/MOVK/MOVN */
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		/* imm14 at bits [18:5] */
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		/* imm12 at bits [21:10], e.g. ADD/SUB (immediate) */
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		/* imm9 at bits [20:12] */
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		/* imm7 at bits [21:15], e.g. LDP/STP */
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_S:
		/* imms at bits [15:10], bitfield instructions */
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		/* immr at bits [21:16], bitfield instructions */
		mask = BIT(6) - 1;
		shift = 16;
		break;
	default:
		pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
		       type);
		return 0;
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
5c5bf25d | 283 | |
617d2fbc ZSL |
/*
 * Insert the register number 'reg' into the field of 'insn' selected by
 * 'type' and return the updated instruction word.  Returns 0 on an
 * out-of-range register or unknown field type.
 */
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	/* Valid encodings run from AARCH64_INSN_REG_0 to AARCH64_INSN_REG_SP. */
	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return 0;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;	/* Rt/Rd: bits [4:0] */
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;	/* Rn: bits [9:5] */
		break;
	case AARCH64_INSN_REGTYPE_RT2:
		shift = 10;	/* Rt2: bits [14:10] */
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;	/* Rm: bits [20:16] */
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}

	/* Every register field is 5 bits wide. */
	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}
320 | ||
17cac179 ZSL |
321 | static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type, |
322 | u32 insn) | |
323 | { | |
324 | u32 size; | |
325 | ||
326 | switch (type) { | |
327 | case AARCH64_INSN_SIZE_8: | |
328 | size = 0; | |
329 | break; | |
330 | case AARCH64_INSN_SIZE_16: | |
331 | size = 1; | |
332 | break; | |
333 | case AARCH64_INSN_SIZE_32: | |
334 | size = 2; | |
335 | break; | |
336 | case AARCH64_INSN_SIZE_64: | |
337 | size = 3; | |
338 | break; | |
339 | default: | |
340 | pr_err("%s: unknown size encoding %d\n", __func__, type); | |
341 | return 0; | |
342 | } | |
343 | ||
344 | insn &= ~GENMASK(31, 30); | |
345 | insn |= size << 30; | |
346 | ||
347 | return insn; | |
348 | } | |
349 | ||
617d2fbc ZSL |
/*
 * Compute the byte offset from 'pc' to 'addr' for a PC-relative branch.
 * BUG()s on misaligned addresses or on an offset outside [-range, range).
 */
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	/*
	 * PC: A 64-bit Program Counter holding the address of the current
	 * instruction. A64 instructions must be word-aligned.
	 */
	BUG_ON((pc & 0x3) || (addr & 0x3));

	offset = ((long)addr - (long)pc);
	/* The caller's encodable window is [-range, range). */
	BUG_ON(offset < -range || offset >= range);

	return offset;
}
366 | ||
367 | u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, | |
368 | enum aarch64_insn_branch_type type) | |
369 | { | |
370 | u32 insn; | |
371 | long offset; | |
372 | ||
5c5bf25d JL |
373 | /* |
374 | * B/BL support [-128M, 128M) offset | |
375 | * ARM64 virtual address arrangement guarantees all kernel and module | |
376 | * texts are within +/-128M. | |
377 | */ | |
617d2fbc | 378 | offset = branch_imm_common(pc, addr, SZ_128M); |
5c5bf25d | 379 | |
c0cafbae ZSL |
380 | switch (type) { |
381 | case AARCH64_INSN_BRANCH_LINK: | |
5c5bf25d | 382 | insn = aarch64_insn_get_bl_value(); |
c0cafbae ZSL |
383 | break; |
384 | case AARCH64_INSN_BRANCH_NOLINK: | |
5c5bf25d | 385 | insn = aarch64_insn_get_b_value(); |
c0cafbae ZSL |
386 | break; |
387 | default: | |
388 | BUG_ON(1); | |
389 | } | |
5c5bf25d JL |
390 | |
391 | return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn, | |
392 | offset >> 2); | |
393 | } | |
394 | ||
617d2fbc ZSL |
395 | u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr, |
396 | enum aarch64_insn_register reg, | |
397 | enum aarch64_insn_variant variant, | |
398 | enum aarch64_insn_branch_type type) | |
399 | { | |
400 | u32 insn; | |
401 | long offset; | |
402 | ||
403 | offset = branch_imm_common(pc, addr, SZ_1M); | |
404 | ||
405 | switch (type) { | |
406 | case AARCH64_INSN_BRANCH_COMP_ZERO: | |
407 | insn = aarch64_insn_get_cbz_value(); | |
408 | break; | |
409 | case AARCH64_INSN_BRANCH_COMP_NONZERO: | |
410 | insn = aarch64_insn_get_cbnz_value(); | |
411 | break; | |
412 | default: | |
413 | BUG_ON(1); | |
414 | } | |
415 | ||
416 | switch (variant) { | |
417 | case AARCH64_INSN_VARIANT_32BIT: | |
418 | break; | |
419 | case AARCH64_INSN_VARIANT_64BIT: | |
420 | insn |= AARCH64_INSN_SF_BIT; | |
421 | break; | |
422 | default: | |
423 | BUG_ON(1); | |
424 | } | |
425 | ||
426 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg); | |
427 | ||
428 | return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn, | |
429 | offset >> 2); | |
430 | } | |
431 | ||
345e0d35 ZSL |
432 | u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr, |
433 | enum aarch64_insn_condition cond) | |
434 | { | |
435 | u32 insn; | |
436 | long offset; | |
437 | ||
438 | offset = branch_imm_common(pc, addr, SZ_1M); | |
439 | ||
440 | insn = aarch64_insn_get_bcond_value(); | |
441 | ||
442 | BUG_ON(cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL); | |
443 | insn |= cond; | |
444 | ||
445 | return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn, | |
446 | offset >> 2); | |
447 | } | |
448 | ||
5c5bf25d JL |
449 | u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op) |
450 | { | |
451 | return aarch64_insn_get_hint_value() | op; | |
452 | } | |
453 | ||
/* Convenience wrapper: NOP is the HINT instruction with the NOP operation. */
u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
c0cafbae ZSL |
458 | |
459 | u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg, | |
460 | enum aarch64_insn_branch_type type) | |
461 | { | |
462 | u32 insn; | |
463 | ||
464 | switch (type) { | |
465 | case AARCH64_INSN_BRANCH_NOLINK: | |
466 | insn = aarch64_insn_get_br_value(); | |
467 | break; | |
468 | case AARCH64_INSN_BRANCH_LINK: | |
469 | insn = aarch64_insn_get_blr_value(); | |
470 | break; | |
471 | case AARCH64_INSN_BRANCH_RETURN: | |
472 | insn = aarch64_insn_get_ret_value(); | |
473 | break; | |
474 | default: | |
475 | BUG_ON(1); | |
476 | } | |
477 | ||
478 | return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg); | |
479 | } | |
17cac179 ZSL |
480 | |
481 | u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg, | |
482 | enum aarch64_insn_register base, | |
483 | enum aarch64_insn_register offset, | |
484 | enum aarch64_insn_size_type size, | |
485 | enum aarch64_insn_ldst_type type) | |
486 | { | |
487 | u32 insn; | |
488 | ||
489 | switch (type) { | |
490 | case AARCH64_INSN_LDST_LOAD_REG_OFFSET: | |
491 | insn = aarch64_insn_get_ldr_reg_value(); | |
492 | break; | |
493 | case AARCH64_INSN_LDST_STORE_REG_OFFSET: | |
494 | insn = aarch64_insn_get_str_reg_value(); | |
495 | break; | |
496 | default: | |
497 | BUG_ON(1); | |
498 | } | |
499 | ||
500 | insn = aarch64_insn_encode_ldst_size(size, insn); | |
501 | ||
502 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg); | |
503 | ||
504 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, | |
505 | base); | |
506 | ||
507 | return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, | |
508 | offset); | |
509 | } | |
1bba567d ZSL |
510 | |
/*
 * Generate an LDP/STP: load or store the register pair reg1/reg2 at
 * [base + offset] with pre- or post-indexed writeback.
 */
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		BUG_ON(1);
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		/* offset must be multiples of 4 in the range [-256, 252] */
		BUG_ON(offset & 0x3);
		BUG_ON(offset < -256 || offset > 252);
		shift = 2;	/* imm7 is scaled by the 4-byte access size */
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* offset must be multiples of 8 in the range [-512, 504] */
		BUG_ON(offset & 0x7);
		BUG_ON(offset < -512 || offset > 504);
		shift = 3;	/* imm7 is scaled by the 8-byte access size */
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		BUG_ON(1);
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
9951a157 ZSL |
568 | |
569 | u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst, | |
570 | enum aarch64_insn_register src, | |
571 | int imm, enum aarch64_insn_variant variant, | |
572 | enum aarch64_insn_adsb_type type) | |
573 | { | |
574 | u32 insn; | |
575 | ||
576 | switch (type) { | |
577 | case AARCH64_INSN_ADSB_ADD: | |
578 | insn = aarch64_insn_get_add_imm_value(); | |
579 | break; | |
580 | case AARCH64_INSN_ADSB_SUB: | |
581 | insn = aarch64_insn_get_sub_imm_value(); | |
582 | break; | |
583 | case AARCH64_INSN_ADSB_ADD_SETFLAGS: | |
584 | insn = aarch64_insn_get_adds_imm_value(); | |
585 | break; | |
586 | case AARCH64_INSN_ADSB_SUB_SETFLAGS: | |
587 | insn = aarch64_insn_get_subs_imm_value(); | |
588 | break; | |
589 | default: | |
590 | BUG_ON(1); | |
591 | } | |
592 | ||
593 | switch (variant) { | |
594 | case AARCH64_INSN_VARIANT_32BIT: | |
595 | break; | |
596 | case AARCH64_INSN_VARIANT_64BIT: | |
597 | insn |= AARCH64_INSN_SF_BIT; | |
598 | break; | |
599 | default: | |
600 | BUG_ON(1); | |
601 | } | |
602 | ||
603 | BUG_ON(imm & ~(SZ_4K - 1)); | |
604 | ||
605 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst); | |
606 | ||
607 | insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src); | |
608 | ||
609 | return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm); | |
610 | } | |
4a89d2c9 ZSL |
611 | |
/*
 * Generate a bitfield move (BFM/UBFM/SBFM) with the caller-supplied
 * immr and imms field values.
 */
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		BUG_ON(1);
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		/* 32-bit variant: immr/imms are 5-bit values. */
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* 64-bit variant sets both SF and N; 6-bit immr/imms. */
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		BUG_ON(1);
	}

	/* Reject immediates that exceed the variant's field width. */
	BUG_ON(immr & ~mask);
	BUG_ON(imms & ~mask);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
6098f2d5 ZSL |
658 | |
/*
 * Generate MOVZ/MOVK/MOVN: move the 16-bit immediate 'imm' into 'dst',
 * left-shifted by 'shift' (a multiple of 16 allowed by the variant).
 */
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		BUG_ON(1);
	}

	/* The immediate must fit in 16 bits. */
	BUG_ON(imm & ~(SZ_64K - 1));

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		BUG_ON(shift != 0 && shift != 16);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		BUG_ON(shift != 0 && shift != 16 && shift != 32 &&
		       shift != 48);
		break;
	default:
		BUG_ON(1);
	}

	/* shift/16 becomes the two-bit hw field at bits [22:21]. */
	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}