// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/sections.h>

#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)
#define AARCH64_INSN_LSL_12	BIT(22)

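/*
 * Bits [28:25] of an A64 instruction select its major encoding class;
 * the table below maps each of the 16 possible values to the class
 * constants used throughout this file.
 */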
static const int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};

enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}

bool __kprobes aarch64_insn_is_steppable_hint(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_XPACLRI:
	case AARCH64_INSN_HINT_PACIA_1716:
	case AARCH64_INSN_HINT_PACIB_1716:
	case AARCH64_INSN_HINT_PACIAZ:
	case AARCH64_INSN_HINT_PACIASP:
	case AARCH64_INSN_HINT_PACIBZ:
	case AARCH64_INSN_HINT_PACIBSP:
	case AARCH64_INSN_HINT_BTI:
	case AARCH64_INSN_HINT_BTIC:
	case AARCH64_INSN_HINT_BTIJ:
	case AARCH64_INSN_HINT_BTIJC:
	case AARCH64_INSN_HINT_NOP:
		return true;
	default:
		return false;
	}
}

bool aarch64_insn_is_branch_imm(u32 insn)
{
	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_bcond(insn));
}

static DEFINE_RAW_SPINLOCK(patch_lock);

static bool is_exit_text(unsigned long addr)
{
	/* discarded with init text/data */
	return system_state < SYSTEM_RUNNING &&
		addr >= (unsigned long)__exittext_begin &&
		addr < (unsigned long)__exittext_end;
}

static bool is_image_text(unsigned long addr)
{
	return core_kernel_text(addr) || is_exit_text(addr);
}

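/*
 * Kernel image text (and, with CONFIG_STRICT_MODULE_RWX, module text) is
 * not writable in place, so instruction patching goes through a temporary
 * writable alias of the target page established via a fixmap slot
 * (FIX_TEXT_POKE0 for the writes below); patch_unmap() tears it down.
 * Without those protections the address is simply written directly.
 */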
static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool image = is_image_text(uintaddr);
	struct page *page;

	if (image)
		page = phys_to_page(__pa_symbol(addr));
	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else
		return addr;

	BUG_ON(!page);
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	__le32 val;

	ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}

static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	return __aarch64_insn_write(addr, cpu_to_le32(insn));
}

bool __kprobes aarch64_insn_uses_literal(u32 insn)
{
	/* ldr/ldrsw (literal), prfm */

	return aarch64_insn_is_ldr_lit(insn) ||
		aarch64_insn_is_ldrsw_lit(insn) ||
		aarch64_insn_is_adr_adrp(insn) ||
		aarch64_insn_is_prfm_lit(insn);
}

bool __kprobes aarch64_insn_is_branch(u32 insn)
{
	/* b, bl, cb*, tb*, ret*, b.cond, br*, blr* */

	return aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_cbz(insn) ||
		aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_tbz(insn) ||
		aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_ret(insn) ||
		aarch64_insn_is_ret_auth(insn) ||
		aarch64_insn_is_br(insn) ||
		aarch64_insn_is_br_auth(insn) ||
		aarch64_insn_is_blr(insn) ||
		aarch64_insn_is_blr_auth(insn) ||
		aarch64_insn_is_bcond(insn);
}

int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		__flush_icache_range((uintptr_t)tp,
				     (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}

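/*
 * Patching a batch of instructions is done under stop_machine(): the first
 * CPU to enter aarch64_insn_patch_text_cb() performs the writes, then bumps
 * cpu_count a second time to signal completion; every other CPU spins until
 * that point and executes an ISB to resynchronise its instruction stream.
 */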
struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};

static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}

int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
				       cpu_online_mask);
}

static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	case AARCH64_INSN_IMM_N:
		mask = 1;
		shift = 22;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}

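/*
 * ADR and ADRP split their 21-bit signed immediate into a 2-bit immlo
 * field at bits [30:29] and a 19-bit immhi field at bits [23:5]; the
 * macros below describe that split for the AARCH64_INSN_IMM_ADR cases
 * handled separately from the table above.
 */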
#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5

u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}

u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
					    u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}

u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
				 u32 insn)
{
	int shift;

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}

	return (insn >> shift) & GENMASK(4, 0);
}

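/*
 * All general-purpose register fields are five bits wide; the register
 * type only selects where in the instruction word the field sits.
 */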
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
	case AARCH64_INSN_REGTYPE_RS:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}

static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}

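/*
 * Common range check for PC-relative branches: returns the byte offset
 * from pc to addr, or 'range' (which is never a valid offset) when either
 * address is misaligned or the offset does not fit.
 */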
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}

u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);
	if (offset >= SZ_128M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}

u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	insn = aarch64_insn_get_bcond_value();

	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
		return AARCH64_BREAK_FAULT;
	}
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_cr_op op)
{
	return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}

u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}

u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}

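/*
 * LDP/STP encode a 7-bit signed immediate that is scaled by the access
 * size, so the byte offset must be a multiple of 4 (32-bit variant) or
 * 8 (64-bit variant).
 */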
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}

u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
				   enum aarch64_insn_register base,
				   enum aarch64_insn_register state,
				   enum aarch64_insn_size_type size,
				   enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_EX:
		insn = aarch64_insn_get_load_ex_value();
		break;
	case AARCH64_INSN_LDST_STORE_EX:
		insn = aarch64_insn_get_store_ex_value();
		break;
	default:
		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    AARCH64_INSN_REG_ZR);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    state);
}

u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
			   enum aarch64_insn_register address,
			   enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size)
{
	u32 insn = aarch64_insn_get_ldadd_value();

	switch (size) {
	case AARCH64_INSN_SIZE_32:
	case AARCH64_INSN_SIZE_64:
		break;
	default:
		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    result);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    address);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    value);
}

u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
			   enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size)
{
	/*
	 * STADD is simply encoded as an alias for LDADD with XZR as
	 * the destination register.
	 */
	return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
				      value, size);
}

static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
					enum aarch64_insn_prfm_target target,
					enum aarch64_insn_prfm_policy policy,
					u32 insn)
{
	u32 imm_type = 0, imm_target = 0, imm_policy = 0;

	switch (type) {
	case AARCH64_INSN_PRFM_TYPE_PLD:
		break;
	case AARCH64_INSN_PRFM_TYPE_PLI:
		imm_type = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TYPE_PST:
		imm_type = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (target) {
	case AARCH64_INSN_PRFM_TARGET_L1:
		break;
	case AARCH64_INSN_PRFM_TARGET_L2:
		imm_target = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TARGET_L3:
		imm_target = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
		return AARCH64_BREAK_FAULT;
	}

	switch (policy) {
	case AARCH64_INSN_PRFM_POLICY_KEEP:
		break;
	case AARCH64_INSN_PRFM_POLICY_STRM:
		imm_policy = BIT(0);
		break;
	default:
		pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
		return AARCH64_BREAK_FAULT;
	}

	/* In this case, imm5 is encoded into Rt field. */
	insn &= ~GENMASK(4, 0);
	insn |= imm_policy | (imm_target << 1) | (imm_type << 3);

	return insn;
}

u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
			      enum aarch64_insn_prfm_type type,
			      enum aarch64_insn_prfm_target target,
			      enum aarch64_insn_prfm_policy policy)
{
	u32 insn = aarch64_insn_get_prfm_value();

	insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);

	insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
}

u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* We can't encode more than a 24bit value (12bit + 12bit shift) */
	if (imm & ~(BIT(24) - 1))
		goto out;

	/* If we have something in the top 12 bits... */
	if (imm & ~(SZ_4K - 1)) {
		/* ... and in the low 12 bits -> error */
		if (imm & (SZ_4K - 1))
			goto out;

		imm >>= 12;
		insn |= AARCH64_INSN_LSL_12;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);

out:
	pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
	return AARCH64_BREAK_FAULT;
}

u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}

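/*
 * MOVZ/MOVK/MOVN take a 16-bit immediate together with a shift of 0, 16,
 * 32 or 48; the shift is encoded as shift/16 in the 2-bit hw field at
 * bits [22:21].
 */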
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}

u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		if (variant != AARCH64_INSN_VARIANT_64BIT) {
			pr_err("%s: invalid variant for reverse64 %d\n",
			       __func__, variant);
			return AARCH64_BREAK_FAULT;
		}
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}

u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}

u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}

u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

/*
 * MOV (register) is architecturally an alias of ORR (shifted register) where
 * MOV <*d>, <*m> is equivalent to ORR <*d>, <*ZR>, <*m>
 */
u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      enum aarch64_insn_variant variant)
{
	return aarch64_insn_gen_logical_shifted_reg(dst, AARCH64_INSN_REG_ZR,
						    src, 0, variant,
						    AARCH64_INSN_LOGIC_ORR);
}

u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr,
			 enum aarch64_insn_register reg,
			 enum aarch64_insn_adr_type type)
{
	u32 insn;
	s32 offset;

	switch (type) {
	case AARCH64_INSN_ADR_TYPE_ADR:
		insn = aarch64_insn_get_adr_value();
		offset = addr - pc;
		break;
	case AARCH64_INSN_ADR_TYPE_ADRP:
		insn = aarch64_insn_get_adrp_value();
		offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;
		break;
	default:
		pr_err("%s: unknown adr encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (offset < -SZ_1M || offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset);
}

/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		/* sign-extend imm26 and convert words to bytes (<< 2) */
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		/* sign-extend imm19 and convert words to bytes (<< 2) */
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		/* sign-extend imm14 and convert words to bytes (<< 2) */
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}

/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}

s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}

u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
					     offset >> 12);
}

/*
 * Extract the Op/CR data from a msr/mrs instruction.
 */
u32 aarch64_insn_extract_system_reg(u32 insn)
{
	return (insn & 0x1FFFE0) >> 5;
}

bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}

/*
 * Macros/defines for extracting register numbers from instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn & (0xf << offset)) >> offset;
}

#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}

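/*
 * AArch32 condition-code helpers: each one evaluates a condition against
 * the NZCV flags of a saved pstate value. They are dispatched through
 * aarch32_opcode_cond_checks[] below, indexed by the instruction's 4-bit
 * condition field.
 */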
static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}

static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}

static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}

static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}

static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}

static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}

static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}

static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}

static bool __kprobes __check_gt(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}

static bool __kprobes __check_le(unsigned long pstate)
{
	/* PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}

static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}

/*
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};

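/*
 * Returns true when val consists of a single contiguous run of set bits
 * (callers have already rejected all-zeroes and all-ones).
 */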
static bool range_of_ones(u64 val)
{
	/* Doesn't handle full ones or full zeroes */
	u64 sval = val >> __ffs64(val);

	/* One of Sean Eron Anderson's bithack tricks */
	return ((sval + 1) & (sval)) == 0;
}

static u32 aarch64_encode_immediate(u64 imm,
				    enum aarch64_insn_variant variant,
				    u32 insn)
{
	unsigned int immr, imms, n, ones, ror, esz, tmp;
	u64 mask;

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		esz = 32;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		esz = 64;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	mask = GENMASK(esz - 1, 0);

	/* Can't encode full zeroes, full ones, or value wider than the mask */
	if (!imm || imm == mask || imm & ~mask)
		return AARCH64_BREAK_FAULT;

	/*
	 * Inverse of Replicate(). Try to spot a repeating pattern
	 * with a pow2 stride.
	 */
	for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
		u64 emask = BIT(tmp) - 1;

		if ((imm & emask) != ((imm >> tmp) & emask))
			break;

		esz = tmp;
		mask = emask;
	}

	/* N is only set if we're encoding a 64bit value */
	n = esz == 64;

	/* Trim imm to the element size */
	imm &= mask;

	/* That's how many ones we need to encode */
	ones = hweight64(imm);

	/*
	 * imms is set to (ones - 1), prefixed with a string of ones
	 * and a zero if they fit. Cap it to 6 bits.
	 */
	imms = ones - 1;
	imms |= 0xf << ffs(esz);
	imms &= BIT(6) - 1;

	/* Compute the rotation */
	if (range_of_ones(imm)) {
		/*
		 * Pattern: 0..01..10..0
		 *
		 * Compute how many rotate we need to align it right
		 */
		ror = __ffs64(imm);
	} else {
		/*
		 * Pattern: 0..01..10..01..1
		 *
		 * Fill the unused top bits with ones, and check if
		 * the result is a valid immediate (all ones with a
		 * contiguous ranges of zeroes).
		 */
		imm |= ~mask;
		if (!range_of_ones(~imm))
			return AARCH64_BREAK_FAULT;

		/*
		 * Compute the rotation to get a continuous set of
		 * ones, with the first bit set at position 0
		 */
		ror = fls(~imm);
	}

	/*
	 * immr is the number of bits we need to rotate back to the
	 * original set of ones. Note that this is relative to the
	 * element size...
	 */
	immr = (esz - ror) % esz;

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}

u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
				       enum aarch64_insn_variant variant,
				       enum aarch64_insn_register Rn,
				       enum aarch64_insn_register Rd,
				       u64 imm)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_imm_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_imm_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_imm_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_imm_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_encode_immediate(imm, variant, insn);
}

u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
			  enum aarch64_insn_register Rm,
			  enum aarch64_insn_register Rn,
			  enum aarch64_insn_register Rd,
			  u8 lsb)
{
	u32 insn;

	insn = aarch64_insn_get_extr_value();

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (lsb > 31)
			return AARCH64_BREAK_FAULT;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if (lsb > 63)
			return AARCH64_BREAK_FAULT;
		insn |= AARCH64_INSN_SF_BIT;
		insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
}