/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

20 #error "Only include this from assembly code"
#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

	.macro save_and_disable_daif, flags
	mrs	\flags, daif
	msr	daifset, #0xf
	.endm

	.macro	restore_daif, flags:req
	msr	daif, \flags
	.endm

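/*
 * Illustrative usage sketch (not part of the original file): bracket a
 * critical section with the two macros above, using x3 as scratch.
 *
 *	save_and_disable_daif x3	// x3 := old PSTATE.DAIF, then mask all
 *	...				// code that must not be interrupted
 *	restore_daif x3			// put the saved DAIF flags back
 */
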
	/* Only on aarch64 pstate, PSR_D_BIT is different for aarch32 */
	.macro	inherit_daif, pstate:req, tmp:req
	and	\tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
	msr	daif, \tmp
	.endm

	/* IRQ is the lowest priority flag, unconditionally unmask the rest. */
	.macro enable_da_f
	msr	daifclr, #(8 | 4 | 1)	// clear D, A and F; leave I masked
	.endm

/*
 * Enable and disable interrupts.
 */
	.macro	disable_irq
	msr	daifset, #2
	.endm

	.macro	enable_irq
	msr	daifclr, #2
	.endm

	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #2
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm

	/* call with daif masked */
	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
9990:
	.endm

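/*
 * Usage sketch (illustrative, not from the original file): the flags
 * argument is typically the task's thread_info flags, e.g.
 *
 *	ldr	x1, [tsk, #TSK_TI_FLAGS]
 *	disable_step_tsk x1, x2		// x2 is scratch
 */
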
/*
 * SMP data memory barrier
 */
	.macro	smp_dmb, opt
	dmb	\opt
	.endm

/*
 * RAS Error Synchronization barrier
 */
	.macro	esb
	hint	#16
	.endm

/*
 * Emit an entry into the exception table
 */
	.macro		_asm_extable, from, to
	.pushsection	__ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
	.endm

#define USER(l, x...)				\
9999:	x;					\
	_asm_extable	9999b, l

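/*
 * Usage sketch (illustrative): wrap a user-space access so a fault is
 * fixed up at a local label instead of oopsing, e.g.
 *
 * USER(9f, ldrb w4, [x1])	// on fault, resume at local label 9
 * ...
 * 9:	mov	x0, #-EFAULT
 */
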
/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm

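/*
 * e.g. (illustrative): combine two 32-bit halves into one 64-bit value;
 * the operands are the X-register views of the 32-bit registers:
 *	regs_to_64 x0, x0, x1		// on LE: x0 = (x1 << 32) | w0
 */
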
/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC when running
 * in core kernel context. In module context, a movz/movk sequence
 * is used, since modules may be loaded far away from the kernel
 * when KASLR is in effect.
 */

	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 */
	.macro	adr_l, dst, sym
#ifndef MODULE
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
#else
	movz	\dst, #:abs_g3:\sym
	movk	\dst, #:abs_g2_nc:\sym
	movk	\dst, #:abs_g1_nc:\sym
	movk	\dst, #:abs_g0_nc:\sym
#endif
	.endm

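/*
 * Usage sketch (illustrative):
 *	adr_l	x0, vectors		// x0 = runtime address of 'vectors'
 */
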
	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
#ifndef MODULE
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
#else
	.ifb	\tmp
	adr_l	\dst, \sym
	ldr	\dst, [\dst]
	.else
	adr_l	\tmp, \sym
	ldr	\dst, [\tmp]
	.endif
#endif
	.endm

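/*
 * e.g. (illustrative; 'some_u32' is a hypothetical 32-bit variable):
 *	ldr_l	x5, idmap_t0sz		// 64-bit dst, no tmp needed
 *	ldr_l	w5, some_u32, x6	// 32-bit dst needs a 64-bit tmp
 */
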
	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register to calculate the address
	 *       while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
#ifndef MODULE
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
#else
	adr_l	\tmp, \sym
	str	\src, [\tmp]
#endif
	.endm

	/*
	 * @dst: Result of per_cpu(sym, smp_processor_id()), can be SP for
	 *	 non-module code
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro adr_this_cpu, dst, sym, tmp
#ifndef MODULE
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
#else
	adr_l	\dst, \sym
#endif
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	add	\dst, \dst, \tmp
	.endm

	/*
	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	ldr	\dst, [\dst, \tmp]
	.endm

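/*
 * Usage sketch (illustrative): load this CPU's copy of a per-cpu
 * variable, e.g. the IRQ stack pointer:
 *	ldr_this_cpu x25, irq_stack_ptr, x26
 */
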
/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
	.endm

/*
 * read_ctr - read CTR_EL0. If the system has mismatched
 * cache line sizes, provide the system wide safe value
 * from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
alternative_if_not ARM64_MISMATCHED_CACHE_LINE_SIZE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
	.endm

/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

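/*
 * Worked example (illustrative): CTR_EL0.DminLine holds log2(words per
 * line), so an encoding of 4 yields 4 << 4 = 64 bytes per D-cache line.
 */
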
/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm		\tmp, \tmp, #16, #19	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and		\tmp, \tmp, #0xf	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_idmap_t0sz, valreg, tmpreg
	ldr_l	\tmpreg, idmap_t0sz
	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
 * ID_AA64MMFR0_EL1.PARange value
 *
 *	tcr:		register with the TCR_ELx value to be updated
 *	pos:		IPS or PS bitfield position
 *	tmp{0,1}:	temporary registers
 */
	.macro	tcr_compute_pa_size, tcr, pos, tmp0, tmp1
	mrs	\tmp0, ID_AA64MMFR0_EL1
	// Narrow PARange to fit the PS field in TCR_ELx
	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
	mov	\tmp1, #ID_AA64MMFR0_PARANGE_MAX
	cmp	\tmp0, \tmp1
	csel	\tmp0, \tmp1, \tmp0, hi
	bfi	\tcr, \tmp0, \pos, #3
	.endm

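/*
 * Usage sketch (illustrative), e.g. when setting up TCR_EL1 held in x10:
 *	tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
 */
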
/*
 * Macro to perform a data cache maintenance for the interval
 * [kaddr, kaddr + size)
 *
 * 	op:		operation passed to dc instruction
 * 	domain:		domain used in dsb instruction
 * 	kaddr:		starting virtual address of the region
 * 	size:		size of the region
 * 	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2
9998:
	.if	(\op == cvau || \op == cvac)
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \kaddr
alternative_else
	dc	civac, \kaddr
alternative_endif
	.elseif	(\op == cvap)
alternative_if ARM64_HAS_DCPOP
	sys 3, c7, c12, 1, \kaddr	// dc cvap
alternative_else
	dc	cvac, \kaddr
alternative_endif
	.else
	dc	\op, \kaddr
	.endif
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm

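/*
 * Usage sketch (illustrative): clean+invalidate [x0, x0 + x1) to the
 * point of coherency, with x2/x3 as scratch:
 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
 */
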
/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	\tmpreg, \tmpreg, #8, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm

/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm

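/*
 * Usage sketch (illustrative), with x0 = destination page and
 * x1 = source page, both page-aligned:
 *	copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 */
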
/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)			\
	.globl	__pi_##x;		\
	.type	__pi_##x, %function;	\
	.set	__pi_##x, x;		\
	.size	__pi_##x, . - x;	\
	ENDPROC(x)

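/*
 * e.g. (illustrative): ending a routine with ENDPIPROC(memcpy) instead of
 * ENDPROC(memcpy) also emits a __pi_memcpy alias that position-independent
 * early boot code can call safely.
 */
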
/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif

/*
 * Emit a 64-bit absolute little endian symbol reference in a way that
 * ensures that it will be resolved at build time, even when building a
 * PIE binary. This requires cooperation from the linker script, which
 * must emit the lo32/hi32 halves individually.
 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm

/*
 * mov_q - move an immediate constant into a 64-bit register using
 *         between 2 and 4 movz/movk instructions (depending on the
 *         magnitude and sign of the operand)
 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm

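/*
 * e.g. (illustrative): build a constant too wide for a single mov:
 *	mov_q	x5, 0xffff000012345678
 */
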
/*
 * Return the current thread_info.
 */
	.macro	get_thread_info, rd
	mrs	\rd, sp_el0
	.endm

/*
 * Arrange a physical address in a TTBR register, taking care of 52-bit
 * addresses.
 *
 * 	phys:	physical address, preserved
 * 	ttbr:	returns the TTBR value
 */
	.macro	phys_to_ttbr, phys, ttbr
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\ttbr, \phys, \phys, lsr #46
	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
#else
	mov	\ttbr, \phys
#endif
	.endm

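/*
 * Usage sketch (illustrative): program TTBR0_EL1 with the physical
 * address held in x1, using x2 for the encoded value:
 *	phys_to_ttbr x1, x2
 *	msr	ttbr0_el1, x2
 *	isb
 */
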
/**
 * Errata workaround prior to disabling the MMU. Insert an ISB immediately
 * prior to executing the MSR that will change SCTLR_ELn[M] from a value
 * of 1 to 0.
 */
	.macro	pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
#endif
	.endm

#endif	/* __ASM_ASSEMBLER_H */