arm64/sme: Basic enumeration support
[linux-block.git] / arch / arm64 / include / asm / fpsimd.h
CommitLineData
caab277b 1/* SPDX-License-Identifier: GPL-2.0-only */
53631b54
CM
2/*
3 * Copyright (C) 2012 ARM Ltd.
53631b54
CM
4 */
5#ifndef __ASM_FP_H
6#define __ASM_FP_H
7
2d2123bc 8#include <asm/errno.h>
f9209e26 9#include <asm/ptrace.h>
9a6e5948
DM
10#include <asm/processor.h>
11#include <asm/sigcontext.h>
f9209e26 12#include <asm/sysreg.h>
53631b54
CM
13
14#ifndef __ASSEMBLY__
15
ead9e430 16#include <linux/bitmap.h>
f9209e26 17#include <linux/build_bug.h>
ead9e430 18#include <linux/bug.h>
7582e220 19#include <linux/cache.h>
b4f9b390 20#include <linux/init.h>
bc0ee476 21#include <linux/stddef.h>
ead9e430 22#include <linux/types.h>
bc0ee476 23
b907b80d 24#ifdef CONFIG_COMPAT
53631b54
CM
25/* Masks for extracting the FPSR and FPCR from the FPSCR */
26#define VFP_FPSCR_STAT_MASK 0xf800009f
27#define VFP_FPSCR_CTRL_MASK 0x07f79f00
28/*
29 * The VFP state has 32x64-bit registers and a single 32-bit
30 * control/status register.
31 */
32#define VFP_STATE_SIZE ((32 * 8) + 4)
33#endif
34
35struct task_struct;
36
20b85472
DM
37extern void fpsimd_save_state(struct user_fpsimd_state *state);
38extern void fpsimd_load_state(struct user_fpsimd_state *state);
53631b54
CM
39
40extern void fpsimd_thread_switch(struct task_struct *next);
41extern void fpsimd_flush_thread(void);
42
8cd969d2 43extern void fpsimd_signal_preserve_current_state(void);
c51f9269 44extern void fpsimd_preserve_current_state(void);
005f78cd 45extern void fpsimd_restore_current_state(void);
0abdeff5 46extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
c51f9269 47
04950674
DM
48extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
49 void *sve_state, unsigned int sve_vl);
e6b673b7 50
005f78cd 51extern void fpsimd_flush_task_state(struct task_struct *target);
54b8c7cb 52extern void fpsimd_save_and_flush_cpu_state(void);
005f78cd 53
30c43e73
MB
54/* Maximum VL that SVE/SME VL-agnostic software can transparently support */
55#define VL_ARCH_MAX 0x100
7582e220 56
9a6e5948
DM
57/* Offset of FFR in the SVE register dump */
58static inline size_t sve_ffr_offset(int vl)
59{
60 return SVE_SIG_FFR_OFFSET(sve_vq_from_vl(vl)) - SVE_SIG_REGS_OFFSET;
61}
62
63static inline void *sve_pffr(struct thread_struct *thread)
64{
0423eedc 65 return (char *)thread->sve_state + sve_ffr_offset(thread_get_sve_vl(thread));
9a6e5948
DM
66}
67
9f584866 68extern void sve_save_state(void *state, u32 *pfpsr, int save_ffr);
1fc5dce7 69extern void sve_load_state(void const *state, u32 const *pfpsr,
ddc806b5 70 int restore_ffr);
9f584866 71extern void sve_flush_live(bool flush_ffr, unsigned long vq_minus_1);
1fc5dce7 72extern unsigned int sve_get_vl(void);
cccb78ce 73extern void sve_set_vq(unsigned long vq_minus_1);
c0cda3b8
DM
74
75struct arm64_cpu_capabilities;
76extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
5e64b862
MB
77extern void sme_kernel_enable(const struct arm64_cpu_capabilities *__unused);
78extern void fa64_kernel_enable(const struct arm64_cpu_capabilities *__unused);
1fc5dce7 79
31dc52b3
DM
80extern u64 read_zcr_features(void);
81
ead9e430
DM
82/*
83 * Helpers to translate bit indices in sve_vq_map to VQ values (and
84 * vice versa). This allows find_next_bit() to be used to find the
85 * _maximum_ VQ not exceeding a certain value.
86 */
/*
 * Map a vector quadword count (VQ) to its bit index in a vq_map
 * bitmap.  Bits are stored in descending VQ order (see the comment
 * above): bit 0 corresponds to SVE_VQ_MAX.
 */
static inline unsigned int __vq_to_bit(unsigned int vq)
{
	return SVE_VQ_MAX - vq;
}
91
/*
 * Inverse of __vq_to_bit(): map a vq_map bit index back to the VQ
 * value it encodes.
 */
static inline unsigned int __bit_to_vq(unsigned int bit)
{
	return SVE_VQ_MAX - bit;
}
96
b5bc00ff
MB
97
/*
 * Description of the vector lengths supported for one vector type
 * (e.g. SVE), as discovered during CPU feature probing.  One entry
 * per vector type lives in the vl_info[] array below.
 */
struct vl_info {
	enum vec_type type;
	const char *name;		/* For display purposes */

	/* Minimum supported vector length across all CPUs */
	int min_vl;

	/* Maximum supported vector length across all CPUs */
	int max_vl;
	/* Maximum VL usable under virtualisation — set by probing code */
	int max_virtualisable_vl;

	/*
	 * Set of available vector lengths,
	 * where length vq is encoded as bit __vq_to_bit(vq):
	 */
	DECLARE_BITMAP(vq_map, SVE_VQ_MAX);

	/* Set of vector lengths present on at least one CPU: */
	DECLARE_BITMAP(vq_partial_map, SVE_VQ_MAX);
};
7582e220 118
bc0ee476
DM
119#ifdef CONFIG_ARM64_SVE
120
bc0ee476
DM
121extern void sve_alloc(struct task_struct *task);
122extern void fpsimd_release_task(struct task_struct *task);
43d4da2c
DM
123extern void fpsimd_sync_to_sve(struct task_struct *task);
124extern void sve_sync_to_fpsimd(struct task_struct *task);
125extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task);
126
30c43e73 127extern int vec_set_vector_length(struct task_struct *task, enum vec_type type,
7582e220 128 unsigned long vl, unsigned long flags);
bc0ee476 129
2d2123bc
DM
130extern int sve_set_current_vl(unsigned long arg);
131extern int sve_get_current_vl(void);
132
f9209e26
MR
/* Disable EL0 access to SVE by clearing CPACR_EL1.ZEN's EL0 enable bit */
static inline void sve_user_disable(void)
{
	sysreg_clear_set(cpacr_el1, CPACR_EL1_ZEN_EL0EN, 0);
}
137
/* Enable EL0 access to SVE by setting CPACR_EL1.ZEN's EL0 enable bit */
static inline void sve_user_enable(void)
{
	sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_ZEN_EL0EN);
}
142
71ce1ae5
MZ
/*
 * Update the LEN field of the ZCR register named by reg to val,
 * skipping the system-register write entirely when the value would
 * not change.
 */
#define sve_cond_update_zcr_vq(val, reg)		\
	do {						\
		u64 __zcr = read_sysreg_s((reg));	\
		u64 __new = __zcr & ~ZCR_ELx_LEN_MASK;	\
		__new |= (val) & ZCR_ELx_LEN_MASK;	\
		if (__zcr != __new)			\
			write_sysreg_s(__new, (reg));	\
	} while (0)
151
2e0f2478
DM
152/*
153 * Probing and setup functions.
154 * Calls to these functions must be serialised with one another.
155 */
b5bc00ff
MB
156enum vec_type;
157
158extern void __init vec_init_vq_map(enum vec_type type);
159extern void vec_update_vq_map(enum vec_type type);
160extern int vec_verify_vq_map(enum vec_type type);
2e0f2478
DM
161extern void __init sve_setup(void);
162
b5bc00ff
MB
163extern __ro_after_init struct vl_info vl_info[ARM64_VEC_MAX];
164
165static inline void write_vl(enum vec_type type, u64 val)
166{
167 u64 tmp;
168
169 switch (type) {
170#ifdef CONFIG_ARM64_SVE
171 case ARM64_VEC_SVE:
172 tmp = read_sysreg_s(SYS_ZCR_EL1) & ~ZCR_ELx_LEN_MASK;
173 write_sysreg_s(tmp | val, SYS_ZCR_EL1);
174 break;
175#endif
176 default:
177 WARN_ON_ONCE(1);
178 break;
179 }
180}
181
/* Maximum supported vector length across all CPUs, for vector type 'type' */
static inline int vec_max_vl(enum vec_type type)
{
	return vl_info[type].max_vl;
}
186
/* Maximum virtualisable vector length for vector type 'type' */
static inline int vec_max_virtualisable_vl(enum vec_type type)
{
	return vl_info[type].max_virtualisable_vl;
}
191
/* SVE convenience wrapper for vec_max_vl() */
static inline int sve_max_vl(void)
{
	return vec_max_vl(ARM64_VEC_SVE);
}
196
/* SVE convenience wrapper for vec_max_virtualisable_vl() */
static inline int sve_max_virtualisable_vl(void)
{
	return vec_max_virtualisable_vl(ARM64_VEC_SVE);
}
201
/* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */
static inline bool vq_available(enum vec_type type, unsigned int vq)
{
	/* vq_map bits are stored in reverse order; see __vq_to_bit() */
	return test_bit(__vq_to_bit(vq), vl_info[type].vq_map);
}
207
/* SVE convenience wrapper for vq_available(); same vq range precondition */
static inline bool sve_vq_available(unsigned int vq)
{
	return vq_available(ARM64_VEC_SVE, vq);
}
212
bc0ee476
DM
213#else /* ! CONFIG_ARM64_SVE */
214
/* SVE state allocation and syncing are no-ops when CONFIG_ARM64_SVE=n */
static inline void sve_alloc(struct task_struct *task) { }
static inline void fpsimd_release_task(struct task_struct *task) { }
static inline void sve_sync_to_fpsimd(struct task_struct *task) { }
static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { }
219
49ed9204
MB
/* No virtualisable SVE vector length when SVE support is compiled out */
static inline int sve_max_virtualisable_vl(void)
{
	return 0;
}
224
2d2123bc
DM
/* Vector-length get/set fails with -EINVAL when SVE is compiled out */
static inline int sve_set_current_vl(unsigned long arg)
{
	return -EINVAL;
}

static inline int sve_get_current_vl(void)
{
	return -EINVAL;
}
234
b5bc00ff
MB
/* No meaningful maximum VL, and no VQ is available, without SVE support */
static inline int sve_max_vl(void)
{
	return -EINVAL;
}

static inline bool sve_vq_available(unsigned int vq) { return false; }
241
f9209e26
MR
/*
 * These must never be reachable when CONFIG_ARM64_SVE=n: any call that
 * survives dead-code elimination is a build-time error, not a no-op.
 */
static inline void sve_user_disable(void) { BUILD_BUG(); }
static inline void sve_user_enable(void) { BUILD_BUG(); }
244
a9f8696d
XT
245#define sve_cond_update_zcr_vq(val, reg) do { } while (0)
246
b5bc00ff
MB
/* Vector-length probing/verification is trivial without SVE support */
static inline void vec_init_vq_map(enum vec_type t) { }
static inline void vec_update_vq_map(enum vec_type t) { }
static inline int vec_verify_vq_map(enum vec_type t) { return 0; }
static inline void sve_setup(void) { }
bc0ee476
DM
251
252#endif /* ! CONFIG_ARM64_SVE */
253
ca8a4ebc
MB
254#ifdef CONFIG_ARM64_SME
255
/* Enter streaming mode: MSR write to the SVCR SMSTART SM alias */
static inline void sme_smstart_sm(void)
{
	asm volatile(__msr_s(SYS_SVCR_SMSTART_SM_EL0, "xzr"));
}
260
/* Exit streaming mode: MSR write to the SVCR SMSTOP SM alias */
static inline void sme_smstop_sm(void)
{
	asm volatile(__msr_s(SYS_SVCR_SMSTOP_SM_EL0, "xzr"));
}
265
/* Exit streaming mode and disable ZA (SMSTOP SMZA alias, both bits) */
static inline void sme_smstop(void)
{
	asm volatile(__msr_s(SYS_SVCR_SMSTOP_SMZA_EL0, "xzr"));
}
270
271#else
272
/* Streaming-mode controls are no-ops when CONFIG_ARM64_SME=n */
static inline void sme_smstart_sm(void) { }
static inline void sme_smstop_sm(void) { }
static inline void sme_smstop(void) { }
276
277#endif /* ! CONFIG_ARM64_SME */
278
4328825d
DM
279/* For use by EFI runtime services calls only */
280extern void __efi_fpsimd_begin(void);
281extern void __efi_fpsimd_end(void);
282
53631b54
CM
283#endif
284
285#endif