/*
 * x86 FPU boot time init code:
 */
#include <asm/fpu/internal.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>
#include <asm/cmdline.h>

#include <linux/sched.h>
#include <linux/init.h>

/*
 * Initialize the TS bit in CR0 according to the style of context-switches
 * we are using:
 */
static void fpu__init_cpu_ctx_switch(void)
{
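	/*
	 * stts() sets CR0::TS, so the next FPU instruction traps (#NM)
	 * and the FPU state can be restored lazily at that point;
	 * clts() clears TS for the eager strategy, where the FPU
	 * registers are kept up to date across context switches.
	 */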
	if (!boot_cpu_has(X86_FEATURE_EAGER_FPU))
		stts();
	else
		clts();
}

/*
 * Initialize the registers found in all CPUs, CR0 and CR4:
 */
static void fpu__init_cpu_generic(void)
{
	unsigned long cr0;
	unsigned long cr4_mask = 0;

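	/*
	 * CR4.OSFXSR enables the FXSAVE/FXRSTOR instructions and SSE;
	 * CR4.OSXMMEXCPT enables the #XM exception for unmasked SIMD
	 * floating-point errors.
	 */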
	if (boot_cpu_has(X86_FEATURE_FXSR))
		cr4_mask |= X86_CR4_OSFXSR;
	if (boot_cpu_has(X86_FEATURE_XMM))
		cr4_mask |= X86_CR4_OSXMMEXCPT;
	if (cr4_mask)
		cr4_set_bits(cr4_mask);

	cr0 = read_cr0();
	cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
	if (!boot_cpu_has(X86_FEATURE_FPU))
		cr0 |= X86_CR0_EM;
	write_cr0(cr0);

	/* Flush out any pending x87 state: */
#ifdef CONFIG_MATH_EMULATION
	if (!boot_cpu_has(X86_FEATURE_FPU))
		fpstate_init_soft(&current->thread.fpu.state.soft);
	else
#endif
	asm volatile ("fninit");
}

/*
 * Enable all supported FPU features. Called when a CPU is brought online:
 */
void fpu__init_cpu(void)
{
	fpu__init_cpu_generic();
	fpu__init_cpu_xstate();
	fpu__init_cpu_ctx_switch();
}

/*
 * The earliest FPU detection code.
 *
 * Set the X86_FEATURE_FPU CPU-capability bit based on
 * trying to execute an actual sequence of FPU instructions:
 */
static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
{
	unsigned long cr0;
	u16 fsw, fcw;

	fsw = fcw = 0xffff;

	cr0 = read_cr0();
	cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
	write_cr0(cr0);

	if (!test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
		asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
			     : "+m" (fsw), "+m" (fcw));

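		/*
		 * On a working x87, FNINIT leaves the status word at 0x0000
		 * and the control word at 0x037f, so FCW & 0x103f reads back
		 * as 0x003f. Without an FPU the 0xffff values preloaded above
		 * are typically left untouched, so the check below fails.
		 */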
		if (fsw == 0 && (fcw & 0x103f) == 0x003f)
			set_cpu_cap(c, X86_FEATURE_FPU);
		else
			clear_cpu_cap(c, X86_FEATURE_FPU);
	}

#ifndef CONFIG_MATH_EMULATION
	if (!boot_cpu_has(X86_FEATURE_FPU)) {
		pr_emerg("x86/fpu: Giving up, no FPU found and no math emulation present\n");
		for (;;)
			asm volatile("hlt");
	}
#endif
}

/*
 * Boot time FPU feature detection code:
 */
unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;

static void __init fpu__init_system_mxcsr(void)
{
	unsigned int mask = 0;

	if (boot_cpu_has(X86_FEATURE_FXSR)) {
		/* Static because GCC does not get 16-byte stack alignment right: */
		static struct fxregs_state fxregs __initdata;

		asm volatile("fxsave %0" : "+m" (fxregs));

		mask = fxregs.mxcsr_mask;

		/*
		 * If zero then use the default features mask,
		 * which has all features set, except the
		 * denormals-are-zero feature bit:
		 */
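		/*
		 * (DAZ is MXCSR bit 6, so 0xffff & ~0x0040 == 0x0000ffbf.)
		 */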
		if (mask == 0)
			mask = 0x0000ffbf;
	}
	mxcsr_feature_mask &= mask;
}

/*
 * Once per bootup FPU initialization sequences that will run on most x86 CPUs:
 */
static void __init fpu__init_system_generic(void)
{
	/*
	 * Set up the legacy init FPU context. (xstate init might overwrite this
	 * with a more modern format, if the CPU supports it.)
	 */
	fpstate_init(&init_fpstate);

	fpu__init_system_mxcsr();
}

/*
 * Size of the FPU context state. All tasks in the system use the
 * same context size, regardless of what portion they use.
 * This is inherent to the XSAVE architecture which puts all state
 * components into a single, contiguous memory block:
 */
unsigned int xstate_size;
EXPORT_SYMBOL_GPL(xstate_size);

/* Get alignment of the TYPE. */
#define TYPE_ALIGN(TYPE) offsetof(struct { char x; TYPE test; }, test)

/*
 * Enforce that 'MEMBER' is the last field of 'TYPE'.
 *
 * Align the computed size with alignment of the TYPE,
 * because that's how C aligns structs.
 */
#define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
	BUILD_BUG_ON(sizeof(TYPE) != ALIGN(offsetofend(TYPE, MEMBER), \
					   TYPE_ALIGN(TYPE)))
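
/*
 * For illustration (hypothetical type, not used anywhere): given
 * 'struct foo { u32 a; u64 b; };', TYPE_ALIGN(struct foo) evaluates to 8,
 * because 'test' lands at offset 8 in 'struct { char x; struct foo test; }'.
 * CHECK_MEMBER_AT_END_OF(struct foo, b) then verifies that
 * sizeof(struct foo) == ALIGN(offsetofend(struct foo, b), 8) == 16,
 * i.e. that no member follows 'b'.
 */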

/*
 * We append the 'struct fpu' to the task_struct:
 */
static void __init fpu__init_task_struct_size(void)
{
	int task_size = sizeof(struct task_struct);

	/*
	 * Subtract off the static size of the register state.
	 * It potentially has a bunch of padding.
	 */
	task_size -= sizeof(((struct task_struct *)0)->thread.fpu.state);

	/*
	 * Add back the dynamically-calculated register state
	 * size.
	 */
	task_size += xstate_size;
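
	/*
	 * The resulting layout is the static part of task_struct (with
	 * thread_struct and its 'struct fpu' at the very end), followed
	 * by xstate_size bytes of register state - so every task only
	 * pays for the state components this CPU actually supports.
	 */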

	/*
	 * We dynamically size 'struct fpu', so we require that
	 * it be at the end of 'thread_struct' and that
	 * 'thread_struct' be at the end of 'task_struct'. If
	 * you hit a compile error here, check the structure to
	 * see if something got added to the end.
	 */
	CHECK_MEMBER_AT_END_OF(struct fpu, state);
	CHECK_MEMBER_AT_END_OF(struct thread_struct, fpu);
	CHECK_MEMBER_AT_END_OF(struct task_struct, thread);

	arch_task_struct_size = task_size;
}

/*
 * Set up the xstate_size based on the legacy FPU context size.
 *
 * We set this up first, and later it will be overwritten by
 * fpu__init_system_xstate() if the CPU knows about xstates.
 */
static void __init fpu__init_system_xstate_size_legacy(void)
{
	static int on_boot_cpu __initdata = 1;

	WARN_ON_FPU(!on_boot_cpu);
	on_boot_cpu = 0;

	/*
	 * Note that xstate_size might be overwritten later during
	 * fpu__init_system_xstate().
	 */

	if (!boot_cpu_has(X86_FEATURE_FPU)) {
		/*
		 * Disable xsave as we do not support it if i387
		 * emulation is enabled.
		 */
		setup_clear_cpu_cap(X86_FEATURE_XSAVE);
		setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
		xstate_size = sizeof(struct swregs_state);
	} else {
		if (boot_cpu_has(X86_FEATURE_FXSR))
			xstate_size = sizeof(struct fxregs_state);
		else
			xstate_size = sizeof(struct fregs_state);
	}
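	/*
	 * (fxregs_state covers the 512-byte FXSAVE image; fregs_state
	 * covers the legacy 108-byte FNSAVE image plus a status word;
	 * swregs_state is the math-emulation soft state.)
	 */
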
	/*
	 * Quirk: we don't yet handle the XSAVES* instructions
	 * correctly, as we don't correctly convert between
	 * standard and compacted format when interfacing
	 * with user-space - so disable it for now.
	 *
	 * The difference is small: with recent CPUs the
	 * compacted format is only marginally smaller than
	 * the standard FPU state format.
	 *
	 * ( This is easy to backport while we are fixing
	 *   XSAVES* support. )
	 */
	setup_clear_cpu_cap(X86_FEATURE_XSAVES);
}

/*
 * FPU context switching strategies:
 *
 * Contrary to popular belief, we don't do lazy FPU saves, due to the
 * task migration complications it brings on SMP - we only do
 * lazy FPU restores.
 *
 * 'lazy' is the traditional strategy, which is based on setting
 * CR0::TS to 1 during context-switch (instead of doing a full
 * restore of the FPU state), which causes the first FPU instruction
 * after the context switch (whenever it is executed) to fault - at
 * which point we lazily restore the FPU state into FPU registers.
 *
 * Tasks are of course under no obligation to execute FPU instructions,
 * so it can easily happen that another context-switch occurs without
 * a single FPU instruction being executed. If we eventually switch
 * back to the original task (that still owns the FPU) then we have
 * not only saved ourselves the FPU restores along the way, but we also
 * have the FPU ready to be used by the original task.
 *
 * 'lazy' is deprecated because it's almost never a performance win
 * and it's much more complicated than 'eager'.
 *
 * 'eager' switching is the default on all CPUs: there we switch the FPU
 * state during every context switch, regardless of whether the task
 * has used FPU instructions in that time slice or not. This is done
 * because modern FPU context saving instructions are able to optimize
 * state saving and restoration in hardware: they can detect both
 * unused and untouched FPU state and optimize accordingly.
 *
 * [ Note that even in 'lazy' mode we might optimize context switches
 *   to use 'eager' restores, if we detect that a task is using the FPU
 *   frequently. See the fpu->counter logic in fpu/internal.h for that. ]
 */
static enum { ENABLE, DISABLE } eagerfpu = ENABLE;
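
/*
 * A rough sketch of the 'lazy' flow described above (the actual code
 * lives in the context-switch and #NM trap paths, not in this file):
 *
 *	switch away from task A:
 *		save A's registers into A->thread.fpu;
 *		stts();				<- set CR0::TS
 *
 *	task B executes its first FPU instruction:
 *		#NM (device-not-available) trap:
 *			clts();			<- clear CR0::TS
 *			restore B->thread.fpu into the FPU registers;
 *			re-execute B's FPU instruction;
 */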

/*
 * Find supported xfeatures based on cpu features and command-line input.
 * This must be called after fpu__init_parse_early_param() is called and
 * xfeatures_mask is enumerated.
 */
u64 __init fpu__get_supported_xfeatures_mask(void)
{
	/* Support all xfeatures known to us */
	if (eagerfpu != DISABLE)
		return XCNTXT_MASK;

	/* Warn about the xstate features that get disabled because eagerfpu is off */
	if (xfeatures_mask & XFEATURE_MASK_EAGER) {
		pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n",
			xfeatures_mask & XFEATURE_MASK_EAGER);
	}

	/* Return a mask that masks out all features requiring eagerfpu mode */
	return ~XFEATURE_MASK_EAGER;
}

/*
 * Disable features dependent on eagerfpu.
 */
static void __init fpu__clear_eager_fpu_features(void)
{
	setup_clear_cpu_cap(X86_FEATURE_MPX);
}

/*
 * Pick the FPU context switching strategy:
 *
 * 'eagerfpu' defaults to ENABLE and can only be set to DISABLE via the
 * "eagerfpu=off" boot parameter. In addition, we force it to ENABLE if
 * either of the following is true:
 *
 * (1) the cpu has xsaveopt, as it has the optimization and doing eager
 *     FPU switching has a relatively low cost compared to a plain xsave;
 * (2) the cpu has xsave features (e.g. MPX) that depend on eager FPU
 *     switching. Should the kernel boot with noxsaveopt, we support MPX
 *     with eager FPU switching at a higher cost.
 */
static void __init fpu__init_system_ctx_switch(void)
{
	static bool on_boot_cpu __initdata = 1;

	WARN_ON_FPU(!on_boot_cpu);
	on_boot_cpu = 0;

	WARN_ON_FPU(current->thread.fpu.fpstate_active);
	current_thread_info()->status = 0;

	if (boot_cpu_has(X86_FEATURE_XSAVEOPT) && eagerfpu != DISABLE)
		eagerfpu = ENABLE;

	if (xfeatures_mask & XFEATURE_MASK_EAGER)
		eagerfpu = ENABLE;

	if (eagerfpu == ENABLE)
		setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);

	pr_info("x86/fpu: Using '%s' FPU context switches.\n", eagerfpu == ENABLE ? "eager" : "lazy");
}

/*
 * We parse fpu parameters early because fpu__init_system() is executed
 * before parse_early_param().
 */
static void __init fpu__init_parse_early_param(void)
{
	if (cmdline_find_option_bool(boot_command_line, "eagerfpu=off")) {
		eagerfpu = DISABLE;
		fpu__clear_eager_fpu_features();
	}

	if (cmdline_find_option_bool(boot_command_line, "no387"))
		setup_clear_cpu_cap(X86_FEATURE_FPU);

	if (cmdline_find_option_bool(boot_command_line, "nofxsr")) {
		setup_clear_cpu_cap(X86_FEATURE_FXSR);
		setup_clear_cpu_cap(X86_FEATURE_FXSR_OPT);
		setup_clear_cpu_cap(X86_FEATURE_XMM);
	}

	if (cmdline_find_option_bool(boot_command_line, "noxsave"))
		fpu__xstate_clear_all_cpu_caps();

	if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
		setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);

	if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
		setup_clear_cpu_cap(X86_FEATURE_XSAVES);
}

/*
 * Called on the boot CPU once per system bootup, to set up the initial
 * FPU state that is later cloned into all processes:
 */
void __init fpu__init_system(struct cpuinfo_x86 *c)
{
	fpu__init_parse_early_param();
	fpu__init_system_early_generic(c);

	/*
	 * The FPU has to be operational for some of the
	 * later FPU init activities:
	 */
	fpu__init_cpu();

	/*
	 * But don't leave CR0::TS set yet, as some of the FPU setup
	 * methods depend on being able to execute FPU instructions
	 * that will fault on a set TS, such as the FXSAVE in
	 * fpu__init_system_mxcsr().
	 */
	clts();

	fpu__init_system_generic();
	fpu__init_system_xstate_size_legacy();
	fpu__init_system_xstate();
	fpu__init_task_struct_size();

	fpu__init_system_ctx_switch();
}