From: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Date: Fri, 27 Nov 2020 04:44:07 +0000 (+0530)
Subject: powerpc/book3s64/kuap: Move KUAP related function outside radix
X-Git-Tag: v5.11-rc1~76^2~168
X-Git-Url: https://git.kernel.dk/?a=commitdiff_plain;h=3b47b7549ead0719e94022c6742199333c7c8d9f;p=linux-block.git

powerpc/book3s64/kuap: Move KUAP related function outside radix

The next set of patches adds support for kuap with hash translation.
In preparation for that, rename/move kuap-related functions to
non-radix names.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20201127044424.40686-6-aneesh.kumar@linux.ibm.com
---

diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h
deleted file mode 100644
index 2fb8ee7b1e2a..000000000000
--- a/arch/powerpc/include/asm/book3s/64/kup-radix.h
+++ /dev/null
@@ -1,205 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
-#define _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
-
-#include
-#include
-
-#define AMR_KUAP_BLOCK_READ	UL(0x4000000000000000)
-#define AMR_KUAP_BLOCK_WRITE	UL(0x8000000000000000)
-#define AMR_KUAP_BLOCKED	(AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
-#define AMR_KUAP_SHIFT		62
-
-#ifdef __ASSEMBLY__
-
-.macro kuap_restore_amr	gpr1, gpr2
-#ifdef CONFIG_PPC_KUAP
-	BEGIN_MMU_FTR_SECTION_NESTED(67)
-	mfspr	\gpr1, SPRN_AMR
-	ld	\gpr2, STACK_REGS_AMR(r1)
-	cmpd	\gpr1, \gpr2
-	beq	998f
-	isync
-	mtspr	SPRN_AMR, \gpr2
-	/* No isync required, see kuap_restore_amr() */
-998:
-	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
-#endif
-.endm
-
-#ifdef CONFIG_PPC_KUAP
-.macro kuap_check_amr gpr1, gpr2
-#ifdef CONFIG_PPC_KUAP_DEBUG
-	BEGIN_MMU_FTR_SECTION_NESTED(67)
-	mfspr	\gpr1, SPRN_AMR
-	li	\gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
-	sldi	\gpr2, \gpr2, AMR_KUAP_SHIFT
-999:	tdne	\gpr1, \gpr2
-	EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
-	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
-#endif
-.endm
-#endif
-
-.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
-#ifdef CONFIG_PPC_KUAP
-	BEGIN_MMU_FTR_SECTION_NESTED(67)
-	.ifnb \msr_pr_cr
-	bne	\msr_pr_cr, 99f
-	.endif
-	mfspr	\gpr1, SPRN_AMR
-	std	\gpr1, STACK_REGS_AMR(r1)
-	li	\gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
-	sldi	\gpr2, \gpr2, AMR_KUAP_SHIFT
-	cmpd	\use_cr, \gpr1, \gpr2
-	beq	\use_cr, 99f
-	// We don't isync here because we very recently entered via rfid
-	mtspr	SPRN_AMR, \gpr2
-	isync
-99:
-	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
-#endif
-.endm
-
-#else /* !__ASSEMBLY__ */
-
-#include
-
-DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
-
-#ifdef CONFIG_PPC_KUAP
-
-#include
-#include
-
-static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr)
-{
-	if (mmu_has_feature(MMU_FTR_RADIX_KUAP) && unlikely(regs->kuap != amr)) {
-		isync();
-		mtspr(SPRN_AMR, regs->kuap);
-		/*
-		 * No isync required here because we are about to RFI back to
-		 * previous context before any user accesses would be made,
-		 * which is a CSI.
- */ - } -} - -static inline unsigned long kuap_get_and_check_amr(void) -{ - if (mmu_has_feature(MMU_FTR_RADIX_KUAP)) { - unsigned long amr = mfspr(SPRN_AMR); - if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */ - WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED); - return amr; - } - return 0; -} - -static inline void kuap_check_amr(void) -{ - if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_RADIX_KUAP)) - WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED); -} - -/* - * We support individually allowing read or write, but we don't support nesting - * because that would require an expensive read/modify write of the AMR. - */ - -static inline unsigned long get_kuap(void) -{ - /* - * We return AMR_KUAP_BLOCKED when we don't support KUAP because - * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to - * cause restore_user_access to do a flush. - * - * This has no effect in terms of actually blocking things on hash, - * so it doesn't break anything. - */ - if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP)) - return AMR_KUAP_BLOCKED; - - return mfspr(SPRN_AMR); -} - -static inline void set_kuap(unsigned long value) -{ - if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP)) - return; - - /* - * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both - * before and after the move to AMR. See table 6 on page 1134. - */ - isync(); - mtspr(SPRN_AMR, value); - isync(); -} - -static inline bool -bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) -{ - return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) && - (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)), - "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read"); -} -#else /* CONFIG_PPC_KUAP */ -static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr) { } - -static inline unsigned long kuap_get_and_check_amr(void) -{ - return 0UL; -} - -static inline unsigned long get_kuap(void) -{ - return AMR_KUAP_BLOCKED; -} - -static inline void set_kuap(unsigned long value) { } -#endif /* !CONFIG_PPC_KUAP */ - -static __always_inline void allow_user_access(void __user *to, const void __user *from, - unsigned long size, unsigned long dir) -{ - // This is written so we can resolve to a single case at build time - BUILD_BUG_ON(!__builtin_constant_p(dir)); - if (dir == KUAP_READ) - set_kuap(AMR_KUAP_BLOCK_WRITE); - else if (dir == KUAP_WRITE) - set_kuap(AMR_KUAP_BLOCK_READ); - else if (dir == KUAP_READ_WRITE) - set_kuap(0); - else - BUILD_BUG(); -} - -static inline void prevent_user_access(void __user *to, const void __user *from, - unsigned long size, unsigned long dir) -{ - set_kuap(AMR_KUAP_BLOCKED); - if (static_branch_unlikely(&uaccess_flush_key)) - do_uaccess_flush(); -} - -static inline unsigned long prevent_user_access_return(void) -{ - unsigned long flags = get_kuap(); - - set_kuap(AMR_KUAP_BLOCKED); - if (static_branch_unlikely(&uaccess_flush_key)) - do_uaccess_flush(); - - return flags; -} - -static inline void restore_user_access(unsigned long flags) -{ - set_kuap(flags); - if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED) - do_uaccess_flush(); -} -#endif /* __ASSEMBLY__ */ - -#endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */ diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h new file mode 100644 index 000000000000..8735d2dede94 --- /dev/null +++ b/arch/powerpc/include/asm/book3s/64/kup.h @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_BOOK3S_64_KUP_H 
+#define _ASM_POWERPC_BOOK3S_64_KUP_H + +#include +#include + +#define AMR_KUAP_BLOCK_READ UL(0x4000000000000000) +#define AMR_KUAP_BLOCK_WRITE UL(0x8000000000000000) +#define AMR_KUAP_BLOCKED (AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE) +#define AMR_KUAP_SHIFT 62 + +#ifdef __ASSEMBLY__ + +.macro kuap_restore_amr gpr1, gpr2 +#ifdef CONFIG_PPC_KUAP + BEGIN_MMU_FTR_SECTION_NESTED(67) + mfspr \gpr1, SPRN_AMR + ld \gpr2, STACK_REGS_AMR(r1) + cmpd \gpr1, \gpr2 + beq 998f + isync + mtspr SPRN_AMR, \gpr2 + /* No isync required, see kuap_restore_amr() */ +998: + END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67) +#endif +.endm + +#ifdef CONFIG_PPC_KUAP +.macro kuap_check_amr gpr1, gpr2 +#ifdef CONFIG_PPC_KUAP_DEBUG + BEGIN_MMU_FTR_SECTION_NESTED(67) + mfspr \gpr1, SPRN_AMR + li \gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT) + sldi \gpr2, \gpr2, AMR_KUAP_SHIFT +999: tdne \gpr1, \gpr2 + EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE) + END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67) +#endif +.endm +#endif + +.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr +#ifdef CONFIG_PPC_KUAP + BEGIN_MMU_FTR_SECTION_NESTED(67) + .ifnb \msr_pr_cr + bne \msr_pr_cr, 99f + .endif + mfspr \gpr1, SPRN_AMR + std \gpr1, STACK_REGS_AMR(r1) + li \gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT) + sldi \gpr2, \gpr2, AMR_KUAP_SHIFT + cmpd \use_cr, \gpr1, \gpr2 + beq \use_cr, 99f + // We don't isync here because we very recently entered via rfid + mtspr SPRN_AMR, \gpr2 + isync +99: + END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67) +#endif +.endm + +#else /* !__ASSEMBLY__ */ + +#include + +DECLARE_STATIC_KEY_FALSE(uaccess_flush_key); + +#ifdef CONFIG_PPC_KUAP + +#include +#include + +static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr) +{ + if (mmu_has_feature(MMU_FTR_RADIX_KUAP) && unlikely(regs->kuap != amr)) { + isync(); + mtspr(SPRN_AMR, regs->kuap); + /* + * No isync required here because we are about to RFI back to + * previous context before any user accesses would be made, + * which is a CSI. + */ + } +} + +static inline unsigned long kuap_get_and_check_amr(void) +{ + if (mmu_has_feature(MMU_FTR_RADIX_KUAP)) { + unsigned long amr = mfspr(SPRN_AMR); + if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */ + WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED); + return amr; + } + return 0; +} + +static inline void kuap_check_amr(void) +{ + if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_RADIX_KUAP)) + WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED); +} + +/* + * We support individually allowing read or write, but we don't support nesting + * because that would require an expensive read/modify write of the AMR. + */ + +static inline unsigned long get_kuap(void) +{ + /* + * We return AMR_KUAP_BLOCKED when we don't support KUAP because + * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to + * cause restore_user_access to do a flush. + * + * This has no effect in terms of actually blocking things on hash, + * so it doesn't break anything. + */ + if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP)) + return AMR_KUAP_BLOCKED; + + return mfspr(SPRN_AMR); +} + +static inline void set_kuap(unsigned long value) +{ + if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP)) + return; + + /* + * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both + * before and after the move to AMR. See table 6 on page 1134. 
+ */ + isync(); + mtspr(SPRN_AMR, value); + isync(); +} + +static inline bool +bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) +{ + return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) && + (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)), + "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read"); +} +#else /* CONFIG_PPC_KUAP */ +static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr) { } + +static inline unsigned long kuap_get_and_check_amr(void) +{ + return 0UL; +} + +static inline unsigned long get_kuap(void) +{ + return AMR_KUAP_BLOCKED; +} + +static inline void set_kuap(unsigned long value) { } +#endif /* !CONFIG_PPC_KUAP */ + +static __always_inline void allow_user_access(void __user *to, const void __user *from, + unsigned long size, unsigned long dir) +{ + // This is written so we can resolve to a single case at build time + BUILD_BUG_ON(!__builtin_constant_p(dir)); + if (dir == KUAP_READ) + set_kuap(AMR_KUAP_BLOCK_WRITE); + else if (dir == KUAP_WRITE) + set_kuap(AMR_KUAP_BLOCK_READ); + else if (dir == KUAP_READ_WRITE) + set_kuap(0); + else + BUILD_BUG(); +} + +static inline void prevent_user_access(void __user *to, const void __user *from, + unsigned long size, unsigned long dir) +{ + set_kuap(AMR_KUAP_BLOCKED); + if (static_branch_unlikely(&uaccess_flush_key)) + do_uaccess_flush(); +} + +static inline unsigned long prevent_user_access_return(void) +{ + unsigned long flags = get_kuap(); + + set_kuap(AMR_KUAP_BLOCKED); + if (static_branch_unlikely(&uaccess_flush_key)) + do_uaccess_flush(); + + return flags; +} + +static inline void restore_user_access(unsigned long flags) +{ + set_kuap(flags); + if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED) + do_uaccess_flush(); +} +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_POWERPC_BOOK3S_64_KUP_H */ diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h index 0d93331d0fab..a06e50b68d40 100644 --- a/arch/powerpc/include/asm/kup.h +++ b/arch/powerpc/include/asm/kup.h @@ -15,11 +15,13 @@ #define KUAP_CURRENT (KUAP_CURRENT_READ | KUAP_CURRENT_WRITE) #ifdef CONFIG_PPC_BOOK3S_64 -#include +#include #endif + #ifdef CONFIG_PPC_8xx #include #endif + #ifdef CONFIG_PPC_BOOK3S_32 #include #endif diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c index 7dc71f85683d..c75994cf50a7 100644 --- a/arch/powerpc/mm/book3s64/pkeys.c +++ b/arch/powerpc/mm/book3s64/pkeys.c @@ -9,9 +9,12 @@ #include #include #include +#include + #include #include + int num_pkey; /* Max number of pkeys supported */ /* * Keys marked in the reservation list cannot be allocated by userspace @@ -226,6 +229,25 @@ out: return; } +#ifdef CONFIG_PPC_KUAP +void __init setup_kuap(bool disabled) +{ + if (disabled || !early_radix_enabled()) + return; + + if (smp_processor_id() == boot_cpuid) { + pr_info("Activating Kernel Userspace Access Prevention\n"); + cur_cpu_spec->mmu_features |= MMU_FTR_RADIX_KUAP; + } + + /* + * Set the default kernel AMR values on all cpus. 
+ */ + mtspr(SPRN_AMR, AMR_KUAP_BLOCKED); + isync(); +} +#endif + static inline u64 read_amr(void) { return mfspr(SPRN_AMR); diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index f5f248d44d5c..fe2c26dbcb28 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -609,25 +609,6 @@ void setup_kuep(bool disabled) } #endif -#ifdef CONFIG_PPC_KUAP -void setup_kuap(bool disabled) -{ - if (disabled || !early_radix_enabled()) - return; - - if (smp_processor_id() == boot_cpuid) { - pr_info("Activating Kernel Userspace Access Prevention\n"); - cur_cpu_spec->mmu_features |= MMU_FTR_RADIX_KUAP; - } - - /* - * Set the default kernel AMR values on all cpus. - */ - mtspr(SPRN_AMR, AMR_KUAP_BLOCKED); - isync(); -} -#endif - void __init radix__early_init_mmu(void) { unsigned long lpcr;
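
As background for the constants and helpers being moved above: AMR_KUAP_BLOCK_READ and
AMR_KUAP_BLOCK_WRITE occupy the top two AMR bits, allow_user_access() clears only the bit
for the requested direction, prevent_user_access() sets both bits again, and
bad_kuap_fault() tests the bit matching the faulting access. The stand-alone C sketch
below models just that bit logic; the sim_* names and the in-memory "AMR" variable are
illustrative assumptions, while the real helpers program SPRN_AMR and are gated on
MMU_FTR_RADIX_KUAP.

/*
 * Illustrative sketch only (not part of the patch): models how the
 * AMR_KUAP_* bits gate user access by direction.  The sim_* helpers are
 * hypothetical stand-ins for allow_user_access()/prevent_user_access().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define AMR_KUAP_BLOCK_READ	0x4000000000000000ULL
#define AMR_KUAP_BLOCK_WRITE	0x8000000000000000ULL
#define AMR_KUAP_BLOCKED	(AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)

static uint64_t sim_amr = AMR_KUAP_BLOCKED;	/* default: all user access blocked */

/* Like allow_user_access(): unblock only the requested direction(s). */
static void sim_allow(bool read, bool write)
{
	uint64_t v = AMR_KUAP_BLOCKED;

	if (read)
		v &= ~AMR_KUAP_BLOCK_READ;
	if (write)
		v &= ~AMR_KUAP_BLOCK_WRITE;
	sim_amr = v;
}

/* Like prevent_user_access(): block both directions again. */
static void sim_prevent(void)
{
	sim_amr = AMR_KUAP_BLOCKED;
}

/* Like the bad_kuap_fault() test: was this access direction blocked? */
static bool sim_fault_blocked(bool is_write)
{
	return sim_amr & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ);
}

int main(void)
{
	sim_allow(true, false);		/* read-only window, as for a user read */
	printf("read blocked:  %d\n", sim_fault_blocked(false));	/* 0 */
	printf("write blocked: %d\n", sim_fault_blocked(true));		/* 1 */

	sim_prevent();			/* back to the locked default */
	printf("read blocked:  %d\n", sim_fault_blocked(false));	/* 1 */
	return 0;
}

Note also that when MMU_FTR_RADIX_KUAP is not set, get_kuap() in the patch deliberately
returns AMR_KUAP_BLOCKED so that restore_user_access() still performs the uaccess flush.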