From 4c0bd995d73ed8897650095c7892b132a0bd66a4 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Mon, 12 Sep 2022 17:22:08 +0100
Subject: arm64: alternatives: have callbacks take a cap

Today, callback alternatives are special-cased within
__apply_alternatives(), and are applied alongside patching for system
capabilities as ARM64_NCAPS is not part of the boot_capabilities
feature mask.

This special-casing is less than ideal. Giving special meaning to
ARM64_NCAPS for this requires some structures and loops to use
ARM64_NCAPS + 1 (AKA ARM64_NPATCHABLE), while others use ARM64_NCAPS.
It's also not immediately clear that callback alternatives are only
applied when applying alternatives for system-wide features.

To make this a bit clearer, this patch changes the way that callback
alternatives are identified to remove the special-casing of
ARM64_NCAPS, and to allow callback alternatives to be associated with
a cpucap as with all other alternatives.

New cpucaps, ARM64_ALWAYS_BOOT and ARM64_ALWAYS_SYSTEM, are added
which are always detected alongside boot cpu capabilities and system
capabilities respectively. All existing callback alternatives are made
to use ARM64_ALWAYS_SYSTEM, and so will be patched at the same point
during the boot flow as before.

Subsequent patches will make more use of these new cpucaps.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland
Cc: Ard Biesheuvel
Cc: James Morse
Cc: Joey Gouly
Cc: Marc Zyngier
Cc: Will Deacon
Reviewed-by: Ard Biesheuvel
Link: https://lore.kernel.org/r/20220912162210.3626215-7-mark.rutland@arm.com
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/assembler.h | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'arch/arm64/include/asm/assembler.h')

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 5846145be523..d851e2733439 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -293,7 +293,7 @@ alternative_endif
 alternative_if_not ARM64_KVM_PROTECTED_MODE
 	ASM_BUG()
 alternative_else_nop_endif
-alternative_cb kvm_compute_final_ctr_el0
+alternative_cb ARM64_ALWAYS_SYSTEM, kvm_compute_final_ctr_el0
 	movz	\reg, #0
 	movk	\reg, #0, lsl #16
 	movk	\reg, #0, lsl #32
@@ -877,7 +877,7 @@ alternative_endif
 
 	.macro __mitigate_spectre_bhb_loop	tmp
 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
-alternative_cb	spectre_bhb_patch_loop_iter
+alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_iter
 	mov	\tmp, #32		// Patched to correct the immediate
 alternative_cb_end
 .Lspectre_bhb_loop\@:
@@ -890,7 +890,7 @@ alternative_cb_end
 
 	.macro mitigate_spectre_bhb_loop	tmp
 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
-alternative_cb	spectre_bhb_patch_loop_mitigation_enable
+alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_mitigation_enable
 	b	.L_spectre_bhb_loop_done\@	// Patched to NOP
 alternative_cb_end
 	__mitigate_spectre_bhb_loop	\tmp
@@ -904,7 +904,7 @@ alternative_cb_end
 	stp	x0, x1, [sp, #-16]!
 	stp	x2, x3, [sp, #-16]!
 	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_3
-alternative_cb	smccc_patch_fw_mitigation_conduit
+alternative_cb ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit
 	nop					// Patched to SMC/HVC #0
 alternative_cb_end
 	ldp	x2, x3, [sp], #16
@@ -914,7 +914,7 @@ alternative_cb_end
 
 	.macro mitigate_spectre_bhb_clear_insn
 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
-alternative_cb	spectre_bhb_patch_clearbhb
+alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_clearbhb
 	/* Patched to NOP when not supported */
 	clearbhb
 	isb
--
cgit
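
Editor's note (not part of the patch): after this change every callback
alternative names a cpucap, and all existing callbacks use
ARM64_ALWAYS_SYSTEM, so they are still patched when system-wide
alternatives are applied. Below is a minimal sketch of the resulting
shape of a callback alternative. The alternative_cb/alternative_cb_end
macro usage and the callback prototype follow the in-tree definitions
(see arch/arm64/include/asm/alternative-macros.h), but the names
read_example_imm and patch_example_imm and the callback body are
hypothetical, for illustration only.

/* Assembly side (sketch): the callback is now tied to a cpucap. */
	.macro	read_example_imm, reg
alternative_cb ARM64_ALWAYS_SYSTEM, patch_example_imm
	mov	\reg, #0		// Rewritten at boot by the callback
alternative_cb_end
	.endm

/* C side (sketch): the callback rewrites the covered instruction(s). */
void patch_example_imm(struct alt_instr *alt, __le32 *origptr,
		       __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	/* Hypothetical policy: turn the MOV into a NOP when not needed. */
	updptr[0] = cpu_to_le32(aarch64_insn_gen_nop());
}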