From f84d83d8165570380f55f4ce578bfb131a9266c5 Mon Sep 17 00:00:00 2001 From: David Virag Date: Thu, 26 May 2022 07:58:40 +0200 Subject: arm64: dts: exynos: Correct UART clocks on Exynos7885 The clocks in the serial UART nodes were swapped by mistake on Exynos7885. This only worked correctly because of a mistake in the clock driver which has been fixed. With the fixed clock driver in place, the baudrate of the UARTs gets miscalculated. Fix this by correcting the clocks in the dtsi. Fixes: 06874015327b ("arm64: dts: exynos: Add initial device tree support for Exynos7885 SoC") Signed-off-by: David Virag Link: https://lore.kernel.org/r/20220526055840.45209-3-virag.david003@gmail.com Signed-off-by: Krzysztof Kozlowski --- arch/arm64/boot/dts/exynos/exynos7885.dtsi | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/boot/dts/exynos/exynos7885.dtsi b/arch/arm64/boot/dts/exynos/exynos7885.dtsi index 3170661f5b67..9c233c56558c 100644 --- a/arch/arm64/boot/dts/exynos/exynos7885.dtsi +++ b/arch/arm64/boot/dts/exynos/exynos7885.dtsi @@ -280,8 +280,8 @@ interrupts = ; pinctrl-names = "default"; pinctrl-0 = <&uart0_bus>; - clocks = <&cmu_peri CLK_GOUT_UART0_EXT_UCLK>, - <&cmu_peri CLK_GOUT_UART0_PCLK>; + clocks = <&cmu_peri CLK_GOUT_UART0_PCLK>, + <&cmu_peri CLK_GOUT_UART0_EXT_UCLK>; clock-names = "uart", "clk_uart_baud0"; samsung,uart-fifosize = <64>; status = "disabled"; @@ -293,8 +293,8 @@ interrupts = ; pinctrl-names = "default"; pinctrl-0 = <&uart1_bus>; - clocks = <&cmu_peri CLK_GOUT_UART1_EXT_UCLK>, - <&cmu_peri CLK_GOUT_UART1_PCLK>; + clocks = <&cmu_peri CLK_GOUT_UART1_PCLK>, + <&cmu_peri CLK_GOUT_UART1_EXT_UCLK>; clock-names = "uart", "clk_uart_baud0"; samsung,uart-fifosize = <256>; status = "disabled"; @@ -306,8 +306,8 @@ interrupts = ; pinctrl-names = "default"; pinctrl-0 = <&uart2_bus>; - clocks = <&cmu_peri CLK_GOUT_UART2_EXT_UCLK>, - <&cmu_peri CLK_GOUT_UART2_PCLK>; + clocks = <&cmu_peri CLK_GOUT_UART2_PCLK>, + <&cmu_peri CLK_GOUT_UART2_EXT_UCLK>; clock-names = "uart", "clk_uart_baud0"; samsung,uart-fifosize = <256>; status = "disabled"; -- cgit From 4266e2f70d4388b8c6a95056169954ff049ced94 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Sat, 14 May 2022 11:35:05 -0300 Subject: arm64: s32g2: Pass unit name to soc node Pass unit name to soc node to fix the following W=1 build warning: arch/arm64/boot/dts/freescale/s32g2.dtsi:82.6-123.4: Warning (unit_address_vs_reg): /soc: node has a reg or ranges property, but no unit name Signed-off-by: Fabio Estevam Reviewed-by: Chester Lin Signed-off-by: Chester Lin Link: https://lore.kernel.org/r/20220514143505.1554813-1-festevam@gmail.com --- arch/arm64/boot/dts/freescale/s32g2.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/boot/dts/freescale/s32g2.dtsi b/arch/arm64/boot/dts/freescale/s32g2.dtsi index 59ea8a25aa4c..824d401e7a2c 100644 --- a/arch/arm64/boot/dts/freescale/s32g2.dtsi +++ b/arch/arm64/boot/dts/freescale/s32g2.dtsi @@ -79,7 +79,7 @@ }; }; - soc { + soc@0 { compatible = "simple-bus"; #address-cells = <1>; #size-cells = <1>; -- cgit From d52d165d67c5aa26c8c89909003c94a66492d23d Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 28 May 2022 12:38:11 +0100 Subject: KVM: arm64: Always start with clearing SVE flag on load On each vcpu load, we set the KVM_ARM64_HOST_SVE_ENABLED flag if SVE is enabled for EL0 on the host. This is used to restore the correct state on vcpu put. 
However, it appears that nothing ever clears this flag. Once set, it will stick until the vcpu is destroyed, which has the potential to spuriously enable SVE for userspace. We probably never saw the issue because no VMM uses SVE, but that's still pretty bad. Unconditionally clearing the flag on vcpu load addresses the issue. Fixes: 8383741ab2e7 ("KVM: arm64: Get rid of host SVE tracking/saving") Signed-off-by: Marc Zyngier Cc: stable@vger.kernel.org Reviewed-by: Mark Brown Link: https://lore.kernel.org/r/20220528113829.1043361-2-maz@kernel.org --- arch/arm64/kvm/fpsimd.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm64') diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 3d251a4d2cf7..8267ff4642d3 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -80,6 +80,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) vcpu->arch.flags &= ~KVM_ARM64_FP_ENABLED; vcpu->arch.flags |= KVM_ARM64_FP_HOST; + vcpu->arch.flags &= ~KVM_ARM64_HOST_SVE_ENABLED; if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN) vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED; -- cgit From 039f49c4cafb785504c678f28664d088e0108d35 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 28 May 2022 12:38:12 +0100 Subject: KVM: arm64: Always start with clearing SME flag on load On each vcpu load, we set the KVM_ARM64_HOST_SME_ENABLED flag if SME is enabled for EL0 on the host. This is used to restore the correct state on vcpu put. However, it appears that nothing ever clears this flag. Once set, it will stick until the vcpu is destroyed, which has the potential to spuriously enable SME for userspace. As it turns out, this is due to the SME code being more or less copied from SVE, and inheriting the same shortcomings. We never saw the issue because nothing uses SME, and the amount of testing is probably still pretty low. Fixes: 861262ab8627 ("KVM: arm64: Handle SME host state when running guests") Signed-off-by: Marc Zyngier Reviewed-by: Mark Brown Link: https://lore.kernel.org/r/20220528113829.1043361-3-maz@kernel.org --- arch/arm64/kvm/fpsimd.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm64') diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 8267ff4642d3..6012b08ecb14 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -94,6 +94,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) * operations. Do this for ZA as well for now for simplicity. */ if (system_supports_sme()) { + vcpu->arch.flags &= ~KVM_ARM64_HOST_SME_ENABLED; if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN) vcpu->arch.flags |= KVM_ARM64_HOST_SME_ENABLED; -- cgit From e3fe65e0d3671ee5ae8a2723e429ee4830a7c89c Mon Sep 17 00:00:00 2001 From: sunliming Date: Thu, 2 Jun 2022 10:48:05 +0800 Subject: KVM: arm64: Fix inconsistent indenting Fix the following smatch warnings: arch/arm64/kvm/vmid.c:62 flush_context() warn: inconsistent indenting Reported-by: kernel test robot Signed-off-by: sunliming Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220602024805.511457-1-sunliming@kylinos.cn --- arch/arm64/kvm/vmid.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kvm/vmid.c b/arch/arm64/kvm/vmid.c index 8d5f0506fd87..d78ae63d7c15 100644 --- a/arch/arm64/kvm/vmid.c +++ b/arch/arm64/kvm/vmid.c @@ -66,7 +66,7 @@ static void flush_context(void) * the next context-switch, we broadcast TLB flush + I-cache * invalidation over the inner shareable domain on rollover. 
*/ - kvm_call_hyp(__kvm_flush_vm_context); + kvm_call_hyp(__kvm_flush_vm_context); } static bool check_update_reserved_vmid(u64 vmid, u64 newvmid) -- cgit From 2cdea19a34c2340b3aa69508804efe4e3750fcec Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 7 Jun 2022 14:14:25 +0100 Subject: KVM: arm64: Don't read a HW interrupt pending state in user context Since 5bfa685e62e9 ("KVM: arm64: vgic: Read HW interrupt pending state from the HW"), we're able to source the pending bit for an interrupt that is stored either on the physical distributor or on a device. However, this state is only available when the vcpu is loaded, and is not intended to be accessed from userspace. Unfortunately, the GICv2 emulation doesn't provide specific userspace accessors, and we fallback with the ones that are intended for the guest, with fatal consequences. Add a new vgic_uaccess_read_pending() accessor for userspace to use, build on top of the existing vgic_mmio_read_pending(). Reported-by: Eric Auger Reviewed-by: Eric Auger Tested-by: Eric Auger Signed-off-by: Marc Zyngier Fixes: 5bfa685e62e9 ("KVM: arm64: vgic: Read HW interrupt pending state from the HW") Link: https://lore.kernel.org/r/20220607131427.1164881-2-maz@kernel.org Cc: stable@vger.kernel.org --- arch/arm64/kvm/vgic/vgic-mmio-v2.c | 4 ++-- arch/arm64/kvm/vgic/vgic-mmio.c | 19 ++++++++++++++++--- arch/arm64/kvm/vgic/vgic-mmio.h | 3 +++ 3 files changed, 21 insertions(+), 5 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v2.c b/arch/arm64/kvm/vgic/vgic-mmio-v2.c index 77a67e9d3d14..e070cda86e12 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v2.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v2.c @@ -429,11 +429,11 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = { VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET, vgic_mmio_read_pending, vgic_mmio_write_spending, - NULL, vgic_uaccess_write_spending, 1, + vgic_uaccess_read_pending, vgic_uaccess_write_spending, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR, vgic_mmio_read_pending, vgic_mmio_write_cpending, - NULL, vgic_uaccess_write_cpending, 1, + vgic_uaccess_read_pending, vgic_uaccess_write_cpending, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET, vgic_mmio_read_active, vgic_mmio_write_sactive, diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c index 49837d3a3ef5..dc8c52487e47 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio.c +++ b/arch/arm64/kvm/vgic/vgic-mmio.c @@ -226,8 +226,9 @@ int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu, return 0; } -unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, - gpa_t addr, unsigned int len) +static unsigned long __read_pending(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len, + bool is_user) { u32 intid = VGIC_ADDR_TO_INTID(addr, 1); u32 value = 0; @@ -248,7 +249,7 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, IRQCHIP_STATE_PENDING, &val); WARN_RATELIMIT(err, "IRQ %d", irq->host_irq); - } else if (vgic_irq_is_mapped_level(irq)) { + } else if (!is_user && vgic_irq_is_mapped_level(irq)) { val = vgic_get_phys_line_level(irq); } else { val = irq_is_pending(irq); @@ -263,6 +264,18 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, return value; } +unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len) +{ + return __read_pending(vcpu, addr, len, false); +} + +unsigned long vgic_uaccess_read_pending(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned 
int len) +{ + return __read_pending(vcpu, addr, len, true); +} + static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq) { return (vgic_irq_is_sgi(irq->intid) && diff --git a/arch/arm64/kvm/vgic/vgic-mmio.h b/arch/arm64/kvm/vgic/vgic-mmio.h index 3fa696f198a3..6082d4b66d39 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio.h +++ b/arch/arm64/kvm/vgic/vgic-mmio.h @@ -149,6 +149,9 @@ int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu, unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len); +unsigned long vgic_uaccess_read_pending(struct kvm_vcpu *vcpu, + gpa_t addr, unsigned int len); + void vgic_mmio_write_spending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val); -- cgit From 98432ccdec9f178ba041e1e5f9f32dbd71576504 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 7 Jun 2022 14:14:26 +0100 Subject: KVM: arm64: Replace vgic_v3_uaccess_read_pending with vgic_uaccess_read_pending Now that GICv2 has a proper userspace accessor for the pending state, switch GICv3 over to it, dropping the local version, moving over the specific behaviours that GICv3 requires (such as the distinction between pending latch and line level which were never enforced with GICv2). We also gain extra locking that isn't really necessary for userspace, but that's a small price to pay for getting rid of superfluous code. Signed-off-by: Marc Zyngier Reviewed-by: Eric Auger Link: https://lore.kernel.org/r/20220607131427.1164881-3-maz@kernel.org --- arch/arm64/kvm/vgic/vgic-mmio-v3.c | 40 ++-------------------------------------- arch/arm64/kvm/vgic/vgic-mmio.c | 21 ++++++++++++++++++++- 2 files changed, 22 insertions(+), 39 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index f7aa7bcd6fb8..f15e29cc63ce 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -353,42 +353,6 @@ static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu, return 0; } -static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu, - gpa_t addr, unsigned int len) -{ - u32 intid = VGIC_ADDR_TO_INTID(addr, 1); - u32 value = 0; - int i; - - /* - * pending state of interrupt is latched in pending_latch variable. - * Userspace will save and restore pending state and line_level - * separately. - * Refer to Documentation/virt/kvm/devices/arm-vgic-v3.rst - * for handling of ISPENDR and ICPENDR. 
- */ - for (i = 0; i < len * 8; i++) { - struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - bool state = irq->pending_latch; - - if (irq->hw && vgic_irq_is_sgi(irq->intid)) { - int err; - - err = irq_get_irqchip_state(irq->host_irq, - IRQCHIP_STATE_PENDING, - &state); - WARN_ON(err); - } - - if (state) - value |= (1U << i); - - vgic_put_irq(vcpu->kvm, irq); - } - - return value; -} - static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len, unsigned long val) @@ -666,7 +630,7 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = { VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR, vgic_mmio_read_pending, vgic_mmio_write_spending, - vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 1, + vgic_uaccess_read_pending, vgic_v3_uaccess_write_pending, 1, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICPENDR, vgic_mmio_read_pending, vgic_mmio_write_cpending, @@ -750,7 +714,7 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = { VGIC_ACCESS_32bit), REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISPENDR0, vgic_mmio_read_pending, vgic_mmio_write_spending, - vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 4, + vgic_uaccess_read_pending, vgic_v3_uaccess_write_pending, 4, VGIC_ACCESS_32bit), REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICPENDR0, vgic_mmio_read_pending, vgic_mmio_write_cpending, diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c index dc8c52487e47..997d0fce2088 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio.c +++ b/arch/arm64/kvm/vgic/vgic-mmio.c @@ -240,6 +240,15 @@ static unsigned long __read_pending(struct kvm_vcpu *vcpu, unsigned long flags; bool val; + /* + * When used from userspace with a GICv3 model: + * + * Pending state of interrupt is latched in pending_latch + * variable. Userspace will save and restore pending state + * and line_level separately. + * Refer to Documentation/virt/kvm/devices/arm-vgic-v3.rst + * for handling of ISPENDR and ICPENDR. + */ raw_spin_lock_irqsave(&irq->irq_lock, flags); if (irq->hw && vgic_irq_is_sgi(irq->intid)) { int err; @@ -252,7 +261,17 @@ static unsigned long __read_pending(struct kvm_vcpu *vcpu, } else if (!is_user && vgic_irq_is_mapped_level(irq)) { val = vgic_get_phys_line_level(irq); } else { - val = irq_is_pending(irq); + switch (vcpu->kvm->arch.vgic.vgic_model) { + case KVM_DEV_TYPE_ARM_VGIC_V3: + if (is_user) { + val = irq->pending_latch; + break; + } + fallthrough; + default: + val = irq_is_pending(irq); + break; + } } value |= ((u32)val << i); -- cgit From efedd01de475e126e43a07d0b1221bb65e497163 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 7 Jun 2022 14:14:27 +0100 Subject: KVM: arm64: Warn if accessing timer pending state outside of vcpu context A recurrent bug in the KVM/arm64 code base consists in trying to access the timer pending state outside of the vcpu context, which makes zero sense (the pending state only exists when the vcpu is loaded). In order to avoid more embarrassing crashes and catch the offenders red-handed, add a warning to kvm_arch_timer_get_input_level() and return the state as non-pending. This avoids taking the system down, and still helps tracking down silly bugs. 
Reviewed-by: Eric Auger Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220607131427.1164881-4-maz@kernel.org --- arch/arm64/kvm/arch_timer.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index 4e39ace073af..3b8d062e30ea 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -1230,6 +1230,9 @@ bool kvm_arch_timer_get_input_level(int vintid) struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); struct arch_timer_context *timer; + if (WARN(!vcpu, "No vcpu context!\n")) + return false; + if (vintid == vcpu_vtimer(vcpu)->irq.irq) timer = vcpu_vtimer(vcpu); else if (vintid == vcpu_ptimer(vcpu)->irq.irq) -- cgit From ae187fec75aa670a551d9662f83e3947d3f02a69 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 9 Jun 2022 13:12:18 +0100 Subject: KVM: arm64: Return error from kvm_arch_init_vm() on allocation failure If we fail to allocate the 'supported_cpus' cpumask in kvm_arch_init_vm() then be sure to return -ENOMEM instead of success (0) on the failure path. Reviewed-by: Alexandru Elisei Signed-off-by: Will Deacon Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220609121223.2551-2-will@kernel.org --- arch/arm64/kvm/arm.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 400bb0fe2745..0da0f06037db 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -150,8 +150,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) if (ret) goto out_free_stage2_pgd; - if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL)) + if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL)) { + ret = -ENOMEM; goto out_free_stage2_pgd; + } cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask); kvm_vgic_early_init(kvm); -- cgit From fa7a17214488ef7df347dcd1a5594f69ea17f4dc Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 9 Jun 2022 13:12:19 +0100 Subject: KVM: arm64: Handle all ID registers trapped for a protected VM A protected VM accessing ID_AA64ISAR2_EL1 gets punished with an UNDEF, while it really should only get a zero back if the register is not handled by the hypervisor emulation (as mandated by the architecture). Introduce all the missing ID registers (including the unallocated ones), and have them to return 0. Reported-by: Will Deacon Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220609121223.2551-3-will@kernel.org --- arch/arm64/kvm/hyp/nvhe/sys_regs.c | 42 ++++++++++++++++++++++++++++++-------- 1 file changed, 34 insertions(+), 8 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c index b6d86e423319..35a4331ba5f3 100644 --- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c +++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c @@ -243,15 +243,9 @@ u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id) case SYS_ID_AA64MMFR2_EL1: return get_pvm_id_aa64mmfr2(vcpu); default: - /* - * Should never happen because all cases are covered in - * pvm_sys_reg_descs[]. - */ - WARN_ON(1); - break; + /* Unhandled ID register, RAZ */ + return 0; } - - return 0; } static u64 read_id_reg(const struct kvm_vcpu *vcpu, @@ -332,6 +326,16 @@ static bool pvm_gic_read_sre(struct kvm_vcpu *vcpu, /* Mark the specified system register as an AArch64 feature id register. 
*/ #define AARCH64(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch64 } +/* + * sys_reg_desc initialiser for architecturally unallocated cpufeature ID + * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2 + * (1 <= crm < 8, 0 <= Op2 < 8). + */ +#define ID_UNALLOCATED(crm, op2) { \ + Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \ + .access = pvm_access_id_aarch64, \ +} + /* Mark the specified system register as Read-As-Zero/Write-Ignored */ #define RAZ_WI(REG) { SYS_DESC(REG), .access = pvm_access_raz_wi } @@ -375,24 +379,46 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = { AARCH32(SYS_MVFR0_EL1), AARCH32(SYS_MVFR1_EL1), AARCH32(SYS_MVFR2_EL1), + ID_UNALLOCATED(3,3), AARCH32(SYS_ID_PFR2_EL1), AARCH32(SYS_ID_DFR1_EL1), AARCH32(SYS_ID_MMFR5_EL1), + ID_UNALLOCATED(3,7), /* AArch64 ID registers */ /* CRm=4 */ AARCH64(SYS_ID_AA64PFR0_EL1), AARCH64(SYS_ID_AA64PFR1_EL1), + ID_UNALLOCATED(4,2), + ID_UNALLOCATED(4,3), AARCH64(SYS_ID_AA64ZFR0_EL1), + ID_UNALLOCATED(4,5), + ID_UNALLOCATED(4,6), + ID_UNALLOCATED(4,7), AARCH64(SYS_ID_AA64DFR0_EL1), AARCH64(SYS_ID_AA64DFR1_EL1), + ID_UNALLOCATED(5,2), + ID_UNALLOCATED(5,3), AARCH64(SYS_ID_AA64AFR0_EL1), AARCH64(SYS_ID_AA64AFR1_EL1), + ID_UNALLOCATED(5,6), + ID_UNALLOCATED(5,7), AARCH64(SYS_ID_AA64ISAR0_EL1), AARCH64(SYS_ID_AA64ISAR1_EL1), + AARCH64(SYS_ID_AA64ISAR2_EL1), + ID_UNALLOCATED(6,3), + ID_UNALLOCATED(6,4), + ID_UNALLOCATED(6,5), + ID_UNALLOCATED(6,6), + ID_UNALLOCATED(6,7), AARCH64(SYS_ID_AA64MMFR0_EL1), AARCH64(SYS_ID_AA64MMFR1_EL1), AARCH64(SYS_ID_AA64MMFR2_EL1), + ID_UNALLOCATED(7,3), + ID_UNALLOCATED(7,4), + ID_UNALLOCATED(7,5), + ID_UNALLOCATED(7,6), + ID_UNALLOCATED(7,7), /* Scalable Vector Registers are restricted. */ -- cgit From cde5042adf11b0a30a6ce0ec3d071afcf8d2efaf Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 9 Jun 2022 13:12:20 +0100 Subject: KVM: arm64: Ignore 'kvm-arm.mode=protected' when using VHE Ignore 'kvm-arm.mode=protected' when using VHE so that kvm_get_mode() only returns KVM_MODE_PROTECTED on systems where the feature is available. 
Cc: David Brazdil Acked-by: Mark Rutland Signed-off-by: Will Deacon Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220609121223.2551-4-will@kernel.org --- arch/arm64/kernel/cpufeature.c | 10 +--------- arch/arm64/kvm/arm.c | 6 +++++- 2 files changed, 6 insertions(+), 10 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 42ea2bd856c6..79fac13ab2ef 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1974,15 +1974,7 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap) #ifdef CONFIG_KVM static bool is_kvm_protected_mode(const struct arm64_cpu_capabilities *entry, int __unused) { - if (kvm_get_mode() != KVM_MODE_PROTECTED) - return false; - - if (is_kernel_in_hyp_mode()) { - pr_warn("Protected KVM not available with VHE\n"); - return false; - } - - return true; + return kvm_get_mode() == KVM_MODE_PROTECTED; } #endif /* CONFIG_KVM */ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 0da0f06037db..a0188144a122 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -2273,7 +2273,11 @@ static int __init early_kvm_mode_cfg(char *arg) return -EINVAL; if (strcmp(arg, "protected") == 0) { - kvm_mode = KVM_MODE_PROTECTED; + if (!is_kernel_in_hyp_mode()) + kvm_mode = KVM_MODE_PROTECTED; + else + pr_warn_once("Protected KVM not available with VHE\n"); + return 0; } -- cgit From 112f3bab41113dc53b4f35e9034b2208245bc002 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 9 Jun 2022 13:12:21 +0100 Subject: KVM: arm64: Extend comment in has_vhe() has_vhe() expands to a compile-time constant when evaluated from the VHE or nVHE code, alternatively checking a static key when called from elsewhere in the kernel. On face value, this looks like a case of premature optimization, but in fact this allows symbol references on VHE-specific code paths to be dropped from the nVHE object. Expand the comment in has_vhe() to make this clearer, hopefully discouraging anybody from simplifying the code. Cc: David Brazdil Acked-by: Mark Rutland Signed-off-by: Will Deacon Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220609121223.2551-5-will@kernel.org --- arch/arm64/include/asm/virt.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index 3c8af033a997..0e80db4327b6 100644 --- a/arch/arm64/include/asm/virt.h +++ b/arch/arm64/include/asm/virt.h @@ -113,6 +113,9 @@ static __always_inline bool has_vhe(void) /* * Code only run in VHE/NVHE hyp context can assume VHE is present or * absent. Otherwise fall back to caps. + * This allows the compiler to discard VHE-specific code from the + * nVHE object, reducing the number of external symbol references + * needed to link. */ if (is_vhe_hyp_code()) return true; -- cgit From 5879c97f37022ff22a3f13174c24fcf2807fdbc0 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 9 Jun 2022 13:12:22 +0100 Subject: KVM: arm64: Remove redundant hyp_assert_lock_held() assertions host_stage2_try() asserts that the KVM host lock is held, so there's no need to duplicate the assertion in its wrappers. 
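As a minimal illustration (a simplified sketch, not the actual pKVM sources; the function names, argument types and the lock are taken from the patch below), the check now lives solely in the callee:

	int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
				     enum kvm_pgtable_prot prot)
	{
		/*
		 * No hyp_assert_lock_held(&host_kvm.lock) needed here:
		 * host_stage2_try() already performs that assertion.
		 */
		return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
	}
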
Signed-off-by: Will Deacon Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220609121223.2551-6-will@kernel.org --- arch/arm64/kvm/hyp/nvhe/mem_protect.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index 78edf077fa3b..1e78acf9662e 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -314,15 +314,11 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range) int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot) { - hyp_assert_lock_held(&host_kvm.lock); - return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot); } int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id) { - hyp_assert_lock_held(&host_kvm.lock); - return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt, addr, size, &host_s2_pool, owner_id); } -- cgit From bcbfb588cf323929ac46767dd14e392016bbce04 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 9 Jun 2022 13:12:23 +0100 Subject: KVM: arm64: Drop stale comment The layout of 'struct kvm_vcpu_arch' has evolved significantly since the initial port of KVM/arm64, so remove the stale comment suggesting that a prefix of the structure is used exclusively from assembly code. Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220609121223.2551-7-will@kernel.org --- arch/arm64/include/asm/kvm_host.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 47a1e25e25bb..de32152cea04 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -362,11 +362,6 @@ struct kvm_vcpu_arch { struct arch_timer_cpu timer_cpu; struct kvm_pmu pmu; - /* - * Anything that is not used directly from assembly code goes - * here. - */ - /* * Guest registers we preserve during guest debugging. * -- cgit From 27d8fa207835fa5c7cd6f969c6cc94d1123951ee Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Wed, 15 Jun 2022 14:22:38 +0100 Subject: Revert "arm64: Initialize jump labels before setup_machine_fdt()" This reverts commit 73e2d827a501d48dceeb5b9b267a4cd283d6b1ae. The reverted patch was needed as a fix after commit f5bda35fba61 ("random: use static branch for crng_ready()"). However, this was already fixed by 60e5b2886b92 ("random: do not use jump labels before they are initialized") and hence no longer necessary to initialise jump labels before setup_machine_fdt(). Signed-off-by: Catalin Marinas --- arch/arm64/kernel/setup.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index cf3a759f10d4..fea3223704b6 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -303,14 +303,13 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p) early_fixmap_init(); early_ioremap_init(); + setup_machine_fdt(__fdt_pointer); + /* * Initialise the static keys early as they may be enabled by the - * cpufeature code, early parameters, and DT setup. + * cpufeature code and early parameters. 
*/ jump_label_init(); - - setup_machine_fdt(__fdt_pointer); - parse_early_param(); /* -- cgit From 3eefdf9d1e406f3da47470b2854347009ffcb6fa Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 14 Jun 2022 09:09:42 +0100 Subject: arm64: ftrace: fix branch range checks The branch range checks in ftrace_make_call() and ftrace_make_nop() are incorrect, erroneously permitting a forwards branch of 128M and erroneously rejecting a backwards branch of 128M. This is because both functions calculate the offset backwards, calculating the offset *from* the target *to* the branch, rather than the other way around as the later comparisons expect. If an out-of-range branch were erroneously permitted, this would later be rejected by aarch64_insn_gen_branch_imm() as branch_imm_common() checks the bounds correctly, resulting in warnings and the placement of a BRK instruction. Note that this can only happen for a forwards branch of exactly 128M, and so the caller would need to be exactly 128M bytes below the relevant ftrace trampoline. If an in-range branch were erroneously rejected, then: * For modules when CONFIG_ARM64_MODULE_PLTS=y, this would result in the use of a PLT entry, which is benign. Note that this is the common case, as this is selected by CONFIG_RANDOMIZE_BASE (and therefore RANDOMIZE_MODULE_REGION_FULL), which distributions typically select. This is also selected by CONFIG_ARM64_ERRATUM_843419. * For modules when CONFIG_ARM64_MODULE_PLTS=n, this would result in internal ftrace failures. * For core kernel text, this would result in internal ftrace failures. Note that for this to happen, the kernel text would need to be at least 128M bytes in size, and typical configurations are smaller than this. Fix this by calculating the offset *from* the branch *to* the target in both functions. Fixes: f8af0b364e24 ("arm64: ftrace: don't validate branch via PLT in ftrace_make_nop()") Fixes: e71a4e1bebaf ("arm64: ftrace: add support for far branches to dynamic ftrace") Signed-off-by: Mark Rutland Cc: Ard Biesheuvel Cc: Will Deacon Tested-by: "Ivan T. Ivanov" Reviewed-by: Chengming Zhou Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20220614080944.1349146-2-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/ftrace.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index f447c4a36f69..e1c88234b882 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -84,7 +84,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { unsigned long pc = rec->ip; u32 old, new; - long offset = (long)pc - (long)addr; + long offset = (long)addr - (long)pc; if (offset < -SZ_128M || offset >= SZ_128M) { struct module *mod; @@ -183,7 +183,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long pc = rec->ip; bool validate = true; u32 old = 0, new; - long offset = (long)pc - (long)addr; + long offset = (long)addr - (long)pc; if (offset < -SZ_128M || offset >= SZ_128M) { u32 replaced; -- cgit From a6253579977e4c6f7818eeb05bf2bc65678a7187 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 14 Jun 2022 09:09:43 +0100 Subject: arm64: ftrace: consistently handle PLTs. Sometimes it is necessary to use a PLT entry to call an ftrace trampoline. 
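As a rough sketch of when that is the case (a hypothetical helper for illustration only, not part of the patches; the offset calculation and the 128M 'BL' range are the ones discussed in the previous patch, and the real logic ends up in ftrace_find_callable_addr() below):

	static bool needs_ftrace_plt(unsigned long pc, unsigned long addr)
	{
		/* offset from the callsite (pc) to the trampoline (addr) */
		long offset = (long)addr - (long)pc;

		/* out of 'BL' range, so the branch must go via a PLT entry */
		return offset < -SZ_128M || offset >= SZ_128M;
	}
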
This is handled by ftrace_make_call() and ftrace_make_nop(), with each having *almost* identical logic, but this is not handled by ftrace_modify_call() since its introduction in commit: 3b23e4991fb66f6d ("arm64: implement ftrace with regs") Due to this, if we ever were to call ftrace_modify_call() for a callsite which requires a PLT entry for a trampoline, then either: a) If the old addr requires a trampoline, ftrace_modify_call() will use an out-of-range address to generate the 'old' branch instruction. This will result in warnings from aarch64_insn_gen_branch_imm() and ftrace_modify_code(), and no instructions will be modified. As ftrace_modify_call() will return an error, this will result in subsequent internal ftrace errors. b) If the old addr does not require a trampoline, but the new addr does, ftrace_modify_call() will use an out-of-range address to generate the 'new' branch instruction. This will result in warnings from aarch64_insn_gen_branch_imm(), and ftrace_modify_code() will replace the 'old' branch with a BRK. This will result in a kernel panic when this BRK is later executed. Practically speaking, case (a) is vastly more likely than case (b), and typically this will result in internal ftrace errors that don't necessarily affect the rest of the system. This can be demonstrated with an out-of-tree test module which triggers ftrace_modify_call(), e.g. | # insmod test_ftrace.ko | test_ftrace: Function test_function raw=0xffffb3749399201c, callsite=0xffffb37493992024 | branch_imm_common: offset out of range | branch_imm_common: offset out of range | ------------[ ftrace bug ]------------ | ftrace failed to modify | [] test_function+0x8/0x38 [test_ftrace] | actual: 1d:00:00:94 | Updating ftrace call site to call a different ftrace function | ftrace record flags: e0000002 | (2) R | expected tramp: ffffb374ae42ed54 | ------------[ cut here ]------------ | WARNING: CPU: 0 PID: 165 at kernel/trace/ftrace.c:2085 ftrace_bug+0x280/0x2b0 | Modules linked in: test_ftrace(+) | CPU: 0 PID: 165 Comm: insmod Not tainted 5.19.0-rc2-00002-g4d9ead8b45ce #13 | Hardware name: linux,dummy-virt (DT) | pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) | pc : ftrace_bug+0x280/0x2b0 | lr : ftrace_bug+0x280/0x2b0 | sp : ffff80000839ba00 | x29: ffff80000839ba00 x28: 0000000000000000 x27: ffff80000839bcf0 | x26: ffffb37493994180 x25: ffffb374b0991c28 x24: ffffb374b0d70000 | x23: 00000000ffffffea x22: ffffb374afcc33b0 x21: ffffb374b08f9cc8 | x20: ffff572b8462c000 x19: ffffb374b08f9000 x18: ffffffffffffffff | x17: 6c6c6163202c6331 x16: ffffb374ae5ad110 x15: ffffb374b0d51ee4 | x14: 0000000000000000 x13: 3435646532346561 x12: 3437336266666666 | x11: 203a706d61727420 x10: 6465746365707865 x9 : ffffb374ae5149e8 | x8 : 336266666666203a x7 : 706d617274206465 x6 : 00000000fffff167 | x5 : ffff572bffbc4a08 x4 : 00000000fffff167 x3 : 0000000000000000 | x2 : 0000000000000000 x1 : ffff572b84461e00 x0 : 0000000000000022 | Call trace: | ftrace_bug+0x280/0x2b0 | ftrace_replace_code+0x98/0xa0 | ftrace_modify_all_code+0xe0/0x144 | arch_ftrace_update_code+0x14/0x20 | ftrace_startup+0xf8/0x1b0 | register_ftrace_function+0x38/0x90 | test_ftrace_init+0xd0/0x1000 [test_ftrace] | do_one_initcall+0x50/0x2b0 | do_init_module+0x50/0x1f0 | load_module+0x17c8/0x1d64 | __do_sys_finit_module+0xa8/0x100 | __arm64_sys_finit_module+0x2c/0x3c | invoke_syscall+0x50/0x120 | el0_svc_common.constprop.0+0xdc/0x100 | do_el0_svc+0x3c/0xd0 | el0_svc+0x34/0xb0 | el0t_64_sync_handler+0xbc/0x140 | el0t_64_sync+0x18c/0x190 | ---[ end trace 
0000000000000000 ]--- We can solve this by consistently determining whether to use a PLT entry for an address. Note that since (the earlier) commit: f1a54ae9af0da4d7 ("arm64: module/ftrace: intialize PLT at load time") ... we can consistently determine the PLT address that a given callsite will use, and therefore ftrace_make_nop() does not need to skip validation when a PLT is in use. This patch factors the existing logic out of ftrace_make_call() and ftrace_make_nop() into a common ftrace_find_callable_addr() helper function, which is used by ftrace_make_call(), ftrace_make_nop(), and ftrace_modify_call(). In ftrace_make_nop() the patching is consistently validated by ftrace_modify_code() as we can always determine what the old instruction should have been. Fixes: 3b23e4991fb6 ("arm64: implement ftrace with regs") Signed-off-by: Mark Rutland Cc: Ard Biesheuvel Cc: Will Deacon Tested-by: "Ivan T. Ivanov" Reviewed-by: Chengming Zhou Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20220614080944.1349146-3-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/ftrace.c | 137 ++++++++++++++++++++++----------------------- 1 file changed, 66 insertions(+), 71 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index e1c88234b882..ea5dc7c90f46 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -78,47 +78,76 @@ static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr) } /* - * Turn on the call to ftrace_caller() in instrumented function + * Find the address the callsite must branch to in order to reach '*addr'. + * + * Due to the limited range of 'BL' instructions, modules may be placed too far + * away to branch directly and must use a PLT. + * + * Returns true when '*addr' contains a reachable target address, or has been + * modified to contain a PLT address. Returns false otherwise. */ -int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, + struct module *mod, + unsigned long *addr) { unsigned long pc = rec->ip; - u32 old, new; - long offset = (long)addr - (long)pc; + long offset = (long)*addr - (long)pc; + struct plt_entry *plt; - if (offset < -SZ_128M || offset >= SZ_128M) { - struct module *mod; - struct plt_entry *plt; + /* + * When the target is within range of the 'BL' instruction, use 'addr' + * as-is and branch to that directly. + */ + if (offset >= -SZ_128M && offset < SZ_128M) + return true; - if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) - return -EINVAL; + /* + * When the target is outside of the range of a 'BL' instruction, we + * must use a PLT to reach it. We can only place PLTs for modules, and + * only when module PLT support is built-in. + */ + if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) + return false; - /* - * On kernels that support module PLTs, the offset between the - * branch instruction and its target may legally exceed the - * range of an ordinary relative 'bl' opcode. In this case, we - * need to branch via a trampoline in the module. - * - * NOTE: __module_text_address() must be called with preemption - * disabled, but we can rely on ftrace_lock to ensure that 'mod' - * retains its validity throughout the remainder of this code. - */ + /* + * 'mod' is only set at module load time, but if we end up + * dealing with an out-of-range condition, we can assume it + * is due to a module being loaded far away from the kernel. 
+ * + * NOTE: __module_text_address() must be called with preemption + * disabled, but we can rely on ftrace_lock to ensure that 'mod' + * retains its validity throughout the remainder of this code. + */ + if (!mod) { preempt_disable(); mod = __module_text_address(pc); preempt_enable(); + } - if (WARN_ON(!mod)) - return -EINVAL; + if (WARN_ON(!mod)) + return false; - plt = get_ftrace_plt(mod, addr); - if (!plt) { - pr_err("ftrace: no module PLT for %ps\n", (void *)addr); - return -EINVAL; - } - - addr = (unsigned long)plt; + plt = get_ftrace_plt(mod, *addr); + if (!plt) { + pr_err("ftrace: no module PLT for %ps\n", (void *)*addr); + return false; } + *addr = (unsigned long)plt; + return true; +} + +/* + * Turn on the call to ftrace_caller() in instrumented function + */ +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned long pc = rec->ip; + u32 old, new; + + if (!ftrace_find_callable_addr(rec, NULL, &addr)) + return -EINVAL; + old = aarch64_insn_gen_nop(); new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); @@ -132,6 +161,11 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long pc = rec->ip; u32 old, new; + if (!ftrace_find_callable_addr(rec, NULL, &old_addr)) + return -EINVAL; + if (!ftrace_find_callable_addr(rec, NULL, &addr)) + return -EINVAL; + old = aarch64_insn_gen_branch_imm(pc, old_addr, AARCH64_INSN_BRANCH_LINK); new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); @@ -181,54 +215,15 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { unsigned long pc = rec->ip; - bool validate = true; u32 old = 0, new; - long offset = (long)addr - (long)pc; - if (offset < -SZ_128M || offset >= SZ_128M) { - u32 replaced; - - if (!IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) - return -EINVAL; - - /* - * 'mod' is only set at module load time, but if we end up - * dealing with an out-of-range condition, we can assume it - * is due to a module being loaded far away from the kernel. - */ - if (!mod) { - preempt_disable(); - mod = __module_text_address(pc); - preempt_enable(); - - if (WARN_ON(!mod)) - return -EINVAL; - } - - /* - * The instruction we are about to patch may be a branch and - * link instruction that was redirected via a PLT entry. In - * this case, the normal validation will fail, but we can at - * least check that we are dealing with a branch and link - * instruction that points into the right module. - */ - if (aarch64_insn_read((void *)pc, &replaced)) - return -EFAULT; - - if (!aarch64_insn_is_bl(replaced) || - !within_module(pc + aarch64_get_branch_offset(replaced), - mod)) - return -EINVAL; - - validate = false; - } else { - old = aarch64_insn_gen_branch_imm(pc, addr, - AARCH64_INSN_BRANCH_LINK); - } + if (!ftrace_find_callable_addr(rec, mod, &addr)) + return -EINVAL; + old = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK); new = aarch64_insn_gen_nop(); - return ftrace_modify_code(pc, old, new, validate); + return ftrace_modify_code(pc, old, new, true); } void arch_ftrace_update_code(int command) -- cgit From 0d8116ccd83b7e5384cf04de570ae19771e8a3d0 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 14 Jun 2022 09:09:44 +0100 Subject: arm64: ftrace: remove redundant label Since commit: c4a0ebf87cebbfa2 ("arm64/ftrace: Make function graph use ftrace directly") The 'ftrace_common_return' label has been unused. Remove it. Signed-off-by: Mark Rutland Cc: Chengming Zhou Cc: Will Deacon Tested-by: "Ivan T. 
Ivanov" Reviewed-by: Chengming Zhou Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20220614080944.1349146-4-mark.rutland@arm.com Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry-ftrace.S | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index d42a205ef625..bd5df50e4643 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -102,7 +102,6 @@ SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) * x19-x29 per the AAPCS, and we created frame records upon entry, so we need * to restore x0-x8, x29, and x30. */ -ftrace_common_return: /* Restore function arguments */ ldp x0, x1, [sp] ldp x2, x3, [sp, #S_X2] -- cgit From 3f77a1d0570e62cfce8d472319df00008bbeab38 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 15 Jun 2022 20:15:04 +0100 Subject: arm64/cpufeature: Unexport set_cpu_feature() We currently export set_cpu_feature() to modules but there are no in tree users that can be built as modules and it is hard to see cases where it would make sense for there to be any such users. Remove the export to avoid anyone else having to worry about why it is there and ensure that any users that do get added get a bit more visiblity. Signed-off-by: Mark Brown Acked-by: Suzuki K Poulose Reviewed-by: Mark Rutland Link: https://lore.kernel.org/r/20220615191504.626604-1-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/kernel/cpufeature.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 42ea2bd856c6..d76fd95376f0 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -3109,7 +3109,6 @@ void cpu_set_feature(unsigned int num) WARN_ON(num >= MAX_CPU_FEATURES); elf_hwcap |= BIT(num); } -EXPORT_SYMBOL_GPL(cpu_set_feature); bool cpu_have_feature(unsigned int num) { -- cgit From 56961c6331463cce2d84d0f973177a517fb33a82 Mon Sep 17 00:00:00 2001 From: Quentin Perret Date: Thu, 16 Jun 2022 16:11:34 +0000 Subject: KVM: arm64: Prevent kmemleak from accessing pKVM memory Commit a7259df76702 ("memblock: make memblock_find_in_range method private") changed the API using which memory is reserved for the pKVM hypervisor. However, memblock_phys_alloc() differs from the original API in terms of kmemleak semantics -- the old one didn't report the reserved regions to kmemleak while the new one does. Unfortunately, when protected KVM is enabled, all kernel accesses to pKVM-private memory result in a fatal exception, which can now happen because of kmemleak scans: $ echo scan > /sys/kernel/debug/kmemleak [ 34.991354] kvm [304]: nVHE hyp BUG at: [] __kvm_nvhe_handle_host_mem_abort+0x270/0x290! 
[ 34.991580] kvm [304]: Hyp Offset: 0xfffe8be807e00000 [ 34.991813] Kernel panic - not syncing: HYP panic: [ 34.991813] PS:600003c9 PC:0000f418011a3750 ESR:00000000f2000800 [ 34.991813] FAR:ffff000439200000 HPFAR:0000000004792000 PAR:0000000000000000 [ 34.991813] VCPU:0000000000000000 [ 34.993660] CPU: 0 PID: 304 Comm: bash Not tainted 5.19.0-rc2 #102 [ 34.994059] Hardware name: linux,dummy-virt (DT) [ 34.994452] Call trace: [ 34.994641] dump_backtrace.part.0+0xcc/0xe0 [ 34.994932] show_stack+0x18/0x6c [ 34.995094] dump_stack_lvl+0x68/0x84 [ 34.995276] dump_stack+0x18/0x34 [ 34.995484] panic+0x16c/0x354 [ 34.995673] __hyp_pgtable_total_pages+0x0/0x60 [ 34.995933] scan_block+0x74/0x12c [ 34.996129] scan_gray_list+0xd8/0x19c [ 34.996332] kmemleak_scan+0x2c8/0x580 [ 34.996535] kmemleak_write+0x340/0x4a0 [ 34.996744] full_proxy_write+0x60/0xbc [ 34.996967] vfs_write+0xc4/0x2b0 [ 34.997136] ksys_write+0x68/0xf4 [ 34.997311] __arm64_sys_write+0x20/0x2c [ 34.997532] invoke_syscall+0x48/0x114 [ 34.997779] el0_svc_common.constprop.0+0x44/0xec [ 34.998029] do_el0_svc+0x2c/0xc0 [ 34.998205] el0_svc+0x2c/0x84 [ 34.998421] el0t_64_sync_handler+0xf4/0x100 [ 34.998653] el0t_64_sync+0x18c/0x190 [ 34.999252] SMP: stopping secondary CPUs [ 35.000034] Kernel Offset: disabled [ 35.000261] CPU features: 0x800,00007831,00001086 [ 35.000642] Memory Limit: none [ 35.001329] ---[ end Kernel panic - not syncing: HYP panic: [ 35.001329] PS:600003c9 PC:0000f418011a3750 ESR:00000000f2000800 [ 35.001329] FAR:ffff000439200000 HPFAR:0000000004792000 PAR:0000000000000000 [ 35.001329] VCPU:0000000000000000 ]--- Fix this by explicitly excluding the hypervisor's memory pool from kmemleak like we already do for the hyp BSS. Cc: Mike Rapoport Fixes: a7259df76702 ("memblock: make memblock_find_in_range method private") Signed-off-by: Quentin Perret Acked-by: Catalin Marinas Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220616161135.3997786-1-qperret@google.com --- arch/arm64/kvm/arm.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index a0188144a122..83a7f61354d3 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -2112,11 +2112,11 @@ static int finalize_hyp_mode(void) return 0; /* - * Exclude HYP BSS from kmemleak so that it doesn't get peeked - * at, which would end badly once the section is inaccessible. - * None of other sections should ever be introspected. + * Exclude HYP sections from kmemleak so that they don't get peeked + * at, which would end badly once inaccessible. */ kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start); + kmemleak_free_part(__va(hyp_mem_base), hyp_mem_size); return pkvm_drop_host_privileges(); } -- cgit From c50f11c6196f45c92ca48b16a5071615d4ae0572 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 10 Jun 2022 16:12:27 +0100 Subject: arm64: mm: Don't invalidate FROM_DEVICE buffers at start of DMA transfer Invalidating the buffer memory in arch_sync_dma_for_device() for FROM_DEVICE transfers When using the streaming DMA API to map a buffer prior to inbound non-coherent DMA (i.e. DMA_FROM_DEVICE), we invalidate any dirty CPU cachelines so that they will not be written back during the transfer and corrupt the buffer contents written by the DMA. This, however, poses two potential problems: (1) If the DMA transfer does not write to every byte in the buffer, then the unwritten bytes will contain stale data once the transfer has completed. 
(2) If the buffer has a virtual alias in userspace, then stale data may be visible via this alias during the period between performing the cache invalidation and the DMA writes landing in memory. Address both of these issues by cleaning (aka writing-back) the dirty lines in arch_sync_dma_for_device(DMA_FROM_DEVICE) instead of discarding them using invalidation. Cc: Ard Biesheuvel Cc: Christoph Hellwig Cc: Robin Murphy Cc: Russell King Cc: Link: https://lore.kernel.org/r/20220606152150.GA31568@willie-the-truck Signed-off-by: Will Deacon Reviewed-by: Ard Biesheuvel Link: https://lore.kernel.org/r/20220610151228.4562-2-will@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/mm/cache.S | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 0ea6cc25dc66..21c907987080 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -218,8 +218,6 @@ SYM_FUNC_ALIAS(__dma_flush_area, __pi___dma_flush_area) */ SYM_FUNC_START(__pi___dma_map_area) add x1, x0, x1 - cmp w2, #DMA_FROM_DEVICE - b.eq __pi_dcache_inval_poc b __pi_dcache_clean_poc SYM_FUNC_END(__pi___dma_map_area) SYM_FUNC_ALIAS(__dma_map_area, __pi___dma_map_area) -- cgit From 856216b70a41ff3f8c866b627546afa01567b389 Mon Sep 17 00:00:00 2001 From: Matt Ranostay Date: Fri, 17 Jun 2022 08:13:04 -0700 Subject: arm64: dts: ti: k3-j721s2: Fix overlapping GICD memory region GICD region was overlapping with GICR causing the latter to not map successfully, and in turn the gic-v3 driver would fail to initialize. This issue was hidden till commit 2b2cd74a06c3 ("irqchip/gic-v3: Claim iomem resources") replaced of_iomap() calls with of_io_request_and_map() that internally called request_mem_region(). Respective console output before this patchset: [ 0.000000] GICv3: /bus@100000/interrupt-controller@1800000: couldn't map region 0 Fixes: b8545f9d3a54 ("arm64: dts: ti: Add initial support for J721S2 SoC") Cc: linux-stable@vger.kernel.org Cc: Marc Zyngier Cc: Robin Murphy Cc: Nishanth Menon Signed-off-by: Matt Ranostay Acked-by: Marc Zyngier Signed-off-by: Nishanth Menon Link: https://lore.kernel.org/r/20220617151304.446607-1-mranostay@ti.com --- arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm64') diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi index be7f39299894..19966f72c5b3 100644 --- a/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721s2-main.dtsi @@ -33,7 +33,7 @@ ranges; #interrupt-cells = <3>; interrupt-controller; - reg = <0x00 0x01800000 0x00 0x200000>, /* GICD */ + reg = <0x00 0x01800000 0x00 0x100000>, /* GICD */ <0x00 0x01900000 0x00 0x100000>, /* GICR */ <0x00 0x6f000000 0x00 0x2000>, /* GICC */ <0x00 0x6f010000 0x00 0x1000>, /* GICH */ -- cgit From 0c0af88f3f318e73237f7fadd02d0bf2b6c996bb Mon Sep 17 00:00:00 2001 From: Aswath Govindraju Date: Thu, 12 May 2022 12:18:58 +0530 Subject: arm64: dts: ti: k3-am64-main: Remove support for HS400 speed mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit AM64 SoC, does not support HS400 and HS200 is the maximum supported speed mode[1]. Therefore, fix the device tree node to reflect the same. 
[1] - https://www.ti.com/lit/ds/symlink/am6442.pdf (SPRSP56C – JANUARY 2021 – REVISED FEBRUARY 2022) Fixes: 8abae9389bdb ("arm64: dts: ti: Add support for AM642 SoC") Signed-off-by: Aswath Govindraju Signed-off-by: Nishanth Menon Link: https://lore.kernel.org/r/20220512064859.32059-1-a-govindraju@ti.com --- arch/arm64/boot/dts/ti/k3-am64-main.dtsi | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/arm64') diff --git a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi index f64b368c6c37..cdb530597c5e 100644 --- a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi @@ -456,13 +456,11 @@ clock-names = "clk_ahb", "clk_xin"; mmc-ddr-1_8v; mmc-hs200-1_8v; - mmc-hs400-1_8v; ti,trm-icp = <0x2>; ti,otap-del-sel-legacy = <0x0>; ti,otap-del-sel-mmc-hs = <0x0>; ti,otap-del-sel-ddr52 = <0x6>; ti,otap-del-sel-hs200 = <0x7>; - ti,otap-del-sel-hs400 = <0x4>; }; sdhci1: mmc@fa00000 { -- cgit