author     Linus Torvalds <torvalds@linux-foundation.org>  2020-04-09 10:51:30 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-04-09 10:51:30 -0700
commit     eab40026605f4717a9749ffcaec8119d58494999
tree       6c7cab6aa6b80ef1eee0d2c6544089a285ed8737  /arch/riscv/kernel/entry.S
parent     5d30bcacd91af6874481129797af364a53cd9b46
parent     37809df4b1c88927fe944eb766e0553811c51f64
Merge tag 'riscv-for-linus-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull RISC-V updates from Palmer Dabbelt:
"This contains a handful of new features:
- Partial support for the Kendryte K210.
There are still a few outstanding issues that I have patches for,
but I don't actually have a board to test them so they're not
included yet.
- SBI v0.2 support.
- Fixes to support for building with LLVM-based toolchains. The
resulting images are known not to boot yet.
I don't anticipate a part two, but I'll probably have something early
in the RCs to finish up the K210 support"
* tag 'riscv-for-linus-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux: (38 commits)
riscv: create a loader.bin boot image for Kendryte SoC
riscv: Kendryte K210 default config
riscv: Add Kendryte K210 device tree
riscv: Select required drivers for Kendryte SOC
riscv: Add Kendryte K210 SoC support
riscv: Add SOC early init support
riscv: Unaligned load/store handling for M_MODE
RISC-V: Support cpu hotplug
RISC-V: Add supported for ordered booting method using HSM
RISC-V: Add SBI HSM extension definitions
RISC-V: Export SBI error to linux error mapping function
RISC-V: Add cpu_ops and modify default booting method
RISC-V: Move relocate and few other functions out of __init
RISC-V: Implement new SBI v0.2 extensions
RISC-V: Introduce a new config for SBI v0.1
RISC-V: Add SBI v0.2 extension definitions
RISC-V: Add basic support for SBI v0.2
RISC-V: Mark existing SBI as 0.1 SBI.
riscv: Use macro definition instead of magic number
riscv: Add support to dump the kernel page tables
...
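
Several of the SBI commits above add the SBI v0.2 calling convention, in which the extension ID is passed in a7 and the function ID in a6, with arguments in a0-a5; the ecall returns an error code in a0 and a value in a1. As a rough sketch (hypothetical, not taken from these commits), a raw HSM hart_start call looks like:

# Hypothetical raw SBI v0.2 call: HSM (EID 0x48534D) hart_start (FID 0).
# Caller supplies a0 = hartid, a1 = start_addr, a2 = opaque.
sbi_hsm_hart_start:
	li	a7, 0x48534D	# SBI extension ID ("HSM" in ASCII)
	li	a6, 0		# function ID 0: sbi_hart_start
	ecall			# trap into the SBI implementation
	ret			# a0 = SBI error code, a1 = return value

The HSM (Hart State Management) extension is what the ordered-booting and CPU-hotplug commits build on: instead of all harts entering the kernel at once, the boot hart brings up secondaries explicitly via hart_start.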
Diffstat (limited to 'arch/riscv/kernel/entry.S')
-rw-r--r--  arch/riscv/kernel/entry.S  143
1 file changed, 61 insertions(+), 82 deletions(-)
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 208702d8c18e..56d071b2c0a1 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -13,17 +13,11 @@
 #include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
 
-	.text
-	.altmacro
-
-/*
- * Prepares to enter a system call or exception by saving all registers to the
- * stack.
- */
-	.macro SAVE_ALL
-	LOCAL _restore_kernel_tpsp
-	LOCAL _save_context
+#if !IS_ENABLED(CONFIG_PREEMPTION)
+.set resume_kernel, restore_all
+#endif
 
+ENTRY(handle_exception)
 	/*
 	 * If coming from userspace, preserve the user thread pointer and load
 	 * the kernel thread pointer. If we came from the kernel, the scratch
@@ -90,77 +84,6 @@ _save_context:
 	REG_S s3, PT_BADADDR(sp)
 	REG_S s4, PT_CAUSE(sp)
 	REG_S s5, PT_TP(sp)
-	.endm
-
-/*
- * Prepares to return from a system call or exception by restoring all
- * registers from the stack.
- */
-	.macro RESTORE_ALL
-	REG_L a0, PT_STATUS(sp)
-	/*
-	 * The current load reservation is effectively part of the processor's
-	 * state, in the sense that load reservations cannot be shared between
-	 * different hart contexts. We can't actually save and restore a load
-	 * reservation, so instead here we clear any existing reservation --
-	 * it's always legal for implementations to clear load reservations at
-	 * any point (as long as the forward progress guarantee is kept, but
-	 * we'll ignore that here).
-	 *
-	 * Dangling load reservations can be the result of taking a trap in the
-	 * middle of an LR/SC sequence, but can also be the result of a taken
-	 * forward branch around an SC -- which is how we implement CAS. As a
-	 * result we need to clear reservations between the last CAS and the
-	 * jump back to the new context. While it is unlikely the store
-	 * completes, implementations are allowed to expand reservations to be
-	 * arbitrarily large.
-	 */
-	REG_L a2, PT_EPC(sp)
-	REG_SC x0, a2, PT_EPC(sp)
-
-	csrw CSR_STATUS, a0
-	csrw CSR_EPC, a2
-
-	REG_L x1, PT_RA(sp)
-	REG_L x3, PT_GP(sp)
-	REG_L x4, PT_TP(sp)
-	REG_L x5, PT_T0(sp)
-	REG_L x6, PT_T1(sp)
-	REG_L x7, PT_T2(sp)
-	REG_L x8, PT_S0(sp)
-	REG_L x9, PT_S1(sp)
-	REG_L x10, PT_A0(sp)
-	REG_L x11, PT_A1(sp)
-	REG_L x12, PT_A2(sp)
-	REG_L x13, PT_A3(sp)
-	REG_L x14, PT_A4(sp)
-	REG_L x15, PT_A5(sp)
-	REG_L x16, PT_A6(sp)
-	REG_L x17, PT_A7(sp)
-	REG_L x18, PT_S2(sp)
-	REG_L x19, PT_S3(sp)
-	REG_L x20, PT_S4(sp)
-	REG_L x21, PT_S5(sp)
-	REG_L x22, PT_S6(sp)
-	REG_L x23, PT_S7(sp)
-	REG_L x24, PT_S8(sp)
-	REG_L x25, PT_S9(sp)
-	REG_L x26, PT_S10(sp)
-	REG_L x27, PT_S11(sp)
-	REG_L x28, PT_T3(sp)
-	REG_L x29, PT_T4(sp)
-	REG_L x30, PT_T5(sp)
-	REG_L x31, PT_T6(sp)
-
-	REG_L x2, PT_SP(sp)
-	.endm
-
-#if !IS_ENABLED(CONFIG_PREEMPTION)
-.set resume_kernel, restore_all
-#endif
-
-ENTRY(handle_exception)
-	SAVE_ALL
 
 	/*
 	 * Set the scratch register to 0, so that if a recursive exception
@@ -291,7 +214,63 @@ resume_userspace:
 	csrw CSR_SCRATCH, tp
 
 restore_all:
-	RESTORE_ALL
+	REG_L a0, PT_STATUS(sp)
+	/*
+	 * The current load reservation is effectively part of the processor's
+	 * state, in the sense that load reservations cannot be shared between
+	 * different hart contexts. We can't actually save and restore a load
+	 * reservation, so instead here we clear any existing reservation --
+	 * it's always legal for implementations to clear load reservations at
+	 * any point (as long as the forward progress guarantee is kept, but
+	 * we'll ignore that here).
+	 *
+	 * Dangling load reservations can be the result of taking a trap in the
+	 * middle of an LR/SC sequence, but can also be the result of a taken
+	 * forward branch around an SC -- which is how we implement CAS. As a
+	 * result we need to clear reservations between the last CAS and the
+	 * jump back to the new context. While it is unlikely the store
+	 * completes, implementations are allowed to expand reservations to be
+	 * arbitrarily large.
+	 */
+	REG_L a2, PT_EPC(sp)
+	REG_SC x0, a2, PT_EPC(sp)
+
+	csrw CSR_STATUS, a0
+	csrw CSR_EPC, a2
+
+	REG_L x1, PT_RA(sp)
+	REG_L x3, PT_GP(sp)
+	REG_L x4, PT_TP(sp)
+	REG_L x5, PT_T0(sp)
+	REG_L x6, PT_T1(sp)
+	REG_L x7, PT_T2(sp)
+	REG_L x8, PT_S0(sp)
+	REG_L x9, PT_S1(sp)
+	REG_L x10, PT_A0(sp)
+	REG_L x11, PT_A1(sp)
+	REG_L x12, PT_A2(sp)
+	REG_L x13, PT_A3(sp)
+	REG_L x14, PT_A4(sp)
+	REG_L x15, PT_A5(sp)
+	REG_L x16, PT_A6(sp)
+	REG_L x17, PT_A7(sp)
+	REG_L x18, PT_S2(sp)
+	REG_L x19, PT_S3(sp)
+	REG_L x20, PT_S4(sp)
+	REG_L x21, PT_S5(sp)
+	REG_L x22, PT_S6(sp)
+	REG_L x23, PT_S7(sp)
+	REG_L x24, PT_S8(sp)
+	REG_L x25, PT_S9(sp)
+	REG_L x26, PT_S10(sp)
+	REG_L x27, PT_S11(sp)
+	REG_L x28, PT_T3(sp)
+	REG_L x29, PT_T4(sp)
+	REG_L x30, PT_T5(sp)
+	REG_L x31, PT_T6(sp)
+
+	REG_L x2, PT_SP(sp)
+
 #ifdef CONFIG_RISCV_M_MODE
 	mret
 #else
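
The load-reservation comment above refers to the LR/SC compare-and-swap pattern, in which a failed comparison takes a forward branch around the SC and can leave the hart holding a dangling reservation. Below is a minimal, hypothetical sketch of that pattern in plain RISC-V assembly (illustrative only, not code from this patch, and using bare lr.w/sc.w rather than the kernel's width-agnostic REG_* macros):

# Hypothetical cas: a0 = address, a1 = expected, a2 = new value;
# returns the value observed by lr.w in a0.
cas:
	lr.w	t0, (a0)	# load-reserve *a0; the hart now holds a reservation
	bne	t0, a1, 1f	# value mismatch: branch forward AROUND the sc
	sc.w	t1, a2, (a0)	# conditionally store the new value
	bnez	t1, cas		# sc failed: retry the whole LR/SC sequence
1:
	mv	a0, t0		# hand back what lr.w observed
	ret			# on the bne path, the reservation is still outstanding

When the bne is taken, no SC ever consumes the reservation, which is why restore_all issues a REG_SC to x0 against PT_EPC(sp): a store-conditional invalidates the hart's reservation whether or not it succeeds, and writing the result to x0 discards it. The other change worth noting, in the first hunk, is that when CONFIG_PREEMPTION is off, ".set resume_kernel, restore_all" makes resume_kernel an assembler alias for restore_all, so the kernel-return path falls straight through to the register restore.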